repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/rng_impl.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#include <raft/random/rng_device.cuh>
#include <raft/random/rng_state.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/detail/cub_wrappers.cuh>
#include <raft/util/scatter.cuh>
namespace raft {
namespace random {
namespace detail {
/**
* This macro will invoke function `func` with the correct instantiation of
* device state as the first parameter, and passes all subsequent macro
* arguments through to the function.
* Note that you can call this macro with incomplete template specializations
* as well as triple chevron kernel calls, see the following code example
* @code
* template <int C1, int C2, typename GenType>
* RAFT_KERNEL my_kernel(DeviceState<GenType> state, int arg1) { ... }
*
* template <int C1, typename GenType, int C2 = 2>
* void foo(DeviceState<GenType> state, int arg1) {
* my_kernel<C1, C2><<<1, 1>>>(state, arg1);
* }
*
* RAFT_CALL_RNG_FUNC(rng_state, foo<1>, 5);
* RAFT_CALL_RNG_FUNC(rng_state, (my_kernel<1, 2><<<1, 1>>>), 5);
* @endcode
*/
// NOTE: `rng_state` is expanded more than once below, so callers must pass a
// side-effect-free lvalue. RAFT_DEPAREN strips the optional parentheses that
// wrap triple-chevron kernel invocations (see the usage example above).
#define RAFT_CALL_RNG_FUNC(rng_state, func, ...)                                    \
  switch ((rng_state).type) {                                                       \
    case raft::random::GeneratorType::GenPhilox: {                                  \
      raft::random::DeviceState<raft::random::PhiloxGenerator> r_phil{(rng_state)}; \
      RAFT_DEPAREN(func)(r_phil, ##__VA_ARGS__);                                    \
      break;                                                                        \
    }                                                                               \
    case raft::random::GeneratorType::GenPC: {                                      \
      raft::random::DeviceState<raft::random::PCGenerator> r_pc{(rng_state)};       \
      RAFT_DEPAREN(func)(r_pc, ##__VA_ARGS__);                                      \
      break;                                                                        \
    }                                                                               \
    default: RAFT_FAIL("Unexpected generator type '%d'", int((rng_state).type));    \
  }
/**
 * @brief Launch the generic RNG kernel and advance the host-side RNG state.
 *
 * @tparam ITEMS_PER_CALL how many output items each generator call produces
 * @param dev_state device-side copy of the RNG state
 * @param rng_state host-side state, advanced so the next launch yields fresh draws
 * @param stream CUDA stream for the launch
 * @param args forwarded to rngKernel (output pointer, length, dist params, ...)
 */
template <int ITEMS_PER_CALL, typename GenType, typename... ArgsT>
void call_rng_kernel(DeviceState<GenType> const& dev_state,
                     RngState& rng_state,
                     cudaStream_t stream,
                     ArgsT... args)
{
  constexpr int threads_per_block = 256;
  const int blocks                = 4 * getMultiProcessorCount();
  rngKernel<ITEMS_PER_CALL><<<blocks, threads_per_block, 0, stream>>>(dev_state, args...);
  // keep host state in sync with how far the device generators progressed
  rng_state.advance(uint64_t(blocks) * threads_per_block, 16);
}
/**
 * @brief Fill `ptr[0..len)` with floating-point values uniform in [start, end).
 */
template <typename OutType, typename LenType = int>
void uniform(
  RngState& rng_state, OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
{
  static_assert(std::is_floating_point<OutType>::value,
                "Type for 'uniform' can only be floating point!");
  UniformDistParams<OutType> params{start, end};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with integers uniform in [start, end).
 *
 * @param rng_state RNG state (advanced by this call)
 * @param ptr output array, length `len`
 * @param start inclusive lower bound
 * @param end exclusive upper bound; must satisfy end > start
 */
template <typename OutType, typename LenType = int>
void uniformInt(
  RngState& rng_state, OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
{
  static_assert(std::is_integral<OutType>::value, "Type for 'uniformInt' can only be integer!");
  ASSERT(end > start, "'end' must be greater than 'start'");
  // Compile-time dispatch on output width so only the matching
  // UniformIntDistParams specialization is instantiated; the previous runtime
  // `if` compiled both branches for every OutType.
  if constexpr (sizeof(OutType) == 4) {
    UniformIntDistParams<OutType, uint32_t> params;
    params.start = start;
    params.end   = end;
    params.diff  = uint32_t(params.end - params.start);
    RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
  } else {
    UniformIntDistParams<OutType, uint64_t> params;
    params.start = start;
    params.end   = end;
    params.diff  = uint64_t(params.end - params.start);
    RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
  }
}
/**
 * @brief Fill `ptr[0..len)` with normal(mu, sigma) floating-point samples.
 */
template <typename OutType, typename LenType = int>
void normal(
  RngState& rng_state, OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
{
  static_assert(std::is_floating_point<OutType>::value,
                "Type for 'normal' can only be floating point!");
  // Box-Muller yields two outputs per draw, hence ITEMS_PER_CALL = 2.
  NormalDistParams<OutType> params{mu, sigma};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<2>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with integer-truncated normal(mu, sigma) samples.
 */
template <typename IntType, typename LenType = int>
void normalInt(
  RngState& rng_state, IntType* ptr, LenType len, IntType mu, IntType sigma, cudaStream_t stream)
{
  static_assert(std::is_integral<IntType>::value, "Type for 'normalInt' can only be integer!");
  // Device side draws in double precision and casts each result to IntType.
  NormalIntDistParams<IntType> params{mu, sigma};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<2>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill an n_rows x n_cols table with normal samples where each column j
 * has mean mu_vec[j] and sigma sigma_vec[j] (or the scalar `sigma` when
 * sigma_vec is nullptr).
 */
template <typename OutType, typename LenType = int>
void normalTable(RngState& rng_state,
                 OutType* ptr,
                 LenType n_rows,
                 LenType n_cols,
                 const OutType* mu_vec,
                 const OutType* sigma_vec,
                 OutType sigma,
                 cudaStream_t stream)
{
  NormalTableDistParams<OutType, LenType> params{n_rows, n_cols, mu_vec, sigma, sigma_vec};
  const LenType total = n_rows * n_cols;
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<2>, rng_state, stream, ptr, total, params);
}
/**
 * @brief Set every element of `ptr[0..len)` to the constant `val`.
 */
template <typename OutType, typename LenType = int>
void fill(RngState& rng_state, OutType* ptr, LenType len, OutType val, cudaStream_t stream)
{
  InvariantDistParams<OutType> params{val};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with Bernoulli draws: each output is true with
 * probability `prob`.
 */
template <typename Type, typename OutType = bool, typename LenType = int>
void bernoulli(RngState& rng_state, OutType* ptr, LenType len, Type prob, cudaStream_t stream)
{
  BernoulliDistParams<Type> params{prob};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with -scale (probability `prob`) or +scale.
 */
template <typename OutType, typename LenType = int>
void scaled_bernoulli(
  RngState& rng_state, OutType* ptr, LenType len, OutType prob, OutType scale, cudaStream_t stream)
{
  static_assert(std::is_floating_point<OutType>::value,
                "Type for 'scaled_bernoulli' can only be floating point!");
  ScaledBernoulliDistParams<OutType> params{prob, scale};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with Gumbel(mu, beta) samples.
 */
template <typename OutType, typename LenType = int>
void gumbel(
  RngState& rng_state, OutType* ptr, LenType len, OutType mu, OutType beta, cudaStream_t stream)
{
  GumbelDistParams<OutType> params{mu, beta};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with log-normal(mu, sigma) samples.
 */
template <typename OutType, typename LenType = int>
void lognormal(
  RngState& rng_state, OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
{
  // Box-Muller based: two outputs per draw, hence ITEMS_PER_CALL = 2.
  LogNormalDistParams<OutType> params{mu, sigma};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<2>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with logistic(mu, scale) samples.
 */
template <typename OutType, typename LenType = int>
void logistic(
  RngState& rng_state, OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
{
  LogisticDistParams<OutType> params{mu, scale};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with exponential samples of rate `lambda`.
 */
template <typename OutType, typename LenType = int>
void exponential(
  RngState& rng_state, OutType* ptr, LenType len, OutType lambda, cudaStream_t stream)
{
  ExponentialDistParams<OutType> params{lambda};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with Rayleigh(sigma) samples.
 */
template <typename OutType, typename LenType = int>
void rayleigh(RngState& rng_state, OutType* ptr, LenType len, OutType sigma, cudaStream_t stream)
{
  RayleighDistParams<OutType> params{sigma};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Fill `ptr[0..len)` with Laplace(mu, scale) samples.
 */
template <typename OutType, typename LenType = int>
void laplace(
  RngState& rng_state, OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
{
  LaplaceDistParams<OutType> params{mu, scale};
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, ptr, len, params);
}
/**
 * @brief Launch the with-replacement sampling kernel (one thread per sampled
 * element) and advance the host-side RNG state.
 *
 * @param weights_csum inclusive cumulative sum of the category weights
 * @param sampledLen number of samples to draw
 * @param len number of categories
 */
template <typename GenType, typename OutType, typename WeightType, typename IdxType>
void call_sample_with_replacement_kernel(DeviceState<GenType> const& dev_state,
                                         RngState& rng_state,
                                         cudaStream_t stream,
                                         OutType* out,
                                         const WeightType* weights_csum,
                                         IdxType sampledLen,
                                         IdxType len)
{
  const IdxType threads = 256;
  const IdxType blocks  = raft::ceildiv(sampledLen, threads);
  sample_with_replacement_kernel<<<blocks, threads, 0, stream>>>(
    dev_state, out, weights_csum, sampledLen, len);
  rng_state.advance(uint64_t(blocks) * threads, 1);
}
/**
 * @brief Sample `sampledLen` indices in [0, len) with replacement, each index i
 * drawn with probability proportional to weights[i].
 *
 * @param rng_state RNG state (advanced by this call)
 * @param ptr output indices, length `sampledLen`
 * @param weights unnormalized, non-negative weights, length `len`
 * @param stream CUDA stream all work is ordered on
 */
template <typename OutType, typename WeightType, typename IndexType = OutType>
std::enable_if_t<std::is_integral_v<OutType>> discrete(RngState& rng_state,
                                                       OutType* ptr,
                                                       const WeightType* weights,
                                                       IndexType sampledLen,
                                                       IndexType len,
                                                       cudaStream_t stream)
{
  // Compute the cumulative sums of the weights.
  size_t temp_storage_bytes = 0;
  rmm::device_uvector<WeightType> weights_csum(len, stream);
  // First call only queries the required temporary storage size.
  // Fix: cub calls return cudaError_t and were previously unchecked.
  RAFT_CUDA_TRY(cub::DeviceScan::InclusiveSum(
    nullptr, temp_storage_bytes, weights, weights_csum.data(), len, stream));
  rmm::device_uvector<uint8_t> temp_storage(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceScan::InclusiveSum(
    temp_storage.data(), temp_storage_bytes, weights, weights_csum.data(), len, stream));

  // Sample indices with replacement against the cumulative distribution.
  RAFT_CALL_RNG_FUNC(rng_state,
                     call_sample_with_replacement_kernel,
                     rng_state,
                     stream,
                     ptr,
                     weights_csum.data(),
                     sampledLen,
                     len);
}
/**
 * @brief Sample `sampledLen` items from `in` (length `len`) without replacement,
 * writing the chosen values to `out` and, when `outIdx` is non-null, their
 * source indices to `outIdx`.
 *
 * Implementation: draw one random key per input item (modified by its weight,
 * see SamplingParams), sort all items by key, and keep the first `sampledLen`.
 */
template <typename DataT, typename WeightsT, typename IdxT = int>
void sampleWithoutReplacement(RngState& rng_state,
                              DataT* out,
                              IdxT* outIdx,
                              const DataT* in,
                              const WeightsT* wts,
                              IdxT sampledLen,
                              IdxT len,
                              cudaStream_t stream)
{
  ASSERT(sampledLen <= len, "sampleWithoutReplacement: 'sampledLen' cant be more than 'len'.");

  // Scratch: per-item random keys, their sorted copy, and the index permutation.
  rmm::device_uvector<WeightsT> expWts(len, stream);
  rmm::device_uvector<WeightsT> sortedWts(len, stream);
  rmm::device_uvector<IdxT> inIdx(len, stream);
  rmm::device_uvector<IdxT> outIdxBuff(len, stream);
  auto* inIdxPtr = inIdx.data();
  // generate modified weights
  // (the device-side SamplingParams handler presumably also fills inIdxPtr with
  //  item indices 0..len-1 -- confirm in rng_device.cuh)
  SamplingParams<WeightsT, IdxT> params;
  params.inIdxPtr = inIdxPtr;
  params.wts      = wts;
  RAFT_CALL_RNG_FUNC(rng_state, call_rng_kernel<1>, rng_state, stream, expWts.data(), len, params);
  ///@todo: use a more efficient partitioning scheme instead of full sort
  // sort the array and pick the top sampledLen items
  IdxT* outIdxPtr = outIdxBuff.data();
  rmm::device_uvector<char> workspace(0, stream);
  sortPairs(workspace, expWts.data(), sortedWts.data(), inIdxPtr, outIdxPtr, (int)len, stream);
  if (outIdx != nullptr) {
    // caller also wants the indices of the selected items
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      outIdx, outIdxPtr, sizeof(IdxT) * sampledLen, cudaMemcpyDeviceToDevice, stream));
  }
  // Copy the selected values: out gets in[] elements addressed by outIdxPtr.
  scatter<DataT, IdxT>(out, in, outIdxPtr, sampledLen, stream);
}
/**
 * @brief Deterministically derive parameters (a, b) of an affine map
 * i -> (a*i + b) mod n from the RNG state, with `a` coprime to `n` so the map
 * is a bijection on [0, n).
 */
template <typename IdxT>
void affine_transform_params(RngState const& rng_state, IdxT n, IdxT& a, IdxT& b)
{
  // Host-side Mersenne Twister seeded from the RNG state for reproducibility.
  std::mt19937_64 mt_rng(rng_state.seed + rng_state.base_subsequence);
  // always keep 'a' to be coprime to 'n': scan forward (wrapping) from a random
  // starting point until gcd(a, n) == 1.
  a = mt_rng() % n;
  while (gcd(a, n) != 1) {
    if (++a >= n) a = 0;
  }
  // the bias term 'b' can be any number in the range of [0, n)
  b = mt_rng() % n;
}
}; // end namespace detail
}; // end namespace random
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/multi_variable_gaussian.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "curand_wrappers.hpp"
#include <cmath>
#include <memory>
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/detail/cusolver_wrappers.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/random_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <type_traits>
// mvg.cuh takes in matrices that are column major (as in fortran)
// Fix: parenthesize the macro arguments so expressions such as
// IDX2C(i, j + 1, ld) expand with the intended precedence.
#define IDX2C(i, j, ld) ((j) * (ld) + (i))
namespace raft::random {
namespace detail {
// Selects which strictly-triangular half of a matrix fill_uplo() overwrites.
enum Filler : unsigned char {
  LOWER,  // = 0
  UPPER   // = 1
};  // used in memseting upper/lower matrix
/**
 * @brief Reset values within the epsilon absolute range to zero
 * @tparam T the data type
 * @param eig the array
 * @param epsilon the range
 * @param size length of the array
 * @param stream cuda stream
 */
template <typename T>
void epsilonToZero(T* eig, T epsilon, int size, cudaStream_t stream)
{
  // Element-wise, in place: flush any |v| < epsilon to exactly zero.
  auto flush_small = [epsilon] __device__(T v) {
    return (v < epsilon && v > -epsilon) ? T(0.0) : v;
  };
  raft::linalg::unaryOp(eig, eig, size, flush_small, stream);
}
/**
 * @brief Broadcast addition of vector onto a matrix
 * @tparam T the data type
 * @param out the output matrix
 * @param in_m the input matrix
 * @param in_v the input vector
 * @param scalar scalar multiplier
 * @param rows number of rows in the input matrix
 * @param cols number of cols in the input matrix
 * @param stream cuda stream
 */
template <typename T>
void matVecAdd(
  T* out, const T* in_m, const T* in_v, T scalar, int rows, int cols, cudaStream_t stream)
{
  // out = in_m + scalar * in_v, broadcast per matrixVectorOp's flag conventions.
  auto add_scaled = [scalar] __device__(T mat, T vec) { return mat + scalar * vec; };
  raft::linalg::matrixVectorOp(out, in_m, in_v, cols, rows, true, true, add_scaled, stream);
}
// helper kernels
// Scales element m_i of the column-major `matrix` by sqrt(W[Wi]) where Wi is
// the column index (eigenvalue per eigenvector column). If an eigenvalue is
// negative, its index is reported through check[0] instead.
template <typename T>
RAFT_KERNEL combined_dot_product(int rows, int cols, const T* W, T* matrix, int* check)
{
  int m_i = threadIdx.x + blockDim.x * blockIdx.x;
  int Wi  = m_i / cols;
  if (m_i < cols * rows) {
    if (W[Wi] >= 0.0)
      // raft::sqrt keeps the math in T; pow(x, 0.5) promoted to double for T=float
      matrix[m_i] = raft::sqrt(W[Wi]) * (matrix[m_i]);
    else
      check[0] = Wi;  // reports Wi'th eigen values is negative.
  }
}
template <typename T>  // if uplo = 0, lower part of dim x dim matrix set to
                       // value
RAFT_KERNEL fill_uplo(int dim, Filler uplo, T value, T* A)
{
  // 2D launch: one thread per matrix element (j = column, i = row).
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  int i = threadIdx.y + blockDim.y * blockIdx.y;
  if (i < dim && j < dim) {
    // making off-diagonals == value; the diagonal is never touched
    if (i < j) {
      // strictly-upper element; 1 == UPPER
      if (uplo == 1) A[IDX2C(i, j, dim)] = value;
    } else if (i > j) {
      // strictly-lower element; 0 == LOWER
      if (uplo == 0) A[IDX2C(i, j, dim)] = value;
    }
  }
}
// Generates samples from a multi-variable Gaussian N(x, P) by factoring the
// covariance matrix P (Cholesky or eigendecomposition), drawing standard
// normals with curand, and correlating them with a GEMM.
template <typename T>
class multi_variable_gaussian_impl {
 public:
  // How the covariance matrix is factored:
  //   chol_decomp - Cholesky via cusolver potrf (P must be positive definite)
  //   jacobi      - eigendecomposition via cusolver syevj
  //   qr          - eigendecomposition via cusolver syevd
  enum Decomposer : unsigned char { chol_decomp, jacobi, qr };

 private:
  // adjustable stuff
  const int dim;               // dimensionality of the distribution
  const int nPoints = 1;       // default; the actual count is passed to give_gaussian()
  const double tol = 1.e-7;    // Jacobi eigensolver tolerance
  const T epsilon = 1.e-12;    // eigenvalues within +/- epsilon are flushed to zero
  const int max_sweeps = 100;  // Jacobi eigensolver sweep limit
  cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
  const Decomposer method;

  // not so much
  // Workspace pointers: get_workspace_size() stores byte offsets here, and
  // set_workspace() rebases them onto the caller's allocation.
  T *P = 0, *X = 0, *x = 0, *workspace_decomp = 0, *eig = 0;
  int *info, Lwork, info_h;  // device status flag, workspace length, host copy of status
  syevjInfo_t syevj_params = NULL;
  curandGenerator_t gen;
  raft::resources const& handle;
  cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;  // eigenvectors are always needed
  bool deinitilized = false;  // guards double-release in deinit()

 public:  // functions
  multi_variable_gaussian_impl() = delete;

  // Seeds the curand generator and queries cusolver for the scratch size
  // (Lwork) required by the chosen decomposition.
  multi_variable_gaussian_impl(raft::resources const& handle, const int dim, Decomposer method)
    : handle(handle), dim(dim), method(method)
  {
    auto cusolverHandle = resource::get_cusolver_dn_handle(handle);

    CURAND_CHECK(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
    CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(gen, 28));  // SEED
    if (method == chol_decomp) {
      RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnpotrf_bufferSize(
        cusolverHandle, uplo, dim, P, dim, &Lwork));
    } else if (method == jacobi) {  // jacobi init
      RAFT_CUSOLVER_TRY(cusolverDnCreateSyevjInfo(&syevj_params));
      RAFT_CUSOLVER_TRY(cusolverDnXsyevjSetTolerance(syevj_params, tol));
      RAFT_CUSOLVER_TRY(cusolverDnXsyevjSetMaxSweeps(syevj_params, max_sweeps));
      RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnsyevj_bufferSize(
        cusolverHandle, jobz, uplo, dim, P, dim, eig, &Lwork, syevj_params));
    } else {  // method == qr
      RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnsyevd_bufferSize(
        cusolverHandle, jobz, uplo, dim, P, dim, eig, &Lwork));
    }
  }

  // Returns the total workspace size in bytes. Side effect: the member
  // pointers temporarily hold byte OFFSETS (not valid addresses) until
  // set_workspace() rebases them.
  std::size_t get_workspace_size()
  {
    // malloc workspace_decomp
    std::size_t granularity = 256, offset = 0;
    workspace_decomp = (T*)offset;
    offset += raft::alignTo(sizeof(T) * Lwork, granularity);
    eig = (T*)offset;
    offset += raft::alignTo(sizeof(T) * dim, granularity);
    info = (int*)offset;
    offset += raft::alignTo(sizeof(int), granularity);
    return offset;
  }

  // Rebases the offsets recorded by get_workspace_size() onto `workarea`.
  // Must be called exactly once, after get_workspace_size().
  void set_workspace(T* workarea)
  {
    workspace_decomp = (T*)((std::size_t)workspace_decomp + (std::size_t)workarea);
    eig              = (T*)((std::size_t)eig + (std::size_t)workarea);
    info             = (int*)((std::size_t)info + (std::size_t)workarea);
  }

  // Draws nPoints samples. P (dim x dim, column-major) holds the covariance on
  // input and is overwritten by its factorization; X (dim x nPoints,
  // column-major) receives the samples; x (optional, length dim) is the mean.
  void give_gaussian(const int nPoints, T* P, T* X, const T* x = 0)
  {
    auto cusolverHandle = resource::get_cusolver_dn_handle(handle);
    auto cublasHandle   = resource::get_cublas_handle(handle);
    auto cudaStream     = resource::get_cuda_stream(handle);
    if (method == chol_decomp) {
      // lower part will contains chol_decomp
      RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnpotrf(
        cusolverHandle, uplo, dim, P, dim, workspace_decomp, Lwork, info, cudaStream));
    } else if (method == jacobi) {
      RAFT_CUSOLVER_TRY(
        raft::linalg::detail::cusolverDnsyevj(cusolverHandle,
                                              jobz,
                                              uplo,
                                              dim,
                                              P,
                                              dim,
                                              eig,
                                              workspace_decomp,
                                              Lwork,
                                              info,
                                              syevj_params,
                                              cudaStream));  // vectors stored as cols. & col major
    } else {  // qr
      RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnsyevd(
        cusolverHandle, jobz, uplo, dim, P, dim, eig, workspace_decomp, Lwork, info, cudaStream));
    }
    // verify the factorization succeeded before consuming its output
    raft::update_host(&info_h, info, 1, cudaStream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(cudaStream));
    ASSERT(info_h == 0, "mvg: error in syevj/syevd/potrf, info=%d | expected=0", info_h);
    T mean = 0.0, stddv = 1.0;
    // generate nxN gaussian nums in X
    // (the "+ ... % 2" rounds the count up to even -- presumably a
    //  curandGenerateNormal requirement; confirm against curand docs)
    CURAND_CHECK(
      detail::curandGenerateNormal(gen, X, (nPoints * dim) + (nPoints * dim) % 2, mean, stddv));
    T alfa = 1.0, beta = 0.0;
    if (method == chol_decomp) {
      // upper part (0) being filled with 0.0
      dim3 block(32, 32);
      dim3 grid(raft::ceildiv(dim, (int)block.x), raft::ceildiv(dim, (int)block.y));
      fill_uplo<T><<<grid, block, 0, cudaStream>>>(dim, UPPER, (T)0.0, P);
      RAFT_CUDA_TRY(cudaPeekAtLastError());

      // P is lower triangular chol decomp mtrx
      // X <- P * X correlates the standard-normal draws.
      // NOTE(review): B and C alias (both X); cuBLAS gemm is not documented for
      // in-place use -- confirm this is intentional.
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublasHandle,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_N,
                                                       dim,
                                                       nPoints,
                                                       dim,
                                                       &alfa,
                                                       P,
                                                       dim,
                                                       X,
                                                       dim,
                                                       &beta,
                                                       X,
                                                       dim,
                                                       cudaStream));
    } else {
      // eigendecomposition path: scale eigenvector columns by sqrt(eigenvalues)
      epsilonToZero(eig, epsilon, dim, cudaStream);
      dim3 block(64);
      dim3 grid(raft::ceildiv(dim, (int)block.x));
      RAFT_CUDA_TRY(cudaMemsetAsync(info, 0, sizeof(int), cudaStream));
      grid.x = raft::ceildiv(dim * dim, (int)block.x);
      combined_dot_product<T><<<grid, block, 0, cudaStream>>>(dim, dim, eig, P, info);
      RAFT_CUDA_TRY(cudaPeekAtLastError());

      // checking if any eigen vals were negative
      raft::update_host(&info_h, info, 1, cudaStream);
      RAFT_CUDA_TRY(cudaStreamSynchronize(cudaStream));
      ASSERT(info_h == 0, "mvg: Cov matrix has %dth Eigenval negative", info_h);

      // Got Q = eigvect*eigvals.sqrt in P, Q*X in X below
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublasHandle,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_N,
                                                       dim,
                                                       nPoints,
                                                       dim,
                                                       &alfa,
                                                       P,
                                                       dim,
                                                       X,
                                                       dim,
                                                       &beta,
                                                       X,
                                                       dim,
                                                       cudaStream));
    }
    // working to make mean not 0
    // since we are working with column-major, nPoints and dim are swapped
    if (x != NULL) matVecAdd(X, X, x, T(1.0), nPoints, dim, cudaStream);
  }

  // Releases curand/cusolver resources; idempotent via `deinitilized`.
  void deinit()
  {
    if (deinitilized) return;
    CURAND_CHECK(curandDestroyGenerator(gen));
    // NOTE(review): syevj_params is only created for the jacobi method, yet it
    // is destroyed unconditionally -- confirm cusolver tolerates a NULL handle.
    RAFT_CUSOLVER_TRY(cusolverDnDestroySyevjInfo(syevj_params));
    deinitilized = true;
  }

  ~multi_variable_gaussian_impl() { deinit(); }
};  // end of multi_variable_gaussian_impl
// Forward declarations of the token API: build_... constructs a token (which
// owns the impl plus allocation context) and compute_... runs it. Definitions
// follow below.
template <typename ValueType>
class multi_variable_gaussian_setup_token;

template <typename ValueType>
multi_variable_gaussian_setup_token<ValueType> build_multi_variable_gaussian_token_impl(
  raft::resources const& handle,
  rmm::mr::device_memory_resource& mem_resource,
  const int dim,
  const multi_variable_gaussian_decomposition_method method);

template <typename ValueType>
void compute_multi_variable_gaussian_impl(
  multi_variable_gaussian_setup_token<ValueType>& token,
  std::optional<raft::device_vector_view<const ValueType, int>> x,
  raft::device_matrix_view<ValueType, int, raft::col_major> P,
  raft::device_matrix_view<ValueType, int, raft::col_major> X);
// Opaque setup object for the mdspan-based multi-variable Gaussian API. Only
// the two friend functions below may construct or drive it, which keeps the
// constructor private so the representation can change without breaking users.
template <typename ValueType>
class multi_variable_gaussian_setup_token {
  template <typename T>
  friend multi_variable_gaussian_setup_token<T> build_multi_variable_gaussian_token_impl(
    raft::resources const& handle,
    rmm::mr::device_memory_resource& mem_resource,
    const int dim,
    const multi_variable_gaussian_decomposition_method method);

  template <typename T>
  friend void compute_multi_variable_gaussian_impl(
    multi_variable_gaussian_setup_token<T>& token,
    std::optional<raft::device_vector_view<const T, int>> x,
    raft::device_matrix_view<T, int, raft::col_major> P,
    raft::device_matrix_view<T, int, raft::col_major> X);

 private:
  // Translates the public decomposition enum into the legacy impl enum.
  typename multi_variable_gaussian_impl<ValueType>::Decomposer new_enum_to_old_enum(
    multi_variable_gaussian_decomposition_method method)
  {
    if (method == multi_variable_gaussian_decomposition_method::CHOLESKY) {
      return multi_variable_gaussian_impl<ValueType>::chol_decomp;
    } else if (method == multi_variable_gaussian_decomposition_method::JACOBI) {
      return multi_variable_gaussian_impl<ValueType>::jacobi;
    } else {
      return multi_variable_gaussian_impl<ValueType>::qr;
    }
  }

  // Constructor, only for use by friend functions.
  // Hiding this will let us change the implementation in the future.
  multi_variable_gaussian_setup_token(raft::resources const& handle,
                                      rmm::mr::device_memory_resource& mem_resource,
                                      const int dim,
                                      const multi_variable_gaussian_decomposition_method method)
    : impl_(std::make_unique<multi_variable_gaussian_impl<ValueType>>(
        handle, dim, new_enum_to_old_enum(method))),
      handle_(handle),
      mem_resource_(mem_resource),
      dim_(dim)
  {
  }

  /**
   * @brief Compute the multivariable Gaussian.
   *
   * @param[in] x vector of dim elements
   * @param[inout] P On input, dim x dim matrix; overwritten on output
   * @param[out] X dim x nPoints matrix
   */
  void compute(std::optional<raft::device_vector_view<const ValueType, int>> x,
               raft::device_matrix_view<ValueType, int, raft::col_major> P,
               raft::device_matrix_view<ValueType, int, raft::col_major> X)
  {
    // Validate all extents against the token's dimension before touching impl.
    const int input_dim = P.extent(0);
    RAFT_EXPECTS(input_dim == dim(),
                 "multi_variable_gaussian: "
                 "P.extent(0) = %d does not match the extent %d "
                 "with which the token was created",
                 input_dim,
                 dim());
    RAFT_EXPECTS(P.extent(0) == P.extent(1),
                 "multi_variable_gaussian: "
                 "P must be square, but P.extent(0) = %d != P.extent(1) = %d",
                 P.extent(0),
                 P.extent(1));
    RAFT_EXPECTS(P.extent(0) == X.extent(0),
                 "multi_variable_gaussian: "
                 "P.extent(0) = %d != X.extent(0) = %d",
                 P.extent(0),
                 X.extent(0));
    const bool x_has_value = x.has_value();
    const int x_extent_0   = x_has_value ? (*x).extent(0) : 0;
    RAFT_EXPECTS(not x_has_value || P.extent(0) == x_extent_0,
                 "multi_variable_gaussian: "
                 "P.extent(0) = %d != x.extent(0) = %d",
                 P.extent(0),
                 x_extent_0);
    const int nPoints      = X.extent(1);
    const ValueType* x_ptr = x_has_value ? (*x).data_handle() : nullptr;
    // The workspace must outlive give_gaussian(), which uses raw pointers into it.
    auto workspace = allocate_workspace();
    impl_->set_workspace(workspace.data());
    impl_->give_gaussian(nPoints, P.data_handle(), X.data_handle(), x_ptr);
  }

 private:
  std::unique_ptr<multi_variable_gaussian_impl<ValueType>> impl_;
  raft::resources const& handle_;
  rmm::mr::device_memory_resource& mem_resource_;
  int dim_ = 0;

  // Allocates device scratch for the impl from the caller-provided resource.
  auto allocate_workspace() const
  {
    const auto num_elements = impl_->get_workspace_size();
    // NOTE(review): get_workspace_size() computes a size in BYTES, but it is
    // used here as an element count of ValueType, over-allocating by
    // sizeof(ValueType)x. Harmless but wasteful -- confirm intent.
    return rmm::device_uvector<ValueType>{
      num_elements, resource::get_cuda_stream(handle_), &mem_resource_};
  }

  int dim() const { return dim_; }
};
/**
 * @brief Create a setup token for a dim-dimensional multi-variable Gaussian
 * using the given decomposition method and device memory resource.
 */
template <typename ValueType>
multi_variable_gaussian_setup_token<ValueType> build_multi_variable_gaussian_token_impl(
  raft::resources const& handle,
  rmm::mr::device_memory_resource& mem_resource,
  const int dim,
  const multi_variable_gaussian_decomposition_method method)
{
  return multi_variable_gaussian_setup_token<ValueType>(handle, mem_resource, dim, method);
}
/**
 * @brief Run the multi-variable Gaussian computation held by `token`.
 * Thin forwarding wrapper over token.compute() (which validates all extents).
 */
template <typename ValueType>
void compute_multi_variable_gaussian_impl(
  multi_variable_gaussian_setup_token<ValueType>& token,
  std::optional<raft::device_vector_view<const ValueType, int>> x,
  raft::device_matrix_view<ValueType, int, raft::col_major> P,
  raft::device_matrix_view<ValueType, int, raft::col_major> X)
{
  token.compute(x, P, X);
}
/**
 * @brief One-shot convenience overload: build a token sized to P, then run the
 * multi-variable Gaussian computation with it.
 */
template <typename ValueType>
void compute_multi_variable_gaussian_impl(
  raft::resources const& handle,
  rmm::mr::device_memory_resource& mem_resource,
  std::optional<raft::device_vector_view<const ValueType, int>> x,
  raft::device_matrix_view<ValueType, int, raft::col_major> P,
  raft::device_matrix_view<ValueType, int, raft::col_major> X,
  const multi_variable_gaussian_decomposition_method method)
{
  const int dim = P.extent(0);
  auto setup_token =
    build_multi_variable_gaussian_token_impl<ValueType>(handle, mem_resource, dim, method);
  compute_multi_variable_gaussian_impl(setup_token, x, P, X);
}
// Legacy public-facing wrapper: forwards every call to the detail impl.
// NOTE(review): both this destructor and the base destructor call deinit();
// the `deinitilized` flag in the base makes the second call a no-op.
template <typename T>
class multi_variable_gaussian : public detail::multi_variable_gaussian_impl<T> {
 public:
  // using Decomposer = typename detail::multi_variable_gaussian_impl<T>::Decomposer;
  // using detail::multi_variable_gaussian_impl<T>::Decomposer::chol_decomp;
  // using detail::multi_variable_gaussian_impl<T>::Decomposer::jacobi;
  // using detail::multi_variable_gaussian_impl<T>::Decomposer::qr;

  multi_variable_gaussian() = delete;
  multi_variable_gaussian(raft::resources const& handle,
                          const int dim,
                          typename detail::multi_variable_gaussian_impl<T>::Decomposer method)
    : detail::multi_variable_gaussian_impl<T>{handle, dim, method}
  {
  }

  // Required workspace size in bytes (see impl::get_workspace_size).
  std::size_t get_workspace_size()
  {
    return detail::multi_variable_gaussian_impl<T>::get_workspace_size();
  }

  // Hand the impl a workspace allocation of at least get_workspace_size() bytes.
  void set_workspace(T* workarea)
  {
    detail::multi_variable_gaussian_impl<T>::set_workspace(workarea);
  }

  // Draw nPoints samples; see impl::give_gaussian for parameter semantics.
  void give_gaussian(const int nPoints, T* P, T* X, const T* x = 0)
  {
    detail::multi_variable_gaussian_impl<T>::give_gaussian(nPoints, P, X, x);
  }

  void deinit() { detail::multi_variable_gaussian_impl<T>::deinit(); }

  ~multi_variable_gaussian() { deinit(); }
};  // end of multi_variable_gaussian
}; // end of namespace detail
}; // end of namespace raft::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/rng_device.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/random/rng_state.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/integer_utils.hpp>
#include <curand_kernel.h>
#include <rmm/device_uvector.hpp>
#include <random>
namespace raft {
namespace random {
namespace detail {
/**
* The device state used to communicate RNG state from host to device.
* As of now, it is just a templated version of `RngState`.
*/
template <typename GenType>
struct DeviceState {
  using gen_t                    = GenType;
  static constexpr auto GEN_TYPE = gen_t::GEN_TYPE;

  // Copies only the trivially-copyable fields of RngState so this struct can
  // be passed by value to kernels.
  explicit DeviceState(const RngState& rng_state)
    : seed(rng_state.seed), base_subsequence(rng_state.base_subsequence)
  {
  }

  uint64_t seed;
  uint64_t base_subsequence;
};
// Parameters for fill(): every output element receives const_val.
template <typename OutType>
struct InvariantDistParams {
  OutType const_val;
};
// Parameters for uniform(): samples lie in [start, end).
template <typename OutType>
struct UniformDistParams {
  OutType start;
  OutType end;
};
// Parameters for uniformInt(): samples lie in [start, end).
template <typename OutType, typename DiffType>
struct UniformIntDistParams {
  OutType start;
  OutType end;
  DiffType diff;  // end - start, precomputed in the unsigned DiffType width
};
// Parameters for normal(): mean mu, standard deviation sigma.
template <typename OutType>
struct NormalDistParams {
  OutType mu;
  OutType sigma;
};
// Parameters for normalInt(): integer mean/sigma, widened to double on device.
template <typename IntType>
struct NormalIntDistParams {
  IntType mu;
  IntType sigma;
};
// Parameters for normalTable(): per-column means, and per-column sigmas when
// sigma_vec is non-null (otherwise the scalar sigma applies to every column).
template <typename OutType, typename LenType>
struct NormalTableDistParams {
  LenType n_rows;
  LenType n_cols;
  const OutType* mu_vec;     // length n_cols
  OutType sigma;             // fallback used when sigma_vec == nullptr
  const OutType* sigma_vec;  // length n_cols, may be nullptr
};
// Parameters for bernoulli(): success probability.
template <typename OutType>
struct BernoulliDistParams {
  OutType prob;
};
// Parameters for scaled_bernoulli(): outputs are -scale (probability `prob`)
// or +scale.
template <typename OutType>
struct ScaledBernoulliDistParams {
  OutType prob;
  OutType scale;
};
// Parameters for gumbel(): location mu and scale beta.
template <typename OutType>
struct GumbelDistParams {
  OutType mu;
  OutType beta;
};
// Parameters for lognormal(): mu/sigma of the underlying normal.
template <typename OutType>
struct LogNormalDistParams {
  OutType mu;
  OutType sigma;
};
// Parameters for logistic(): location mu and scale.
template <typename OutType>
struct LogisticDistParams {
  OutType mu;
  OutType scale;
};
// Parameters for exponential(): rate lambda.
template <typename OutType>
struct ExponentialDistParams {
  OutType lambda;
};
// Parameters for rayleigh(): scale sigma.
template <typename OutType>
struct RayleighDistParams {
  OutType sigma;
};
// Parameters for laplace(): location mu and scale.
template <typename OutType>
struct LaplaceDistParams {
  OutType mu;
  OutType scale;
};
// Not really a distro, useful for sample without replacement function
template <typename WeightsT, typename IdxT>
struct SamplingParams {
  IdxT* inIdxPtr;       // receives the index of each generated item
  const WeightsT* wts;  // optional per-item weights (see sampleWithoutReplacement)
};
// Box-Muller: maps two uniform draws (val1 in (0,1], val2 in [0,1)) to two
// independent normal draws, in place, with separate sigma/mu per output.
template <typename Type>
HDI void box_muller_transform(Type& val1, Type& val2, Type sigma1, Type mu1, Type sigma2, Type mu2)
{
  constexpr Type two_pi  = Type(2.0) * Type(3.141592653589793);
  constexpr Type neg_two = -Type(2.0);
  Type radius            = raft::sqrt(neg_two * raft::log(val1));
  Type angle             = two_pi * val2;
  Type sin_a, cos_a;
  raft::sincos(angle, &sin_a, &cos_a);
  val1 = radius * cos_a * sigma1 + mu1;
  val2 = radius * sin_a * sigma2 + mu2;
}
// Convenience overload: both outputs share the same sigma and mu.
template <typename Type>
HDI void box_muller_transform(Type& val1, Type& val2, Type sigma1, Type mu1)
{
  box_muller_transform<Type>(val1, val2, sigma1, mu1, sigma1, mu1);
}
// "Distribution" that ignores the generator and writes the constant value.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     InvariantDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  *val = params.const_val;
}
// Uniform in [start, end): draw u in the generator's canonical range and
// affinely map it onto the requested interval.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     UniformDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType u;
  gen.next(u);
  *val = (u * (params.end - params.start)) + params.start;
}
// Unbiased bounded integer sampling for 32-bit outputs (multiply-shift with
// rejection, Lemire's method).
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     UniformIntDistParams<OutType, uint32_t> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  uint32_t x = 0;
  uint32_t s = params.diff;  // range size: end - start
  gen.next(x);
  // map x, uniform in [0, 2^32), onto [0, s) via the high half of x * s
  uint64_t m = uint64_t(x) * s;
  uint32_t l = uint32_t(m);  // low half, used for the rejection test
  if (l < s) {
    // reject draws landing in the short first partial interval to avoid bias
    uint32_t t = (-s) % s;  // (2^32 - s) mod s
    while (l < t) {
      gen.next(x);
      m = uint64_t(x) * s;
      l = uint32_t(m);
    }
  }
  // the high 32 bits of the product are uniform in [0, s)
  *val = OutType(m >> 32) + params.start;
}
// Unbiased bounded integer sampling for 64-bit outputs: same rejection scheme
// as the 32-bit variant, with the 128-bit product emulated via wmul_64bit.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     UniformIntDistParams<OutType, uint64_t> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  using raft::wmul_64bit;
  uint64_t x = 0;
  gen.next(x);
  uint64_t s = params.diff;  // range size: end - start
  uint64_t m_lo, m_hi;
  // m = x * s;
  wmul_64bit(m_hi, m_lo, x, s);
  if (m_lo < s) {
    // reject draws landing in the short first partial interval to avoid bias
    uint64_t t = (-s) % s;  // (2^64 - s) mod s
    while (m_lo < t) {
      gen.next(x);
      wmul_64bit(m_hi, m_lo, x, s);
    }
  }
  // the high 64 bits of the product are uniform in [0, s)
  *val = OutType(m_hi) + params.start;
}
// Normal(mu, sigma): consumes two uniforms and writes two normals (val[0..1]).
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     NormalDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType u1, u2;
  do {
    gen.next(u1);  // reject 0: log(0) inside Box-Muller is undefined
  } while (u1 == OutType(0.0));
  gen.next(u2);
  box_muller_transform<OutType>(u1, u2, params.sigma, params.mu);
  val[0] = u1;
  val[1] = u2;
}
// Integer-typed normal draws: samples in double precision via Box-Muller and
// truncates to IntType. Writes two outputs per call, like the real-valued case.
template <typename GenType, typename IntType, typename LenType>
HDI void custom_next(GenType& gen,
                     IntType* val,
                     NormalIntDistParams<IntType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  double res1, res2;
  do {
    gen.next(res1);
  } while (res1 == double(0.0));
  gen.next(res2);
  double mu    = static_cast<double>(params.mu);
  double sigma = static_cast<double>(params.sigma);
  box_muller_transform<double>(res1, res2, sigma, mu);
  *val       = static_cast<IntType>(res1);
  *(val + 1) = static_cast<IntType>(res2);
}
// Table-driven normal draws: each output column has its own mean (mu_vec) and
// optionally its own sigma (sigma_vec, or the scalar fallback). The two values
// produced by one Box-Muller call land at flat indices idx and idx + stride,
// hence the two separate column lookups.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     NormalTableDistParams<OutType, LenType> params,
                     LenType idx,
                     LenType stride)
{
  OutType res1, res2;
  do {
    gen.next(res1);
  } while (res1 == OutType(0.0));
  gen.next(res2);
  LenType col1  = idx % params.n_cols;
  LenType col2  = (idx + stride) % params.n_cols;
  OutType mean1 = params.mu_vec[col1];
  OutType mean2 = params.mu_vec[col2];
  OutType sig1  = params.sigma_vec == nullptr ? params.sigma : params.sigma_vec[col1];
  OutType sig2  = params.sigma_vec == nullptr ? params.sigma : params.sigma_vec[col2];
  box_muller_transform<OutType>(res1, res2, sig1, mean1, sig2, mean2);
  *val       = res1;
  *(val + 1) = res2;
}
// Bernoulli draw: emits (res < prob) converted to OutType, i.e. 1 with
// probability params.prob and 0 otherwise.
template <typename GenType, typename OutType, typename Type, typename LenType>
HDI void custom_next(
  GenType& gen, OutType* val, BernoulliDistParams<Type> params, LenType idx = 0, LenType stride = 0)
{
  Type res = 0;
  gen.next(res);
  *val = res < params.prob;
}
// Scaled Bernoulli: emits -scale with probability params.prob and +scale
// otherwise.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     ScaledBernoulliDistParams<OutType> params,
                     LenType idx,
                     LenType stride)
{
  OutType res = 0;
  gen.next(res);
  *val = res < params.prob ? -params.scale : params.scale;
}
// Gumbel(mu, beta) via inverse CDF: mu - beta * log(-log(U)). The rejection
// loop excludes U == 0 so the inner log stays finite.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(
  GenType& gen, OutType* val, GumbelDistParams<OutType> params, LenType idx = 0, LenType stride = 0)
{
  OutType res = 0;
  do {
    gen.next(res);
  } while (res == OutType(0.0));
  *val = params.mu - params.beta * raft::log(-raft::log(res));
}
// Log-normal draws: exponentiates a pair of Box-Muller normal samples.
// Produces two outputs per call (val[0] and val[1]).
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     LogNormalDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType res1 = 0, res2 = 0;
  do {
    gen.next(res1);
  } while (res1 == OutType(0.0));
  gen.next(res2);
  box_muller_transform<OutType>(res1, res2, params.sigma, params.mu);
  *val       = raft::exp(res1);
  *(val + 1) = raft::exp(res2);
}
// Logistic(mu, scale) via inverse CDF: mu - scale * log(1/U - 1), with U == 0
// rejected to keep the expression finite.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     LogisticDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType res;
  do {
    gen.next(res);
  } while (res == OutType(0.0));
  constexpr OutType one = (OutType)1.0;
  *val                  = params.mu - params.scale * raft::log(one / res - one);
}
// Exponential(lambda) via inverse CDF: -log(1 - U) / lambda. U is in [0, 1)
// for the generators used here, so (1 - U) > 0 and the log is finite.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     ExponentialDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType res;
  gen.next(res);
  constexpr OutType one = (OutType)1.0;
  *val                  = -raft::log(one - res) / params.lambda;
}
// Rayleigh(sigma) via inverse CDF: sigma * sqrt(-2 * log(1 - U)).
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     RayleighDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType res;
  gen.next(res);
  constexpr OutType one = (OutType)1.0;
  constexpr OutType two = (OutType)2.0;
  *val                  = raft::sqrt(-two * raft::log(one - res)) * params.sigma;
}
// Laplace(mu, scale) via the two-sided inverse CDF, branching on which half of
// the distribution the uniform draw falls into. U == 0 is rejected so
// log(2 * U) stays finite.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(GenType& gen,
                     OutType* val,
                     LaplaceDistParams<OutType> params,
                     LenType idx    = 0,
                     LenType stride = 0)
{
  OutType res, out;
  do {
    gen.next(res);
  } while (res == OutType(0.0));
  constexpr OutType one     = (OutType)1.0;
  constexpr OutType two     = (OutType)2.0;
  constexpr OutType oneHalf = (OutType)0.5;
  // The <= comparison here means, number of samples going in `if` branch are more by 1 than `else`
  // branch. However it does not matter as for 0.5 both branches evaluate to same result.
  if (res <= oneHalf) {
    out = params.mu + params.scale * raft::log(two * res);
  } else {
    out = params.mu - params.scale * raft::log(two * (one - res));
  }
  *val = out;
}
// Weighted-sampling key generation: writes the identity permutation into
// params.inIdxPtr and emits an exponential deviate, divided by the element's
// weight when weights are provided. NOTE(review): presumably these keys are
// later sorted so the smallest keys select the sample (exponential-sort trick
// for weighted sampling) — confirm against the caller.
template <typename GenType, typename OutType, typename LenType>
HDI void custom_next(
  GenType& gen, OutType* val, SamplingParams<OutType, LenType> params, LenType idx, LenType stride)
{
  OutType res;
  gen.next(res);
  params.inIdxPtr[idx]  = idx;
  constexpr OutType one = (OutType)1.0;
  auto exp              = -raft::log(one - res);
  if (params.wts != nullptr) {
    *val = exp / params.wts[idx];
  } else {
    *val = exp;
  }
}
/** Philox-based random number generator, wrapping curand's Philox4_32_10 state. */
// Courtesy: Jakub Szuppe
struct PhiloxGenerator {
  static constexpr auto GEN_TYPE = GeneratorType::GenPhilox;
  /**
   * @brief ctor. Initializes the state for RNG
   * @param seed random seed (can be same across all threads)
   * @param subsequence as found in curand docs
   * @param offset as found in curand docs
   */
  DI PhiloxGenerator(uint64_t seed, uint64_t subsequence, uint64_t offset)
  {
    curand_init(seed, subsequence, offset, &philox_state);
  }
  // Initializes from a DeviceState: the caller-provided subsequence is added to
  // the state's base subsequence so each thread gets a distinct stream.
  DI PhiloxGenerator(const DeviceState<PhiloxGenerator>& rng_state, const uint64_t subsequence)
  {
    curand_init(rng_state.seed, rng_state.base_subsequence + subsequence, 0, &philox_state);
  }
  /**
   * @defgroup NextRand Generate the next random number
   * @{
   */
  DI uint32_t next_u32()
  {
    uint32_t ret = curand(&(this->philox_state));
    return ret;
  }
  // 64-bit value assembled from two 32-bit draws (low word drawn first).
  DI uint64_t next_u64()
  {
    uint64_t ret;
    uint32_t a, b;
    a   = next_u32();
    b   = next_u32();
    ret = uint64_t(a) | (uint64_t(b) << 32);
    return ret;
  }
  // Non-negative int32: the sign bit is masked off.
  DI int32_t next_i32()
  {
    int32_t ret;
    uint32_t val;
    val = next_u32();
    ret = int32_t(val & 0x7fffffff);
    return ret;
  }
  // Non-negative int64: the sign bit is masked off.
  DI int64_t next_i64()
  {
    int64_t ret;
    uint64_t val;
    val = next_u64();
    ret = int64_t(val & 0x7fffffffffffffff);
    return ret;
  }
  // Uniform float in [0, 1): the top 24 bits of a draw divided by 2^24.
  DI float next_float()
  {
    float ret;
    uint32_t val = next_u32() >> 8;
    ret          = static_cast<float>(val) / float(uint32_t(1) << 24);
    return ret;
  }
  // Uniform double in [0, 1): the top 53 bits of a draw divided by 2^53.
  DI double next_double()
  {
    double ret;
    uint64_t val = next_u64() >> 11;
    ret          = static_cast<double>(val) / double(uint64_t(1) << 53);
    return ret;
  }
  DI void next(float& ret)
  {
    // ret = curand_uniform(&(this->philox_state));
    ret = next_float();
  }
  DI void next(double& ret)
  {
    // ret = curand_uniform_double(&(this->philox_state));
    ret = next_double();
  }
  DI void next(uint32_t& ret) { ret = next_u32(); }
  DI void next(uint64_t& ret) { ret = next_u64(); }
  DI void next(int32_t& ret) { ret = next_i32(); }
  DI void next(int64_t& ret) { ret = next_i64(); }
  /** @} */
 private:
  /** the state for RNG */
  curandStatePhilox4_32_10_t philox_state;
};
/** PCG random number generator (PCG-XSH-RR variant, host- and device-callable). */
struct PCGenerator {
  static constexpr auto GEN_TYPE = GeneratorType::GenPC;
  /**
   * @brief ctor. Initializes the PCG
   * @param rng_state is the generator state used for initializing the generator
   * @param subsequence specifies the subsequence to be generated out of 2^64 possible subsequences
   * In a parallel setting, like threads of a CUDA kernel, each thread is required to generate a
   * unique set of random numbers. This can be achieved by initializing the generator with same
   * rng_state for all the threads and distinct values for subsequence.
   */
  HDI PCGenerator(const DeviceState<PCGenerator>& rng_state, const uint64_t subsequence)
  {
    _init_pcg(rng_state.seed, rng_state.base_subsequence + subsequence, subsequence);
  }
  /**
   * @brief ctor. This is lower level constructor for PCG
   * This code is derived from PCG basic code
   * @param seed A 64-bit seed for the generator
   * @param subsequence The id of subsequence that should be generated [0, 2^64-1]
   * @param offset Initial `offset` number of items are skipped from the subsequence
   */
  HDI PCGenerator(uint64_t seed, uint64_t subsequence, uint64_t offset)
  {
    _init_pcg(seed, subsequence, offset);
  }
  // Skips `offset` draws in O(log(offset)) by exponentiating the LCG transition
  // (state -> state * h + f) via repeated squaring.
  // Based on "Random Number Generation with Arbitrary Strides" F. B. Brown
  // Link https://mcnp.lanl.gov/pdf_files/anl-rn-arb-stride.pdf
  HDI void skipahead(uint64_t offset)
  {
    uint64_t G = 1;
    uint64_t h = 6364136223846793005ULL;
    uint64_t C = 0;
    uint64_t f = inc;
    while (offset) {
      if (offset & 1) {
        G = G * h;
        C = C * h + f;
      }
      f = f * (h + 1);
      h = h * h;
      offset >>= 1;
    }
    pcg_state = pcg_state * G + C;
  }
  /**
   * @defgroup NextRand Generate the next random number
   * @brief This code is derived from PCG basic code
   * @{
   */
  // One LCG step plus the XSH-RR output permutation (xorshift-high, random rotate).
  HDI uint32_t next_u32()
  {
    uint32_t ret;
    uint64_t oldstate   = pcg_state;
    pcg_state           = oldstate * 6364136223846793005ULL + inc;
    uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
    uint32_t rot        = oldstate >> 59u;
    ret                 = (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
    return ret;
  }
  // 64-bit value assembled from two 32-bit draws (low word drawn first).
  HDI uint64_t next_u64()
  {
    uint64_t ret;
    uint32_t a, b;
    a   = next_u32();
    b   = next_u32();
    ret = uint64_t(a) | (uint64_t(b) << 32);
    return ret;
  }
  // Non-negative int32: the sign bit is masked off.
  HDI int32_t next_i32()
  {
    int32_t ret;
    uint32_t val;
    val = next_u32();
    ret = int32_t(val & 0x7fffffff);
    return ret;
  }
  // Non-negative int64: the sign bit is masked off.
  HDI int64_t next_i64()
  {
    int64_t ret;
    uint64_t val;
    val = next_u64();
    ret = int64_t(val & 0x7fffffffffffffff);
    return ret;
  }
  // Uniform float in [0, 1): the top 24 bits of a draw divided by 2^24.
  HDI float next_float()
  {
    float ret;
    uint32_t val = next_u32() >> 8;
    ret          = static_cast<float>(val) / float(uint32_t(1) << 24);
    return ret;
  }
  // Uniform double in [0, 1): the top 53 bits of a draw divided by 2^53.
  HDI double next_double()
  {
    double ret;
    uint64_t val = next_u64() >> 11;
    // Fix: use an explicit 64-bit constant. The previous `1LU << 53` shifts an
    // `unsigned long`, which is only 32 bits wide on LLP64 platforms (e.g.
    // 64-bit Windows/MSVC), making the shift undefined behavior. This matches
    // the PhiloxGenerator implementation above.
    ret = static_cast<double>(val) / double(uint64_t(1) << 53);
    return ret;
  }
  HDI void next(uint32_t& ret) { ret = next_u32(); }
  HDI void next(uint64_t& ret) { ret = next_u64(); }
  HDI void next(int32_t& ret) { ret = next_i32(); }
  HDI void next(int64_t& ret) { ret = next_i64(); }
  HDI void next(float& ret) { ret = next_float(); }
  HDI void next(double& ret) { ret = next_double(); }
  /** @} */

 private:
  // Standard PCG stream initialization: start from zero state, advance once,
  // add the seed, advance again, then skip `offset` draws.
  HDI void _init_pcg(uint64_t seed, uint64_t subsequence, uint64_t offset)
  {
    pcg_state = uint64_t(0);
    // The increment must be odd; each subsequence selects a distinct stream.
    inc = (subsequence << 1u) | 1u;
    uint32_t discard;
    next(discard);
    pcg_state += seed;
    next(discard);
    skipahead(offset);
  }
  uint64_t pcg_state;
  uint64_t inc;
};
// Main generation kernel: grid-stride loop where each iteration asks
// custom_next for ITEMS_PER_CALL values (distributions based on Box-Muller
// produce two at a time). The i-th value lands at flat index idx + i * stride;
// the tail is guarded so out-of-range slots are never written.
template <int ITEMS_PER_CALL,
          typename OutType,
          typename LenType,
          typename GenType,
          typename ParamType>
RAFT_KERNEL rngKernel(DeviceState<GenType> rng_state, OutType* ptr, LenType len, ParamType params)
{
  LenType tid = threadIdx.x + static_cast<LenType>(blockIdx.x) * blockDim.x;
  // Each thread draws from its own subsequence, keyed by its global thread id.
  GenType gen(rng_state, (uint64_t)tid);
  const LenType stride = gridDim.x * blockDim.x;
  for (LenType idx = tid; idx < len; idx += stride * ITEMS_PER_CALL) {
    OutType val[ITEMS_PER_CALL];
    custom_next(gen, val, params, idx, stride);
#pragma unroll
    for (int i = 0; i < ITEMS_PER_CALL; i++) {
      if ((idx + i * stride) < len) ptr[idx + i * stride] = val[i];
    }
  }
  return;
}
// Samples `sampledLen` indices with replacement, with probability proportional
// to the given weights: one thread per sample draws a uniform value, scales it
// by the total weight, and inverts the cumulative distribution by binary search
// over the (inclusive) cumulative sum `weights_csum` of length `len`.
template <typename GenType, typename OutType, typename WeightType, typename IdxType>
RAFT_KERNEL sample_with_replacement_kernel(DeviceState<GenType> rng_state,
                                           OutType* out,
                                           const WeightType* weights_csum,
                                           IdxType sampledLen,
                                           IdxType len)
{
  // todo(lsugy): warp-collaborative binary search
  IdxType tid = threadIdx.x + static_cast<IdxType>(blockIdx.x) * blockDim.x;
  GenType gen(rng_state, static_cast<uint64_t>(tid));
  if (tid < sampledLen) {
    WeightType val_01;
    gen.next(val_01);
    // Scale the uniform [0,1) draw onto [0, total_weight).
    WeightType val_search = val_01 * weights_csum[len - 1];
    // Binary search of the first index for which the cumulative sum of weights is larger than the
    // generated value
    IdxType idx_start = 0;
    IdxType idx_end   = len;
    while (idx_end > idx_start) {
      IdxType idx_middle    = (idx_start + idx_end) / 2;
      WeightType val_middle = weights_csum[idx_middle];
      if (val_search <= val_middle) {
        idx_end = idx_middle;
      } else {
        idx_start = idx_middle + 1;
      }
    }
    // Clamp guards against floating-point round-off pushing the search past the end.
    out[tid] = static_cast<OutType>(min(idx_start, len - 1));
  }
}
/**
* This kernel is deprecated and should be removed in a future release
*/
// Deprecated counterpart of rngKernel: identical grid-stride generation loop,
// but the generator is constructed from raw (seed, subsequence, offset) values
// instead of a DeviceState.
template <typename OutType,
          typename LenType,
          typename GenType,
          int ITEMS_PER_CALL,
          typename ParamType>
RAFT_KERNEL fillKernel(
  uint64_t seed, uint64_t adv_subs, uint64_t offset, OutType* ptr, LenType len, ParamType params)
{
  LenType tid = threadIdx.x + static_cast<LenType>(blockIdx.x) * blockDim.x;
  // Each thread draws from its own subsequence, keyed by its global thread id.
  GenType gen(seed, adv_subs + (uint64_t)tid, offset);
  const LenType stride = gridDim.x * blockDim.x;
  for (LenType idx = tid; idx < len; idx += stride * ITEMS_PER_CALL) {
    OutType val[ITEMS_PER_CALL];
    custom_next(gen, val, params, idx, stride);
#pragma unroll
    for (int i = 0; i < ITEMS_PER_CALL; i++) {
      if ((idx + i * stride) < len) ptr[idx + i * stride] = val[i];
    }
  }
  return;
}
#undef POTENTIAL_DEPR
}; // end namespace detail
}; // end namespace random
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/permute.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cooperative_groups.h>
#include <memory>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/vectorized.cuh>
namespace raft::random {
namespace detail {
// Applies the affine permutation row -> (a * row + b) mod N. Thread `tid`
// writes output row `tid` from input row `(a * tid + b) % N`, so global
// stores are coalesced while the shuffled accesses are the loads. Optionally
// records the permutation into `perms`; `out`/`in` may be null to only emit
// the permutation.
template <typename Type, typename IntType, typename IdxType, int TPB, bool rowMajor>
RAFT_KERNEL permuteKernel(
  IntType* perms, Type* out, const Type* in, IdxType a, IdxType b, IdxType N, IdxType D)
{
  namespace cg        = cooperative_groups;
  const int WARP_SIZE = 32;
  int tid             = threadIdx.x + blockIdx.x * blockDim.x;

  // having shuffled input indices and coalesced output indices appears
  // to be preferable to the reverse, especially for column major
  IntType inIdx  = ((a * int64_t(tid)) + b) % N;
  IntType outIdx = tid;

  if (perms != nullptr && tid < N) { perms[outIdx] = inIdx; }

  if (out == nullptr || in == nullptr) { return; }

  if (rowMajor) {
    cg::thread_block_tile<WARP_SIZE> warp = cg::tiled_partition<WARP_SIZE>(cg::this_thread_block());

    // Stage each thread's row mapping in shared memory so the warp can copy
    // its 32 rows cooperatively, with lanes striding over the columns.
    __shared__ IntType inIdxShm[TPB];
    __shared__ IntType outIdxShm[TPB];
    inIdxShm[threadIdx.x]  = inIdx;
    outIdxShm[threadIdx.x] = outIdx;
    // A warp-level sync suffices: each warp only reads the shared-memory slots
    // written by its own 32 threads.
    warp.sync();

    int warpID = threadIdx.x / WARP_SIZE;
    int laneID = threadIdx.x % WARP_SIZE;
    for (int i = warpID * WARP_SIZE; i < warpID * WARP_SIZE + WARP_SIZE; ++i) {
      if (outIdxShm[i] < N) {
#pragma unroll
        for (int j = laneID; j < D; j += WARP_SIZE) {
          out[outIdxShm[i] * D + j] = in[inIdxShm[i] * D + j];
        }
      }
    }
  } else {
#pragma unroll
    for (int j = 0; j < D; ++j) {
      if (tid < N) { out[outIdx + j * N] = in[inIdx + j * N]; }
    }
  }
}
// This is wrapped in a type to allow for partial template specialization
template <typename Type, typename IntType, typename IdxType, int TPB, bool rowMajor, int VLen>
struct permute_impl_t {
static void permuteImpl(IntType* perms,
Type* out,
const Type* in,
IdxType N,
IdxType D,
int nblks,
IdxType a,
IdxType b,
cudaStream_t stream)
{
// determine vector type and set new pointers
typedef typename raft::IOType<Type, VLen>::Type VType;
VType* vout = reinterpret_cast<VType*>(out);
const VType* vin = reinterpret_cast<const VType*>(in);
// check if we can execute at this vector length
if (D % VLen == 0 && raft::is_aligned(vout, sizeof(VType)) &&
raft::is_aligned(vin, sizeof(VType))) {
permuteKernel<VType, IntType, IdxType, TPB, rowMajor>
<<<nblks, TPB, 0, stream>>>(perms, vout, vin, a, b, N, D / VLen);
RAFT_CUDA_TRY(cudaPeekAtLastError());
} else { // otherwise try the next lower vector length
permute_impl_t<Type, IntType, IdxType, TPB, rowMajor, VLen / 2>::permuteImpl(
perms, out, in, N, D, nblks, a, b, stream);
}
}
};
// at vector length 1 we just execute a scalar version to break the recursion
template <typename Type, typename IntType, typename IdxType, int TPB, bool rowMajor>
struct permute_impl_t<Type, IntType, IdxType, TPB, rowMajor, 1> {
static void permuteImpl(IntType* perms,
Type* out,
const Type* in,
IdxType N,
IdxType D,
int nblks,
IdxType a,
IdxType b,
cudaStream_t stream)
{
permuteKernel<Type, IntType, IdxType, TPB, rowMajor>
<<<nblks, TPB, 0, stream>>>(perms, out, in, a, b, N, D);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
};
// Host entry point: picks random affine parameters (a, b) for the permutation
// row -> (a * row + b) mod N and dispatches to the vectorized kernel launcher.
// NOTE(review): uses host rand(), so results depend on the process-global
// srand() state and are not reproducible per call.
template <typename Type, typename IntType = int, typename IdxType = int, int TPB = 256>
void permute(IntType* perms,
             Type* out,
             const Type* in,
             IntType D,
             IntType N,
             bool rowMajor,
             cudaStream_t stream)
{
  auto nblks = raft::ceildiv(N, (IntType)TPB);

  // always keep 'a' to be coprime to N so the affine map is a bijection on [0, N)
  IdxType a = rand() % N;
  while (raft::gcd(a, N) != 1)
    a = (a + 1) % N;
  IdxType b = rand() % N;

  if (rowMajor) {
    // start with the widest 16-byte vector width that Type allows
    permute_impl_t<Type,
                   IntType,
                   IdxType,
                   TPB,
                   true,
                   (16 / sizeof(Type) > 0) ? 16 / sizeof(Type) : 1>::permuteImpl(perms,
                                                                                 out,
                                                                                 in,
                                                                                 N,
                                                                                 D,
                                                                                 nblks,
                                                                                 a,
                                                                                 b,
                                                                                 stream);
  } else {
    permute_impl_t<Type, IntType, IdxType, TPB, false, 1>::permuteImpl(
      perms, out, in, N, D, nblks, a, b, stream);
  }
}
}; // end namespace detail
}; // end namespace raft::random | 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/make_blobs.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "permute.cuh"
#include <raft/core/handle.hpp>
#include <raft/linalg/map.cuh>
#include <raft/random/rng.cuh>
#include <raft/random/rng_device.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <vector>
namespace raft {
namespace random {
namespace detail {
// generate the labels first and shuffle them instead of shuffling the dataset
template <typename IdxT>
void generate_labels(IdxT* labels,
IdxT n_rows,
IdxT n_clusters,
bool shuffle,
raft::random::RngState& r,
cudaStream_t stream)
{
raft::handle_t handle(stream);
IdxT a, b;
raft::random::affine_transform_params(r, n_clusters, a, b);
auto op = [=] __device__(IdxT idx) {
if (shuffle) { idx = static_cast<IdxT>((a * int64_t(idx)) + b); }
idx %= n_clusters;
return idx;
};
auto labels_view = raft::make_device_vector_view<IdxT, IdxT>(labels, n_rows);
linalg::map_offset(handle, labels_view, op);
}
// Looks up the per-element mean and standard deviation for the blob sample at
// flat index `idx`: decodes (row, feature), reads the row's cluster label, and
// fetches the cluster's center value for that feature plus the cluster's sigma
// (or the scalar fallback). Out-of-range tail indices are clamped to safe
// defaults so extra threads read valid memory.
template <typename DataT, typename IdxT>
DI void get_mu_sigma(DataT& mu,
                     DataT& sigma,
                     IdxT idx,
                     const IdxT* labels,
                     bool row_major,
                     const DataT* centers,
                     const DataT* cluster_std,
                     DataT cluster_std_scalar,
                     IdxT n_rows,
                     IdxT n_cols,
                     IdxT n_clusters)
{
  // Decompose the flat element index into (row, feature) coordinates.
  IdxT cid, fid;
  if (row_major) {
    cid = idx / n_cols;
    fid = idx % n_cols;
  } else {
    cid = idx % n_rows;
    fid = idx / n_rows;
  }
  // Cluster label of this row (clamped for out-of-range tail indices).
  IdxT label;
  if (cid < n_rows) {
    label = labels[cid];
  } else {
    label = 0;
  }
  if (fid >= n_cols) { fid = 0; }
  // Flattened offset of the (cluster, feature) entry in `centers`.
  IdxT center_id;
  if (row_major) {
    center_id = label * n_cols + fid;
  } else {
    center_id = label + fid * n_clusters;
  }
  // Fix: `cluster_std` holds one value per cluster (len = n_clusters), so it
  // must be indexed by the cluster label. The previous code indexed it by the
  // row index `cid`, reading out of bounds whenever n_rows > n_clusters.
  sigma = cluster_std == nullptr ? cluster_std_scalar : cluster_std[label];
  mu    = centers[center_id];
}
// Generates the blob data: each loop iteration produces TWO normal samples via
// one Box-Muller call, written at flat indices idx and idx + stride, each with
// the mean/sigma of its own (row, feature) cell. The grid is sized so each
// thread covers two elements per pass; the rejection loop keeps val1 != 0 so
// the log inside box_muller_transform is finite.
template <typename DataT, typename IdxT, typename GenType>
RAFT_KERNEL generate_data_kernel(raft::random::DeviceState<GenType> rng_state,
                                 DataT* out,
                                 const IdxT* labels,
                                 IdxT n_rows,
                                 IdxT n_cols,
                                 IdxT n_clusters,
                                 bool row_major,
                                 const DataT* centers,
                                 const DataT* cluster_std,
                                 const DataT cluster_std_scalar)
{
  uint64_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  GenType gen(rng_state, tid);
  const IdxT stride = gridDim.x * blockDim.x;
  IdxT len          = n_rows * n_cols;
  for (IdxT idx = tid; idx < len; idx += stride) {
    DataT val1, val2;
    do {
      gen.next(val1);
    } while (val1 == DataT(0.0));
    gen.next(val2);
    DataT mu1, sigma1, mu2, sigma2;
    get_mu_sigma(mu1,
                 sigma1,
                 idx,
                 labels,
                 row_major,
                 centers,
                 cluster_std,
                 cluster_std_scalar,
                 n_rows,
                 n_cols,
                 n_clusters);
    get_mu_sigma(mu2,
                 sigma2,
                 idx + stride,
                 labels,
                 row_major,
                 centers,
                 cluster_std,
                 cluster_std_scalar,
                 n_rows,
                 n_cols,
                 n_clusters);
    raft::random::box_muller_transform<DataT>(val1, val2, sigma1, mu1, sigma2, mu2);
    // Guard both stores: the second element (idx + stride) may fall past the end.
    if (idx < len) out[idx] = val1;
    idx += stride;
    if (idx < len) out[idx] = val2;
  }
}
// Host launcher for generate_data_kernel: sizes the grid so that each thread
// writes two output values (one Box-Muller call yields a pair) and dispatches
// on the RNG type recorded in rng_state.
template <typename DataT, typename IdxT>
void generate_data(DataT* out,
                   const IdxT* labels,
                   IdxT n_rows,
                   IdxT n_cols,
                   IdxT n_clusters,
                   cudaStream_t stream,
                   bool row_major,
                   const DataT* centers,
                   const DataT* cluster_std,
                   const DataT cluster_std_scalar,
                   raft::random::RngState& rng_state)
{
  constexpr IdxT block_size = 128;
  int64_t items             = static_cast<int64_t>(n_rows) * n_cols;
  // Choose a grid size so that each thread can write two output values.
  int64_t nBlocks = ceildiv<int64_t>(items, 2 * block_size);
  // parentheses needed here for kernel, otherwise macro interprets the arguments
  // of triple chevron notation as macro arguments
  // Fix: launch with the `block_size` constant instead of a duplicated literal
  // `128`, so the grid sizing above and the launch configuration cannot drift apart.
  RAFT_CALL_RNG_FUNC(rng_state,
                     (generate_data_kernel<<<nBlocks, block_size, 0, stream>>>),
                     out,
                     labels,
                     n_rows,
                     n_cols,
                     n_clusters,
                     row_major,
                     centers,
                     cluster_std,
                     cluster_std_scalar);
}
/**
* @brief GPU-equivalent of sklearn.datasets.make_blobs
*
* @tparam DataT output data type
* @tparam IdxT indexing arithmetic type
*
* @param[out] out generated data [on device]
* [dim = n_rows x n_cols]
* @param[out] labels labels for the generated data [on device]
* [len = n_rows]
* @param[in] n_rows number of rows in the generated data
* @param[in] n_cols number of columns in the generated data
* @param[in] n_clusters number of clusters (or classes) to generate
* @param[in] stream cuda stream to schedule the work on
* @param[in] row_major whether input `centers` and output `out`
* buffers are to be stored in row or column
* major layout
* @param[in] centers centers of each of the cluster, pass a nullptr
* if you need this also to be generated randomly
* [on device] [dim = n_clusters x n_cols]
* @param[in] cluster_std standard deviation of each cluster center,
* pass a nullptr if this is to be read from the
* `cluster_std_scalar`. [on device]
* [len = n_clusters]
* @param[in] cluster_std_scalar if 'cluster_std' is nullptr, then use this as
* the std-dev across all dimensions.
* @param[in] shuffle shuffle the generated dataset and labels
* @param[in] center_box_min min value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] center_box_max max value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] seed seed for the RNG
* @param[in] type RNG type
*/
// Top-level make_blobs driver (see the doxygen block above for parameter docs):
// (1) draws random cluster centers if the caller did not supply any,
// (2) generates per-row cluster labels (optionally shuffled), then
// (3) fills the output with normal samples around each row's cluster center.
template <typename DataT, typename IdxT>
void make_blobs_caller(DataT* out,
                       IdxT* labels,
                       IdxT n_rows,
                       IdxT n_cols,
                       IdxT n_clusters,
                       cudaStream_t stream,
                       bool row_major,
                       const DataT* centers,
                       const DataT* cluster_std,
                       const DataT cluster_std_scalar,
                       bool shuffle,
                       DataT center_box_min,
                       DataT center_box_max,
                       uint64_t seed,
                       raft::random::GeneratorType type)
{
  raft::random::RngState r(seed, type);

  // use the right centers buffer for data generation
  rmm::device_uvector<DataT> rand_centers(0, stream);
  const DataT* _centers;
  if (centers == nullptr) {
    // No centers given: sample them uniformly from the requested bounding box.
    rand_centers.resize(n_clusters * n_cols, stream);
    detail::uniform(
      r, rand_centers.data(), n_clusters * n_cols, center_box_min, center_box_max, stream);
    _centers = rand_centers.data();
  } else {
    _centers = centers;
  }
  generate_labels(labels, n_rows, n_clusters, shuffle, r, stream);
  generate_data(out,
                labels,
                n_rows,
                n_cols,
                n_clusters,
                stream,
                row_major,
                _centers,
                cluster_std,
                cluster_std_scalar,
                r);
}
} // end namespace detail
} // end namespace random
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/device/sample.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/operators.hpp>
#include <raft/util/cuda_dev_essentials.cuh>
#include <raft/util/warp_primitives.cuh>
namespace raft::random::device {
/**
* @brief warp-level random sampling of an index.
* It selects an index with the given discrete probability
* distribution(represented by weights of each index)
* @param rng random number generator, must have next_u32() function
* @param weight weight of the rank/index.
* @param idx index to be used as rank
* @return only the thread0 will contain valid reduced result
*/
// Warp-level weighted random selection (see doxygen above): a butterfly-style
// reduction where, at each combine step, a lane keeps either its own candidate
// index or its partner's with probability proportional to their accumulated
// weights. After the loop, lane 0 holds the selected index in `idx` and the
// warp's total weight in `weight`.
template <typename T, typename rng_t, typename i_t = int>
DI T warp_random_sample(rng_t& rng, T& weight, i_t& idx)
{
  // Todo(#1491): benchmark whether a scan and then selecting within the ranges is more efficient.
  static_assert(std::is_integral<T>::value, "The type T must be an integral type.");
#pragma unroll
  for (i_t offset = raft::WarpSize / 2; offset > 0; offset /= 2) {
    // Partner lane's partial weight and candidate index.
    T tmp_weight = shfl(weight, laneId() + offset);
    i_t tmp_idx  = shfl(idx, laneId() + offset);
    T sum        = (tmp_weight + weight);
    weight       = sum;
    if (sum != 0) {
      // Adopt the partner's candidate with probability tmp_weight / sum.
      i_t rnd_number = (rng.next_u32() % sum);
      if (rnd_number < tmp_weight) { idx = tmp_idx; }
    }
  }
  // Fix: the function is declared to return T but previously had no return
  // statement, which is undefined behavior for a non-void function. Return the
  // reduced weight (valid on lane 0, matching the @return contract).
  return weight;
}
/**
* @brief 1-D block-level random sampling of an index.
* It selects an index with the given discrete probability
* distribution(represented by weights of each index)
*
* Let w_i be the weight stored on thread i. We calculate the cumulative distribution function
* F_i = sum_{k=0..i} weight_i.
* Sequentially, we could select one of the elements with with the desired probability using the
* following method. We can consider that each element has a subinterval assigned: [F_{i-1}, F_i).
* We generate a uniform random number in the [0, F_i) range, and check which subinterval it falls.
* We return idx corresponding to the selected subinterval.
* In parallel, we do a tree reduction and make a selection at every step when we combine two
* values.
* @param rng random number generator, must have next_u32() function
* @param shbuf shared memory region needed for storing intermediate results. It
* must alteast be of size: `(sizeof(T) + sizeof(i_t)) * WarpSize`
* @param weight weight of the rank/index.
* @param idx index to be used as rank
* @return only the thread0 will contain valid reduced result
*/
// Block-level weighted random selection (see doxygen above): each warp reduces
// its own candidates with warp_random_sample, the per-warp winners are staged
// in shared memory (`shbuf` must hold WarpSize weights followed by WarpSize
// indices), and the first warp reduces those winners. Only thread 0's return
// value is the valid selected index.
template <typename T, typename rng_t, typename i_t = int>
DI i_t block_random_sample(rng_t rng, T* shbuf, T weight = 1, i_t idx = threadIdx.x)
{
  T* values    = shbuf;
  i_t* indices = (i_t*)&shbuf[WarpSize];
  i_t wid      = threadIdx.x / WarpSize;
  // Number of warps in the block; fits in one warp since blockDim.x <= 1024.
  i_t nWarps   = (blockDim.x + WarpSize - 1) / WarpSize;
  warp_random_sample(rng, weight, idx);  // Each warp performs partial reduction
  i_t lane = laneId();
  if (lane == 0) {
    values[wid]  = weight;  // Write reduced value to shared memory
    indices[wid] = idx;     // Write reduced value to shared memory
  }
  __syncthreads();  // Wait for all partial reductions
  // read from shared memory only if that warp existed
  if (lane < nWarps) {
    weight = values[lane];
    idx    = indices[lane];
  } else {
    // Zero weight means these lanes can never win the final selection.
    weight = 0;
    idx    = -1;
  }
  __syncthreads();
  if (wid == 0) warp_random_sample(rng, weight, idx);
  return idx;
}
} // namespace raft::random::device | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/distance.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "distance-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "distance-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/distance_types.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
namespace distance {
/** enum to tell how to compute distance */
enum DistanceType : unsigned short {

  /** evaluate as dist_ij = sum(x_ik^2) + sum(y_jk^2) - 2*sum(x_ik * y_jk) */
  L2Expanded = 0,
  /** same as above, but inside the epilogue, perform square root operation */
  L2SqrtExpanded = 1,
  /** cosine distance */
  CosineExpanded = 2,
  /** L1 distance */
  L1 = 3,
  /** evaluate as dist_ij += (x_ik - y-jk)^2 */
  L2Unexpanded = 4,
  /** same as above, but inside the epilogue, perform square root operation */
  L2SqrtUnexpanded = 5,
  /** basic inner product **/
  InnerProduct = 6,
  /** Chebyshev (Linf) distance **/
  Linf = 7,
  /** Canberra distance **/
  Canberra = 8,
  /** Generalized Minkowski distance **/
  LpUnexpanded = 9,
  /** Correlation distance **/
  CorrelationExpanded = 10,
  /** Jaccard distance **/
  JaccardExpanded = 11,
  /** Hellinger distance **/
  HellingerExpanded = 12,
  /** Haversine distance **/
  Haversine = 13,
  /** Bray-Curtis distance **/
  BrayCurtis = 14,
  /** Jensen-Shannon distance**/
  JensenShannon = 15,
  /** Hamming distance **/
  HammingUnexpanded = 16,
  /** KLDivergence **/
  KLDivergence = 17,
  /** RusselRao **/
  RusselRaoExpanded = 18,
  /** Dice-Sorensen distance **/
  DiceExpanded = 19,
  /** Precomputed (special value) **/
  Precomputed = 100
};
/**
* Whether minimal distance corresponds to similar elements (using the given metric).
*/
inline bool is_min_close(DistanceType metric)
{
  // Similarity metrics have the opposite meaning: nearest neighbors are those
  // with larger similarity (see the same logic at
  // cpp/include/raft/sparse/spatial/detail/knn.cuh:362 {perform_k_selection}).
  // InnerProduct is the only such metric here, so minimal distance means
  // "close" for every other DistanceType.
  return metric != DistanceType::InnerProduct;
}
namespace kernels {
/** Supported kernel-matrix function families. */
enum KernelType { LINEAR, POLYNOMIAL, RBF, TANH };

/**
 * Parameters for kernel matrices.
 * The following kernels are implemented:
 * - LINEAR \f[ K(x_1,x_2) = <x_1,x_2>, \f] where \f$< , >\f$ is the dot product
 * - POLYNOMIAL \f[ K(x_1, x_2) = (\gamma <x_1,x_2> + \mathrm{coef0})^\mathrm{degree} \f]
 * - RBF \f[ K(x_1, x_2) = \exp(- \gamma |x_1-x_2|^2) \f]
 * - TANH \f[ K(x_1, x_2) = \tanh(\gamma <x_1,x_2> + \mathrm{coef0}) \f]
 */
struct KernelParams {
  // Kernel function parameters
  KernelType kernel;  //!< Type of the kernel function
  int degree;         //!< Degree of polynomial kernel (ignored by others)
  double gamma;       //!< multiplier in the kernel argument (see formulas above)
  double coef0;       //!< additive constant in poly and tanh kernels
};
}  // end namespace kernels
}; // namespace distance
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/distance-inl.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/detail/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <rmm/device_uvector.hpp>
#include <type_traits>
#include <raft/core/device_mdspan.hpp>
namespace raft {
namespace distance {
/**
* @defgroup pairwise_distance pointer-based pairwise distance prims
* @{
*/
/**
 * @brief Evaluate pairwise distances with the user epilogue lambda allowed
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam FinalLambda user-defined epilogue lambda
 * @tparam IdxT Index type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points
 * @param y second set of points
 * @param dist output distance matrix
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @param workspace temporary workspace needed for computations
 * @param worksize number of bytes of the workspace
 * @param fin_op the final gemm epilogue lambda
 * @param isRowMajor whether the matrices are row-major or col-major
 * @param metric_arg metric argument (used for Minkowski distance)
 *
 * @note fin_op: This is a device lambda which is supposed to operate upon the
 * input which is AccT and returns the output in OutT. Its signature is
 * as follows: <pre>OutT fin_op(AccT in, int g_idx);</pre>. If one needs
 * any other parameters, feel free to pass them via closure.
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename FinalLambda,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              void* workspace,
              size_t worksize,
              FinalLambda fin_op,
              bool isRowMajor  = true,
              DataT metric_arg = 2.0f)
{
  // Thin public wrapper: all the work happens in the detail implementation.
  detail::distance<DistT, DataT, AccT, OutT, FinalLambda, IdxT>(
    handle, x, y, dist, m, n, k, workspace, worksize, fin_op, isRowMajor, metric_arg);
}
/**
 * @brief Evaluate pairwise distances for the simple use case
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam IdxT Index type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points
 * @param y second set of points
 * @param dist output distance matrix
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @param workspace temporary workspace needed for computations
 * @param worksize number of bytes of the workspace (see getWorkspaceSize)
 * @param isRowMajor whether the matrices are row-major or col-major
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              void* workspace,
              size_t worksize,
              bool isRowMajor  = true,
              DataT metric_arg = 2.0f)
{
  // Thin public wrapper: forwards directly to the detail implementation
  // (no user-supplied epilogue; the default is applied inside detail).
  detail::distance<DistT, DataT, AccT, OutT, IdxT>(
    handle, x, y, dist, m, n, k, workspace, worksize, isRowMajor, metric_arg);
}
/**
 * @brief Return the exact workspace size to compute the distance
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam IdxT Index type
 * @param x first set of points
 * @param y second set of points
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @return number of bytes needed in workspace
 *
 * @note If the specified DistT doesn't need the workspace at all, it
 * returns 0.
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
size_t getWorkspaceSize(const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)
{
  // Size depends only on the metric and problem extents; delegated to detail.
  return detail::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(x, y, m, n, k);
}
/**
 * @brief Return the exact workspace size to compute the distance
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam IdxT Index type
 * @tparam layout memory layout of the input matrices
 * @param x first set of points (size m*k)
 * @param y second set of points (size n*k)
 * @return number of bytes needed in workspace
 *
 * @note If the specified DistT doesn't need the workspace at all, it
 * returns 0.
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int,
          typename layout>
size_t getWorkspaceSize(raft::device_matrix_view<DataT, IdxT, layout> const& x,
                        raft::device_matrix_view<DataT, IdxT, layout> const& y)
{
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");
  // Unpack the mdspan extents and forward to the pointer-based overload.
  return getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(
    x.data_handle(), y.data_handle(), x.extent(0), y.extent(0), x.extent(1));
}
/**
 * @brief Evaluate pairwise distances for the simple use case.
 *
 * Allocates the required temporary workspace internally and forwards to the
 * detail implementation.
 *
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam IdxT Index type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points
 * @param y second set of points
 * @param dist output distance matrix
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @param isRowMajor whether the matrices are row-major or col-major
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              bool isRowMajor  = true,
              DataT metric_arg = 2.0f)
{
  // Query the metric-specific scratch requirement, then allocate exactly that
  // many bytes on the handle's stream before invoking the computation.
  size_t scratch_bytes = getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(x, y, m, n, k);
  rmm::device_uvector<char> scratch(scratch_bytes, raft::resource::get_cuda_stream(handle));
  detail::distance<DistT, DataT, AccT, OutT, IdxT>(
    handle, x, y, dist, m, n, k, scratch.data(), scratch_bytes, isRowMajor, metric_arg);
}
/**
 * @brief Convenience wrapper around 'distance' prim to convert runtime metric
 * into compile time for the purpose of dispatch
 * @tparam Type input/accumulation/output data-type
 * @tparam IdxT indexing type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points
 * @param y second set of points
 * @param dist output distance matrix
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @param workspace temporary workspace buffer which can get resized as per the
 * needed workspace size
 * @param metric distance metric
 * @param isRowMajor whether the matrices are row-major or col-major
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       const Type* x,
                       const Type* y,
                       Type* dist,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       rmm::device_uvector<char>& workspace,
                       raft::distance::DistanceType metric,
                       bool isRowMajor = true,
                       Type metric_arg = 2.0f)
{
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // Runs the compile-time specialized path for the metric encoded in the
  // integral_constant `tag`, resizing `workspace` to whatever that metric
  // requires first.
  auto run = [&](auto tag) {
    constexpr DistanceType ct_metric = decltype(tag)::value;
    size_t scratch_bytes = getWorkspaceSize<ct_metric, Type, Type, Type, IdxT>(x, y, m, n, k);
    workspace.resize(scratch_bytes, stream);
    detail::distance<ct_metric, Type, Type, Type, IdxT>(
      handle, x, y, dist, m, n, k, workspace.data(), scratch_bytes, isRowMajor, metric_arg);
  };

  // Map the runtime enum onto a compile-time tag, one case per supported metric.
  switch (metric) {
    case DistanceType::Canberra:
      run(std::integral_constant<DistanceType, DistanceType::Canberra>{});
      break;
    case DistanceType::CorrelationExpanded:
      run(std::integral_constant<DistanceType, DistanceType::CorrelationExpanded>{});
      break;
    case DistanceType::CosineExpanded:
      run(std::integral_constant<DistanceType, DistanceType::CosineExpanded>{});
      break;
    case DistanceType::HammingUnexpanded:
      run(std::integral_constant<DistanceType, DistanceType::HammingUnexpanded>{});
      break;
    case DistanceType::HellingerExpanded:
      run(std::integral_constant<DistanceType, DistanceType::HellingerExpanded>{});
      break;
    case DistanceType::InnerProduct:
      run(std::integral_constant<DistanceType, DistanceType::InnerProduct>{});
      break;
    case DistanceType::JensenShannon:
      run(std::integral_constant<DistanceType, DistanceType::JensenShannon>{});
      break;
    case DistanceType::KLDivergence:
      run(std::integral_constant<DistanceType, DistanceType::KLDivergence>{});
      break;
    case DistanceType::L1:
      run(std::integral_constant<DistanceType, DistanceType::L1>{});
      break;
    case DistanceType::L2Expanded:
      run(std::integral_constant<DistanceType, DistanceType::L2Expanded>{});
      break;
    case DistanceType::L2SqrtExpanded:
      run(std::integral_constant<DistanceType, DistanceType::L2SqrtExpanded>{});
      break;
    case DistanceType::L2SqrtUnexpanded:
      run(std::integral_constant<DistanceType, DistanceType::L2SqrtUnexpanded>{});
      break;
    case DistanceType::L2Unexpanded:
      run(std::integral_constant<DistanceType, DistanceType::L2Unexpanded>{});
      break;
    case DistanceType::Linf:
      run(std::integral_constant<DistanceType, DistanceType::Linf>{});
      break;
    case DistanceType::LpUnexpanded:
      run(std::integral_constant<DistanceType, DistanceType::LpUnexpanded>{});
      break;
    case DistanceType::RusselRaoExpanded:
      run(std::integral_constant<DistanceType, DistanceType::RusselRaoExpanded>{});
      break;
    default: THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  }
}
/**
 * @brief Convenience wrapper around 'distance' prim to convert runtime metric
 * into compile time for the purpose of dispatch
 * @tparam Type input/accumulation/output data-type
 * @tparam IdxT indexing type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points
 * @param y second set of points
 * @param dist output distance matrix
 * @param m number of points in x
 * @param n number of points in y
 * @param k dimensionality
 * @param metric distance metric
 * @param isRowMajor whether the matrices are row-major or col-major
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       const Type* x,
                       const Type* y,
                       Type* dist,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor = true,
                       Type metric_arg = 2.0f)
{
  // Hand an empty scratch buffer to the workspace-taking overload, which
  // resizes it to the metric-specific requirement before computing.
  rmm::device_uvector<char> scratch(0, raft::resource::get_cuda_stream(handle));
  pairwise_distance<Type, IdxT>(
    handle, x, y, dist, m, n, k, scratch, metric, isRowMajor, metric_arg);
}
/** @} */
/**
 * \defgroup distance_mdspan Pairwise distance functions
 * @{
 */

/**
 * @brief Evaluate pairwise distances for the simple use case.
 *
 * Note: Only contiguous row- or column-major layouts supported currently.
 *
 * Usage example:
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_mdarray.hpp>
 * #include <raft/random/make_blobs.cuh>
 * #include <raft/distance/distance.cuh>
 *
 * raft::resources handle;
 * int n_samples = 5000;
 * int n_features = 50;
 *
 * auto input = raft::make_device_matrix<float>(handle, n_samples, n_features);
 * auto labels = raft::make_device_vector<int>(handle, n_samples);
 * auto output = raft::make_device_matrix<float>(handle, n_samples, n_samples);
 *
 * raft::random::make_blobs(handle, input.view(), labels.view());
 * auto metric = raft::distance::DistanceType::L2SqrtExpanded;
 * raft::distance::pairwise_distance(handle, input.view(), input.view(), output.view(), metric);
 * @endcode
 *
 * @tparam DistT which distance to evaluate
 * @tparam DataT input argument type
 * @tparam AccT accumulation type
 * @tparam OutT output type
 * @tparam layout memory layout of the input/output matrices
 * @tparam IdxT Index type
 * @param handle raft handle for managing expensive resources
 * @param x first set of points (size m*k)
 * @param y second set of points (size n*k)
 * @param dist output distance matrix (size m*n)
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename layout = raft::layout_c_contiguous,
          typename IdxT   = int>
void distance(raft::resources const& handle,
              raft::device_matrix_view<DataT, IdxT, layout> const x,
              raft::device_matrix_view<DataT, IdxT, layout> const y,
              raft::device_matrix_view<OutT, IdxT, layout> dist,
              DataT metric_arg = 2.0f)
{
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");
  RAFT_EXPECTS(dist.extent(0) == x.extent(0),
               "Number of rows in output must be equal to "
               "number of rows in X");
  RAFT_EXPECTS(dist.extent(1) == y.extent(0),
               "Number of columns in output must be equal to "
               "number of rows in Y");
  RAFT_EXPECTS(x.is_exhaustive(), "Input x must be contiguous.");
  RAFT_EXPECTS(y.is_exhaustive(), "Input y must be contiguous.");
  // Check the output layout as well (matching the pairwise_distance mdspan
  // overload): a strided output buffer would otherwise be written incorrectly
  // by the underlying pointer-based kernels.
  RAFT_EXPECTS(dist.is_exhaustive(), "Output must be contiguous.");

  // x, y and dist share `layout`, so a single row-major flag suffices.
  constexpr auto is_rowmajor = std::is_same_v<layout, layout_c_contiguous>;

  distance<DistT, DataT, AccT, OutT, IdxT>(handle,
                                           x.data_handle(),
                                           y.data_handle(),
                                           dist.data_handle(),
                                           x.extent(0),
                                           y.extent(0),
                                           x.extent(1),
                                           is_rowmajor,
                                           metric_arg);
}
/**
 * @brief Convenience wrapper around 'distance' prim to convert runtime metric
 * into compile time for the purpose of dispatch
 * @tparam Type input/accumulation/output data-type
 * @tparam layout memory layout of the input/output matrices
 * @tparam IdxT indexing type
 * @param handle raft handle for managing expensive resources
 * @param x first matrix of points (size mxk)
 * @param y second matrix of points (size nxk)
 * @param dist output distance matrix (size mxn)
 * @param metric distance metric
 * @param metric_arg metric argument (used for Minkowski distance)
 */
template <typename Type, typename layout = layout_c_contiguous, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       device_matrix_view<Type, IdxT, layout> const x,
                       device_matrix_view<Type, IdxT, layout> const y,
                       device_matrix_view<Type, IdxT, layout> dist,
                       raft::distance::DistanceType metric,
                       Type metric_arg = 2.0f)
{
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");
  RAFT_EXPECTS(dist.extent(0) == x.extent(0),
               "Number of rows in output must be equal to "
               "number of rows in X");
  RAFT_EXPECTS(dist.extent(1) == y.extent(0),
               "Number of columns in output must be equal to "
               "number of rows in Y");
  RAFT_EXPECTS(x.is_exhaustive(), "Input x must be contiguous.");
  RAFT_EXPECTS(y.is_exhaustive(), "Input y must be contiguous.");
  RAFT_EXPECTS(dist.is_exhaustive(), "Output must be contiguous.");

  // x, y and dist share `layout`, so a single row-major flag suffices.
  constexpr auto rowmajor = std::is_same_v<layout, layout_c_contiguous>;

  // No scratch buffer is needed here: the pointer-based overload called below
  // allocates and sizes its own workspace internally. (A previously-declared
  // local stream/workspace pair was unused dead code and has been dropped.)
  pairwise_distance(handle,
                    x.data_handle(),
                    y.data_handle(),
                    dist.data_handle(),
                    x.extent(0),
                    y.extent(0),
                    x.extent(1),
                    metric,
                    rowmajor,
                    metric_arg);
}
/** @} */
}; // namespace distance
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/fused_l2_nn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "fused_l2_nn-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "fused_l2_nn-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/fused_l2_nn_helpers.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/detail/fused_l2_nn.cuh>
namespace raft::distance {
/**
* \defgroup fused_l2_nn Fused 1-nearest neighbors
* @{
*/
/** Reduce op over (key, value) pairs; per its detail implementation name,
 *  selects the pair with the minimum value (distance). */
template <typename LabelT, typename DataT>
using KVPMinReduce = detail::KVPMinReduceImpl<LabelT, DataT>;

/** Reduce op that, per its detail implementation name, tracks both the minimum
 *  distance and the index (label) it was attained at — i.e. full 1-NN output. */
template <typename LabelT, typename DataT>
using MinAndDistanceReduceOp = detail::MinAndDistanceReduceOpImpl<LabelT, DataT>;

/** Reduce op that, per its detail implementation name, tracks only the minimum
 *  distance (no index). */
template <typename LabelT, typename DataT>
using MinReduceOp = detail::MinReduceOpImpl<LabelT, DataT>;

/** @} */

/**
 * Initialize array using init value from reduction op
 *
 * @tparam DataT     type of the sentinel value used for initialization
 * @tparam OutT      element type of the output array
 * @tparam IdxT      index/length type
 * @tparam ReduceOpT reduction operator supplying the init semantics
 *
 * @param handle raft handle; supplies the CUDA stream the fill is enqueued on
 * @param[out] min device array to initialize, length m
 * @param m number of elements in `min`
 * @param maxVal sentinel value handed to the reduction op for initialization
 * @param redOp reduction operator whose init value/semantics are used
 */
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
void initialize(raft::resources const& handle, OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
{
  detail::initialize<DataT, OutT, IdxT, ReduceOpT>(
    min, m, maxVal, redOp, resource::get_cuda_stream(handle));
}
} // namespace raft::distance
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/fused_l2_nn-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <raft/core/kvp.hpp> // raft::KeyValuePair
#include <raft/core/resources.hpp> // raft::resources
#include <raft/distance/fused_l2_nn_helpers.cuh> // include initialize and reduce operations
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft {
namespace distance {
/**
 * @brief Fused L2 distance + 1-nearest-neighbor min-reduction.
 *
 * Declaration only (RAFT_EXPLICIT): definitions are provided by the explicit
 * instantiations listed below in this header and compiled into the library.
 *
 * @param[out] min         reduced 1-NN output, length m (on device)
 * @param[in]  x           first matrix, m x k (on device)
 * @param[in]  y           second matrix, n x k (on device)
 * @param[in]  xn          per-row norms of x, length m — presumably L2-squared;
 *                         confirm against the -inl implementation
 * @param[in]  yn          per-row norms of y, length n (same caveat as xn)
 * @param[in]  m,n,k       matrix extents
 * @param[in]  workspace   temporary device workspace
 * @param[in]  sqrt        whether outputs are square-rooted distances
 * @param[in]  initOutBuffer whether to initialize `min` before reducing
 * @param[in]  stream      CUDA stream all work is enqueued on
 */
template <typename DataT, typename OutT, typename IdxT>
void fusedL2NNMinReduce(OutT* min,
                        const DataT* x,
                        const DataT* y,
                        const DataT* xn,
                        const DataT* yn,
                        IdxT m,
                        IdxT n,
                        IdxT k,
                        void* workspace,
                        bool sqrt,
                        bool initOutBuffer,
                        cudaStream_t stream) RAFT_EXPLICIT;
} // namespace distance
} // namespace raft
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_distance_fusedL2NNMinReduce(DataT, OutT, IdxT) \
extern template void raft::distance::fusedL2NNMinReduce<DataT, OutT, IdxT>(OutT * min, \
const DataT* x, \
const DataT* y, \
const DataT* xn, \
const DataT* yn, \
IdxT m, \
IdxT n, \
IdxT k, \
void* workspace, \
bool sqrt, \
bool initOutBuffer, \
cudaStream_t stream)
instantiate_raft_distance_fusedL2NNMinReduce(double, double, int);
instantiate_raft_distance_fusedL2NNMinReduce(double, double, int64_t);
instantiate_raft_distance_fusedL2NNMinReduce(float, float, int);
instantiate_raft_distance_fusedL2NNMinReduce(float, float, int64_t);
// We can't have comma's in the macro expansion, so we use the COMMA macro:
#define COMMA ,
instantiate_raft_distance_fusedL2NNMinReduce(double, raft::KeyValuePair<int COMMA double>, int);
instantiate_raft_distance_fusedL2NNMinReduce(double,
raft::KeyValuePair<int64_t COMMA double>,
int64_t);
instantiate_raft_distance_fusedL2NNMinReduce(float, raft::KeyValuePair<int COMMA float>, int);
instantiate_raft_distance_fusedL2NNMinReduce(float,
raft::KeyValuePair<int64_t COMMA float>,
int64_t);
#undef COMMA
#undef instantiate_raft_distance_fusedL2NNMinReduce
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/masked_nn.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MASKED_L2_NN_H
#define __MASKED_L2_NN_H
#pragma once
#include <limits>
#include <raft/core/handle.hpp>
#include <raft/distance/detail/masked_nn.cuh>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/util/cuda_utils.cuh>
#include <stdint.h>
namespace raft {
namespace distance {
/**
* \defgroup masked_nn Masked 1-nearest neighbors
* @{
*/
/**
* @brief Parameter struct for masked_l2_nn function
*
* @tparam ReduceOpT Type of reduction operator in the epilogue.
* @tparam KVPReduceOpT Type of Reduction operation on key value pairs.
*
* Usage example:
* @code{.cpp}
* #include <raft/distance/masked_nn.cuh>
*
* using IdxT = int;
* using DataT = float;
* using RedOpT = raft::distance::MinAndDistanceReduceOp<IdxT, DataT>;
* using PairRedOpT = raft::distance::KVPMinReduce<IdxT, DataT>;
* using ParamT = raft::distance::masked_l2_nn_params<RedOpT, PairRedOpT>;
*
* bool init_out = true;
* bool sqrt = false;
*
* ParamT masked_l2_params{RedOpT{}, PairRedOpT{}, sqrt, init_out};
* @endcode
*
* Prescribes how to reduce a distance to an intermediate type (`redOp`), and
* how to reduce two intermediate types (`pairRedOp`). Typically, a distance is
* mapped to an (index, value) pair and (index, value) pair with the lowest
* value (distance) is selected.
*
* In addition, prescribes whether to compute the square root of the distance
* (`sqrt`) and whether to initialize the output buffer (`initOutBuffer`).
*/
template <typename ReduceOpT, typename KVPReduceOpT>
struct masked_l2_nn_params {
  /** Reduction operator in the epilogue: reduces a distance to the output type. */
  ReduceOpT redOp;
  /** Reduction operation on (key, value) pairs, i.e. on two intermediate results. */
  KVPReduceOpT pairRedOp;
  /** Whether the output `minDist` should contain the square-rooted L2 distance
   *  rather than the squared distance. */
  bool sqrt;
  /** Whether to initialize the output buffer before the main kernel launch. */
  bool initOutBuffer;
};
/**
 * @brief Masked L2 distance and 1-nearest-neighbor computation in a single call.
 *
 * This function enables faster computation of nearest neighbors if the
 * computation of distances between certain point pairs can be skipped.
 *
 * We use an adjacency matrix that describes which distances to calculate. The
 * points in `y` are divided into groups, and the adjacency matrix indicates
 * whether to compute distances between points in `x` and groups in `y`. In other
 * words, if `adj[i,k]` is true then distance between point `x_i`, and points in
 * `group_k` will be calculated.
 *
 * **Performance considerations**
 *
 * The points in `x` are processed in tiles of `M` points (`M` is currently 64,
 * but may change in the future). As a result, the largest compute time
 * reduction occurs if all `M` points can skip a group. If only part of the `M`
 * points can skip a group, then at most a minor compute time reduction and a
 * modest energy use reduction can be expected.
 *
 * The points in `y` are also grouped into tiles of `N` points (`N` is currently
 * 64, but may change in the future). As a result, group sizes should be larger
 * than `N` to avoid wasting computational resources. If the group sizes are
 * evenly divisible by `N`, then the computation is most efficient, although for
 * larger group sizes this effect is minor.
 *
 *
 * **Comparison to SDDMM**
 *
 * [SDDMM](https://ieeexplore.ieee.org/document/8638042) (sampled dense-dense
 * matrix multiplication) is a matrix-matrix multiplication where only part of
 * the output is computed. Compared to masked_l2_nn, there are a few differences:
 *
 * - The output of masked_l2_nn is a single vector (of nearest neighbors) and not
 *   a sparse matrix.
 *
 * - The sampling in masked_l2_nn is expressed through intermediate "groups"
 *   rather than a CSR format.
 *
 * @tparam DataT data type
 * @tparam OutT output type to either store 1-NN indices and their minimum
 *              distances or store only the min distances. Accordingly, one
 *              has to pass an appropriate `ReduceOpT`
 * @tparam IdxT indexing arithmetic type
 * @tparam ReduceOpT A struct to perform the final needed reduction operation
 *                   and also to initialize the output array elements with the
 *                   appropriate initial value needed for reduction.
 * @tparam KVPReduceOpT A struct providing the reduction operation on
 *                      (key, value) pairs.
 *
 * @param handle             RAFT handle for managing expensive resources
 * @param params             Parameter struct specifying the reduction operations.
 * @param[in]  x             First matrix. Row major. Dim = `m x k`.
 *                           (on device).
 * @param[in]  y             Second matrix. Row major. Dim = `n x k`.
 *                           (on device).
 * @param[in]  x_norm        L2 squared norm of `x`. Length = `m`. (on device).
 * @param[in]  y_norm        L2 squared norm of `y`. Length = `n`. (on device)
 * @param[in]  adj           A boolean adjacency matrix indicating for each
 *                           row of `x` and each group in `y` whether to compute the
 *                           distance. Dim = `m x num_groups`.
 * @param[in]  group_idxs    An array containing the *end* indices of each group
 *                           in `y`. The value of group_idxs[j] indicates the
 *                           start of group j + 1, i.e., it is the inclusive
 *                           scan of the group lengths. The first group is
 *                           always assumed to start at index 0 and the last
 *                           group typically ends at index `n`. Length =
 *                           `num_groups`.
 * @param[out] out           will contain the reduced output (Length = `m`)
 *                           (on device)
 */
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void masked_l2_nn(raft::resources const& handle,
                  raft::distance::masked_l2_nn_params<ReduceOpT, KVPReduceOpT> params,
                  raft::device_matrix_view<const DataT, IdxT, raft::layout_c_contiguous> x,
                  raft::device_matrix_view<const DataT, IdxT, raft::layout_c_contiguous> y,
                  raft::device_vector_view<const DataT, IdxT, raft::layout_c_contiguous> x_norm,
                  raft::device_vector_view<const DataT, IdxT, raft::layout_c_contiguous> y_norm,
                  raft::device_matrix_view<const bool, IdxT, raft::layout_c_contiguous> adj,
                  raft::device_vector_view<const IdxT, IdxT, raft::layout_c_contiguous> group_idxs,
                  raft::device_vector_view<OutT, IdxT, raft::layout_c_contiguous> out)
{
  IdxT m          = x.extent(0);
  IdxT n          = y.extent(0);
  IdxT k          = x.extent(1);
  IdxT num_groups = group_idxs.extent(0);
  // Match k dimension of x, y
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Dimension of vectors in x and y must be equal.");
  // Match x, x_norm and y, y_norm
  RAFT_EXPECTS(m == x_norm.extent(0), "Length of `x_norm` must match input `x`.");
  RAFT_EXPECTS(n == y_norm.extent(0), "Length of `y_norm` must match input `y` ");
  // Match adj to x and group_idxs
  RAFT_EXPECTS(m == adj.extent(0), "#rows in `adj` must match input `x`.");
  RAFT_EXPECTS(num_groups == adj.extent(1), "#cols in `adj` must match length of `group_idxs`.");
  // NOTE: We do not check if all indices in group_idxs actually point *inside* y.
  // If there is no work to be done, return immediately.
  if (m == 0 || n == 0 || k == 0 || num_groups == 0) { return; }
  // KVPReduceOpT is deduced by the detail implementation from params.pairRedOp.
  detail::masked_l2_nn_impl<DataT, OutT, IdxT, ReduceOpT>(handle,
                                                          out.data_handle(),
                                                          x.data_handle(),
                                                          y.data_handle(),
                                                          x_norm.data_handle(),
                                                          y_norm.data_handle(),
                                                          adj.data_handle(),
                                                          group_idxs.data_handle(),
                                                          num_groups,
                                                          m,
                                                          n,
                                                          k,
                                                          params.redOp,
                                                          params.pairRedOp,
                                                          params.sqrt,
                                                          params.initOutBuffer);
}
/** @} */
} // namespace distance
} // namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/specializations.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/distance-ext.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/core/resources.hpp> // raft::resources
#include <raft/distance/detail/kernels/rbf_fin_op.cuh> // rbf_fin_op
#include <raft/distance/distance_types.hpp> // raft::distance::DistanceType
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/device_uvector.hpp> // rmm::device_uvector
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft {
namespace distance {
template <raft::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename FinalLambda,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
void* workspace,
size_t worksize,
FinalLambda fin_op,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <raft::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
void* workspace,
size_t worksize,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <raft::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
size_t getWorkspaceSize(const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k) RAFT_EXPLICIT;
template <raft::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int,
typename layout>
size_t getWorkspaceSize(raft::device_matrix_view<DataT, IdxT, layout> const& x,
raft::device_matrix_view<DataT, IdxT, layout> const& y) RAFT_EXPLICIT;
template <raft::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
const Type* x,
const Type* y,
Type* dist,
IdxT m,
IdxT n,
IdxT k,
rmm::device_uvector<char>& workspace,
raft::distance::DistanceType metric,
bool isRowMajor = true,
Type metric_arg = 2.0f) RAFT_EXPLICIT;
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
const Type* x,
const Type* y,
Type* dist,
IdxT m,
IdxT n,
IdxT k,
raft::distance::DistanceType metric,
bool isRowMajor = true,
Type metric_arg = 2.0f) RAFT_EXPLICIT;
// Declaration-only (RAFT_EXPLICIT) mdspan overload of distance(): layout and
// sizes are carried by the device_matrix_views (layout defaults to
// row-major / layout_c_contiguous), so no isRowMajor flag is needed.
template <raft::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename layout = raft::layout_c_contiguous,
          typename IdxT = int>
void distance(raft::resources const& handle,
              raft::device_matrix_view<DataT, IdxT, layout> const x,
              raft::device_matrix_view<DataT, IdxT, layout> const y,
              raft::device_matrix_view<OutT, IdxT, layout> dist,
              DataT metric_arg = 2.0f) RAFT_EXPLICIT;
// Declaration-only (RAFT_EXPLICIT) mdspan overload of the runtime-dispatched
// pairwise distance; metric is selected at runtime, layout via the views.
template <typename Type, typename layout = layout_c_contiguous, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       device_matrix_view<Type, IdxT, layout> const x,
                       device_matrix_view<Type, IdxT, layout> const y,
                       device_matrix_view<Type, IdxT, layout> dist,
                       raft::distance::DistanceType metric,
                       Type metric_arg = 2.0f) RAFT_EXPLICIT;
}; // namespace distance
}; // namespace raft
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
/*
* Hierarchy of instantiations:
*
* This file defines the extern template instantiations for the public API of
* raft::distance. To improve compile times, the extern template instantiation
* of the distance kernels is handled in
* distance/detail/pairwise_matrix/dispatch-ext.cuh.
*
* After adding an instance here, make sure to also add the instance to
* dispatch-ext.cuh and the corresponding .cu files.
*/
// Expands to an `extern template` declaration for the FinalLambda-taking,
// workspace-taking distance() overload.  This suppresses implicit
// instantiation in including TUs; each invocation below must have a matching
// explicit instantiation in a .cu file (see the "Hierarchy of instantiations"
// note above).  Keep the parameter list in sync with the declaration of that
// overload.
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, FinalLambda, IdxT) \
  extern template void raft::distance::distance<DT, DataT, AccT, OutT, FinalLambda, IdxT>( \
    raft::resources const& handle, \
    const DataT* x, \
    const DataT* y, \
    OutT* dist, \
    IdxT m, \
    IdxT n, \
    IdxT k, \
    void* workspace, \
    size_t worksize, \
    FinalLambda fin_op, \
    bool isRowMajor, \
    DataT metric_arg)
// The following two instances are used in test/distance/gram.cu. Note the use
// of int64_t for the index type.
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Unexpanded,
float,
float,
float,
raft::distance::kernels::detail::rbf_fin_op<float>,
int64_t);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Unexpanded,
double,
double,
double,
raft::distance::kernels::detail::rbf_fin_op<double>,
int64_t);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CorrelationExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, double, double, double, raft::identity_op, int);
#undef instantiate_raft_distance_distance
// Same, but without raft::identity_op
// Redefinition of the instantiation macro for the workspace-taking
// distance() overload *without* a final lambda (the previous definition was
// #undef'd above).  One `extern template` declaration per invocation; each
// must be backed by an explicit instantiation in a .cu file.
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, IdxT) \
  extern template void raft::distance::distance<DT, DataT, AccT, OutT, IdxT>( \
    raft::resources const& handle, \
    const DataT* x, \
    const DataT* y, \
    OutT* dist, \
    IdxT m, \
    IdxT n, \
    IdxT k, \
    void* workspace, \
    size_t worksize, \
    bool isRowMajor, \
    DataT metric_arg)
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CorrelationExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CorrelationExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, double, double, double, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L1, float, float, float, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L1, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, double, double, double, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::Linf, float, float, float, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::Linf, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, double, double, double, int);
#undef instantiate_raft_distance_distance
// Same, but without workspace
// Redefinition of the instantiation macro for the workspace-free distance()
// convenience overload (same macro name, previous definition #undef'd).
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, IdxT) \
  extern template void raft::distance::distance<DT, DataT, AccT, OutT, IdxT>( \
    raft::resources const& handle, \
    const DataT* x, \
    const DataT* y, \
    OutT* dist, \
    IdxT m, \
    IdxT n, \
    IdxT k, \
    bool isRowMajor, \
    DataT metric_arg)
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CorrelationExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CorrelationExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::CosineExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HammingUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::HellingerExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, double, double, double, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L1, float, float, float, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L1, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtExpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2SqrtUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, double, double, double, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::Linf, float, float, float, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::Linf, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, float, float, float, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::RusselRaoExpanded, double, double, double, int);
#undef instantiate_raft_distance_distance
// `extern template` declaration for the raw-pointer getWorkspaceSize()
// overload; one invocation per supported (metric, type, index) combination.
#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT) \
  extern template size_t raft::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>( \
    const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::CorrelationExpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::CorrelationExpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::CosineExpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::CosineExpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::HammingUnexpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::HammingUnexpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::HellingerExpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::HellingerExpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::InnerProduct, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::InnerProduct, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::JensenShannon, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::JensenShannon, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::KLDivergence, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::KLDivergence, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2SqrtExpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2SqrtExpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2SqrtUnexpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2SqrtUnexpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Unexpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Unexpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Linf, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Linf, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::LpUnexpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::LpUnexpanded, double, double, double, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::RusselRaoExpanded, float, float, float, int);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::RusselRaoExpanded, double, double, double, int);
#undef instantiate_raft_distance_getWorkspaceSize
// Redefinition of the getWorkspaceSize instantiation macro for the mdspan
// overload: the extra `layout` parameter selects row- vs column-major
// device_matrix_view instantiations (previous definition #undef'd above).
#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT, layout) \
  extern template size_t raft::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT, layout>( \
    raft::device_matrix_view<DataT, IdxT, layout> const& x, \
    raft::device_matrix_view<DataT, IdxT, layout> const& y)
// We could consider not taking template parameters for this function. The
// number of instantiations seems a bit excessive..
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, double, double, double, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::Canberra, double, double, double, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CorrelationExpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CorrelationExpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CosineExpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CosineExpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CosineExpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::CosineExpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HammingUnexpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HammingUnexpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HammingUnexpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HammingUnexpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HellingerExpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HellingerExpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HellingerExpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::HellingerExpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::InnerProduct, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::InnerProduct,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::InnerProduct, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::InnerProduct,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::JensenShannon, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::JensenShannon,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::JensenShannon, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::JensenShannon,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::KLDivergence, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::KLDivergence,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::KLDivergence, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::KLDivergence,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, double, double, double, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L1, double, double, double, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, double, double, double, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, float, float, float, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
raft::distance::DistanceType::L2Expanded, double, double, double, int, raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtExpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtExpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtExpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtExpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtUnexpanded,
float,
float,
float,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtUnexpanded,
double,
double,
double,
int,
raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtUnexpanded,
float,
float,
float,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2SqrtUnexpanded,
double,
double,
double,
int,
raft::layout_f_contiguous);
instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Unexpanded, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(raft::distance::DistanceType::L2Unexpanded,
                                           double,
                                           double,
                                           double,
                                           int,
                                           raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
  raft::distance::DistanceType::L2Unexpanded, float, float, float, int, raft::layout_f_contiguous);
// NOTE(review): this mdspan getWorkspaceSize section stops abruptly: unlike
// every parallel section in this file, it omits
// (L2Unexpanded, double, layout_f_contiguous) as well as all Linf,
// LpUnexpanded and RusselRaoExpanded combinations.  If the corresponding
// explicit instantiations exist in the .cu files, declarations should be added
// here; otherwise calls with those combinations will not link under
// RAFT_EXPLICIT_INSTANTIATE_ONLY.  TODO: confirm against the .cu sources
// before changing -- adding an `extern template` without a backing
// instantiation only moves the failure, it does not fix it.
#undef instantiate_raft_distance_getWorkspaceSize
// `extern template` declaration for the runtime-dispatched pairwise_distance
// overload that takes an rmm workspace buffer.  Template arguments are
// deduced from the parameter types, so none are spelled out explicitly.
#define instantiate_raft_distance_pairwise_distance(DataT, IdxT) \
  extern template void raft::distance::pairwise_distance(raft::resources const& handle, \
                                                         const DataT* x, \
                                                         const DataT* y, \
                                                         DataT* dist, \
                                                         IdxT m, \
                                                         IdxT n, \
                                                         IdxT k, \
                                                         rmm::device_uvector<char>& workspace, \
                                                         raft::distance::DistanceType metric, \
                                                         bool isRowMajor, \
                                                         DataT metric_arg)
instantiate_raft_distance_pairwise_distance(float, int);
instantiate_raft_distance_pairwise_distance(double, int);
#undef instantiate_raft_distance_pairwise_distance
// Same, but without workspace
// Redefinition for the workspace-free runtime-dispatched pairwise_distance
// overload (previous definition #undef'd above); template arguments deduced.
#define instantiate_raft_distance_pairwise_distance(DataT, IdxT) \
  extern template void raft::distance::pairwise_distance(raft::resources const& handle, \
                                                         const DataT* x, \
                                                         const DataT* y, \
                                                         DataT* dist, \
                                                         IdxT m, \
                                                         IdxT n, \
                                                         IdxT k, \
                                                         raft::distance::DistanceType metric, \
                                                         bool isRowMajor, \
                                                         DataT metric_arg)
instantiate_raft_distance_pairwise_distance(float, int);
instantiate_raft_distance_pairwise_distance(double, int);
#undef instantiate_raft_distance_pairwise_distance
// Version with mdspan
// Redefinition of the distance() instantiation macro for the mdspan overload.
// Note the template-argument order <DistT, DataT, AccT, OutT, layout, IdxT>
// must match the mdspan declaration (layout before IdxT), unlike the
// raw-pointer overloads above.
#define instantiate_raft_distance_distance(DistT, DataT, AccT, OutT, layout, IdxT) \
  extern template void raft::distance::distance<DistT, DataT, AccT, OutT, layout, IdxT>( \
    raft::resources const& handle, \
    raft::device_matrix_view<DataT, IdxT, layout> const x, \
    raft::device_matrix_view<DataT, IdxT, layout> const y, \
    raft::device_matrix_view<OutT, IdxT, layout> dist, \
    DataT metric_arg)
// Again, we might want to consider reigning in the number of instantiations...
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, double, double, double, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Canberra, double, double, double, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CorrelationExpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CorrelationExpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CosineExpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CosineExpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CosineExpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::CosineExpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HammingUnexpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HammingUnexpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HammingUnexpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HammingUnexpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HellingerExpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HellingerExpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HellingerExpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::HellingerExpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::InnerProduct,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::InnerProduct, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::InnerProduct,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::JensenShannon,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::JensenShannon, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::JensenShannon,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::KLDivergence,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::KLDivergence, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::KLDivergence,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, double, double, double, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L1, double, double, double, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, double, double, double, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Expanded, double, double, double, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtExpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtExpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtExpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtExpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtUnexpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtUnexpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtUnexpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2SqrtUnexpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Unexpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::L2Unexpanded, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::L2Unexpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, double, double, double, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::Linf, double, double, double, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, float, float, float, raft::layout_c_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::LpUnexpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(
raft::distance::DistanceType::LpUnexpanded, float, float, float, raft::layout_f_contiguous, int);
instantiate_raft_distance_distance(raft::distance::DistanceType::LpUnexpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::RusselRaoExpanded,
float,
float,
float,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::RusselRaoExpanded,
double,
double,
double,
raft::layout_c_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::RusselRaoExpanded,
float,
float,
float,
raft::layout_f_contiguous,
int);
instantiate_raft_distance_distance(raft::distance::DistanceType::RusselRaoExpanded,
double,
double,
double,
raft::layout_f_contiguous,
int);
#undef instantiate_raft_distance_distance
// Suppress implicit instantiation of the mdspan-based pairwise_distance API for
// the type/layout combinations below; the matching explicit instantiations are
// compiled into the distance library's .cu translation units.
#define instantiate_raft_distance_pairwise_distance(DataT, layout, IdxT) \
  extern template void raft::distance::pairwise_distance(                \
    raft::resources const& handle,                                       \
    raft::device_matrix_view<DataT, IdxT, layout> const x,               \
    raft::device_matrix_view<DataT, IdxT, layout> const y,               \
    raft::device_matrix_view<DataT, IdxT, layout> dist,                  \
    raft::distance::DistanceType metric,                                 \
    DataT metric_arg)
// All combinations of {float, double} x {row major, col major} with int indices.
instantiate_raft_distance_pairwise_distance(float, raft::layout_c_contiguous, int);
instantiate_raft_distance_pairwise_distance(float, raft::layout_f_contiguous, int);
instantiate_raft_distance_pairwise_distance(double, raft::layout_c_contiguous, int);
instantiate_raft_distance_pairwise_distance(double, raft::layout_f_contiguous, int);
#undef instantiate_raft_distance_pairwise_distance
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/fused_l2_nn-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __FUSED_L2_NN_H
#define __FUSED_L2_NN_H
#pragma once
#include <cub/cub.cuh>
#include <limits>
#include <raft/core/resources.hpp>
#include <raft/distance/detail/fused_l2_nn.cuh>
#include <raft/distance/fused_l2_nn_helpers.cuh>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <stdint.h>
#include <type_traits>
namespace raft {
namespace distance {
/**
* \ingroup fused_l2_nn
* @{
*/
/**
* @brief Fused L2 distance and 1-nearest-neighbor computation in a single call.
*
* The benefits of such a call are 2-fold: 1) eliminate the need for an
* intermediate buffer to store the output of gemm 2) reduce the memory read
* traffic on this intermediate buffer, otherwise needed during the reduction
* phase for 1-NN.
*
* @tparam DataT data type
* @tparam OutT output type to either store 1-NN indices and their minimum
* distances or store only the min distances. Accordingly, one
* has to pass an appropriate `ReduceOpT`
* @tparam IdxT indexing arithmetic type
* @tparam ReduceOpT A struct to perform the final needed reduction operation
* and also to initialize the output array elements with the
* appropriate initial value needed for reduction.
*
* @param[out] min will contain the reduced output (Length = `m`)
* (on device)
* @param[in] x first matrix. Row major. Dim = `m x k`.
* (on device).
* @param[in] y second matrix. Row major. Dim = `n x k`.
* (on device).
* @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
* @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
* @param[in] m gemm m
* @param[in] n gemm n
* @param[in] k gemm k
* @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
* @param[in] redOp reduction operator in the epilogue
* @param[in] pairRedOp reduction operation on key value pairs
* @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
* @param[in] initOutBuffer whether to initialize the output buffer before the
* main kernel launch
* @param[in] stream cuda stream
*/
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void fusedL2NN(OutT* min,
               const DataT* x,
               const DataT* y,
               const DataT* xn,
               const DataT* yn,
               IdxT m,
               IdxT n,
               IdxT k,
               void* workspace,
               ReduceOpT redOp,
               KVPReduceOpT pairRedOp,
               bool sqrt,
               bool initOutBuffer,
               cudaStream_t stream)
{
  // When k is smaller than 32, the Policy4x4 results in redundant calculations
  // as it uses tiles that have k=32. Therefore, use a "skinny" policy instead
  // that uses tiles with a smaller value of k.
  bool is_skinny = k < 32;

  // Launch the implementation with a compile-time vector length (in elements).
  // Deduplicates the six otherwise-identical fusedL2NNImpl call sites that
  // differed only in the Policy's vector length and skinny-ness.
  auto launch = [&](auto vec_len) {
    constexpr int VecLen = decltype(vec_len)::value;
    if (is_skinny) {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename linalg::Policy4x4Skinny<DataT, VecLen>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    } else {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename linalg::Policy4x4<DataT, VecLen>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    }
  };

  // Pick the widest vectorized access (16 bytes, then 8 bytes) for which the
  // row length in bytes and both input base addresses are suitably aligned;
  // fall back to scalar (1-element) accesses otherwise.
  size_t bytes = sizeof(DataT) * k;
  auto px      = reinterpret_cast<uintptr_t>(x);
  auto py      = reinterpret_cast<uintptr_t>(y);
  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0 && px % 16 == 0 && py % 16 == 0) {
    launch(std::integral_constant<int, 16 / sizeof(DataT)>{});
  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0 && px % 8 == 0 && py % 8 == 0) {
    launch(std::integral_constant<int, 8 / sizeof(DataT)>{});
  } else {
    launch(std::integral_constant<int, 1>{});
  }
}
/**
* @brief Wrapper around fusedL2NN with minimum reduction operators.
*
* fusedL2NN cannot be compiled in the distance library due to the lambda
* operators, so this wrapper covers the most common case (minimum).
* This should be preferred to the more generic API when possible, in order to
* reduce compilation times for users of the shared library.
*
* @tparam DataT data type
* @tparam OutT output type to either store 1-NN indices and their minimum
* distances (e.g. raft::KeyValuePair<int, float>) or store only the min
* distances.
* @tparam IdxT indexing arithmetic type
* @param[out] min will contain the reduced output (Length = `m`)
* (on device)
* @param[in] x first matrix. Row major. Dim = `m x k`.
* (on device).
* @param[in] y second matrix. Row major. Dim = `n x k`.
* (on device).
* @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
* @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
* @param[in] m gemm m
* @param[in] n gemm n
* @param[in] k gemm k
* @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
* @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
* @param[in] initOutBuffer whether to initialize the output buffer before the
* main kernel launch
* @param[in] stream cuda stream
*/
template <typename DataT, typename OutT, typename IdxT>
void fusedL2NNMinReduce(OutT* min,
                        const DataT* x,
                        const DataT* y,
                        const DataT* xn,
                        const DataT* yn,
                        IdxT m,
                        IdxT n,
                        IdxT k,
                        void* workspace,
                        bool sqrt,
                        bool initOutBuffer,
                        cudaStream_t stream)
{
  // Forward to the generic entry point with the (index, distance) minimum
  // reduction operators, which cover the common 1-NN use case without
  // requiring user-provided functors.
  fusedL2NN<DataT, OutT, IdxT>(min,
                               x,
                               y,
                               xn,
                               yn,
                               m,
                               n,
                               k,
                               workspace,
                               MinAndDistanceReduceOp<IdxT, DataT>{},
                               KVPMinReduce<IdxT, DataT>{},
                               sqrt,
                               initOutBuffer,
                               stream);
}
/** @} */
} // namespace distance
} // namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/distance/kernels.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/detail/kernels/gram_matrix.cuh>
#include <raft/distance/detail/kernels/kernel_factory.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/distance/distance.cuh>
#include <raft/linalg/gemm.cuh>
namespace raft::distance::kernels {
// TODO: Need to expose formal APIs for this that are more consistent w/ other APIs in RAFT
// Re-export the detail-namespace Gram matrix kernel types as the public names
// of this header.
using raft::distance::kernels::detail::GramMatrixBase;
using raft::distance::kernels::detail::KernelFactory;
};  // end namespace raft::distance::kernels
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/detail/distance_ops/all_ops.cuh>
#include <raft/distance/detail/pairwise_matrix/dispatch.cuh>
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#include <raft/distance/detail/pairwise_matrix/dispatch_sm80.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/unary_op.cuh>
#include <type_traits>
namespace raft {
namespace distance {
namespace detail {
/**
* @brief: A tag type for overload resolution based on DistanceType
*
* It is not possible to partially specialize function templates on a single
* parameter. Instead, it is often easier to use a combination of conventional
* method overloading and a parameter with a specific tag type. The following
* type is used to help method overloading based on the DistanceType enum.
*/
template <DistanceType d>
using distance_tag = std::integral_constant<DistanceType, d>;  // overload-selection tag (see above)
/**
* @brief Implement pairwise_matrix for specific distance
*
* There are multiple overloads for this function, one for each distance type.
* They are implemented below. The documentation of this function serves as
* documentation for all functions. The following overloads are defined:
*
* - DistanceType::Canberra:
* - DistanceType::CorrelationExpanded:
* - DistanceType::CosineExpanded:
* - DistanceType::HammingUnexpanded:
* - DistanceType::HellingerExpanded:
* - DistanceType::JensenShannon:
* - DistanceType::KLDivergence:
* - DistanceType::L1:
* - DistanceType::L2Expanded:
* - DistanceType::L2SqrtExpanded:
* - DistanceType::L2Unexpanded:
* - DistanceType::L2SqrtUnexpanded:
* - DistanceType::Linf:
* - DistanceType::LpUnexpanded:
* - DistanceType::RusselRaoExpanded:
*
* @tparam DataT Input data type
* @tparam AccT Accumulation data type
* @tparam OutT Output data type
* @tparam FinOpT Type of final operation
* @tparam IdxT Index type
*
* @param handle RAFT resources handle
* @param distance_type A tag type to indicate which distance is calculated.
* @param x First set of points
* @param y Second set of points
* @param out Output distance matrix
* @param m Number of points in x
* @param n Number of points in y
* @param k Dimensionality of points in x, y
* @param workspace Temporary workspace needed for computations
* @param worksize Number of bytes of the workspace
* @param is_row_major Whether the matrices are row-major or col-major
* @param metric_arg The `p` argument for Lp.
*/
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::Canberra> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,  // unused
                   size_t worksize,  // unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT metric_arg)  // unused
{
  // Canberra is computed element-wise; no row norms or workspace are required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::canberra_distance_op<DataT, AccT, IdxT> canberra_op{};
  pairwise_matrix_dispatch<decltype(canberra_op), DataT, AccT, OutT, FinOpT, IdxT>(
    canberra_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::CorrelationExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // unused
{
  // Correlation needs four auxiliary per-row vectors (row sums and squared L2
  // norms for both x and y), hence the 2 * (m + n) workspace requirement.
  ASSERT(!(worksize < 2 * (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  // All four pointers start at the workspace base; they are offset below
  // according to whether x and y can share buffers (the x == y case).
  AccT* x_norm    = workspace;
  AccT* y_norm    = workspace;
  AccT* sq_x_norm = workspace;
  AccT* sq_y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if (x == y && is_row_major) {
    // Shared layout: [row sums (max(m, n)) | squared norms (max(m, n))];
    // y's pointers alias x's since the inputs are identical.
    raft::linalg::reduce(x_norm,
                         x,
                         k,
                         std::max(m, n),
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    sq_x_norm += std::max(m, n);
    sq_y_norm = sq_x_norm;
    raft::linalg::rowNorm(
      sq_x_norm, x, k, std::max(m, n), raft::linalg::L2Norm, is_row_major, stream);
  } else {
    // Distinct layout: [x row sums (m) | y row sums (n) | x sq norms (m) | y sq norms (n)].
    y_norm += m;
    raft::linalg::reduce(x_norm,
                         x,
                         k,
                         m,
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    raft::linalg::reduce(y_norm,
                         y,
                         k,
                         n,
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    sq_x_norm += (m + n);
    sq_y_norm = sq_x_norm + m;
    raft::linalg::rowNorm(sq_x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream);
    raft::linalg::rowNorm(sq_y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream);
  }
  using OpT = ops::correlation_distance_op<DataT, AccT, IdxT>;
  OpT corr_op(is_row_major, sq_x_norm, sq_y_norm, m, n, k);
  pairwise_matrix_dispatch<decltype(corr_op), DataT, AccT, OutT, FinOpT, IdxT>(
    corr_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::CosineExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // unused
{
  // raft distance support inputs as float/double and output as uint8_t/float/double.
  static_assert(!((sizeof(OutT) > 1) && (sizeof(AccT) != sizeof(OutT))),
                "OutT can be uint8_t, float, double,"
                "if sizeof(OutT) > 1 then sizeof(AccT) == sizeof(OutT).");
  // One L2 norm per row of x and of y is needed, hence the (m + n) workspace.
  ASSERT(!(worksize < (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  // y_norm initially aliases x_norm; it is offset past x's norms below unless
  // x == y, in which case a single shared norm buffer is used.
  DataT* x_norm = workspace;
  DataT* y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if (x == y && is_row_major) {
    raft::linalg::rowNorm(
      x_norm, x, k, std::max(m, n), raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
  } else {
    y_norm += m;
    raft::linalg::rowNorm(
      x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
    raft::linalg::rowNorm(
      y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
  }
  ops::cosine_distance_op<DataT, AccT, IdxT> distance_op{};
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::HammingUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Hamming is computed element-wise; the op is given k so it can normalize.
  // No row norms or workspace are required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::hamming_distance_op<DataT, AccT, IdxT> hamming_op{k};
  pairwise_matrix_dispatch<decltype(hamming_op), DataT, AccT, OutT, FinOpT, IdxT>(
    hamming_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::InnerProduct> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Inner product reduces to a single GEMM over x and y (transpose flags are
  // flipped for column-major input).
  // NOTE(review): fin_op is accepted but never applied on this path — the GEMM
  // result is written to `out` directly. Confirm callers only rely on an
  // identity epilogue for InnerProduct.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  raft::linalg::gemm(handle,
                     out,
                     const_cast<DataT*>(x),
                     const_cast<DataT*>(y),
                     m,
                     n,
                     k,
                     !is_row_major,
                     !is_row_major,
                     is_row_major,
                     stream);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::HellingerExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  // First transform x and y to sqrt(x), sqrt(y) in place. The const_cast
  // mutates the caller's buffers; they are restored below, but must not be
  // read concurrently elsewhere while this function runs.
  const auto raft_sqrt = raft::linalg::unaryOp<DataT, raft::sqrt_op, IdxT>;
  raft_sqrt((DataT*)x, x, m * k, raft::sqrt_op{}, stream);
  // When x == y, the same buffer must only be transformed (and reverted) once.
  if (x != y) { raft_sqrt((DataT*)y, y, n * k, raft::sqrt_op{}, stream); }
  // Then calculate Hellinger distance
  ops::hellinger_distance_op<DataT, AccT, IdxT> distance_op{};
  const DataT* x_norm = nullptr;
  const DataT* y_norm = nullptr;
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
  // Finally restore x and y by squaring, which inverts the sqrt above.
  // (BUGFIX: the previous code applied sqrt a second time, which leaves the
  // caller's inputs at x^(1/4) instead of restoring them to x.)
  const auto raft_sq = raft::linalg::unaryOp<DataT, raft::sq_op, IdxT>;
  raft_sq((DataT*)x, x, m * k, raft::sq_op{}, stream);
  if (x != y) { raft_sq((DataT*)y, y, n * k, raft::sq_op{}, stream); }
  RAFT_CUDA_TRY(cudaGetLastError());
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::JensenShannon> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Jensen-Shannon is computed element-wise; no norms or workspace required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::jensen_shannon_distance_op<DataT, AccT, IdxT> js_op{};
  pairwise_matrix_dispatch<decltype(js_op), DataT, AccT, OutT, FinOpT, IdxT>(
    js_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::KLDivergence> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  // Maps v -> log(v), preserving zeros: for v == 0 the expression evaluates to
  // 0 * log(0 + 1) == 0, avoiding -inf.
  auto unaryOp_lambda = [] __device__(DataT input) {
    const bool x_zero = (input == 0);
    return (!x_zero) * raft::log(input + x_zero);
  };
  // Inverse transform: maps log(v) back to v via exp, again keeping zeros at 0.
  // NOTE(review): entries whose original value was exactly 1 are stored as
  // log(1) == 0 and therefore restored as 0, not 1 — confirm this round-trip
  // loss is acceptable for callers.
  auto unaryOp_lambda_reverse = [] __device__(DataT input) {
    // reverse previous log (x) back to x using (e ^ log(x))
    const bool x_zero = (input == 0);
    return (!x_zero) * raft::exp(input);
  };
  // Pre-compute log(y) in place (mutates the caller's buffer; reverted below).
  // Skipped when x == y because the op handles that case internally.
  if (x != y) {
    raft::linalg::unaryOp<DataT, decltype(unaryOp_lambda), IdxT>(
      (DataT*)y, y, n * k, unaryOp_lambda, stream);
  }
  const DataT* x_norm = nullptr;
  const DataT* y_norm = nullptr;
  // This op takes some shortcuts when x equals y. So its behavior changes based
  // on this.
  ops::kl_divergence_op<DataT, AccT, IdxT> distance_op{is_row_major, x == y};
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
  if (x != y) {
    // Now reverse previous log (x) back to x using (e ^ log(x))
    raft::linalg::unaryOp<DataT, decltype(unaryOp_lambda_reverse), IdxT>(
      (DataT*)y, y, n * k, unaryOp_lambda_reverse, stream);
  }
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L1> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // L1 (Manhattan) is computed element-wise; no norms or workspace required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::l1_distance_op<DataT, AccT, IdxT> l1_op{};
  pairwise_matrix_dispatch<decltype(l1_op), DataT, AccT, OutT, FinOpT, IdxT>(
    l1_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT,
          typename AccT,
          typename OutT,
          typename FinOpT,
          typename IdxT = int>
void distance_impl_l2_expanded(  // NOTE: different name
  bool perform_sqrt,             // dispatch on sqrt
  const DataT* x,
  const DataT* y,
  OutT* out,
  IdxT m,
  IdxT n,
  IdxT k,
  AccT* workspace,
  size_t worksize,
  FinOpT fin_op,
  cudaStream_t stream,
  bool is_row_major)
{
  // Shared implementation for L2Expanded and L2SqrtExpanded: computes the
  // squared row norms of x and y into the workspace, then dispatches the
  // expanded-L2 op (optionally applying sqrt to the result).
  // raft distance support inputs as float/double and output as uint8_t/float/double.
  static_assert(!((sizeof(OutT) > 1) && (sizeof(AccT) != sizeof(OutT))),
                "OutT can be uint8_t, float, double,"
                "if sizeof(OutT) > 1 then sizeof(AccT) == sizeof(OutT).");
  // One squared norm per row of x and of y, hence the (m + n) requirement.
  ASSERT(!(worksize < (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");
  // y_norm initially aliases x_norm; it is offset past x's norms below unless
  // x == y, in which case a single shared norm buffer is used.
  DataT* x_norm = workspace;
  DataT* y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if ((x == y) && is_row_major) {
    raft::linalg::rowNorm(x_norm,
                          x,
                          k,
                          std::max(m, n),
                          raft::linalg::L2Norm,
                          is_row_major,
                          stream,
                          raft::identity_op{});
  } else {
    y_norm += m;
    raft::linalg::rowNorm(
      x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream, raft::identity_op{});
    raft::linalg::rowNorm(
      y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream, raft::identity_op{});
  }
  ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{perform_sqrt};
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2Expanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Delegate to the shared expanded-L2 helper with the final sqrt disabled.
  constexpr bool perform_sqrt = false;
  distance_impl_l2_expanded(perform_sqrt,
                            x,
                            y,
                            out,
                            m,
                            n,
                            k,
                            workspace,
                            worksize,
                            fin_op,
                            raft::resource::get_cuda_stream(handle),
                            is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2SqrtExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Delegate to the shared expanded-L2 helper with the final sqrt enabled.
  constexpr bool perform_sqrt = true;
  distance_impl_l2_expanded(perform_sqrt,
                            x,
                            y,
                            out,
                            m,
                            n,
                            k,
                            workspace,
                            worksize,
                            fin_op,
                            raft::resource::get_cuda_stream(handle),
                            is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2Unexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Unexpanded L2 accumulates squared differences directly, so no row norms
  // are needed. The final square root is not applied for this metric.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const bool perform_sqrt = false;
  ops::l2_unexp_distance_op<DataT, AccT, IdxT> unexp_op(perform_sqrt);
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<decltype(unexp_op), DataT, AccT, OutT, FinOpT, IdxT>(
    unexp_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2SqrtUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Unexpanded L2 accumulates squared differences directly, so no row norms
  // are needed. The final square root is applied for this metric.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const bool perform_sqrt = true;
  ops::l2_unexp_distance_op<DataT, AccT, IdxT> unexp_op(perform_sqrt);
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<decltype(unexp_op), DataT, AccT, OutT, FinOpT, IdxT>(
    unexp_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::Linf> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Chebyshev (L-infinity) is computed element-wise; no norms or workspace.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::l_inf_distance_op<DataT, AccT, IdxT> linf_op{};
  pairwise_matrix_dispatch<decltype(linf_op), DataT, AccT, OutT, FinOpT, IdxT>(
    linf_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::LpUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT metric_arg)
{
  // Minkowski (Lp) is computed element-wise; the op carries the exponent p
  // (metric_arg). No norms or workspace required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::lp_unexp_distance_op<DataT, AccT, IdxT> lp_op{metric_arg};
  pairwise_matrix_dispatch<decltype(lp_op), DataT, AccT, OutT, FinOpT, IdxT>(
    lp_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::RusselRaoExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Russel-Rao is computed element-wise; the op is given k for normalization.
  // No row norms or workspace are required.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);
  const DataT* no_norm = nullptr;
  ops::russel_rao_distance_op<DataT, AccT, IdxT> rr_op{k};
  pairwise_matrix_dispatch<decltype(rr_op), DataT, AccT, OutT, FinOpT, IdxT>(
    rr_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
/**
* @brief Evaluate pairwise distances with the user epilogue lamba allowed
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam FinalLambda user-defined epilogue lamba
* @tparam Index_ Index type
*
* @param x first set of points
* @param y second set of points
* @param out output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param fin_op the final gemm epilogue lambda
* @param stream cuda stream
* @param isRowMajor whether the matrices are row-major or col-major
*
* @note fin_op: This is a device lambda which is supposed to operate upon the
* input which is AccType and returns the output in OutType. It's signature is
* as follows: <pre>OutType fin_op(AccType in, int g_idx);</pre>. If one needs
* any other parameters, feel free to pass them via closure.
*/
template <raft::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename FinalLambda,
          typename Index_ = int>
void distance(raft::resources const& handle,
              const InType* x,
              const InType* y,
              OutType* out,
              Index_ m,
              Index_ n,
              Index_ k,
              void* workspace,
              size_t worksize,
              FinalLambda fin_op,
              bool isRowMajor  = true,
              InType metric_arg = 2.0f)
{
  // raft distance support inputs as float/double and output as uint8_t/float/double.
  static_assert(!((sizeof(OutType) > 1) && (sizeof(AccType) != sizeof(OutType))),
                "OutType can be uint8_t, float, double,"
                "if sizeof(OutType) > 1 then sizeof(AccType) == sizeof(OutType).");
  // Tag-dispatch to the distance_impl overload for this metric; the tag
  // carries the compile-time DistanceType value.
  distance_impl<InType, AccType, OutType, FinalLambda, Index_>(
    handle,
    distance_tag<distanceType>{},
    x,
    y,
    out,
    m,
    n,
    k,
    reinterpret_cast<AccType*>(workspace),
    worksize,
    fin_op,
    isRowMajor,
    metric_arg);
  // Surface any launch error without clearing the sticky error state.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Evaluate pairwise distances for the simple use case
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam Index_ Index type
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param stream cuda stream
* @param isRowMajor whether the matrices are row-major or col-major
*/
template <raft::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename Index_ = int>
void distance(raft::resources const& handle,
              const InType* x,
              const InType* y,
              OutType* out,
              Index_ m,
              Index_ n,
              Index_ k,
              void* workspace,
              size_t worksize,
              bool isRowMajor = true,
              InType metric_arg = 2.0f)
{
  // Forward to the epilogue-taking overload with the identity epilogue.
  distance<distanceType, InType, AccType, OutType, raft::identity_op, Index_>(
    handle, x, y, out, m, n, k, workspace, worksize, raft::identity_op{}, isRowMajor, metric_arg);
}
/**
* @brief Return the exact workspace size to compute the distance
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam Index_ Index type
* @param x first set of points
* @param y second set of points
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
*
* @note If the specified distanceType doesn't need the workspace at all, it
* returns 0.
*/
template <raft::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename Index_ = int>
size_t getWorkspaceSize(const InType* x, const InType* y, Index_ m, Index_ n, Index_ k)
{
  // Returns the exact number of workspace bytes required by `distance` for the
  // given problem size; 0 when the metric needs no workspace.
  size_t worksize = 0;
  // Expanded metrics (those up to CosineExpanded in the enum, plus
  // CorrelationExpanded) stage per-row auxiliary values for x and y in the
  // workspace; CorrelationExpanded needs two buffers per input, others one.
  constexpr bool is_allocated = (distanceType <= raft::distance::DistanceType::CosineExpanded) ||
                                (distanceType == raft::distance::DistanceType::CorrelationExpanded);
  constexpr size_t numOfBuffers =
    (distanceType == raft::distance::DistanceType::CorrelationExpanded) ? 2 : 1;
  if (is_allocated) {
    // TODO : when X == Y allocate std::max(m, n) instead of m + n when column major input
    // accuracy issue is resolved until then we allocate as m + n.
    // Promote m/n to size_t BEFORE multiplying: the original multiplied in
    // Index_ (typically 32-bit int) first, which overflows for large row counts.
    worksize += numOfBuffers * static_cast<size_t>(m) * sizeof(AccType);
    worksize += numOfBuffers * static_cast<size_t>(n) * sizeof(AccType);
  }
  return worksize;
}
}; // namespace detail
}; // namespace distance
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_distance_cutlass_base.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wtautological-compare"
// We define CUTLASS_NAMESPACE in case
// RAFT cmake is not used
#ifndef CUTLASS_NAMESPACE
#define cutlass raft_cutlass
#endif
#include <rmm/device_uvector.hpp>
#include <type_traits>
#include <cutlass/cutlass.h>
#include <cutlass/gemm/device/gemm.h>
#include <cutlass/gemm/device/gemm_universal_adapter.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/tensor_view.h>
#include <raft/distance/detail/distance_ops/cutlass.cuh>
#include <raft/util/cutlass_utils.cuh>
#include "./pairwise_distance_epilogue_elementwise.h"
#include "./pairwise_distance_gemm.h"
namespace raft {
namespace distance {
namespace detail {
/**
 * @brief Host-side launcher that computes a pairwise distance matrix with a
 * CUTLASS GemmUniversalAdapter whose epilogue fuses the distance/final ops.
 *
 * Only enabled (via SFINAE on ops::has_cutlass_op) for distance operations
 * that expose a CUTLASS-compatible functor through get_cutlass_op().
 *
 * @param x first input matrix (m x k)
 * @param y second input matrix (n x k)
 * @param xn row norms of x (used as the epilogue's "C vector" operand)
 * @param yn row norms of y (used as the epilogue's broadcast vector)
 * @param m,n,k problem dimensions
 * @param lda,ldb,ldd leading dimensions of x, y and the output
 * @param dOutput output distance matrix (m x n)
 * @param fin_op final element-wise operation applied in the epilogue
 * @param distance_op distance operation providing the CUTLASS functor
 * @param stream CUDA stream on which all work is enqueued
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          typename FinalLambda,
          typename OpT,
          bool isRowMajor>
std::enable_if_t<ops::has_cutlass_op<OpT>::value> cutlassDistanceKernel(const DataT* x,
                                                                        const DataT* y,
                                                                        const DataT* xn,
                                                                        const DataT* yn,
                                                                        IdxT m,
                                                                        IdxT n,
                                                                        IdxT k,
                                                                        IdxT lda,
                                                                        IdxT ldb,
                                                                        IdxT ldd,
                                                                        OutT* dOutput,
                                                                        FinalLambda fin_op,
                                                                        OpT distance_op,
                                                                        cudaStream_t stream)
{
  static_assert(!(std::is_same<OutT, bool>::value),
                "OutType bool is not supported use uint8_t instead");
  auto dist_op = distance_op.get_cutlass_op();
  using DistanceFn = decltype(dist_op);
  using EpilogueOutputOp =
    cutlass::epilogue::thread::PairwiseDistanceEpilogueElementwise<DataT, // ElementC_
                                                                   AccT, // ElementAccumulator_
                                                                   DataT, // ElementCompute_
                                                                   AccT, // ElementZ_
                                                                   OutT, // ElementT_
                                                                   1, // Elements per access 1
                                                                   DistanceFn,
                                                                   FinalLambda>;
  constexpr int batch_count = 1;
  constexpr auto mode = cutlass::gemm::GemmUniversalMode::kGemm;
  typename EpilogueOutputOp::Params epilog_op_param(dist_op, fin_op);
  const DataT *a, *b;
  IdxT gemm_lda, gemm_ldb;
  // Number of pipelines you want to use
  constexpr int NumStages = 3;
  // Alignment
  constexpr int Alignment = VecLen;
  // default initialize problem size with row major inputs
  auto problem_size = cutlass::gemm::GemmCoord(n, m, k);
  using cutlassDistKernel =
    typename cutlass::gemm::kernel::PairwiseDistanceGemm<DataT,
                                                         Alignment,
                                                         DataT,
                                                         Alignment,
                                                         AccT,
                                                         AccT,
                                                         EpilogueOutputOp,
                                                         NumStages, // Number of pipeline stages
                                                         isRowMajor>::GemmKernel;
  using cutlassDist = cutlass::gemm::device::GemmUniversalAdapter<cutlassDistKernel>;
  // For row-major inputs the operands are swapped (A <- y, B <- x) so that the
  // CUTLASS kernel produces the output in the expected m x n row-major layout.
  if constexpr (isRowMajor) {
    a = y;
    b = x;
    gemm_lda = ldb;
    gemm_ldb = lda;
  } else {
    problem_size = cutlass::gemm::GemmCoord(m, n, k);
    a = x;
    b = y;
    gemm_lda = lda;
    gemm_ldb = ldb;
  }
  // NOTE: the argument order below is positional and must match
  // GemmUniversal's Arguments constructor exactly.
  typename cutlassDist::Arguments arguments{
    mode, problem_size, batch_count, epilog_op_param, a, b,
    xn, // C matrix eq vector param, which here is A norm
    nullptr, // tensor_Z,
    (DataT*)yn, // this is broadcast vec, which is required to be non-const param
    dOutput, // Output distance matrix
    (int64_t)0, // batch stride A
    (int64_t)0, // batch stride B
    (int64_t)0, // batch stride Norm A
    (int64_t)0,
    (int64_t)0, // batch stride Norm B
    (int64_t)0, // batch stride Output
    gemm_lda, // stride A
    gemm_ldb, // stride B
    1, // stride A norm
    0, // this is no-op for Z
    0, // This must be zero
    ldd // stride Output matrix
  };
  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size = cutlassDist::get_workspace_size(arguments);
  // Allocate workspace memory
  rmm::device_uvector<uint8_t> workspace(workspace_size, stream);
  // Instantiate CUTLASS kernel depending on templates
  cutlassDist cutlassDist_op;
  // Check the problem size is supported or not
  RAFT_CUTLASS_TRY(cutlassDist_op.can_implement(arguments));
  // Initialize CUTLASS kernel with arguments and workspace pointer
  RAFT_CUTLASS_TRY(cutlassDist_op.initialize(arguments, workspace.data(), stream));
  // Launch initialized CUTLASS kernel
  RAFT_CUTLASS_TRY(cutlassDist_op(stream));
}
}; // namespace detail
}; // namespace distance
}; // namespace raft
#pragma GCC diagnostic pop
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_distance_gemm.h | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/kernel/default_gemm_universal.h>
#include <cutlass/gemm/kernel/gemm_with_fused_epilogue.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include "./pairwise_distance_epilogue.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Assembles a CUTLASS GEMM kernel type whose epilogue is replaced with
 * the pairwise-distance epilogue (norm-vector broadcast + distance op).
 *
 * This primary template targets fp32 inputs using 3xTF32 tensor-core math on
 * SM80. Exposes the composed kernel as the nested `GemmKernel` alias.
 */
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Access alignment (elements per vectorized access) for A matrix operand
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Access alignment (elements per vectorized access) for B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Element type for final output
  // typename ElementOutT,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct PairwiseDistanceGemm {
  // This struct is specialized for fp32/3xTF32
  /// Threadblock-level tile size (concept: GemmShape)
  using ThreadblockShape =
    cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op
  using InstructionShape =
    cutlass::gemm::GemmShape<16, 8, 4>; // <- MMA Op tile M = 16, N = 8, K = 4
  /// Operation performed by GEMM
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;
  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;
  // Layout of the norm vectors follows the input layout.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;
  // Base kernel from which the mainloop (Mma) is reused; its epilogue is
  // replaced below.
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentA,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentB,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  // Replace epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::PairwiseDistanceEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  // Compose the GEMM kernel
  using GemmKernel = GemmWithFusedEpilogue<typename GemmBase::Mma, Epilogue, ThreadblockSwizzle>;
};
/**
 * @brief Specialization of PairwiseDistanceGemm for double-precision inputs.
 *
 * Uses smaller tile shapes and the fp64 tensor-core MMA (8x8x4) with plain
 * multiply-add; operand alignments are fixed to 1 element.
 */
template <
  /// Access alignment (elements per vectorized access) for A matrix operand
  int kAlignmentA,
  /// Access alignment (elements per vectorized access) for B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct PairwiseDistanceGemm<double,
                            kAlignmentA,
                            double,
                            kAlignmentB,
                            ElementC_,
                            ElementAccumulator,
                            EpilogueOutputOp,
                            Stages,
                            isRowMajor> {
  // using Transform = cutlass::ComplexTransform::kNone;
  // Threadblock-level tile size (concept: GemmShape)
  using ThreadblockShape =
    cutlass::gemm::GemmShape<64, 64, 16>; // <- threadblock tile M = 64, N = 64, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>; // <- warp tile M = 32, N = 32, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op
  using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
  // Operation performed by GEMM
  using Operator = cutlass::arch::OpMultiplyAdd;
  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;
  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;
  // Layout of the norm vectors follows the input layout.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;
  // Base kernel from which the mainloop (Mma) is reused; its epilogue is
  // replaced below. Note alignment is hard-coded to 1 for doubles.
  using GemmBase = typename DefaultGemmUniversal<double,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 double,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  // Replace epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::PairwiseDistanceEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementC_,
    typename EpilogueOutputOp::ElementT,
    ElementC_,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  // Compose the GEMM kernel
  using GemmKernel = GemmWithFusedEpilogue<typename GemmBase::Mma, Epilogue, ThreadblockSwizzle>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass | 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_l2_nn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef> // size_t
#include <limits> // std::numeric_limits
#include <raft/core/kvp.hpp> // raft::KeyValuePair
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/l2_exp.cuh> // ops::l2_exp_distance_op
#include <raft/distance/detail/fused_distance_nn/cutlass_base.cuh>
#include <raft/distance/detail/pairwise_distance_base.cuh> // PairwiseDistances
#include <raft/linalg/contractions.cuh> // Policy
#include <raft/util/arch.cuh> // raft::util::arch::SM_*
#include <raft/util/cuda_utils.cuh> // raft::ceildiv, raft::shfl
namespace raft {
namespace distance {
namespace detail {
template <typename LabelT, typename DataT>
struct KVPMinReduceImpl {
  // Key-value pair holding a candidate (neighbor index, distance).
  typedef raft::KeyValuePair<LabelT, DataT> KVP;
  // Select the pair with the strictly smaller value; ties keep `a`.
  DI KVP operator()(const KVP& a, const KVP& b)
  {
    if (b.value < a.value) { return b; }
    return a;
  }
  // Row-id overload: the row index is not used, only values are compared.
  DI KVP operator()(LabelT rit, const KVP& a, const KVP& b) { return (*this)(a, b); }
};  // KVPMinReduce
/**
 * @brief Reduction operator that tracks the minimum distance and, for
 * key-value outputs, the index at which it was found.
 *
 * Overloads exist for KVP and plain-distance outputs so the same operator can
 * drive both output representations. (Fix: removed a stray empty statement
 * inside get_value(KVP&).)
 */
template <typename LabelT, typename DataT>
struct MinAndDistanceReduceOpImpl {
  typedef typename raft::KeyValuePair<LabelT, DataT> KVP;

  /** Fold `other` into a KVP output if it holds a smaller distance. */
  DI void operator()(LabelT rid, KVP* out, const KVP& other) const
  {
    if (other.value < out->value) {
      out->key = other.key;
      out->value = other.value;
    }
  }
  /** Fold the distance of a KVP into a plain-distance output. */
  DI void operator()(LabelT rid, DataT* out, const KVP& other) const
  {
    if (other.value < *out) { *out = other.value; }
  }
  /** Fold a plain distance into a plain-distance output. */
  DI void operator()(LabelT rid, DataT* out, const DataT& other) const
  {
    if (other < *out) { *out = other; }
  }
  /** Initialize outputs with the identity of the min-reduction (maxVal). */
  DI void init(DataT* out, DataT maxVal) const { *out = maxVal; }
  DI void init(KVP* out, DataT maxVal) const { out->value = maxVal; }
  /** Initialize the key of a KVP output; no-op for plain outputs. */
  DI void init_key(DataT& out, LabelT idx) const { return; }
  DI void init_key(KVP& out, LabelT idx) const { out.key = idx; }
  /** Extract the distance value from either output representation. */
  DI DataT get_value(KVP& out) const { return out.value; }
  DI DataT get_value(DataT& out) const { return out; }
};
template <typename LabelT, typename DataT>
struct MinReduceOpImpl {
  // Key-value pair type consumed by the pairwise-distance machinery.
  typedef typename raft::KeyValuePair<LabelT, DataT> KVP;
  // Keep only the smaller distance; the candidate's key is discarded.
  DI void operator()(LabelT rid, DataT* out, const KVP& other)
  {
    const DataT candidate = other.value;
    if (candidate < *out) { *out = candidate; }
  }
  // Seed the output with the identity of the min-reduction.
  DI void init(DataT* out, DataT maxVal) { *out = maxVal; }
};
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
RAFT_KERNEL initKernel(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
{
  // One thread per output element; guard the tail of the grid.
  const IdxT idx = IdxT(blockIdx.x) * blockDim.x + threadIdx.x;
  if (idx >= m) { return; }
  redOp.init(min + idx, maxVal);
}
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
void initialize(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp, cudaStream_t stream)
{
  // Launch enough fixed-size blocks to cover all m output elements.
  constexpr int kThreadsPerBlock = 256;
  auto nblks = raft::ceildiv(m, kThreadsPerBlock);
  initKernel<DataT, OutT, IdxT><<<nblks, kThreadsPerBlock, 0, stream>>>(min, m, maxVal, redOp);
}
// TODO: specialize this function for MinAndDistanceReduceOp<int, float>
// with atomicCAS of 64 bit which will eliminate mutex and shfls
/**
 * @brief Commit per-thread partial results `val` into the global output `min`
 * under a per-row spinlock.
 *
 * `mutex` is one int per output row (0 = free, 1 = held), acquired with
 * atomicCAS. The __threadfence() calls order the output write with respect to
 * the lock release for other blocks.
 */
template <typename P, typename OutT, typename IdxT, typename KVPair, typename ReduceOpT>
DI void updateReducedVal(
  int* mutex, OutT* min, KVPair* val, ReduceOpT red_op, IdxT m, IdxT gridStrideY)
{
  const auto lid = threadIdx.x % raft::WarpSize;
  const auto accrowid = threadIdx.x / P::AccThCols;
  // Update each output row in order within a warp. This will resolve hang
  // issues with pre-Volta architectures
#pragma unroll
  for (int j = 0; j < (raft::WarpSize / P::AccThCols); j++) {
    // Only one lane per accumulator-row group performs the update.
    if (lid == j * P::AccThCols) {
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        auto rid = gridStrideY + accrowid + i * P::AccThRows;
        if (rid < m) {
          auto value = val[i];
          // Spin until the row lock is acquired.
          while (atomicCAS(mutex + rid, 0, 1) == 1)
            ;
          __threadfence();
          red_op(rid, min + rid, value);
          __threadfence();
          // Release the row lock.
          atomicCAS(mutex + rid, 1, 0);
        }
      }
    }
  }
}
/**
 * @brief Fused L2 nearest-neighbor kernel (SIMT path for pre-Ampere devices).
 *
 * For each row of x, computes L2 distances to all rows of y and reduces them
 * to the nearest neighbor in one pass, writing (key, value) results to `min`
 * via the per-row spinlock in `mutex`. The body is compiled out on SM80+
 * (those devices use the CUTLASS path in fusedL2NNImpl).
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename P,
          typename ReduceOpT,
          typename KVPReduceOpT,
          typename OpT,
          typename FinalLambda>
__launch_bounds__(P::Nthreads, 2) RAFT_KERNEL fusedL2NNkernel(OutT* min,
                                                              const DataT* x,
                                                              const DataT* y,
                                                              const DataT* xn,
                                                              const DataT* yn,
                                                              IdxT m,
                                                              IdxT n,
                                                              IdxT k,
                                                              DataT maxVal,
                                                              int* mutex,
                                                              ReduceOpT redOp,
                                                              KVPReduceOpT pairRedOp,
                                                              OpT distance_op,
                                                              FinalLambda fin_op)
{
// compile only if below non-ampere arch.
#if __CUDA_ARCH__ < 800
  extern __shared__ char smem[];
  typedef KeyValuePair<IdxT, DataT> KVPair;
  // Per-thread running minima, one per accumulator row; seeded with maxVal.
  KVPair val[P::AccRowsPerTh];
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    val[i] = {0, maxVal};
  }
  // epilogue operation lambda for final value calculation
  auto epilog_lambda = [n, pairRedOp, &val, maxVal] __device__(
                         DataT acc[P::AccRowsPerTh][P::AccColsPerTh],
                         DataT * regxn,
                         DataT * regyn,
                         IdxT gridStrideX,
                         IdxT gridStrideY) {
    KVPReduceOpT pairRed_op(pairRedOp);
    // intra thread reduce
    const auto acccolid = threadIdx.x % P::AccThCols;
    const auto accrowid = threadIdx.x / P::AccThCols;
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto tmpkey = acccolid + j * P::AccThCols + gridStrideX;
        KVPair tmp = {tmpkey, acc[i][j]};
        // Guard against columns past the end of y.
        if (tmpkey < n) {
          val[i] = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]);
        }
      }
    }
  };
  // Runs once per grid-stride row pass: warp-level shuffle reduction of the
  // per-thread minima, then commit to global memory under the row lock.
  auto rowEpilog_lambda =
    [m, mutex, min, pairRedOp, redOp, &val, maxVal] __device__(IdxT gridStrideY) {
      KVPReduceOpT pairRed_op(pairRedOp);
      ReduceOpT red_op(redOp);
      const auto accrowid = threadIdx.x / P::AccThCols;
      const auto lid = raft::laneId();
    // reduce
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = P::AccThCols / 2; j > 0; j >>= 1) {
          // Actually, the srcLane (lid +j) should be (lid +j) % P:AccThCols,
          // but the shfl op applies the modulo internally.
          auto tmpkey = raft::shfl(val[i].key, lid + j, P::AccThCols);
          auto tmpvalue = raft::shfl(val[i].value, lid + j, P::AccThCols);
          KVPair tmp = {tmpkey, tmpvalue};
          val[i] = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]);
        }
      }
      updateReducedVal<P, OutT, IdxT, KVPair, ReduceOpT>(mutex, min, val, red_op, m, gridStrideY);
    // reset the val array.
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        val[i] = {0, maxVal};
      }
    };
  IdxT lda = k, ldb = k, ldd = n;
  constexpr bool row_major = true;
  constexpr bool write_out = false;
  PairwiseDistances<DataT,
                    DataT, // OutT (unused in PairwiseDistances)
                    IdxT,
                    P,
                    decltype(distance_op),
                    decltype(epilog_lambda),
                    FinalLambda,
                    decltype(rowEpilog_lambda),
                    row_major,
                    write_out>
    obj(x,
        y,
        m,
        n,
        k,
        lda,
        ldb,
        ldd,
        xn,
        yn,
        nullptr, // Output pointer
        smem,
        distance_op,
        epilog_lambda,
        fin_op,
        rowEpilog_lambda);
  obj.run();
#endif
}
// cg::reduce functor for FusedDistanceNN used in its cutlass version
// to output the min distance value & key(loc id).
// This is used in fused_distance_nn/predicated_tile_iterator_reduced_vec.h
// store_with_byte_offset() passed to cg::reduce() & select_reduce.
template <typename AccType, typename Index, typename OutType>
struct kvp_cg_min_reduce_op {
  typedef typename raft::KeyValuePair<Index, AccType> KVP;

  __host__ __device__ kvp_cg_min_reduce_op() noexcept {}

  using AccTypeT = AccType;
  using IndexT = Index;

  /** Reduce two (index, distance) pairs: keep the one with the smaller value. */
  __host__ __device__ KVP operator()(KVP a, KVP b) const { return a.value < b.value ? a : b; }
  /** Reduce two plain distances to their minimum. */
  __host__ __device__ AccType operator()(AccType a, AccType b) const { return min(a, b); }
  /** True iff `a` is strictly smaller than `b`. (Fix: removed redundant `? true : false`.) */
  __host__ __device__ bool isAmin(AccType a, AccType b) const { return a < b; }
};
/**
 * @brief Host-side driver for the fused L2 nearest-neighbor computation.
 *
 * Zeroes the per-row mutex workspace, optionally initializes the output
 * buffer via redOp.init, then dispatches to either the CUTLASS kernel
 * (SM80 and newer) or the SIMT fusedL2NNkernel (older architectures).
 *
 * @param workspace must hold at least m ints (used as per-row spinlocks)
 * @param sqrt whether to return the actual L2 norm (true) or its square
 * @param initOutBuffer whether to initialize `min` before reducing into it
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename Policy,
          typename ReduceOpT,
          typename KVPReduceOpT>
void fusedL2NNImpl(OutT* min,
                   const DataT* x,
                   const DataT* y,
                   const DataT* xn,
                   const DataT* yn,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   int* workspace,
                   ReduceOpT redOp,
                   KVPReduceOpT pairRedOp,
                   bool sqrt,
                   bool initOutBuffer,
                   cudaStream_t stream)
{
  // The kernel policy is determined by fusedL2NN.
  typedef Policy P;
  dim3 blk(P::Nthreads);
  auto nblks = raft::ceildiv<int>(m, P::Nthreads);
  constexpr auto maxVal = std::numeric_limits<DataT>::max();
  typedef KeyValuePair<IdxT, DataT> KVPair;
  // Reset the per-row spinlocks before any reduction takes place.
  RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream));
  if (initOutBuffer) {
    initKernel<DataT, OutT, IdxT, ReduceOpT>
      <<<nblks, P::Nthreads, 0, stream>>>(min, m, maxVal, redOp);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
  namespace arch = raft::util::arch;
  using AccT = DataT;
  ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};
  raft::identity_op fin_op{};
  auto kernel = fusedL2NNkernel<DataT,
                                OutT,
                                IdxT,
                                P,
                                ReduceOpT,
                                KVPReduceOpT,
                                decltype(distance_op),
                                decltype(fin_op)>;
  // Get pointer to fp32 SIMT kernel to determine the best compute architecture
  // out of all for which the kernel was compiled for that matches closely
  // to the current device. Other methods to determine the architecture (that do not
  // require a pointer) can be error prone. See:
  // https://github.com/NVIDIA/cub/issues/545
  void* kernel_ptr = reinterpret_cast<void*>(kernel);
  auto runtime_arch = arch::kernel_virtual_arch(kernel_ptr);
  auto cutlass_range = arch::SM_range(arch::SM_80(), arch::SM_future());
  if (cutlass_range.contains(runtime_arch)) {
    // If device is SM_80 or later, use CUTLASS-based kernel.
    using L2Op = raft::distance::detail::ops::l2_exp_cutlass_op<DataT, DataT>;
    using kvp_cg_min_reduce_op_ = kvp_cg_min_reduce_op<DataT, IdxT, OutT>;
    kvp_cg_min_reduce_op_ cg_reduce_op;
    L2Op L2_dist_op(sqrt);
    IdxT lda, ldb, ldd;
    lda = k, ldb = k, ldd = n;
    cutlassFusedDistanceNN<DataT,
                           DataT,
                           OutT,
                           IdxT,
                           P::Veclen,
                           kvp_cg_min_reduce_op_,
                           L2Op,
                           ReduceOpT,
                           KVPReduceOpT>(x,
                                         y,
                                         xn,
                                         yn,
                                         m,
                                         n,
                                         k,
                                         lda,
                                         ldb,
                                         ldd,
                                         min,
                                         workspace,
                                         cg_reduce_op,
                                         L2_dist_op,
                                         redOp,
                                         pairRedOp,
                                         stream);
  } else {
    // If device less than SM_80, use fp32 SIMT kernel.
    constexpr size_t shmemSize = P::SmemSize + ((P::Mblk + P::Nblk) * sizeof(DataT));
    dim3 grid = launchConfigGenerator<P>(m, n, shmemSize, kernel);
    kernel<<<grid, blk, shmemSize, stream>>>(
      min, x, y, xn, yn, m, n, k, maxVal, workspace, redOp, pairRedOp, distance_op, fin_op);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
}
} // namespace detail
} // namespace distance
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/masked_nn.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <stdint.h>
#include <raft/distance/detail/compress_to_bits.cuh>
#include <raft/distance/detail/fused_l2_nn.cuh>
#include <raft/distance/detail/masked_distance_base.cuh>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace distance {
namespace detail {
/**
 * @brief Masked L2 nearest-neighbor kernel.
 *
 * Like fusedL2NNkernel, but skips distance computations that the compressed
 * adjacency bitfield `adj` (one uint64 word per 64 rows of x, per group of y)
 * marks as unwanted. Results are reduced into `min` under the per-row
 * spinlocks in `mutex`.
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename P,
          typename ReduceOpT,
          typename KVPReduceOpT,
          typename CoreLambda,
          typename FinalLambda>
__launch_bounds__(P::Nthreads, 2) RAFT_KERNEL masked_l2_nn_kernel(OutT* min,
                                                                  const DataT* x,
                                                                  const DataT* y,
                                                                  const DataT* xn,
                                                                  const DataT* yn,
                                                                  const uint64_t* adj,
                                                                  const IdxT* group_idxs,
                                                                  IdxT num_groups,
                                                                  IdxT m,
                                                                  IdxT n,
                                                                  IdxT k,
                                                                  bool sqrt,
                                                                  DataT maxVal,
                                                                  int* mutex,
                                                                  ReduceOpT redOp,
                                                                  KVPReduceOpT pairRedOp,
                                                                  CoreLambda core_op,
                                                                  FinalLambda fin_op)
{
  extern __shared__ char smem[];
  typedef raft::KeyValuePair<IdxT, DataT> KVPair;
  // Per-thread running minima, one per accumulator row; key -1 marks "unset".
  KVPair val[P::AccRowsPerTh];
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    val[i] = {-1, maxVal};
  }
  // epilogue operation lambda for final value calculation
  auto epilog_lambda = [pairRedOp, &val, maxVal, sqrt] __device__(
                         DataT acc[P::AccRowsPerTh][P::AccColsPerTh],
                         int thread_adj,
                         DataT* regxn,
                         DataT* regyn,
                         IdxT tile_idx_n,
                         IdxT tile_idx_m,
                         IdxT tile_end_n) {
    KVPReduceOpT pairRed_op(pairRedOp);
    // Expand the GEMM accumulator into squared L2 distances:
    // ||x||^2 + ||y||^2 - 2<x, y>.
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = regxn[i] + regyn[j] - (DataT)2.0 * acc[i][j];
      }
    }
    if (sqrt) {
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
          acc[i][j] = raft::sqrt(acc[i][j]);
        }
      }
    }
    // intra thread reduce
    const auto acccolid = threadIdx.x % P::AccThCols;
    const auto accrowid = threadIdx.x / P::AccThCols;
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      // thread_adj is a bitfield that contains a 1 at location i iff we must
      // compute row i of acc (the accumulator register tile). It is described in
      // more detail in the maskedDistances.run() method.
      const bool ignore = (thread_adj & (1 << i)) == 0;
      if (ignore) { continue; }
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto tmpkey = acccolid + j * P::AccThCols + tile_idx_n;
        if (tile_end_n <= tmpkey) {
          // Do not process beyond end of tile.
          continue;
        }
        KVPair tmp = {tmpkey, acc[i][j]};
        // NOTE(review): this check is always true here — the `continue` above
        // already ensured tmpkey < tile_end_n.
        if (tmpkey < tile_end_n) {
          val[i] = pairRed_op(accrowid + i * P::AccThRows + tile_idx_m, tmp, val[i]);
        }
      }
    }
  };
  // Runs once per row pass: warp-level shuffle reduction of the per-thread
  // minima, then commit to global memory under the row lock.
  auto rowEpilog_lambda =
    [m, mutex, min, pairRedOp, redOp, &val, maxVal] __device__(IdxT tile_idx_m) {
      KVPReduceOpT pairRed_op(pairRedOp);
      ReduceOpT red_op(redOp);
      const auto accrowid = threadIdx.x / P::AccThCols;
      const auto lid = raft::laneId();
    // reduce
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = P::AccThCols / 2; j > 0; j >>= 1) {
          auto tmpkey = raft::shfl(val[i].key, lid + j);
          auto tmpvalue = raft::shfl(val[i].value, lid + j);
          KVPair tmp = {tmpkey, tmpvalue};
          val[i] = pairRed_op(accrowid + i * P::AccThRows + tile_idx_m, tmp, val[i]);
        }
      }
      updateReducedVal<P, OutT, IdxT, KVPair, ReduceOpT>(mutex, min, val, red_op, m, tile_idx_m);
      // reset the val array.
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        val[i] = {-1, maxVal};
      }
    };
  IdxT lda = k, ldb = k, ldd = n;
  MaskedDistances<true,
                  DataT,
                  DataT,
                  IdxT,
                  P,
                  CoreLambda,
                  decltype(epilog_lambda),
                  FinalLambda,
                  decltype(rowEpilog_lambda),
                  true>
    obj(x,
        y,
        m,
        n,
        k,
        lda,
        ldb,
        ldd,
        xn,
        yn,
        adj,
        group_idxs,
        num_groups,
        smem,
        core_op,
        epilog_lambda,
        fin_op,
        rowEpilog_lambda);
  obj.run();
}
/**
* @brief Wrapper for masked_l2_nn_kernel
*
* Responsibilities:
* - Allocate (and initialize) workspace memory for:
* - mutexes used in nearest neighbor update step
* - adjacency matrix bitfield
* - Compress adjacency matrix to bitfield
* - Initialize output buffer (conditional on `initOutBuffer`)
* - Specify core and final operations for the L2 norm
* - Determine optimal launch configuration for kernel.
* - Launch kernel and check for errors.
*
* @tparam DataT Input data-type (for x and y matrices).
* @tparam OutT Output data-type (for key-value pairs).
* @tparam IdxT Index data-type.
* @tparam ReduceOpT A struct to perform the final needed reduction
* operation and also to initialize the output array
* elements with the appropriate initial value needed for
* reduction.
* @tparam KVPReduceOpT Type of Reduction operation on key value pairs.
*
* @param handle RAFT handle for managing expensive resources
* @param[out] out Will contain reduced output (nn key-value pairs)
* @param[in] x First matrix. Row major. Dim = `m x k`. (on device)
* @param[in] y Second matrix. Row major. Dim = `n x k`. (on device)
* @param[in] xn L2 squared norm of `x`. Length = `m`.
* @param[in] yn L2 squared norm of `y`. Length = `n`.
* @param[in] adj A boolean adjacency matrix indicating for each
* row of `x` and each group in `y` whether to compute the
* distance. Dim = `m x num_groups`.
* @param[in] group_idxs An array containing the *end* indices of each group
* in `y`. The value of group_idxs[j] indicates the
* start of group j + 1, i.e., it is the inclusive
* scan of the group lengths. The first group is
* always assumed to start at index 0 and the last
* group typically ends at index `n`. Length =
* `num_groups`.
* @param[in] num_groups Length of `group_idxs`.
* @param m Rows of `x`.
* @param n Rows of `y`.
* @param k Cols of `x` and `y`.
* @param redOp Reduction operator in the epilogue
* @param pairRedOp Reduction operation on key value pairs
* @param sqrt Whether to compute the squared or actual (i.e. sqrt) L2 norm.
* @param initOutBuffer Whether to initialize the output buffer
*
*
*/
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void masked_l2_nn_impl(raft::resources const& handle,
                       OutT* out,
                       const DataT* x,
                       const DataT* y,
                       const DataT* xn,
                       const DataT* yn,
                       const bool* adj,
                       const IdxT* group_idxs,
                       IdxT num_groups,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       ReduceOpT redOp,
                       KVPReduceOpT pairRedOp,
                       bool sqrt,
                       bool initOutBuffer)
{
  typedef typename linalg::Policy4x4<DataT, 1>::Policy P;
  // The 64-rows-per-block requirement matches the 64-bit words of the
  // compressed adjacency bitfield produced below.
  static_assert(P::Mblk == 64, "masked_l2_nn_impl only supports a policy with 64 rows per block.");
  // Get stream and workspace memory resource
  rmm::mr::device_memory_resource* ws_mr =
    dynamic_cast<rmm::mr::device_memory_resource*>(resource::get_workspace_resource(handle));
  auto stream = resource::get_cuda_stream(handle);
  // Acquire temporary buffers and initialize to zero:
  // 1) Adjacency matrix bitfield
  // 2) Workspace for fused nearest neighbor operation
  size_t m_div_64 = raft::ceildiv(m, IdxT(64));
  rmm::device_uvector<uint64_t> ws_adj64{m_div_64 * num_groups, stream, ws_mr};
  rmm::device_uvector<int> ws_fused_nn{size_t(m), stream, ws_mr};
  // ws_fused_nn doubles as the per-row spinlock array; must start at zero.
  RAFT_CUDA_TRY(cudaMemsetAsync(ws_adj64.data(), 0, ws_adj64.size() * sizeof(uint64_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(ws_fused_nn.data(), 0, ws_fused_nn.size() * sizeof(int), stream));
  // Compress boolean adjacency matrix to bitfield.
  auto adj_view = raft::make_device_matrix_view<const bool, int>(adj, m, num_groups);
  auto adj64_view =
    raft::make_device_matrix_view<uint64_t, int>(ws_adj64.data(), m_div_64, num_groups);
  compress_to_bits(handle, adj_view, adj64_view);
  // Initialize output buffer with keyvalue pairs as determined by the reduction
  // operator (it will be called with maxVal).
  constexpr auto maxVal = std::numeric_limits<DataT>::max();
  if (initOutBuffer) {
    dim3 grid(raft::ceildiv<int>(m, P::Nthreads));
    dim3 block(P::Nthreads);
    initKernel<DataT, OutT, IdxT, ReduceOpT><<<grid, block, 0, stream>>>(out, m, maxVal, redOp);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
  // Accumulation operation lambda
  auto core_lambda = [] __device__(DataT & acc, DataT & x, DataT & y) { acc += x * y; };
  auto fin_op = raft::identity_op{};
  auto kernel = masked_l2_nn_kernel<DataT,
                                    OutT,
                                    IdxT,
                                    P,
                                    ReduceOpT,
                                    KVPReduceOpT,
                                    decltype(core_lambda),
                                    decltype(fin_op)>;
  // Shared memory: tile storage plus one norm value per tile row/column.
  constexpr size_t smemSize = P::SmemSize + ((P::Mblk + P::Nblk) * sizeof(DataT));
  dim3 block(P::Nthreads);
  dim3 grid = launchConfigGenerator<P>(m, n, smemSize, kernel);
  kernel<<<grid, block, smemSize, stream>>>(out,
                                            x,
                                            y,
                                            xn,
                                            yn,
                                            ws_adj64.data(),
                                            group_idxs,
                                            num_groups,
                                            m,
                                            n,
                                            k,
                                            sqrt,
                                            maxVal,
                                            ws_fused_nn.data(),
                                            redOp,
                                            pairRedOp,
                                            core_lambda,
                                            fin_op);
  RAFT_CUDA_TRY(cudaGetLastError());
}
} // namespace detail
} // namespace distance
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_distance_epilogue_elementwise.h | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
/*! \file
\brief Functor performing distance operations used by epilogues of pairwise distance
* kernels.
* This is adapted from LinearCombinationBiasElementwise from CUTLASS 2.9.0
* customized for applying elementwise distance formula on accumulated GEMM value
* and applying user-defined final custom operation on the distance value.
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/epilogue/thread/activation.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
///
/// It combines the GEMM accumulator (dot products) with the two broadcast
/// norm inputs (C path and V vector) via DistanceOp_, then applies FinalOp_
/// to produce the stored T tensor. Z is computed but never stored
/// (kStoreZ == false below).
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          typename DistanceOp_,
          typename FinalOp_>
class PairwiseDistanceEpilogueElementwise {
 public:
  using ElementOutput = ElementC_;
  using ElementC = ElementC_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute = ElementCompute_;
  using ElementZ = ElementZ_;
  using ElementT = ElementT_;
  static int const kElementsPerAccess = ElementsPerAccess;
  static int const kCount = kElementsPerAccess;

  using DistanceOp = DistanceOp_;
  using FinalOp = FinalOp_;

  // Per-thread fragments handled in one epilogue visit.
  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ = Array<ElementZ, kElementsPerAccess>;
  using FragmentT = Array<ElementT, kElementsPerAccess>;

  using FragmentOutput = FragmentZ;

  static bool const kIsHeavy = false;  // ElementwiseOp::kIsHeavy;

  /// If true, the 'Z' tensor is stored
  static bool const kStoreZ = false;  // We don't store anything in Z,

  /// If true, the 'T' tensor is stored
  static bool const kStoreT = true;  // this is our final output storage.

  /// Host-constructable parameters structure
  struct Params {
    FinalOp_ final_op_;
    DistanceOp_ dist_op_;

    //
    // Methods
    //
    CUTLASS_HOST_DEVICE
    Params(DistanceOp_ dist_op, FinalOp final_op) : final_op_(final_op), dist_op_(dist_op) {}

    CUTLASS_HOST_DEVICE
    Params() {}
  };

 private:
  //
  // Data members
  //
  FinalOp_ final_op;
  DistanceOp_ elementwise_op;

 public:
  //
  // Methods
  //

  /// Constructor from Params
  CUTLASS_HOST_DEVICE
  PairwiseDistanceEpilogueElementwise(Params const& params)
    : final_op(params.final_op_), elementwise_op(params.dist_op_)
  {
  }

  /// Returns true if source is needed
  CUTLASS_HOST_DEVICE
  bool is_source_needed() const
  {
    // we use for making sure C matrix path is used for A mat norm.
    return true;
  }

  /// Functionally required for serial reduction in the epilogue
  /// (no-op here: split-K serial reduction is not used by this epilogue).
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
    // Convert the GEMM accumulator (the x.y terms) to the compute type.
    FragmentCompute tmp_Accum =
      NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);

    // The C path carries one broadcast operand (see is_source_needed()).
    FragmentCompute tmp_C =
      NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);

    FragmentCompute result_Z;
    FragmentCompute result_T;

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kElementsPerAccess; ++i) {
      // Elementwise distance from (C value, V broadcast value, accumulator),
      // then the user-supplied final op on the distance value.
      result_Z[i] = elementwise_op(tmp_C[i], V[i], tmp_Accum[i]);
      result_T[i] = final_op(result_Z[i], 0);
    }

    // Only T is written back; frag_Z is intentionally left untouched because
    // kStoreZ == false, so the Z tensor is never stored.
    NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
    frag_T = convert_t(result_T);
  }

  /// Applies the operation when is_source_needed() is false
  /// (never taken: is_source_needed() always returns true for this op).
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/predicated_tile_iterator_normvec.h | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- Only the row index is used to load the data in load_with_byte_offset().
This way the same normalization data is used across all columns in a row.
*/
#pragma once
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_,  ///< Thread map (concept: OutputTileThreadMap)
          typename Element_,    ///< Element data type
          typename Layout_,     ///< Layout of the output tile
          bool ScatterD = false,  ///< Scatter D operand or not
          bool UseCUDAStore = false>
class PredicatedTileIteratorNormVec {
 public:
  using ThreadMap = ThreadMap_;
  using Shape = typename ThreadMap::Shape;

  using Element = Element_;

  using Layout = Layout_;
  using TensorRef = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads = ThreadMap::kThreads;
  static int const kIterations = ThreadMap::Count::kTile;

  static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
  static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
  static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
  static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");

  /// Fragment object
  using Fragment = Array<Element,
                         ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
                           ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                           ThreadMap::kElementsPerAccess>;

  /// Memory access size
  using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;

  //
  // Parameters struct
  //

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    CUTLASS_HOST_DEVICE
    Params() {}

    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };

  /// Mask object
  struct Mask {
    static int const kCount = ThreadMap::Iterations::kColumn;

    /// Predicate state
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< Efficiently enables all accesses guarded by mask
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };

 private:
  //
  // Data members
  //

  /// Parameters structure containing reference and precomputed state.
  PredicatedTileIteratorParams params_;

  /// Byte-level pointer
  uint8_t* byte_pointer_;

  /// Array of boolean values to contain steady-state predicates
  Mask mask_;

  /// Extent of the matrix tile in rows
  Index extent_row_;

  /// Extent of the matrix tile in columns
  Index extent_column_;

  /// A thread's starting row position (assuming steady-state predicates have been computed)
  Index thread_start_row_;

  /// A thread's starting column
  Index thread_start_column_;

  /// Internal state counter (row / group / cluster progress within a tile)
  int state_[3];

  /// Scatter indices
  int const* indices_;

  //
  // Static asserts about internal strides
  //

  static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");

 private:
  //
  // Methods
  //

 public:
  //
  // Methods
  //

  /// Constructor
  CUTLASS_DEVICE
  PredicatedTileIteratorNormVec(PredicatedTileIteratorParams const& params,
                                Element* pointer,
                                TensorCoord extent,
                                int thread_idx,
                                TensorCoord threadblock_offset = TensorCoord(),
                                int const* indices = nullptr)
    : params_(params), indices_(indices)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;

    extent_row_ = extent.row();
    extent_column_ = extent.column();

    thread_start_row_ = thread_offset.row();
    thread_start_column_ = thread_offset.column();

    // Initialize predicates (column bounds only; row bounds are re-checked
    // per access in load/store).
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
      mask_.predicates[c] =
        ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
    }

    // Null pointer performs no accesses
    if (!pointer) { mask_.clear(); }

    if (ScatterD && !indices) { mask_.clear(); }

    // Initialize pointer
    byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                    LongIndex(thread_offset.row()) * LongIndex(params_.stride);

    if (ScatterD) {
      // With scattering, the row offset is resolved per access via indices_;
      // only the column offset is baked into the base pointer.
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }

    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }

  /// Adds a pointer offset in units of Element
  CUTLASS_HOST_DEVICE
  void add_pointer_offset(LongIndex pointer_offset)
  {
    byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
  }

  /// Loads a fragment from memory.
  ///
  /// NormVec customization: only the row index selects the data. The first
  /// column of each row is actually loaded from memory; all remaining columns
  /// of the fragment are replicated from it, so every column in a row sees the
  /// same (per-row norm) value.
  CUTLASS_DEVICE
  void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          if (ScatterD && row_guard) {
            assert(indices_);

            memory_pointer = reinterpret_cast<AccessType*>(
              byte_pointer + byte_offset +
              LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
          }

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            if (column == 0) {
              // Single predicated global load for the row's norm value.
              cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
                (void*)&memory_pointer[0],
                guard);
            } else {
              // Broadcast column 0 across the remaining columns of the row.
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn];
            }
          }

          if (row + 1 < ThreadMap::Iterations::kRow) {
            if (!ScatterD) { byte_pointer += params_.increment_row; }
          }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }

  /// Stores a fragment to memory (standard CUTLASS predicated store; not
  /// affected by the NormVec load customization above).
  CUTLASS_DEVICE
  void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          if (ScatterD && row_guard) {
            assert(indices_);

            memory_pointer = reinterpret_cast<AccessType*>(
              byte_pointer + byte_offset +
              LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
          }

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            if (UseCUDAStore) {
              if (guard) {
                memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess] =
                  frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column];
              }
            } else {
              cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
                (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
                guard);
            }
          }

          if (row + 1 < ThreadMap::Iterations::kRow) {
            if (!ScatterD) { byte_pointer += params_.increment_row; }
          }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Stores a fragment to memory
  CUTLASS_DEVICE
  void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }

  /// Loads a fragment from memory, remapping rows for a 2x downsampled
  /// convolution output (carried over from the CUTLASS base iterator).
  CUTLASS_DEVICE
  void downsample_load_with_byte_offset(Fragment& frag,
                                        int64_t byte_offset,
                                        int convolution_P,
                                        int convolution_Q,
                                        int add_P,
                                        int add_Q,
                                        int problem_N) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          // Decompose the linear output row into (N, P, Q) and map it to the
          // corresponding input row of the 2x-larger feature map.
          int output_row = row_offset + thread_start_row_;
          int output_N = output_row / (convolution_P * convolution_Q);
          int output_PQ = output_row % (convolution_P * convolution_Q);
          int output_P = output_PQ / convolution_Q;
          int output_Q = output_PQ % convolution_Q;

          int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
                          (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q;

          // NOTE: shadows the function parameter `byte_offset` (inherited from
          // the upstream CUTLASS implementation).
          int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
              (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
              guard);
          }

          if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Loads a fragment from memory, remapping rows for a 2x upsampled
  /// convolution output (carried over from the CUTLASS base iterator).
  CUTLASS_DEVICE
  void upsample_load_with_byte_offset(Fragment& frag,
                                      int64_t byte_offset,
                                      int convolution_P,
                                      int convolution_Q,
                                      int add_P,
                                      int add_Q,
                                      int problem_N) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          // Decompose the linear output row into (N, P, Q) and map it to the
          // corresponding input row of the 2x-smaller feature map.
          int output_row = row_offset + thread_start_row_;
          int output_N = output_row / (convolution_P * convolution_Q);
          int output_PQ = output_row % (convolution_P * convolution_Q);
          int output_P = output_PQ / convolution_Q;
          int output_Q = output_PQ % convolution_Q;
          int row_add_P = add_P;
          int row_add_Q = add_Q;
          // Clamp the offset at the right/bottom border of the feature map.
          if (output_P > convolution_P - 2) row_add_P = 0;
          if (output_Q > convolution_Q - 2) row_add_Q = 0;

          int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
                          ((output_P + row_add_P) / 2) * (convolution_Q / 2) +
                          (output_Q + row_add_Q) / 2;

          // NOTE: shadows the function parameter `byte_offset` (inherited from
          // the upstream CUTLASS implementation).
          int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
              (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
              guard);
          }

          if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }

  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }

  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }

  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }

  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }

  /// Advances to the next position to load or store, updating the internal
  /// row/group/cluster state counters and the byte pointer accordingly.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorNormVec& operator++()
  {
    ++state_[0];

    if (!ScatterD) { byte_pointer_ += params_.advance_row; }

    thread_start_row_ += ThreadMap::Shape::kRow;

    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;

      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;

      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;

        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;

        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }

    return *this;
  }

  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }

  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }

  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }

  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_distance_epilogue.h | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This is adapted from DefaultEpilogueWithBroadcastTensorOp from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/master/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h#L75)
This epilogue allows us to load norm buffers using PredicatedTileIteratorNormVec
and EpilogueWithBroadcast used for distances L2/cosine as well as applies user-define elementwise
operation.
-- A norm load is provided PredicatedTileIteratorNormVec
-- B norm load is provided by EpilogueWithBroadcast
-- elementwise operation is provided by OutputOp
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/gemm.h>
#include "./predicated_tile_iterator_normvec.h"
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <cutlass/epilogue/threadblock/epilogue_with_broadcast.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <typename Shape,
          typename WarpMmaTensorOp,
          int PartitionsK,
          typename ElementOutput,
          typename ElementTensor,
          typename ElementVector,
          typename OutputOp,
          typename LayoutT,
          int ElementsPerAccess,
          bool ScatterD = false>
struct PairwiseDistanceEpilogue {
  /// Use defaults related to the existing epilogue
  using Base =
    DefaultEpilogueTensorOp<Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess>;

  //
  // Stores the result z = (y = GEMM(A, B, C), broadcast)
  //
  // Customized iterator: reads the A-side norm vector through the C path so
  // that every column of a row sees the same per-row value
  // (see predicated_tile_iterator_normvec.h).
  using OutputTileIterator = cutlass::epilogue::threadblock::
    PredicatedTileIteratorNormVec<typename Base::OutputTileThreadMap, ElementOutput, LayoutT>;

  //
  // Additional tensor tile iterator - stores t = Elementwise(z)
  //
  // This is the iterator that writes the final distance values.
  using TensorTileIterator =
    cutlass::epilogue::threadblock::PredicatedTileIterator<typename Base::OutputTileThreadMap,
                                                           ElementTensor>;

  /// Define the epilogue
  using Epilogue = EpilogueWithBroadcast<Shape,
                                         WarpMmaTensorOp,
                                         PartitionsK,
                                         OutputTileIterator,
                                         TensorTileIterator,
                                         ElementVector,
                                         typename Base::AccumulatorFragmentIterator,
                                         typename Base::WarpTileIterator,
                                         typename Base::SharedLoadIterator,
                                         OutputOp,
                                         typename Base::Padding,
                                         Base::kFragmentsPerIteration>;
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_distance_base.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/contractions.cuh> // raft::linalg::Contractions_NT
#include <raft/util/cuda_dev_essentials.cuh> // ceildiv
#include <raft/util/cuda_rt_essentials.hpp> // RAFT_CUDA_TRY
#include <cstddef> // size_t
namespace raft {
namespace distance {
namespace detail {
/**
* @brief Device class for L1, L2 and cosine distance metrics.
* @tparam DataT input data-type (for A and B matrices)
* @tparam AccT accumulation data-type
* @tparam OutT output data-type (for C and D matrices)
* @tparam IdxT index data-type
* @tparam Policy struct which tunes the Contraction kernel
* @tparam OpT A distance operation, e.g., cosine_distance_op.
* @tparam EpilogueLambda applies an elementwise function to compute final
values. Its signature is:
template <typename AccT, typename DataT> void epilogue_lambda
(AccT acc[][], DataT* regxn, DataT* regyn);
* @tparam FinalLambda the final lambda called on final distance value
* @param[in] x input matrix
* @param[in] y input matrix
* @param[in] m number of rows of A and C/D
* @param[in] n number of columns of B and C/D
* @param[in] k number of cols of A and rows of B
* @param[in] lda leading dimension of A
* @param[in] ldb leading dimension of B
* @param[in] ldd leading dimension of C/D
* @param[in] xn row norms of input matrix A. Required for expanded L2, cosine
* @param[in] yn row norms of input matrix B. Required for expanded L2, cosine
* @param[output] pD output matrix
* @param[in] smem shared mem buffer for intermediate storage of A, B, xn & yn.
* @param distance_op the distance operation, e.g. cosine_distance_op
* @param epilog_op the epilog operation lambda
* @param fin_op the final gemm epilogue lambda
* @param rowEpilog_op epilog lambda that executes when a full row has been processed
*/
template <typename DataT,
typename OutT,
typename IdxT,
typename Policy,
typename OpT,
typename EpilogueLambda,
typename FinalLambda,
typename rowEpilogueLambda,
bool isRowMajor = true,
bool writeOut = true,
typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy, isRowMajor>>
struct PairwiseDistances : public BaseClass {
// Get accumulation type from distance_op
using AccT = typename OpT::AccT;
private:
typedef Policy P;
const DataT* xn;
const DataT* yn;
const DataT* const yBase;
OutT* dOutput;
char* smem;
OpT distance_op;
EpilogueLambda epilog_op;
FinalLambda fin_op;
rowEpilogueLambda rowEpilog_op;
const IdxT grid_stride_m;
const IdxT grid_stride_n;
const IdxT grid_offset_m;
const IdxT grid_offset_n;
AccT acc[P::AccRowsPerTh][P::AccColsPerTh];
public:
  // Constructor: captures pointers/extents and derives the grid-stride /
  // grid-offset tile coordinates for this thread block from gridDim/blockIdx.
  DI PairwiseDistances(const DataT* _x,
                       const DataT* _y,
                       IdxT _m,
                       IdxT _n,
                       IdxT _k,
                       IdxT _lda,
                       IdxT _ldb,
                       IdxT _ldd,
                       const DataT* _xn,
                       const DataT* _yn,
                       OutT* _dOutput,
                       char* _smem,
                       OpT _distance_op,
                       EpilogueLambda _epilog_op,
                       FinalLambda _fin_op,
                       rowEpilogueLambda _rowEpilog_op)
    : BaseClass(_x, _y, _m, _n, _k, _lda, _ldb, _ldd, _smem),
      xn(_xn),
      yn(_yn),
      yBase(_y),
      dOutput(_dOutput),
      smem(_smem),
      distance_op(_distance_op),
      epilog_op(_epilog_op),
      fin_op(_fin_op),
      rowEpilog_op(_rowEpilog_op),
      // Each block walks the output tile grid with these strides, starting at
      // its own (blockIdx.y, blockIdx.x) tile.
      grid_stride_m(P::Mblk * gridDim.y),
      grid_stride_n(P::Nblk * gridDim.x),
      grid_offset_m(P::Mblk * blockIdx.y),
      grid_offset_n(P::Nblk * blockIdx.x)
  {
  }
  /// Main driver: grid-stride double loop over (m, n) output tiles. For each
  /// tile, accumulates the k-dimension in a double-buffered shared-memory
  /// pipeline, then applies the distance/epilogue ops and (optionally) writes
  /// the tile out.
  DI void run()
  {
    for (auto tile_idx_m = grid_offset_m; tile_idx_m < this->m; tile_idx_m += grid_stride_m) {
      this->ldgXY(tile_idx_m, grid_offset_n, 0);
      for (auto tile_idx_n = grid_offset_n; tile_idx_n < this->n; tile_idx_n += grid_stride_n) {
        // Prolog:
        reset_accumulator();
        this->stsXY();
        __syncthreads();
        this->switch_write_buffer();

        // Main loop:
        for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
          this->ldgXY(tile_idx_m, tile_idx_n, kidx);
          // Process all data in shared memory (previous k-block) and
          // accumulate in registers.
          accumulate();
          this->stsXY();
          __syncthreads();
          this->switch_write_buffer();
          this->switch_read_buffer();
        }
        accumulate();  // last iteration
        // The pre-condition for the loop over tile_idx_n is that write_buffer
        // and read_buffer point to the same buffer. This flips read_buffer back
        // so that it satisfies the pre-condition of this loop.
        this->switch_read_buffer();

        // Epilog: distance ops that need row norms (e.g. expanded L2/cosine)
        // take the regxn/regyn path; the others receive nullptr.
        if (distance_op.use_norms) {
          DataT regxn[P::AccRowsPerTh], regyn[P::AccColsPerTh];
          load_norms(tile_idx_m, tile_idx_n, regxn, regyn);
          // Overlap ldg with epilog computation
          ldgNextGridStride(tile_idx_m, tile_idx_n);
          // Calculate distance_op epilog.
          // Use .template to disambiguate (See:
          // https://en.cppreference.com/w/cpp/language/dependent_name)
          distance_op.template epilog<Policy>(acc, regxn, regyn, tile_idx_n, tile_idx_m);
          // And any possible additional epilogs
          epilog_op(acc, regxn, regyn, tile_idx_n, tile_idx_m);
        } else {
          // Overlap ldg with epilog computation
          ldgNextGridStride(tile_idx_m, tile_idx_n);
          // Calculate distance_op epilog.
          // Use .template to disambiguate (See:
          // https://en.cppreference.com/w/cpp/language/dependent_name)
          distance_op.template epilog<Policy>(acc, nullptr, nullptr, tile_idx_n, tile_idx_m);
          // And any possible additional epilogs
          epilog_op(acc, nullptr, nullptr, tile_idx_n, tile_idx_m);
        }
        if (writeOut) { store_output(tile_idx_m, tile_idx_n); }
      }
      // Called once per fully processed row of tiles (used e.g. by fused NN).
      rowEpilog_op(tile_idx_m);
    }
  }
private:
// Issue the global-memory loads for the tile this block will process next in
// its grid-stride traversal: first try the next column tile on the same row
// stripe, otherwise the first column tile of the next row stripe. If both are
// out of range there is no next tile and nothing is fetched.
DI void ldgNextGridStride(IdxT tile_idx_m, IdxT tile_idx_n)
{
  const IdxT next_n = tile_idx_n + grid_stride_n;
  const IdxT next_m = tile_idx_m + grid_stride_m;
  if (next_n < this->n) {
    // Next tile is on the same row stripe.
    this->ldgXY(tile_idx_m, next_n, 0);
  } else if (next_m < this->m) {
    // Row stripe exhausted: wrap to this block's first column tile below.
    this->ldgXY(next_m, grid_offset_n, 0);
  }
}
// Zero the per-thread accumulator register tile before starting a new
// (m, n) output tile.
DI void reset_accumulator()
{
#pragma unroll
  for (int row = 0; row < P::AccRowsPerTh; ++row) {
#pragma unroll
    for (int col = 0; col < P::AccColsPerTh; ++col) {
      acc[row][col] = BaseClass::Zero;
    }
  }
}
// Fold one Veclen-wide slice of the x/y register fragments into the
// accumulator tile by applying the distance op's core function to every
// (row, col) pair.
DI void accumulate_reg_tile(DataT (&reg_x)[P::AccRowsPerTh][P::Veclen],
                            DataT (&reg_y)[P::AccColsPerTh][P::Veclen])
{
#pragma unroll
  for (int vec = 0; vec < P::Veclen; ++vec) {
#pragma unroll
    for (int row = 0; row < P::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < P::AccColsPerTh; ++col) {
        distance_op.core(acc[row][col], reg_x[row][vec], reg_y[col][vec]);
      }
    }
  }
}
// Consume one k-block of the shared-memory tiles into the accumulator tile.
// The ldsXY and accumulate_reg_tile calls are deliberately split so that
// shared-memory loads for iteration i+1 overlap the math of iteration i
// (software pipelining).
DI void accumulate()
{
  // We have a separate ldsXY and accumulate_reg_tile outside the loop body,
  // so that these separated calls can be interspersed with preceding and
  // following instructions, thereby hiding latency.
  this->ldsXY(0);
  // If expensive inner loop, do not unroll loop.
  constexpr int num_iterations = P::Kblk / P::Veclen - 1;
  constexpr int unroll_count = decltype(distance_op)::expensive_inner_loop ? 1 : num_iterations;
#pragma unroll unroll_count
  for (int ki = P::Veclen; ki < P::Kblk; ki += P::Veclen) {
    accumulate_reg_tile(this->regx, this->regy);
    this->ldsXY(ki);
  }
  // Accumulate last loaded tile.
  accumulate_reg_tile(this->regx, this->regy);
}
// Stage the x/y row norms needed by this tile into shared memory (placed
// right after the main tile buffers, at byte offset P::SmemSize) and then
// distribute them into the per-thread registers regxn/regyn.
DI void load_norms(IdxT tile_idx_m,
                   IdxT tile_idx_n,
                   DataT (&regxn)[P::AccRowsPerTh],
                   DataT (&regyn)[P::AccColsPerTh])
{
  DataT* sxNorm = (DataT*)(&smem[P::SmemSize]);
  DataT* syNorm = (&sxNorm[P::Mblk]);
  // Load x & y norms required by this threadblock in shmem buffer.
  // x norms depend only on tile_idx_m, so they are loaded only when this
  // block is at its first column tile; out-of-range rows get norm 0.
  if (tile_idx_n == blockIdx.x * P::Nblk) {
    for (int i = threadIdx.x; i < P::Mblk; i += P::Nthreads) {
      auto idx = tile_idx_m + i;
      sxNorm[i] = idx < this->m ? xn[idx] : 0;
    }
  }
  // y norms change with every column tile; out-of-range columns get norm 0.
  for (int i = threadIdx.x; i < P::Nblk; i += P::Nthreads) {
    auto idx = tile_idx_n + i;
    syNorm[i] = idx < this->n ? yn[idx] : 0;
  }
  __syncthreads();
  // Pull this thread's rows/columns of norms into registers.
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    regxn[i] = sxNorm[i * P::AccThRows + (threadIdx.x / P::AccThCols)];
  }
#pragma unroll
  for (int i = 0; i < P::AccColsPerTh; ++i) {
    regyn[i] = syNorm[i * P::AccThCols + (threadIdx.x % P::AccThCols)];
  }
}
// Write the finalized accumulator tile to global memory, applying fin_op to
// each element. Entries of a partial (boundary) tile that fall outside the
// m x n output are skipped.
DI void store_output(IdxT tile_idx_m, IdxT tile_idx_n)
{
  const IdxT row0 = tile_idx_m + this->accrowid;
  const IdxT col0 = tile_idx_n + this->acccolid;
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    const IdxT row = row0 + i * P::AccThRows;
#pragma unroll
    for (int j = 0; j < P::AccColsPerTh; ++j) {
      const IdxT col = col0 + j * P::AccThCols;
      if (row < this->m && col < this->n) {
        // Promote to 64 bit index for final write, as output array can be > 2^31
        dOutput[std::size_t(row) * this->n + col] = fin_op(acc[i][j], 0);
      }
    }
  }
}
}; // struct PairwiseDistances
/**
 * @brief Compute a 2D launch grid for a pairwise-distance kernel.
 *
 * Sizes the grid to roughly one "wave" of resident blocks: grid.y covers the
 * row tiles (capped at device residency), and grid.x is grown only until the
 * whole grid saturates the device.
 *
 * @tparam P    kernel policy (provides Mblk, Nblk, Nthreads)
 * @param m     number of output rows
 * @param n     number of output columns
 * @param sMemSize dynamic shared-memory bytes per block (as launched)
 * @param func  the kernel, used for the occupancy query
 * @return grid dimensions (block dimension is P::Nthreads, chosen by caller)
 */
template <typename P, typename IdxT, typename T>
dim3 launchConfigGenerator(IdxT m, IdxT n, std::size_t sMemSize, T func)
{
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int numSMs;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, devId));
  int numBlocksPerSm = 0;
  dim3 grid;
  RAFT_CUDA_TRY(
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, func, P::Nthreads, sMemSize));
  // Number of blocks that can be resident on the device simultaneously.
  std::size_t minGridSize = numSMs * numBlocksPerSm;
  std::size_t yChunks = raft::ceildiv<int>(m, P::Mblk);
  std::size_t xChunks = raft::ceildiv<int>(n, P::Nblk);
  grid.y = yChunks > minGridSize ? minGridSize : yChunks;
  // NOTE: this was `(minGridSize - grid.y) <= 0 ? 1 : xChunks`. Both operands
  // are unsigned, so the subtraction can never be negative and the condition
  // only held on equality. Since grid.y <= minGridSize by construction, the
  // explicit comparison below is behaviorally identical and avoids the
  // unsigned-underflow trap.
  grid.x = (minGridSize <= grid.y) ? 1 : xChunks;
  if (grid.x != 1) {
    // Grow grid.x just enough for grid.y * grid.x to reach device residency,
    // clamped to the number of column tiles.
    std::size_t i = 1;
    while (grid.y * i < minGridSize) {
      i++;
    }
    grid.x = i >= xChunks ? xChunks : i;
  }
  return grid;
}
}; // namespace detail
}; // namespace distance
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/compress_to_bits.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
namespace raft::distance::detail {
/**
 * @brief Compress 2D boolean matrix to bitfield
 *
 * Utility kernel for masked_l2_nn.
 *
 * Requires blockDim.x == 128 (one thread per column of the shared tile).
 *
 * @tparam T an integral type; each output element packs `bits_per_elem`
 *           (= 8 * sizeof(T)) input rows as bits.
 *
 * @param[in] in An `m x n` boolean matrix. Row major.
 * @param[out] out An `(m / bits_per_elem) x n` matrix with elements of
 *             type T, where T is of size `bits_per_elem` bits.
 *             Note: the division (`/`) is a ceilDiv.
 */
template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>>
RAFT_KERNEL compress_to_bits_kernel(
  raft::device_matrix_view<const bool, int, raft::layout_c_contiguous> in,
  raft::device_matrix_view<T, int, raft::layout_c_contiguous> out)
{
  constexpr int bits_per_element = 8 * sizeof(T);
  constexpr int tile_dim_m = bits_per_element;
  constexpr int nthreads = 128;
  constexpr int tile_dim_n = nthreads;  // read 128 bools at once = 1 sector
  // Tile in shared memory is transposed
  __shared__ bool smem[tile_dim_n][tile_dim_m];
  const int num_tiles_per_m = raft::ceildiv(in.extent(0), tile_dim_m);
  const int num_tiles_per_n = raft::ceildiv(in.extent(1), tile_dim_n);
  // Grid-stride loop over linearized tiles; exits when past the last row tile.
  for (int lin_tile_idx = blockIdx.x; true; lin_tile_idx += gridDim.x) {
    const int tile_idx_n = tile_dim_n * (lin_tile_idx % num_tiles_per_n);
    const int tile_idx_m = tile_dim_m * (lin_tile_idx / num_tiles_per_n);
    if (in.extent(0) <= tile_idx_m) { break; }
    // Fill shared memory tile; out-of-bounds elements read as false.
    bool reg_buf[tile_dim_m];
#pragma unroll
    for (int i = 0; i < tile_dim_m; ++i) {
      const int in_m = tile_idx_m + i;
      const int in_n = tile_idx_n + threadIdx.x;
      bool in_bounds = in_m < in.extent(0) && in_n < in.extent(1);
      reg_buf[i] = in_bounds ? in(in_m, in_n) : false;
      smem[threadIdx.x][i] = reg_buf[i];
    }
    __syncthreads();
    // Drain memory tile into single output element out_elem.
    // BUGFIX: the loop must run over tile_dim_m (== bits_per_element), not
    // tile_dim_n: smem's inner dimension has only tile_dim_m entries, and a
    // shift count must stay below the bit width of T (shifting by >= the
    // width is undefined behavior).
    T out_elem{0};
#pragma unroll
    for (int j = 0; j < tile_dim_m; ++j) {
      if (smem[threadIdx.x][j]) { out_elem |= T(1) << j; }
    }
    __syncthreads();
    // Write output.
    int out_m = tile_idx_m / bits_per_element;
    int out_n = tile_idx_n + threadIdx.x;
    if (out_m < out.extent(0) && out_n < out.extent(1)) { out(out_m, out_n) = out_elem; }
  }
}
/**
 * @brief Compress 2D boolean matrix to bitfield
 *
 * Utility host wrapper for masked_l2_nn; validates shapes and launches
 * compress_to_bits_kernel on the handle's stream.
 *
 * @tparam T an integral type; each output element packs `bits_per_elem`
 *           (= 8 * sizeof(T)) input rows as bits.
 *
 * @param[in] handle raft resources handle (provides the CUDA stream).
 * @param[in] in An `m x n` boolean matrix. Row major.
 * @param[out] out An `(m / bits_per_elem) x n` matrix with elements of
 *             type T, where T is of size `bits_per_elem` bits.
 *             Note: the division (`/`) is a ceilDiv.
 */
template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>>
void compress_to_bits(raft::resources const& handle,
                      raft::device_matrix_view<const bool, int, raft::layout_c_contiguous> in,
                      raft::device_matrix_view<T, int, raft::layout_c_contiguous> out)
{
  auto stream = resource::get_cuda_stream(handle);
  constexpr int bits_per_element = 8 * sizeof(T);
  RAFT_EXPECTS(raft::ceildiv(in.extent(0), bits_per_element) == out.extent(0),
               "Number of output rows must be ceildiv(input rows, bits_per_elem)");
  RAFT_EXPECTS(in.extent(1) == out.extent(1), "Number of output columns must equal input columns.");
  // Size the grid to one wave of resident blocks; the kernel grid-strides
  // over the remaining tiles.
  const int num_SMs = raft::getMultiProcessorCount();
  int blocks_per_sm = 0;
  constexpr int num_threads = 128;
  constexpr int dyn_smem_size = 0;
  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &blocks_per_sm, compress_to_bits_kernel<T>, num_threads, dyn_smem_size));
  dim3 grid(num_SMs * blocks_per_sm);
  // Launch with the same block size assumed by the occupancy query above
  // (the kernel requires blockDim.x == 128); previously hard-coded as 128.
  dim3 block(num_threads);
  compress_to_bits_kernel<<<grid, block, 0, stream>>>(in, out);
  RAFT_CUDA_TRY(cudaGetLastError());
}
}; // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/masked_distance_base.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/detail/pairwise_distance_base.cuh>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cstddef>
namespace raft {
namespace distance {
namespace detail {
/**
* @brief Device class for masked nearest neighbor computations.
*
* @tparam useNorms whether norms are needed
* @tparam DataT input data-type (for x and y matrices)
* @tparam AccT accumulation data-type
* @tparam IdxT index data-type
* @tparam Policy struct which tunes the Contraction kernel
* @tparam CoreLambda tells how to accumulate an x and y into
acc. its signature:
template <typename AccT, typename DataT> void core_lambda(AccT& acc,
const DataT& x, const DataT& y)
* @tparam EpilogueLambda applies an elementwise function to compute final
values. Its signature is:
template <typename AccT, typename DataT> void epilogue_lambda
(AccT acc[][], DataT* regxn, DataT* regyn);
* @tparam FinalLambda the final lambda called on final distance value
* @tparam rowEpilogueLambda epilog lambda that executes when a full row has
* been processed.
*
* @param[in] x input matrix
* @param[in] y input matrix
* @param[in] m number of rows of x
* @param[in] n number of columns of y
* @param[in] k number of cols of x and y
* @param[in] lda leading dimension of x
* @param[in] ldb leading dimension of y
* @param[in] ldd parameter to keep Contractions_NT happy..
* @param[in] xn row norms of input matrix A. Required for expanded L2, cosine
* @param[in] yn row norms of input matrix B. Required for expanded L2, cosine
* @param[in] adj An adjacency matrix encoded as a bitfield indicating for each
* row of `x` and each group in `y` whether to compute the
* distance. Dim = `(m / 64) x num_groups`.
* @param[in] group_idxs An array containing the *end* indices of each group
* in `y`. The value of group_idxs[j] indicates the
* start of group j + 1, i.e., it is the inclusive
* scan of the group lengths. The first group is
* always assumed to start at index 0 and the last
* group typically ends at index `n`. Length =
* `num_groups`.
* @param[in] num_groups The number of groups in group_idxs.
* @param[in] smem shared mem buffer for intermediate storage of x, y, xn & yn.
* @param core_op the core accumulation operation lambda
* @param epilog_op the epilog operation lambda
* @param fin_op the final gemm epilogue lambda
* @param rowEpilog_op epilog lambda that executes when a full row has been processed.
*/
template <bool useNorms,
          typename DataT,
          typename AccT,
          typename IdxT,
          typename Policy,
          typename CoreLambda,
          typename EpilogueLambda,
          typename FinalLambda,
          typename rowEpilogueLambda,
          bool isRowMajor = true,
          typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy, isRowMajor>>
struct MaskedDistances : public BaseClass {
 private:
  typedef Policy P;
  // Row norms of x and y (used only when useNorms is true).
  const DataT* xn;
  const DataT* yn;
  // Base pointer of y (retained; not modified after construction).
  const DataT* const yBase;
  // Adjacency bitfield: one uint64_t per (64-row tile of x, group of y) pair.
  const uint64_t* adj;
  // Inclusive scan of group lengths: group_idxs[j] is the end index of group j.
  const IdxT* group_idxs;
  IdxT num_groups;
  // Shared-memory buffer holding the x/y tiles and (optionally) the norms.
  char* smem;
  CoreLambda core_op;
  EpilogueLambda epilog_op;
  FinalLambda fin_op;
  rowEpilogueLambda rowEpilog_op;
  // Per-thread accumulator register tile.
  AccT acc[P::AccRowsPerTh][P::AccColsPerTh];

 public:
  // Constructor: forwards geometry to Contractions_NT and caches all
  // per-instance state (mask, group layout, functors).
  DI MaskedDistances(const DataT* _x,
                     const DataT* _y,
                     IdxT _m,
                     IdxT _n,
                     IdxT _k,
                     IdxT _lda,
                     IdxT _ldb,
                     IdxT _ldd,
                     const DataT* _xn,
                     const DataT* _yn,
                     const uint64_t* _adj,
                     const IdxT* _group_idxs,
                     IdxT _num_groups,
                     char* _smem,
                     CoreLambda _core_op,
                     EpilogueLambda _epilog_op,
                     FinalLambda _fin_op,
                     rowEpilogueLambda _rowEpilog_op)
    : BaseClass(_x, _y, _m, _n, _k, _lda, _ldb, _ldd, _smem),
      xn(_xn),
      yn(_yn),
      yBase(_y),
      adj(_adj),
      group_idxs(_group_idxs),
      num_groups(_num_groups),
      smem(_smem),
      core_op(_core_op),
      epilog_op(_epilog_op),
      fin_op(_fin_op),
      rowEpilog_op(_rowEpilog_op)
  {
  }

  // Grid-stride traversal: blockIdx.y strides over row tiles of x, blockIdx.x
  // strides over groups of y. Groups with an all-zero adjacency mask are
  // skipped entirely.
  DI void run()
  {
    const auto grid_stride_m = (P::Mblk * gridDim.y);
    const auto grid_offset_m = (P::Mblk * blockIdx.y);
    const auto grid_stride_g = gridDim.x;
    const auto grid_offset_g = blockIdx.x;
    for (auto tile_idx_m = grid_offset_m; tile_idx_m < this->m; tile_idx_m += grid_stride_m) {
      // Start loop over groups
      for (auto idx_g = grid_offset_g; idx_g < this->num_groups; idx_g += grid_stride_g) {
        const uint64_t block_adj = get_block_adjacency(adj, tile_idx_m, idx_g);
        // block_adj is a bitfield that contains a 1 if a row is adjacent to the
        // current group. All zero means we can skip this group.
        if (block_adj == 0) { continue; }
        // thread_adj is a bitfield that contains a 1 at location i iff we must
        // compute row i of acc (the accumulator register tile). That is,
        // for i = 0,.., AccRowsPerTh and j = 0,.., AccColsPerTh:
        //
        // ((1 << i) & thread_adj) > 0 <=> acc[i][j] must be computed.
        //
        // We precompute this information because it is used in various
        // locations to skip thread-local computations, specifically:
        //
        // 1. To skip computations if thread_adj == 0, i.e., none of the values
        // of `acc` have to be computed.
        //
        // 2. In epilog_op, to consider only values of `acc` to be reduced that
        // are not masked off.
        //
        // Note 1: Even when the computation can be skipped for a specific thread,
        // the thread still participates in synchronization operations.
        //
        // Note 2: In theory, it should be possible to skip computations for
        // specific rows of `acc`. In practice, however, this does not improve
        // performance.
        int thread_adj = compute_thread_adjacency(block_adj);
        // A group spans [group_idxs[idx_g - 1], group_idxs[idx_g]) columns of y
        // (the first group starts at 0).
        auto tile_idx_n = idx_g == 0 ? 0 : group_idxs[idx_g - 1];
        const auto group_end_n = group_idxs[idx_g];
        for (; tile_idx_n < group_end_n; tile_idx_n += P::Nblk) {
          // We provide group_end_n to limit the number of unnecessary data
          // points that are loaded from y.
          this->ldgXY(tile_idx_m, tile_idx_n, 0, group_end_n);
          reset_accumulator();
          this->stsXY();
          __syncthreads();
          this->switch_write_buffer();
          for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
            this->ldgXY(tile_idx_m, tile_idx_n, kidx, group_end_n);
            // Process all data in shared memory (previous k-block) and
            // accumulate in registers.
            if (thread_adj != 0) { accumulate(); }
            this->stsXY();
            __syncthreads();
            this->switch_write_buffer();
            this->switch_read_buffer();
          }
          if (thread_adj != 0) {
            accumulate();  // last iteration
          }
          // The pre-condition for the loop over tile_idx_n is that write_buffer
          // and read_buffer point to the same buffer. This flips read_buffer
          // back so that it satisfies the pre-condition of this loop.
          this->switch_read_buffer();
          if (useNorms) {
            DataT regxn[P::AccRowsPerTh], regyn[P::AccColsPerTh];
            load_norms(tile_idx_m, tile_idx_n, group_end_n, regxn, regyn);
            if (thread_adj != 0) {
              epilog_op(acc, thread_adj, regxn, regyn, tile_idx_n, tile_idx_m, group_end_n);
            }
          } else {
            if (thread_adj != 0) {
              epilog_op(acc, thread_adj, nullptr, nullptr, tile_idx_n, tile_idx_m, group_end_n);
            }
          }
        }  // tile_idx_n
      }    // idx_g
      rowEpilog_op(tile_idx_m);
    }  // tile_idx_m
  }

 private:
  // Fetch the 64-bit adjacency word for the current (row tile, group) pair.
  DI uint64_t get_block_adjacency(const uint64_t* adj, IdxT tile_idx_m, IdxT idx_group)
  {
    // A single element of `adj` contains exactly enough bits to indicate which
    // rows in the current tile to skip and which to compute.
    static_assert(P::Mblk == 8 * sizeof(adj[0]),
                  "masked_l2_nn only supports a policy with 64 rows per block.");
    IdxT block_flag_idx = tile_idx_m / P::Mblk;
    // Index into adj at row tile_idx_m / 64 and column idx_group.
    return adj[block_flag_idx * this->num_groups + idx_group];
  }

  DI uint32_t compute_thread_adjacency(const uint64_t block_adj)
  {
    // thread_adj is a bitfield that contains a 1 at location i iff we must
    // compute row i of acc (the accumulator register tile). It is described in
    // more detail in the run() method.
    uint32_t thread_adj = 0;
#pragma unroll
    for (int thread_row_idx = 0; thread_row_idx < P::AccRowsPerTh; ++thread_row_idx) {
      // Index `thread_row_idx` refers to a row of the current threads' register
      // tile `acc`, i.e., acc[i][:]. Index `block_row_idx` refers to the
      // corresponding row of the current block tile in shared memory.
      const int block_row_idx = this->accrowid + thread_row_idx * P::AccThRows;
      // block_row_is_adjacent is true if the current block_row_idx is adjacent
      // to the current group.
      const uint64_t block_mask = 1ull << block_row_idx;
      const bool block_row_is_adjacent = (block_adj & block_mask) != 0;
      if (block_row_is_adjacent) {
        // If block row is adjacent, write a 1 bit to thread_adj at location
        // `thread_row_idx`.
        const uint32_t thread_mask = 1 << thread_row_idx;
        thread_adj |= thread_mask;
      }
    }
    return thread_adj;
  }

  DI void reset_accumulator()
  {
    // Reset accumulator registers to zero.
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = BaseClass::Zero;
      }
    }
  }

  // Consume one k-block of the shared-memory tiles into the accumulator tile.
  DI void accumulate()
  {
#pragma unroll
    for (int ki = 0; ki < P::Kblk; ki += P::Veclen) {
      this->ldsXY(ki);
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
#pragma unroll
          for (int v = 0; v < P::Veclen; ++v) {
            core_op(acc[i][j], this->regx[i][v], this->regy[j][v]);
          }
        }
      }
    }
  }

  // Stage the x/y row norms into shared memory (after the tile buffers at
  // offset P::SmemSize) and distribute them into regxn/regyn. Columns at or
  // beyond end_n (the group boundary) get norm 0.
  DI void load_norms(IdxT tile_idx_m,
                     IdxT tile_idx_n,
                     IdxT end_n,
                     DataT (&regxn)[P::AccRowsPerTh],
                     DataT (&regyn)[P::AccColsPerTh])
  {
    DataT* sxNorm = (DataT*)(&smem[P::SmemSize]);
    DataT* syNorm = (&sxNorm[P::Mblk]);
    // Load x & y norms required by this threadblock in shmem buffer
    for (int i = threadIdx.x; i < P::Mblk; i += P::Nthreads) {
      auto idx = tile_idx_m + i;
      sxNorm[i] = idx < this->m ? xn[idx] : 0;
    }
    for (int i = threadIdx.x; i < P::Nblk; i += P::Nthreads) {
      auto idx = tile_idx_n + i;
      syNorm[i] = idx < end_n ? yn[idx] : 0;
    }
    __syncthreads();
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      regxn[i] = sxNorm[i * P::AccThRows + (threadIdx.x / P::AccThCols)];
    }
#pragma unroll
    for (int i = 0; i < P::AccColsPerTh; ++i) {
      regyn[i] = syNorm[i * P::AccThCols + (threadIdx.x % P::AccThCols)];
    }
  }
};  // struct MaskedDistances
}; // namespace detail
}; // namespace distance
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/canberra.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::abs
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
 * @brief The canberra distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = sum_k |x_ik - y_kj| / ( |x_ik| + |y_kj| )
 */
template <typename DataType, typename AccType, typename IdxType>
struct canberra_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Canberra does not use row norms, so none are loaded.
  static constexpr bool use_norms = false;
  // The per-element update contains a division plus several abs() calls, so
  // loop unrolling is reduced to keep compile times (and code size) in check.
  static constexpr bool expensive_inner_loop = true;

  // Shared-memory requirement: exactly what the kernel policy dictates.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate one Canberra term: |x - y| / (|x| + |y|).
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const auto num   = raft::abs(x - y);
    const auto denom = raft::abs(x) + raft::abs(y);
    // A zero denominator implies a zero numerator as well; substitute 0/1 so
    // the term contributes nothing instead of producing NaN.
    acc += ((denom != 0) * num / (denom + (denom == 0)));
  }

  // Canberra needs no norm-based epilog; nothing to do here.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/l2_exp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/math.hpp>
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
 * Round-off tolerance used when clamping self-distances: roughly one decimal
 * digit of precision reserved from each floating-point width.
 * @tparam DataT floating-point type (2-, 4-, or 8-byte); anything else gets 0
 */
template <typename DataT>
__device__ constexpr DataT get_clamp_precision()
{
  if (sizeof(DataT) == 2) { return 1e-3; }
  if (sizeof(DataT) == 4) { return 1e-6; }
  if (sizeof(DataT) == 8) { return 1e-15; }
  return 0;
}
// Epilogue operator for CUTLASS based kernel.
// Combines precomputed row norms with the dot product:
//   ||a||^2 + ||b||^2 - 2<a,b>
// and clamps tiny round-off residues of self-distances to exactly 0.
template <typename DataT, typename AccT>
struct l2_exp_cutlass_op {
  // Whether to return the square root of the squared distance.
  bool sqrt;
  __device__ l2_exp_cutlass_op() noexcept : sqrt(false) {}
  __device__ l2_exp_cutlass_op(bool isSqrt) noexcept : sqrt(isSqrt) {}
  inline __device__ AccT operator()(DataT aNorm, DataT bNorm, DataT accVal) const noexcept
  {
    AccT outVal = aNorm + bNorm - DataT(2.0) * accVal;
    /**
     * Self-neighboring points should have (aNorm == bNorm) == accVal and the dot product (accVal)
     * can sometimes have round-off errors, which will cause (aNorm == bNorm) ~ accVal instead.
     */
    // Zero out the result when it is within round-off tolerance of zero and
    // the norms are equal (i.e. this is a self-distance).
    outVal = outVal * !((outVal * outVal < get_clamp_precision<DataT>()) * (aNorm == bNorm));
    // (outVal > 0) guards against taking the square root of a negative value
    // caused by round-off.
    return sqrt ? raft::sqrt(outVal * (outVal > 0)) : outVal;
  }
  // Identity overload: returns its input unchanged.
  __device__ AccT operator()(DataT aData) const noexcept { return aData; }
};
/**
 * @brief the expanded euclidean distance matrix calculation
 *
 * It computes the following equation:
 *
 * c_ij = - 2 sum_k x_ik * y_kj + ||x_i.||_2 + ||y_.j||_2
 *
 */
template <typename DataType, typename AccType, typename IdxType>
struct l2_exp_distance_op {
  using DataT = DataType;
  using AccT = AccType;
  using IdxT = IdxType;
  // Whether the epilog takes the square root of the squared distance.
  const bool sqrt;
  l2_exp_distance_op(bool sqrt_) noexcept : sqrt(sqrt_) {}
  // Load norms of input data: the expanded form needs ||x_i|| and ||y_j||.
  static constexpr bool use_norms = true;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;
  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more. Here, extra space after
  // the tile buffers is reserved for the x and y row norms.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + ((Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }
  // Accumulate the dot-product contribution x * y.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };
  // Combine the accumulated dot products with the row norms, clamping tiny
  // round-off residues of self-distances to 0, and optionally take the sqrt.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        DataT accVal = acc[i][j];
        DataT val = regxn[i] + regyn[j] - (DataT)2.0 * accVal;
        /**
         * Self-neighboring points should have (aNorm == bNorm) == accVal and the dot product
         * (accVal) can sometimes have round-off errors, which will cause (aNorm == bNorm) ~ accVal
         * instead.
         */
        // (val > 0) clamps negative round-off; the second factor zeroes out
        // near-zero self-distances (equal norms within tolerance).
        acc[i][j] =
          val * (val > 0) * !((val * val < get_clamp_precision<DataT>()) * (regxn[i] == regyn[j]));
      }
    }
    if (sqrt) {
#pragma unroll
      for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < Policy::AccColsPerTh; ++j) {
          acc[i][j] = raft::sqrt(acc[i][j]);
        }
      }
    }
  }
  // Returns the equivalent CUTLASS epilogue operator.
  constexpr l2_exp_cutlass_op<DataT, AccT> get_cutlass_op() const
  {
    return l2_exp_cutlass_op<DataT, AccT>(sqrt);
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/l1.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
 * @brief the L1 (Manhattan) distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = sum_k abs(x_ik - y_kj)
 */
template <typename DataType, typename AccType, typename IdxType>
struct l1_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Do not load norms of data, the computation of L1 distance does not use them.
  static constexpr bool use_norms = false;
  // The inner loop is a single abs/add, cheap enough for full unrolling.
  static constexpr bool expensive_inner_loop = false;

  // Shared-memory requirement: exactly what the kernel policy dictates.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate one term of the sum: |x - y|.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const auto diff = x - y;
    acc += raft::abs(diff);
  }

  // L1 needs no norm-based epilog; nothing to do here.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/all_ops.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// Defines a named requirement "has_cutlass_op"
#include <raft/distance/detail/distance_ops/cutlass.cuh>
// The distance operations:
#include <raft/distance/detail/distance_ops/canberra.cuh>
#include <raft/distance/detail/distance_ops/correlation.cuh>
#include <raft/distance/detail/distance_ops/cosine.cuh>
#include <raft/distance/detail/distance_ops/hamming.cuh>
#include <raft/distance/detail/distance_ops/hellinger.cuh>
#include <raft/distance/detail/distance_ops/jensen_shannon.cuh>
#include <raft/distance/detail/distance_ops/kl_divergence.cuh>
#include <raft/distance/detail/distance_ops/l1.cuh>
#include <raft/distance/detail/distance_ops/l2_exp.cuh>
#include <raft/distance/detail/distance_ops/l2_unexp.cuh>
#include <raft/distance/detail/distance_ops/l_inf.cuh>
#include <raft/distance/detail/distance_ops/lp_unexp.cuh>
#include <raft/distance/detail/distance_ops/russel_rao.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/template.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
// Describes the computation the template distance
//
// Fill in the TODO items.
//
// NOTE: this is a scaffold for adding a new distance op; it intentionally
// contains placeholder `TODO` tokens and does not compile as-is.
template <typename DataType, typename AccType, typename IdxType>
struct template_distance_op {
  using DataT = DataType;
  using AccT = AccType;
  using IdxT = IdxType;
  // Placeholder for any per-op state (e.g. a flag or exponent).
  TODO member;
  template_distance_op(TODO member_) noexcept : member(member_) {}
  // Load norms of input data
  static constexpr bool use_norms = TODO;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;
  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + TODO;
  }
  // Per-element accumulation step.
  DI void core(AccT& acc, DataT& x, DataT& y) const { TODO; };
  // Final per-tile transformation (may use the row norms regxn/regyn).
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    TODO;
  }
  // If exist, returns a cutlass op that performs the same operation.
  // See cosine and l2_exp distance ops for an example.
  constexpr l2_exp_cutlass_op<DataT, AccT> get_cutlass_op() const { TODO; }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/hamming.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the Hamming Unexpanded distance matrix calculation
* It computes the following equation:
*
* c_ij = sum_k (x_ik != y_kj) / k
*/
/**
 * @brief Unexpanded Hamming distance operation.
 *
 * Accumulates the number of mismatching features between x and y, then
 * normalizes by the feature count k in the epilogue:
 *
 *   c_ij = sum_k (x_ik != y_kj) / k
 */
template <typename DataType, typename AccType, typename IdxType>
struct hamming_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  IdxT k;  // number of features; normalizes the mismatch count

  hamming_distance_op(IdxT k_) noexcept : k(k_) {}

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;

  // The per-element comparison is a single instruction, so full loop
  // unrolling stays affordable.
  static constexpr bool expensive_inner_loop = false;

  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Count one mismatch whenever the two feature values differ.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += (x != y); };

  // Divide every accumulated mismatch count by k.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    const DataT norm_factor = DataT(1.0) / k;
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] *= norm_factor;
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/kl_divergence.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::log
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the KL Divergence distance matrix calculation
*
* It computes the following equation:
*
* c_ij = 0.5 * sum(x * log (x / y));
*/
/**
 * @brief the KL Divergence distance matrix calculation
 *
 * It computes the following equation:
 *
 *    c_ij = 0.5 * sum(x * log (x / y));
 */
template <typename DataType, typename AccType, typename IdxType>
struct kl_divergence_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Whether inputs are laid out row-major; picks which operand plays the
  // role of "x" in the asymmetric KL formula (see core below).
  const bool is_row_major;
  // True when both inputs are the same matrix (self-distance case).
  const bool x_equal_y;

  kl_divergence_op(bool row_major_, bool x_equal_y_ = false) noexcept
    : is_row_major(row_major_), x_equal_y(x_equal_y_)
  {
  }

  // Load norms of input data
  static constexpr bool use_norms = false;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = true;
  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulates one term of sum(x * log(x / y)).
  //
  // Zero handling: for an operand v, the boolean v_zero turns the log
  // argument (v + v_zero) into 1 when v == 0, so raft::log yields 0 instead
  // of -inf; the (!v_zero) factor likewise masks out the log of the other
  // operand when it is 0.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    // TODO: make sure that these branches get hoisted out of main loop.. Could
    // be quite expensive otherwise.
    if (x_equal_y) {
      if (is_row_major) {
        const bool x_zero = (x == 0);
        const bool y_zero = (y == 0);
        acc += x * (raft::log(x + x_zero) - (!y_zero) * raft::log(y + y_zero));
      } else {
        // Column-major: x and y swap roles to keep the formula's asymmetry
        // consistent after the row/col-major input swap.
        const bool y_zero = (y == 0);
        const bool x_zero = (x == 0);
        acc += y * (raft::log(y + y_zero) - (!x_zero) * raft::log(x + x_zero));
      }
    } else {
      if (is_row_major) {
        // NOTE(review): y is used directly in place of log(y) here —
        // presumably the second operand was pre-transformed with log by the
        // caller when x != y; confirm against the kl_divergence dispatch.
        const bool x_zero = (x == 0);
        acc += x * (raft::log(x + x_zero) - y);
      } else {
        const bool y_zero = (y == 0);
        acc += y * (raft::log(y + y_zero) - x);
      }
    }
  };

  // Applies the leading 0.5 factor to every accumulated tile element.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        acc[i][j] = (0.5f * acc[i][j]);
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/lp_unexp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::pow, raft::abs
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the unexpanded Lp (Minkowski) distance matrix calculation
*
* It computes the following equation:
*
* c_ij = (sum_k |x_ik - y_jk|^p)^(1/p)
*/
/**
 * @brief the unexpanded Lp (Minkowski) distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = (sum_k |x_ik - y_jk|^p)^(1/p)
 */
template <typename DataType, typename AccType, typename IdxType>
struct lp_unexp_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  DataT p;  // Minkowski exponent

  lp_unexp_distance_op(DataT p_) noexcept : p(p_) {}

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;

  // raft::pow makes the inner loop instruction-heavy, so unrolling is
  // reduced to keep compile times and code size in check.
  static constexpr bool expensive_inner_loop = true;

  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate |x - y|^p for a single feature.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    acc += raft::pow(raft::abs(x - y), p);
  };

  // Apply the final (...)^(1/p) root to every tile element.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    const auto inv_p = 1.0f / p;
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] = raft::pow(acc[row][col], inv_p);
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/cutlass.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <type_traits> // std::false_type
#include <utility> // std::declval
namespace raft::distance::detail::ops {
// This file defines the named requirement "has_cutlass_op" that can be used to
// determine if a distance operation has a CUTLASS op that can be used to pass
// to CUTLASS. Examples of distance operations that satisfy this requirement are
// cosine_distance_op and l2_exp_distance_op.
// Primary template handles types that do not support CUTLASS.
// This pattern is described in:
// https://en.cppreference.com/w/cpp/types/void_t
// Primary template: selected when the detection expression below is NOT
// well-formed, i.e. the distance op exposes no get_cutlass_op() member.
template <typename, typename = void>
struct has_cutlass_op : std::false_type {};
// Specialization: preferred (more specialized) when
// std::declval<T>().get_cutlass_op() is a valid expression — SFINAE through
// std::void_t discards it otherwise.
template <typename T>
struct has_cutlass_op<T, std::void_t<decltype(std::declval<T>().get_cutlass_op())>>
  : std::true_type {};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/correlation.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/** @brief The correlation distance
*
* It computes the following equation:
*
* d(x, y) = ((x - mean(x)) ⋅ (y - mean(y)))
* /
* (|| x - mean(x) ||_2 || y - mean(y) ||_2)
*/
/** @brief The correlation distance
 *
 * It computes the following equation:
 *
 * d(x, y) = ((x - mean(x)) ⋅ (y - mean(y)))
 *           /
 *           (|| x - mean(x) ||_2 || y - mean(y) ||_2)
 */
template <typename DataType, typename AccType, typename IdxType>
struct correlation_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  const DataT* x2n;  // squared norms of the rows of X (length m)
  const DataT* y2n;  // squared norms of the rows of Y (length n)
  IdxT m;            // number of rows of X
  IdxT n;            // number of rows of Y
  IdxT k;            // number of columns (features)

  correlation_distance_op(
    bool is_row_major, const DataT* x2n_, const DataT* y2n_, IdxT m_, IdxT n_, IdxT k_) noexcept
    : x2n(x2n_), y2n(y2n_), m(m_), n(n_), k(k_)
  {
    // The distance op is typically created before the row-major/col-major
    // swapping has been done. So we do it here.
    if (!is_row_major) {
      std::swap<const DataT*>(x2n, y2n);
      std::swap(m, n);
    }
  }

  // Load norms of input data
  static constexpr bool use_norms = true;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // this op reserves an extra 2 * (Mblk + Nblk) DataT values on top of it:
  // epilog() below skips past one (Mblk + Nblk) chunk (presumably the
  // kernel's regxn/regyn staging area — confirm against
  // PairwiseDistanceMatKernel) and uses the second chunk for squared norms.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + (2 * (Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }

  // Accumulate the raw dot product <x, y>.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    // Note how we can sneakily get a pointer to shared memory here, to store
    // more data. If the implementation of PairwiseDistanceMatKernel ever
    // changes, this will be where we find the bugs.
    extern __shared__ char smem[];

    DataT regx2n[Policy::AccRowsPerTh], regy2n[Policy::AccColsPerTh];

    // Offset past the policy-owned region plus one (Mblk + Nblk) DataT chunk
    // (see shared_mem_size above); what remains stages the squared norms.
    DataT* sx2Norm =
      (DataT*)(&smem[Policy::SmemSize + (Policy::Mblk + Policy::Nblk) * sizeof(DataT)]);
    DataT* sy2Norm = (&sx2Norm[Policy::Mblk]);

    // Load x & y norms required by this threadblock in shmem buffer.
    // The x norms are loaded only on the first grid-stride iteration along
    // the column dimension (they do not change within a row stripe).
    if (gridStrideX == blockIdx.x * Policy::Nblk) {
      for (int i = threadIdx.x; i < Policy::Mblk; i += Policy::Nthreads) {
        auto idx   = gridStrideY + i;
        sx2Norm[i] = idx < m ? x2n[idx] : 0;
      }
    }

    for (int i = threadIdx.x; i < Policy::Nblk; i += Policy::Nthreads) {
      auto idx   = gridStrideX + i;
      sy2Norm[i] = idx < n ? y2n[idx] : 0;
    }
    // Make the freshly staged norms visible to every thread in the block.
    __syncthreads();

#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
      regx2n[i] = sx2Norm[i * Policy::AccThRows + (threadIdx.x / Policy::AccThCols)];
    }
#pragma unroll
    for (int i = 0; i < Policy::AccColsPerTh; ++i) {
      regy2n[i] = sy2Norm[i * Policy::AccThCols + (threadIdx.x % Policy::AccThCols)];
    }

#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Pearson correlation written in terms of raw sums:
        //   numer   = k * <x, y> - sum(x) * sum(y)
        //   Q_denom = k * ||x||^2 - sum(x)^2   (R_denom analogously for y)
        // regxn/regyn presumably hold the feature sums and regx2n/regy2n the
        // squared norms — TODO confirm against the norm-computation pass.
        auto numer   = k * acc[i][j] - (regxn[i] * regyn[j]);
        auto Q_denom = k * regx2n[i] - (regxn[i] * regxn[i]);
        auto R_denom = k * regy2n[j] - (regyn[j] * regyn[j]);

        acc[i][j] = 1 - (numer / raft::sqrt(Q_denom * R_denom));
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/russel_rao.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the Russell Rao distance matrix calculation
*
* It computes the following equation:
*
* c_ij = (k - (sum_k x_ik * y_kj)) / k
*/
/**
 * @brief the Russell Rao distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = (k - (sum_k x_ik * y_kj)) / k
 */
template <typename DataType, typename AccType, typename IdxType>
struct russel_rao_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  IdxT k;                  // number of features
  const float one_over_k;  // reciprocal of k, precomputed once at construction

  russel_rao_distance_op(IdxT k_) noexcept : k(k_), one_over_k(1.0f / k_) {}

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;

  // A single multiply-add per element; full unrolling stays cheap.
  static constexpr bool expensive_inner_loop = false;

  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate the dot product (count of co-occurring features).
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  // Transform every accumulated dot product into (k - dot) / k.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] = (k - acc[row][col]) * one_over_k;
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/hellinger.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the Hellinger distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sqrt(1 - sum_k sqrt(x_ik * y_kj))
*
*/
/**
 * @brief the Hellinger distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = sqrt(1 - sum_k sqrt(x_ik * y_kj))
 */
template <typename DataType, typename AccType, typename IdxType>
struct hellinger_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;

  // One multiply per element; full unrolling stays cheap.
  static constexpr bool expensive_inner_loop = false;

  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Inputs arrive sqrt-transformed, so x * y here corresponds to
  // sqrt(x_orig) * sqrt(y_orig) — one Bhattacharyya-coefficient term.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        // Guard against tiny negative values (rounding error) feeding sqrt:
        // the rectifier factor is 0 when val is negative, yielding sqrt(0)
        // instead of NaN.
        const auto val       = (1 - acc[row][col]);
        const auto rectifier = (!signbit(val));
        acc[row][col]        = raft::sqrt(rectifier * val);
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/jensen_shannon.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::log
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
// Describes the computation the jensen_shannon distance
/**
* @brief the Jensen Shannon distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sqrt(0.5 * sum( -x_i * (log(0.5 * (x_i + y_i)) - log(x_i))
* + (-y_i * (log(0.5 * (x_i + y_i)) - log(y_i)))))
*/
/**
 * @brief the Jensen Shannon distance matrix calculation
 *
 * It computes the following equation:
 *
 * c_ij = sqrt(0.5 * sum( -x_i * (log(0.5 * (x_i + y_i)) - log(x_i))
 *          + (-y_i * (log(0.5 * (x_i + y_i)) - log(y_i)))))
 */
template <typename DataType, typename AccType, typename IdxType>
struct jensen_shannon_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;
  // The core function performs several logs per element, so loop unrolling
  // is reduced to keep compile times and code size in check.
  static constexpr bool expensive_inner_loop = true;
  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulates one term of the (unscaled) JS divergence.
  //
  // Zero handling: adding the boolean v_zero turns a 0 operand into 1 so
  // raft::log returns 0 instead of -inf; the (!m_zero) factor likewise
  // forces logM to 0 when the mixture m is 0.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const DataT m     = 0.5f * (x + y);
    const bool m_zero = (m == 0);
    const auto logM   = (!m_zero) * raft::log(m + m_zero);

    const bool x_zero = (x == 0);
    const bool y_zero = (y == 0);
    acc += (-x * (logM - raft::log(x + x_zero))) + (-y * (logM - raft::log(y + y_zero)));
  };

  // Final scaling: sqrt(0.5 * acc) for every tile element.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Float literal here: the previous double literal (0.5) promoted the
        // multiply and the sqrt to double precision whenever AccT is float,
        // which is needlessly slow on GPUs. Matches the 0.5f used in core()
        // and in kl_divergence_op's epilog.
        acc[i][j] = raft::sqrt(0.5f * acc[i][j]);
      }
    }
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/cosine.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
// Epilogue operator for CUTLASS based kernel
// Epilogue operator for the CUTLASS-based cosine kernel.
template <typename DataT, typename AccT>
struct cosine_cutlass_op {
  __device__ cosine_cutlass_op() noexcept {}

  // Final distance: 1 - dot / (||a|| * ||b||).
  __device__ AccT operator()(DataT& aNorm, const DataT& bNorm, DataT& accVal) const noexcept
  {
    const AccT cos_sim = static_cast<AccT>(accVal / (aNorm * bNorm));
    return static_cast<AccT>(1.0) - cos_sim;
  }

  // Identity pass-through used for single-operand invocations.
  __device__ AccT operator()(DataT aData) const noexcept { return aData; }
};
/**
* @brief the expanded cosine distance matrix calculation
*
* It computes the following equation:
*
* d(x, y) = 1 - (x ⋅ y) / ( ||x||_2 ||y||_2)
*/
/**
 * @brief the expanded cosine distance matrix calculation
 *
 * It computes the following equation:
 *
 *   d(x, y) = 1 - (x ⋅ y) / ( ||x||_2 ||y||_2)
 */
template <typename DataType, typename AccType, typename IdxType>
struct cosine_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // The expanded form requires the precomputed norms of both inputs.
  static constexpr bool use_norms = true;
  // A single multiply-add per element; full unrolling stays cheap.
  static constexpr bool expensive_inner_loop = false;
  // Extra shared memory beyond the policy's is reserved to stage the
  // (Mblk + Nblk) norm values consumed in the epilogue.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + ((Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }

  // Accumulate the dot product x ⋅ y.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  // Convert each accumulated dot product into the cosine distance using the
  // per-row (regxn) and per-column (regyn) norms.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Keep the subtraction in AccT: the previous double literal (1.0)
        // promoted the expression to double precision whenever AccT is
        // float, which is needlessly slow on GPUs. This now mirrors
        // cosine_cutlass_op's static_cast<AccT>(1.0).
        acc[i][j] = static_cast<AccT>(1.0) - (acc[i][j] / (regxn[i] * regyn[j]));
      }
    }
  }

  // CUTLASS epilogue functor performing the same final operation.
  constexpr cosine_cutlass_op<DataT, AccT> get_cutlass_op() const
  {
    return cosine_cutlass_op<DataT, AccT>();
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/l_inf.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the L_inf (Chebyshev) distance matrix calculation
*
* It computes the following equation:
*
* c_ij = max_k | x_ik - y_kj |
*/
/**
 * @brief the L_inf (Chebyshev) distance matrix calculation
 *
 * It computes the following equation:
 *
 *   c_ij = max_k | x_ik - y_kj |
 */
template <typename DataType, typename AccType, typename IdxType>
struct l_inf_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // This metric does not consume precomputed input norms.
  static constexpr bool use_norms = false;

  // abs + max per element is cheap; full unrolling is fine.
  static constexpr bool expensive_inner_loop = false;

  // Only the shared memory prescribed by the kernel policy is needed.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Keep the running maximum absolute component difference.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    acc = raft::max(acc, raft::abs(x - y));
  };

  // Nothing to finalize: the accumulator already holds the distance.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/distance_ops/l2_unexp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft::distance::detail::ops {
/**
* @brief the unexpanded euclidean distance matrix calculation
*
* It computes the following equation:
*
* c_ij = optional_sqrt ( sum_k (x_ik - y_kj)^2 )
*/
/**
 * @brief the unexpanded euclidean distance matrix calculation
 *
 * It computes the following equation:
 *
 * c_ij = optional_sqrt ( sum_k (x_ik - y_kj)^2 )
 */
template <typename DataType, typename AccType, typename IdxType>
struct l2_unexp_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // When true, the epilogue takes the square root (actual L2 distance);
  // when false, the squared distance is returned.
  bool sqrt;

  l2_unexp_distance_op(bool sqrt_) noexcept : sqrt(sqrt_) {}

  // Do not load norms of data: the unexpanded L2 computation does not use
  // them. (Previous comment incorrectly said "L1 distance".)
  static constexpr bool use_norms = false;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;
  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate the squared component difference.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const auto diff = x - y;
    acc += diff * diff;
  };

  // Optionally apply the square root to every tile element.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    if (sqrt) {
#pragma unroll
      for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < Policy::AccColsPerTh; ++j) {
          acc[i][j] = raft::sqrt(acc[i][j]);
        }
      }
    }
  };
};
} // namespace raft::distance::detail::ops
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/epilogue_elementwise.cuh | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
/*! \file
\brief Functor performing distance operations used by epilogues of pairwise distance
* kernels.
* This is adapted from LinearCombinationBiasElementwise from CUTLASS 2.9.0
* customized for applying elementwise distance formula on accumulated GEMM value
* and applying user-defined operation which can convert distance values to key-value pair.
* .
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/epilogue/thread/activation.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
/// Output operator satisfying the EpilogueWithBroadcast::OutputOp concept.
///
/// Adapted from CUTLASS 2.9.0's LinearCombinationBiasElementwise: applies the
/// elementwise distance formula (DistanceOp_) to the accumulated GEMM value
/// and stores the result into the 'T' fragment, which a user-defined
/// reduction op can later turn into key-value pairs.
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          typename DistanceOp_,
          typename CGReduceOp_,
          typename ReduceOpT_,
          typename KVPReduceOpT_>
class FusedDistanceNNEpilogueElementwise {
 public:
  using ElementOutput      = ElementC_;
  using ElementC           = ElementC_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute     = ElementCompute_;
  using ElementZ           = ElementZ_;
  using ElementT           = ElementT_;
  // Number of elements handled by one vectorized access.
  static int const kElementsPerAccess = ElementsPerAccess;
  static int const kCount             = kElementsPerAccess;

  using DistanceOp = DistanceOp_;
  using CGReduceOp = CGReduceOp_;

  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute     = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC           = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ           = Array<ElementZ, kElementsPerAccess>;
  // The output value type is dictated by the cooperative-groups reduce op.
  using OutValT  = typename CGReduceOp::AccTypeT;
  using FragmentT = Array<OutValT, kElementsPerAccess>;

  using FragmentOutput = FragmentZ;

  static bool const kIsHeavy = true;  // ElementwiseOp::kIsHeavy;

  /// If true, the 'Z' tensor is stored
  static bool const kStoreZ = false;  // We don't store anything in Z,

  /// If true, the 'T' tensor is stored
  static bool const kStoreT = true;  // this is our final output storage.

  /// Host-constructable parameters structure
  struct Params {
    CGReduceOp_ cg_reduce_op;   // cooperative-groups reduction functor
    DistanceOp_ dist_op_;       // elementwise distance formula
    KVPReduceOpT_ pair_redop_;  // reduction over (key, value) pairs
    ReduceOpT_ red_op_;         // scalar reduction functor
    // NOTE(review): presumably per-row mutexes used for a cross-threadblock
    // reduction — confirm against the fused-distance-NN kernel.
    int* mutexes_;
    using CGReduceT = CGReduceOp_;

    //
    // Methods
    //
    CUTLASS_HOST_DEVICE
    Params(DistanceOp_ dist_op,
           CGReduceOp cg_reduce_op,
           ReduceOpT_ red_op,
           KVPReduceOpT_ pair_redop,
           int* mutexes)
      : cg_reduce_op(cg_reduce_op),
        dist_op_(dist_op),
        pair_redop_(pair_redop),
        red_op_(red_op),
        mutexes_(mutexes)
    {
    }

    CUTLASS_HOST_DEVICE
    Params() {}
  };

 private:
  //
  // Data members
  //
  DistanceOp_ elementwise_op;
  KVPReduceOpT_ pair_redop;

 public:
  ReduceOpT_ red_op;

  //
  // Methods
  //

  /// Constructor from Params
  CUTLASS_HOST_DEVICE
  FusedDistanceNNEpilogueElementwise(Params const& params)
    : elementwise_op(params.dist_op_), pair_redop(params.pair_redop_), red_op(params.red_op_)
  {
  }

  /// Returns true if source is needed
  CUTLASS_HOST_DEVICE
  bool is_source_needed() const
  {
    // we use for making sure C matrix is used for A mat norm.
    return true;
  }

  /// Functionally required for serial reduction in the epilogue
  /// (intentionally a no-op here).
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true.
  /// Converts the accumulator (AB) and source (frag_C) fragments to the
  /// compute type, applies the elementwise distance op per element, and
  /// writes the result into frag_T (the stored output — see kStoreT).
  CUTLASS_HOST_DEVICE
  void operator()(FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
    FragmentCompute tmp_Accum =
      NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
    FragmentCompute tmp_C =
      NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);
    FragmentCompute result_Z;

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kElementsPerAccess; ++i) {
      ElementCompute res_Z = elementwise_op(tmp_C[i], V[i], tmp_Accum[i]);
      frag_T[i]            = res_Z;
    }
  }

  /// Applies the operation when is_source_needed() is false
  /// (unused in this epilogue — intentionally empty).
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/custom_epilogue_with_broadcast.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
This file contains a customized version of EpilogueWithBroadcast from CUTLASS 2.9.1
(https://github.com/NVIDIA/cutlass/blob/v2.9.1/include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h)
Changes:
- customized the compute_source_needed_() and apply_output_operator_() to suit the needs of per row
reduction
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/fast_math.h>
#include <cutlass/functional.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/layout/vector.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_coord.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/regular_tile_iterator.h>
#include <cutlass/epilogue/threadblock/epilogue_base.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
#include <cutlass/numeric_types.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          bool StoreZ = true,
          bool StoreT = true>
struct EpilogueWithBroadcastOpBaseCustom {
  // Element type of the source/output matrix C.
  using ElementOutput      = ElementC_;
  // Element type of the accumulator tile produced by the MMA.
  using ElementAccumulator = ElementAccumulator_;
  // Element type used for intermediate epilogue computation.
  using ElementCompute     = ElementCompute_;
  // Element type of the 'Z' output tensor.
  using ElementZ = ElementZ_;
  // Element type of the auxiliary 'T' output tensor.
  using ElementT = ElementT_;
  // Number of elements each thread accesses per memory operation.
  static int const kElementsPerAccess = ElementsPerAccess;

  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute     = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC           = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ           = Array<ElementZ, kElementsPerAccess>;
  using FragmentT           = Array<ElementT, kElementsPerAccess>;

  /// If true, the 'Z' tensor is stored
  static bool const kStoreZ = StoreZ;

  /// If true, the 'T' tensor is stored
  static bool const kStoreT = StoreT;

  /// Parameters structure - required
  struct Params {};

  //
  // Methods
  //

  /// Constructor from Params
  EpilogueWithBroadcastOpBaseCustom(Params const& params_) {}

  /// Determine if the source is needed. May return false if
  bool is_source_needed() const { return true; }

  /// Required for serial split-K reductions; no-op in this concept definition.
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
  }

  /// Applies the operation when is_source_needed() is false
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with bias vector broadcast over columns.
///
/// Computes the following:
///
///
/// Z, T = OutputOp(AB, C, Broadcast)
///
/// if (ElementwiseOp::kStoreZ) {
/// store(converted_u);
/// }
///
/// if (ElementwiseOp::kStoreT) {
/// store(v);
/// }
///
template <
  typename Shape_,              ///< Shape of threadblock tile (concept: GemmShape)
  typename WarpMmaOperator_,    ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
  int PartitionsK,              ///< Number of partitions of the K dimension
  typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors (z)
  typename TensorTileIterator_, ///< Additional tile iterator for tensor-valued operands (t)
  typename ElementVector_,      ///< Pointer to broadcast vector
  typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators
  typename WarpTileIterator_,   ///< Warp-scoped tile iterator writing accumulators to SMEM
  typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM
  typename OutputOp_,           ///< Output operator - concept is EpilogueWithBroadcastOp
  typename Padding_,            ///< Padding added to SMEM allocation to avoid bank conflicts (concept:
                                ///< MatrixShape)
  int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity
  int IterationsUnroll =         ///< Used to reduce binary size when epilogue op is large
  (!IsEpilogueFunctorHeavy<OutputOp_>::value)>
class EpilogueWithBroadcastCustom : public EpilogueBase<Shape_,
                                                        typename WarpMmaOperator_::Shape,
                                                        PartitionsK,
                                                        AccumulatorFragmentIterator_,
                                                        WarpTileIterator_,
                                                        Padding_,
                                                        FragmentsPerPartition> {
 public:
  using Base = EpilogueBase<Shape_,
                            typename WarpMmaOperator_::Shape,
                            PartitionsK,
                            AccumulatorFragmentIterator_,
                            WarpTileIterator_,
                            Padding_,
                            FragmentsPerPartition>;

  using Shape              = Shape_;
  using WarpMmaOperator    = WarpMmaOperator_;
  static int const kPartitionsK = PartitionsK;
  using OutputTileIterator = OutputTileIterator_;
  using TensorTileIterator = TensorTileIterator_;
  using ElementVector      = ElementVector_;
  using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
  using WarpTileIterator   = WarpTileIterator_;
  using SharedLoadIterator = SharedLoadIterator_;
  using OutputOp           = OutputOp_;
  using Padding            = Padding_;

  using Layout    = layout::RowMajor;
  using LongIndex = typename Layout::LongIndex;

  /// The complete warp-level accumulator tile
  using AccumulatorTile = typename Base::AccumulatorTile;

  /// Accumulator element
  using ElementAccumulator = typename WarpTileIterator::Element;

  /// Compute data type produced by the output op
  using ElementCompute = typename OutputOp::ElementCompute;

  /// Compute fragment
  using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;

  /// Thread map used by output tile iterators
  using ThreadMap = typename OutputTileIterator::ThreadMap;

  /// Fragment object used to store the broadcast values
  using BroadcastFragment =
    Array<ElementCompute, ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;

  /// Output element
  using ElementOutput = typename OutputTileIterator::Element;

  /// Data type of additional tensor
  using ElementTensor = typename TensorTileIterator::Element;

  /// Output access size
  static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;

  /// Tensor reference to destination tensor
  using TensorRef = typename OutputTileIterator::TensorRef;

  /// Tensor reference to sync tensor
  using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;

  /// Const tensor reference to source tensor
  using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;

  /// Array type used to output
  using OutputAccessType =
    Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;

  /// Array type used by output functor
  using AccumulatorAccessType =
    Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;

  /// Array type used by output functor
  using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;

  /// Tensor access type
  using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;

  /// Number of warps
  using WarpCount = typename Base::WarpCount;

  /// Shared memory allocation from epilogue base class
  using BaseSharedStorage = typename Base::SharedStorage;

  // Number of shared-memory tiles: one per fragment when the epilogue is coarsened,
  // otherwise one per K partition.
  static int constexpr kSmemTiles =
    Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
  // Element offset between successive SMEM tiles.
  static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;

  /// Used for the broadcast
  struct BroadcastDetail {
    /// Number of threads per warp
    static int const kWarpSize = 32;

    static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;

    /// Number of distinct scalar column indices handled by each thread
    static int const kColumnsPerThread =
      ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;

    /// Number of distinct scalar row indices handled by each thread
    static int const kRowsPerThread =
      ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;

    /// Number of threads per threadblock
    static int const kThreadCount = kWarpSize * WarpCount::kCount;

    /// Number of distinct threads per row of output tile
    static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);

    /// Number of distinct threads which must be reduced during the final reduction phase within the
    /// threadblock.
    static int const kThreadRows = kThreadCount / kThreadsPerRow;

    /// Accesses per thread needed to cover one tile row when the whole threadblock
    /// strides across the row width (ceil(Shape::kN / kThreadCount), at least 1).
    static int const kThreadAccessesPerRow =
      const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);

    /// Shape of the shared memory allocation for the epilogue
    using StorageShape = MatrixShape<kThreadRows, Shape::kN>;

    /// Debug printing
    CUTLASS_DEVICE
    static void print()
    {
#if 0
      printf("BroadcastDetail {\n");
      printf(
        "  kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
        "kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
        kColumnsPerThread,
        kRowsPerThread,
        kThreadCount,
        kThreadsPerRow,
        kThreadRows,
        kThreadAccessesPerRow,
        StorageShape::kRow,
        StorageShape::kColumn,
        StorageShape::kCount
      );
      printf("};\n");
#endif
    }
  };

  /// Shared storage structure (shadows base) with additional SMEM buffer for reduction
  struct SharedStorage {
    union {
      BaseSharedStorage base;
    };

    CUTLASS_HOST_DEVICE
    SharedStorage() {}
  };

 public:
  static_assert(SharedLoadIterator::Fragment::kElements == TensorTileIterator::Fragment::kElements,
                "Mismatch between shared load iterator and output tile iterator.");

  static_assert(OutputTileIterator::kElementsPerAccess,
                "OutputTileIterator::kElementsPerAccess must not be zero.");

  static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
                "Divisibility");

 private:
  /// Loads fragment from shared memory aligned with output tensor
  SharedLoadIterator shared_load_iterator_;

  /// Thread index within the threadblock
  int thread_idx_;

 public:
  /// Constructor
  CUTLASS_DEVICE
  EpilogueWithBroadcastCustom(SharedStorage& shared_storage, ///< Shared storage object
                              int thread_idx, ///< ID of a thread within the threadblock
                              int warp_idx,   ///< ID of warp within threadblock
                              int lane_idx    ///< Id of thread within warp
                              )
    : Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
      shared_load_iterator_(shared_storage.base.reference(), thread_idx),
      thread_idx_(thread_idx)
  {
  }

  /// Streams the result to global memory.
  /// Loads the broadcast vector, then always runs the source-needed path
  /// (this customization requires the C operand for the A-norm vector).
  CUTLASS_DEVICE
  void operator()(
    OutputOp const& output_op,          ///< Output operator
    ElementVector const* broadcast_ptr, ///< Broadcast vector
    AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix
    TensorTileIterator
      tensor_iterator, ///< Threadblock tile iterator for additional tensor operand
    MatrixCoord const&
      problem_size = ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord(Shape::kM, Shape::kN),
    MatrixCoord const&
      threadblock_offset = ///< Threadblock's initial offset within the problem size space
    MatrixCoord())
  {
    BroadcastFragment broadcast_fragment;

    load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);

    compute_source_needed_(
      output_op, broadcast_fragment, accumulators, source_iterator, tensor_iterator);
  }

 private:
  // Loads this thread's slice of the broadcast vector into registers, guarding
  // out-of-bounds columns and converting to ElementCompute.
  CUTLASS_DEVICE
  void load_broadcast_fragment_(
    BroadcastFragment&
      broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
    ElementVector const* broadcast_ptr, ///< Broadcast vector
    MatrixCoord const&
      problem_size, ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord const&
      threadblock_offset ///< Threadblock's initial offset within the problem size space
  )
  {
    broadcast_fragment.clear();

    // If no pointer is supplied, set with all zeros and avoid memory accesses
    if (!broadcast_ptr) { return; }

    int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();

    int thread_column_idx = threadblock_offset.column() + thread_initial_column;
    broadcast_ptr += thread_initial_column;

    NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess>
      converter;
    using AccessType          = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
    using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;

    ComputeFragmentType* frag_ptr = reinterpret_cast<ComputeFragmentType*>(&broadcast_fragment);

    CUTLASS_PRAGMA_UNROLL
    for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
      AccessType loaded;

      loaded.clear();

      if (thread_column_idx < problem_size.column()) {
        loaded = *reinterpret_cast<AccessType const*>(broadcast_ptr);
      }

      ComputeFragmentType cvt = converter(loaded);
      frag_ptr[j]             = cvt;

      thread_column_idx += ThreadMap::Delta::kColumn;
      broadcast_ptr += ThreadMap::Delta::kColumn;
    }
  }

  template <class Seq>
  struct acc2smem_source_not_needed;

  // Copies accumulator fragments to shared memory for the source-not-needed path.
  // The index_sequence trick selects the helper instantiation matching 'pos' at runtime.
  template <size_t... Seq>
  struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
    template <int Advance>
    CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
                                      WarpTileIterator& warp_tile_iterator)
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < Advance; i++) {
        ++accum_fragment_iterator;
      }

      CUTLASS_PRAGMA_UNROLL
      for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
        typename AccumulatorFragmentIterator::Fragment accum_fragment;

        accum_fragment_iterator.load(accum_fragment);
        ++accum_fragment_iterator;

        warp_tile_iterator.store(accum_fragment);
        if (p < Base::kFragmentsPerIteration - 1) {
          warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
        }
      }

      if (Base::kFragmentsPerIteration > 1) {
        warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
                                              (1 - Base::kFragmentsPerIteration));
      }
    }

    CUTLASS_DEVICE
    static void push(size_t pos,
                     AccumulatorFragmentIterator const& iterator_begin,
                     WarpTileIterator& warp_tile_iterator)
    {
      int dummy[] = {
        (pos == (Seq * Base::kFragmentsPerIteration)) &&
        (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};

      CUTLASS_UNUSED(dummy[0]);
    }
  };

  /// Streams the result to global memory.
  /// Intentionally empty: this customization always requires the source operand
  /// (see is_source_needed() on the output op).
  CUTLASS_DEVICE
  void compute_source_not_needed_(
    OutputOp const& output_op, ///< Output operator
    BroadcastFragment const&
      broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
    OutputTileIterator destination_iterator, ///< Tile iterator for destination
    AccumulatorTile const& accumulators,     ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
  )
  {
  }

  template <class Seq>
  struct acc2smem_source_needed;

  // Copies the accumulator fragment selected by 'pos' to shared memory
  // for the source-needed path.
  template <size_t... Seq>
  struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
    template <int Advance>
    CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
                                      WarpTileIterator& warp_tile_iterator)
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < Advance; i++) {
        ++accum_fragment_iterator;
      }

      typename AccumulatorFragmentIterator::Fragment accum_fragment;
      accum_fragment_iterator.load(accum_fragment);
      warp_tile_iterator.store(accum_fragment);
    }

    CUTLASS_DEVICE
    static void push(size_t pos,
                     AccumulatorFragmentIterator const& iterator_begin,
                     WarpTileIterator& warp_tile_iterator)
    {
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
    }
  };

  /// Streams the result to global memory.
  /// Per output-tile iteration: stage accumulators through SMEM, reload them
  /// aligned with the output layout, apply the fused output op with the source
  /// (A-norm) fragment and the broadcast vector, then store the reduced 'T' output.
  CUTLASS_DEVICE
  void compute_source_needed_(
    OutputOp const& output_op, ///< Output operator
    BroadcastFragment const&
      broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns
    AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
    OutputTileIterator
      source_iterator, ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
    TensorTileIterator tensor_iterator ///< Threadblock tile iterator for additional tensor operand
  )
  {
    typename OutputTileIterator::Fragment source_fragment;
    source_fragment.clear();

    //
    // Iterator over warp-level accumulator fragment
    //

    AccumulatorFragmentIterator accum_fragment_iterator(accumulators);

    //
    // Iterate over accumulator tile
    //

#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
    for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
      //
      // Convert and store fragment
      //

      //__syncthreads();

      acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
        iter, accum_fragment_iterator, this->warp_tile_iterator_);

      // Barrier between the SMEM store above and the SMEM load below.
      __syncthreads();

      //
      // Load fragments from shared memory
      //

      typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];

      shared_load_iterator_.load(aligned_accum_fragment[0]);

      //
      // Apply output operation
      //

      typename TensorTileIterator::Fragment frag_T;

      //
      // Load the source
      //

      source_iterator.load(source_fragment);
      ++source_iterator;

      apply_output_operator_(
        frag_T, output_op, aligned_accum_fragment[0], source_fragment, broadcast_fragment);

      //
      // Conditionally store fragments
      //

      if (OutputOp::kStoreT) {
        tensor_iterator.store(frag_T);
        ++tensor_iterator;
      }
    }
  }

  /// Helper to invoke the output functor over each vector of output
  CUTLASS_DEVICE
  void apply_output_operator_(typename TensorTileIterator::Fragment& frag_T,
                              OutputOp const& output_op,
                              typename SharedLoadIterator::Fragment const& frag_AB,
                              typename OutputTileIterator::Fragment const& frag_C,
                              BroadcastFragment const& frag_Broadcast)
  {
    using AccessTypeT         = Array<typename TensorTileIterator::OutValT, kElementsPerAccess>;
    using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;

    AccessTypeT* frag_T_ptr = reinterpret_cast<AccessTypeT*>(&frag_T);

    AccumulatorAccessType const* frag_AB_ptr =
      reinterpret_cast<AccumulatorAccessType const*>(&frag_AB);

    OutputAccessType const* frag_C_ptr = reinterpret_cast<OutputAccessType const*>(&frag_C);

    AccessTypeBroadcast const* frag_Broadcast_ptr =
      reinterpret_cast<AccessTypeBroadcast const*>(&frag_Broadcast);

    int const kOutputOpIterations =
      TensorTileIterator::Fragment::kElements / TensorTileIterator::kElementsPerAccess;

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kOutputOpIterations; ++i) {
      // The C fragment (row vector) repeats per row; the broadcast fragment
      // (column vector) repeats per column iteration.
      output_op(frag_T_ptr[i],
                frag_AB_ptr[i],
                frag_C_ptr[(i / ThreadMap::Iterations::kColumn)],
                frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
    }
  }

  /// Helper to invoke the output functor over each vector of output.
  /// Intentionally empty: the source-not-needed path is unused in this customization.
  CUTLASS_DEVICE
  void apply_output_operator_source_not_needed_(
    typename OutputTileIterator::Fragment& frag_Z,
    typename TensorTileIterator::Fragment& frag_T,
    OutputOp const& output_op,
    typename SharedLoadIterator::Fragment const& frag_AB,
    BroadcastFragment const& frag_Broadcast)
  {
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/cutlass_base.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wtautological-compare"
// We define CUTLASS_NAMESPACE in case
// RAFT cmake is not used
#ifndef CUTLASS_NAMESPACE
#define cutlass raft_cutlass
#endif
#include <cutlass/cutlass.h>
#include <cutlass/gemm/device/gemm.h>
#include <cutlass/gemm/device/gemm_grouped.h>
#include <cutlass/gemm/device/gemm_universal_adapter.h>
#include <rmm/device_uvector.hpp>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/tensor_view.h>
#include <raft/distance/detail/fused_distance_nn/epilogue_elementwise.cuh> // FusedDistanceNNEpilogueElementwise
#include <raft/distance/detail/fused_distance_nn/gemm.h> // FusedDistanceNNGemm
#include <raft/util/cudart_utils.hpp> // getMultiProcessorCount
#include <raft/util/cutlass_utils.cuh> // RAFT_CUTLASS_TRY
namespace raft {
namespace distance {
namespace detail {
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          typename CGReduceOpT,
          typename DistanceFn,
          typename ReduceOpT,
          typename KVPReduceOpT>
void cutlassFusedDistanceNN(const DataT* x,
                            const DataT* y,
                            const DataT* xn,
                            const DataT* yn,
                            IdxT m,
                            IdxT n,
                            IdxT k,
                            IdxT lda,
                            IdxT ldb,
                            IdxT ldd,
                            OutT* dOutput,
                            int* mutexes,
                            CGReduceOpT cg_reduce_op,
                            DistanceFn dist_op,
                            ReduceOpT redOp,
                            KVPReduceOpT pairRedOp,
                            cudaStream_t stream)
{
  // Epilogue functor fusing the distance computation with the per-row
  // min/argmin style reduction. One element per access is required by the
  // custom reduced-vector tile iterator.
  using EpilogueOutputOp = cutlass::epilogue::thread::FusedDistanceNNEpilogueElementwise<
    DataT, // ElementC_
    AccT,  // ElementAccumulator_
    DataT, // ElementCompute_
    AccT,  // ElementZ_
    OutT,  // ElementT_
    // 128 / cutlass::sizeof_bits<DataT>::value,
    1, // Elements per access 1
    DistanceFn,
    CGReduceOpT,
    ReduceOpT,
    KVPReduceOpT>;

  // Single GEMM problem in the grouped-GEMM interface.
  constexpr int batch_count = 1;

  typename EpilogueOutputOp::Params epilogue_params(
    dist_op, cg_reduce_op, redOp, pairRedOp, mutexes);

  // Software pipeline depth of the mainloop.
  constexpr int NumStages = 3;
  // Global-memory access alignment (vector width) for A and B.
  constexpr int Alignment = VecLen;

  // Row-major problem shape (m x n with reduction dim k).
  auto problem_size = cutlass::gemm::GemmCoord(m, n, k);

  constexpr bool isRowMajor = true;

  using fusedDistanceNNKernel =
    typename cutlass::gemm::kernel::FusedDistanceNNGemm<DataT,
                                                        Alignment,
                                                        DataT,
                                                        Alignment,
                                                        AccT,
                                                        AccT,
                                                        EpilogueOutputOp,
                                                        NumStages, // Number of pipeline stages
                                                        isRowMajor>::GemmKernel;

  using fusedDistanceNN = cutlass::gemm::device::GemmGrouped<fusedDistanceNNKernel>;

  // Size the persistent grid to (at most) one full wave of resident blocks,
  // but never fewer blocks than there are row tiles.
  int blocks_per_sm  = fusedDistanceNN::maximum_active_blocks();
  int sm_count       = raft::getMultiProcessorCount();
  int wave_capacity  = blocks_per_sm * sm_count;

  constexpr int mmaShapeM = fusedDistanceNNKernel::Mma::Shape::kM;
  constexpr int mmaShapeN = fusedDistanceNNKernel::Mma::Shape::kN;

  int n_tiles    = (problem_size.n() + mmaShapeN - 1) / mmaShapeN;
  int m_tiles    = (problem_size.m() + mmaShapeM - 1) / mmaShapeM;
  int tile_count = n_tiles * m_tiles;

  int grid_blocks = m_tiles;
  if (m_tiles < wave_capacity) {
    grid_blocks = (tile_count < wave_capacity) ? tile_count : wave_capacity;
  }

  typename fusedDistanceNN::Arguments arguments{
    problem_size,
    batch_count, // num of problems.
    grid_blocks,
    epilogue_params,
    x,
    y,
    xn,          // C matrix eq vector param, which here is A norm
    (DataT*)yn,  // this is broadcast vec, which is required to be non-const param
    dOutput,     // Output distance matrix
    (int64_t)lda, // stride A
    (int64_t)ldb, // stride B
    (int64_t)1,   // stride A norm
    (int64_t)ldd  // stride Output matrix
  };

  // Query and allocate any extra workspace the kernel needs.
  size_t workspace_size = fusedDistanceNN::get_workspace_size(arguments);
  rmm::device_uvector<uint8_t> workspace_buf(workspace_size, stream);

  fusedDistanceNN gemm_op;

  // Verify the problem configuration is supported, initialize, and launch.
  RAFT_CUTLASS_TRY(gemm_op.can_implement(arguments));
  RAFT_CUTLASS_TRY(gemm_op.initialize(arguments, workspace_buf.data(), stream));
  RAFT_CUTLASS_TRY(gemm_op.run(stream));
}
}; // namespace detail
}; // namespace distance
}; // namespace raft
#pragma GCC diagnostic pop
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/predicated_tile_iterator_reduced_vec.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- PredicatedTileIteratorParams() is customized to not stride by layout.stride(0).
- makes use of `SharedStorage` to store reduced values across warps to gmem in coalesced manner.
- customized the store_with_byte_offset() to perform reduction per row and write final value to
gmem.
- customized the Params() struct to take user inputs from epilogueOp params.
*/
#pragma once
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
namespace cg = cooperative_groups;
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_,          ///< Thread map (concept: OutputTileThreadMap)
          typename Element_,            ///< Element data type
          typename Layout_,             ///< Layout of the output tile
          typename EpilogueOpParams_,   ///< Epilogue params carrying the user reduction ops
          bool ScatterD     = false,    ///< Scatter D operand or not
          bool UseCUDAStore = false>
class PredicatedTileIteratorReducedVec {
 public:
  using ThreadMap = ThreadMap_;
  using Shape     = typename ThreadMap::Shape;
  using Element   = Element_;
  using Layout    = Layout_;

  using TensorRef      = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index       = typename Layout::Index;
  using LongIndex   = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  using EpilogueOpParams = EpilogueOpParams_;
  // Index and accumulator-value types of the user-supplied cooperative-groups reduce op.
  using OutIdxT = typename EpilogueOpParams::CGReduceT::IndexT;
  using OutValT = typename EpilogueOpParams::CGReduceT::AccTypeT;

  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads           = ThreadMap::kThreads;
  static int const kIterations        = ThreadMap::Count::kTile;

  static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
  static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
  static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
  static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
  static_assert(!UseCUDAStore, "UseCUDAStore path is not supported");

  // Number of output rows one threadblock covers across a full tile pass; this is
  // also the row capacity of the shared-memory reduction buffer (SharedStorage).
  static int const total_rows = ThreadMap::kWarpCount * ThreadMap::Iterations::kRow *
                                ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                                ThreadMap::Count::kTile * ThreadMap::Delta::kRow;

  /// Fragment object: the accumulator values this thread contributes per store().
  using Fragment =
    Array<OutValT,
          ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
            ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * kElementsPerAccess>;

  // Memory access size
  using AccessType     = AlignedArray<Element, kElementsPerAccess>;
  using AccessTypeValT = AlignedArray<OutValT, kElementsPerAccess>;

  //
  // Parameters struct
  //

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    // User-provided epilogue parameters (reduction operators, gmem mutexes, ...).
    EpilogueOpParams user_param;

    CUTLASS_HOST_DEVICE
    Params() {}

    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }

    // NOTE: unlike the ctor above, this one deliberately does NOT scale by
    // layout.stride(0): the reduced output is a single value per row (see the
    // customization notes in the file header).
    CUTLASS_HOST_DEVICE
    Params(Layout const& layout, EpilogueOpParams const& user_param_)
      : PredicatedTileIteratorParams(int(sizeof(AccessType)) / kElementsPerAccess,
                                     make_OutputTileThreadMapDesc<ThreadMap>()),
        user_param(user_param_)
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };

  /// Mask object
  struct Mask {
    // One predicate per column *element* handled by this thread (not per access),
    // so partially in-bounds vector accesses can be masked element-wise.
    static int const kCount = ThreadMap::Iterations::kColumn * kElementsPerAccess;

    /// Predicate state
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< Enables all accesses guarded by mask
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };

  /// Shared storage allocation needed by the predicated tile
  // iterator for reduction.
  struct SharedStorage {
    //
    // Type definitions
    //
    using Shape = MatrixShape<total_rows, 1>;

    /// Shape of the shared memory allocation for the reduced values store
    using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>;

    //
    // Data members
    //
    // Methods
    //
    AlignedBuffer<Element, StorageShape::kCount> storage;

    CUTLASS_DEVICE
    Element* data() { return storage.data(); }

    SharedStorage() {}

    /// Initializes every row slot with the reduction identity (OutValT max),
    /// using a block-strided loop so all threads of the CTA cooperate.
    /// Caller is responsible for synchronizing before the buffer is read.
    CUTLASS_DEVICE
    void initSmem(EpilogueOpParams const& user_params)
    {
      Element* shared_elem_arr = data();
      constexpr auto maxVal    = std::numeric_limits<OutValT>::max();
      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        user_params.red_op_.init(&shared_elem_arr[row], maxVal);
      }
    }
  };

  // Generic case: reduce plain values across the cooperative-groups tile.
  template <typename cg_reduce_op_t,
            typename cg_group_t,
            typename IdxT,
            typename ValT,
            typename OutT>
  struct select_reduce {
    /// Performs warp level reduction and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(OutT value,
                  ValT prev_red_val,
                  cg_reduce_op_t reduce_op,
                  cg_group_t cg_warp_group,
                  OutT& shmem_ptr)
    {
      // Skip the reduction entirely unless some lane can improve on the value
      // already stored in shared memory.
      if (cg_warp_group.any(reduce_op.isAmin(value, prev_red_val))) {
        OutT reduced_val = cg::reduce(cg_warp_group, value, reduce_op);
        if (cg_warp_group.thread_rank() == 0) { shmem_ptr = reduced_val; }
      }
    }
  };

  // Specialization for KeyValuePair<IdxT, float>: reduce on the value only, then
  // locate a lane holding the winning value so its full key/value pair is stored.
  template <typename cg_reduce_op_t, typename cg_group_t, typename IdxT>
  struct select_reduce<cg_reduce_op_t, cg_group_t, IdxT, float, raft::KeyValuePair<IdxT, float>> {
    using ValT = float;
    using Ty   = raft::KeyValuePair<IdxT, ValT>;

    /// Performs warp level reduction of key value pair and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(Ty val_to_red,
                  float prev_red_val,
                  cg_reduce_op_t cg_reduce_op,
                  cg_group_t cg_warp_group,
                  Ty& shmem_ptr)
    {
      ValT val = val_to_red.value;
      if (cg_warp_group.any(cg_reduce_op.isAmin(val, prev_red_val))) {
        ValT reduced_val = cg::reduce(cg_warp_group, val, cg_reduce_op);
        // Partition lanes by whether they hold the reduced value; the first lane
        // of the winning partition writes its key/value pair.
        bool pred    = (reduced_val == val);
        auto subTile = cg::binary_partition(cg_warp_group, pred);
        if (pred) {
          if (subTile.thread_rank() == 0) { shmem_ptr = val_to_red; }
        }
      }
    }
  };

  // Specialization for KeyValuePair<IdxT, double>; same scheme as the float case.
  template <typename cg_reduce_op_t, typename cg_group_t, typename IdxT>
  struct select_reduce<cg_reduce_op_t, cg_group_t, IdxT, double, raft::KeyValuePair<IdxT, double>> {
    using ValT = double;
    using Ty   = raft::KeyValuePair<IdxT, ValT>;

    /// Performs warp level reduction of key value pair and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(Ty val_to_red,
                  double prev_red_val,
                  cg_reduce_op_t cg_reduce_op,
                  cg_group_t cg_warp_group,
                  Ty& shmem_ptr)
    {
      ValT val = val_to_red.value;
      if (cg_warp_group.any(cg_reduce_op.isAmin(val, prev_red_val))) {
        ValT reduced_val = cg::reduce(cg_warp_group, val, cg_reduce_op);
        bool pred        = (reduced_val == val);
        auto subTile     = cg::binary_partition(cg_warp_group, pred);
        if (pred) {
          if (subTile.thread_rank() == 0) { shmem_ptr = val_to_red; }
        }
      }
    }
  };

 private:
  //
  // Data members
  //

  /// Parameters structure containing reference and precomputed state.
  Params params_;

  /// Byte-level pointer
  uint8_t* byte_pointer_;

  /// Byte-level pointer first tile offset of this threadblock.
  uint8_t* first_tile_byte_pointer_;

  /// Array of boolean values to contain steady-state predicates
  Mask mask_;

  /// Extent of the matrix tile in rows
  Index extent_row_;

  /// Extent of the matrix tile in columns
  Index extent_column_;

  /// A thread's starting row position (assuming steady-state predicates have been computed)
  Index thread_start_row_;

  /// Row at which this threadblock's first tile begins; anchors both the
  /// shared-memory row indexing and the gmem mutex id used by the destructor.
  Index block_start_row_first_tile_;

  /// A thread's starting column
  Index thread_start_column_;

  /// Internal state counter
  int state_[3];

  /// Scatter indices
  int const* indices_;

  //
  // Static asserts about internal strides
  //
  static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(Params::stride) == 8, "Expected 64b strides");

 protected:
  SharedStorage& shared_storage_;
  // When true, the destructor flushes the shared-memory reductions to global memory.
  const bool& do_gmem_reduction_;

 private:
  //
  // Methods
  //

 public:
  //
  // Methods
  //

  /// Constructor
  ///
  /// Computes this thread's initial row/column, the element-wise column predicates,
  /// and the byte pointers. Threadblocks whose tile starts at column 0 also
  /// initialize the shared-memory reduction buffer here.
  CUTLASS_DEVICE
  PredicatedTileIteratorReducedVec(SharedStorage& shared_storage,
                                   Params const& params,
                                   Element* pointer,
                                   TensorCoord extent,
                                   int thread_idx,
                                   const bool& do_gmem_reduction,
                                   TensorCoord threadblock_offset = TensorCoord(),
                                   int const* indices             = nullptr)
    : params_(params),
      indices_(indices),
      shared_storage_(shared_storage),
      do_gmem_reduction_(do_gmem_reduction)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
    extent_row_               = extent.row();
    extent_column_            = extent.column();
    thread_start_row_         = thread_offset.row();
    thread_start_column_      = thread_offset.column();

    TensorCoord block_offset    = ThreadMap::initial_offset(0) + threadblock_offset;
    block_start_row_first_tile_ = block_offset.row();

    // Initialize predicates: one per column element, guarding the matrix column extent.
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn * kElementsPerAccess; ++c) {
      int columnPerAccess       = (c / kElementsPerAccess);
      int columnWithinPerAccess = c % kElementsPerAccess;
      mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * columnPerAccess +
                              columnWithinPerAccess) < extent.column());
    }

    // Only blocks whose tile starts at column 0 initialize the smem buffer here.
    // NOTE(review): the persistent kernel also calls initSmem for nonzero column
    // offsets from its own prologue — confirm both paths together cover all blocks.
    if (threadblock_offset.column() == 0) {
      EpilogueOpParams const& user_params = params_.user_param;
      shared_storage_.initSmem(user_params);
    }

    // Null pointer performs no accesses
    if (!pointer) { mask_.clear(); }
    if (ScatterD && !indices) { mask_.clear(); }

    // Initialize pointer
    first_tile_byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                               LongIndex(block_offset.row()) * LongIndex(params_.stride);

    if (ScatterD) {
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }

    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }

  /// Destructor
  ///
  /// Flushes the per-row reduced values from shared memory to global memory.
  /// When the grid does not map exactly one block per row chunk, multiple blocks
  /// may update the same output rows, so a per-chunk spin-lock mutex in global
  /// memory serializes the read-modify-write.
  CUTLASS_DEVICE
  ~PredicatedTileIteratorReducedVec()
  {
    if (do_gmem_reduction_) {
      EpilogueOpParams const& user_params = params_.user_param;
      auto gmem_ptr                       = reinterpret_cast<Element*>(first_tile_byte_pointer_);
      Element* shared_elem_arr            = shared_storage_.data();
      const uint32_t mutex_id             = (block_start_row_first_tile_ / total_rows);
      // Mutex only needed when gridDim.x != ceil(extent_row_ / total_rows).
      bool useGmemMutex = (gridDim.x != ((extent_row_ - 1 + total_rows) / total_rows));
      // If this is not optimal grid size perform mutex based gmem reduce.
      if (useGmemMutex) {
        // single lock per block for multiple rows
        if (threadIdx.x == 0 && block_start_row_first_tile_ < extent_row_) {
          // Acquire spin lock with exponential backoff (__nanosleep requires SM70+).
          unsigned int ns = 8;
          while (atomicCAS(user_params.mutexes_ + mutex_id, 0, 1) == 1) {
            __nanosleep(ns);
            if (ns < 256) { ns *= 2; }
          }
        }
      }
      __syncthreads();  // all threads wait until thread 0 holds the lock

      // Block-strided flush: fold each shared-memory row into the gmem output row.
      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        if (block_start_row_first_tile_ + row < extent_row_) {
          user_params.red_op_(
            block_start_row_first_tile_ + row, &gmem_ptr[row], shared_elem_arr[row]);
        }
      }

      if (useGmemMutex) {
        // Make the gmem writes visible device-wide before releasing the lock.
        __threadfence();
        __syncthreads();
        if (threadIdx.x == 0 && block_start_row_first_tile_ < extent_row_) {
          // release mutex lock.
          atomicExch(user_params.mutexes_ + mutex_id, 0);
        }
      }
    }
  }

  /// Adds a pointer offset in units of Element
  CUTLASS_HOST_DEVICE
  void add_pointer_offset(LongIndex pointer_offset)
  {
    byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
  }

  /// Performs reduction and Stores a reduced output to memory
  ///
  /// Each thread first reduces its own fragment columns into a single per-row
  /// candidate, then a cooperative-groups tile reduction folds the candidates
  /// into the shared-memory row slot. The gmem write happens later, in the
  /// destructor, when do_gmem_reduction_ is set.
  CUTLASS_DEVICE
  void store_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    AccessTypeValT* frag_ptr = reinterpret_cast<AccessTypeValT*>(&frag);

    cg::thread_block cta = cg::this_thread_block();
    // tile_width 16 is required if kElementPerAccess > 1
    // NOTE(review): `(32 / ThreadMap::Delta::kColumn)` is non-zero for any
    // Delta::kColumn <= 32, so this effectively always selects 32 — confirm the
    // condition matches the intent stated in the comment above.
    constexpr int tile_width                  = (32 / ThreadMap::Delta::kColumn) ? 32 : 16;
    cg::thread_block_tile<tile_width> tile32  = cg::tiled_partition<tile_width>(cta);
    EpilogueOpParams const& user_params       = params_.user_param;

    using cg_reduce_t = decltype(user_params.cg_reduce_op);
    using tile32_t    = decltype(tile32);

    Element* shared_elem_arr = shared_storage_.data();
    constexpr auto maxVal    = std::numeric_limits<OutValT>::max();

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          const OutIdxT row_id = row_offset + thread_start_row_;
          bool row_guard       = (row_id < extent_row_);

          const int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn * kElementsPerAccess;
          // Seed the per-thread candidate with the reduction identity.
          Element red_val;
          user_params.red_op_.init(&red_val, maxVal);
          if (row_guard) {
            const int iter_row      = (row_id % total_rows);
            const auto prev_red_val = user_params.red_op_.get_value(shared_elem_arr[iter_row]);

            // Thread-local reduction over this thread's (masked) fragment columns.
            CUTLASS_PRAGMA_UNROLL
            for (int column = 0; column < ThreadMap::Iterations::kColumn * kElementsPerAccess;
                 ++column) {
              int columnPerAccess     = column / kElementsPerAccess;
              int columnWithPerAccess = column % kElementsPerAccess;
              bool guard              = mask_.predicates[column];
              if (guard) {
                const OutIdxT key_id    = thread_start_column_ +
                                       ThreadMap::Delta::kColumn * columnPerAccess +
                                       columnWithPerAccess;
                const int frag_col_idx = frag_idx + column;

                Element this_val;
                user_params.red_op_.init(&this_val, (*frag_ptr)[frag_col_idx]);
                user_params.red_op_.init_key(this_val, key_id);
                user_params.red_op_(row_id, &red_val, this_val);
              }
            }
            // select_reduce doesn't need to use `red_op_` as at the warp level we use cg_reduce_op,
            // this satisfies the requirement of mst/single linkage of checking colors buffer.
            select_reduce<cg_reduce_t, tile32_t, OutIdxT, OutValT, Element> red_obj(
              red_val, prev_red_val, user_params.cg_reduce_op, tile32, shared_elem_arr[iter_row]);
          }
        }
      }
    }
  }

  /// Stores a fragment to memory
  CUTLASS_DEVICE
  void store(Fragment& frag) const { store_with_byte_offset(frag, 0); }

  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }

  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }

  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }

  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }

  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }

  /// Advances to the next position to load or store
  ///
  /// Walks the row -> group -> cluster -> tile hierarchy, advancing the byte
  /// pointer and this thread's starting row at each carry.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorReducedVec& operator++()
  {
    ++state_[0];
    if (!ScatterD) { byte_pointer_ += params_.advance_row; }
    thread_start_row_ += ThreadMap::Shape::kRow;

    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;
      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;

      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;
        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;

        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }
    return *this;
  }

  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }

  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }

  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }

  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/epilogue.cuh | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This is adapted from DefaultEpilogueWithBroadcastTensorOp from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/master/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h#L75)
This epilogue allows us to load norm buffers using PredicatedTileIteratorNormVec
and EpilogueWithBroadcast used for distances L2/cosine as well as applies user-define elementwise
operation.
-- A norm load is provided PredicatedTileIteratorNormVec
-- B norm load is provided by EpilogueWithBroadcast
-- elementwise operation is provided by OutputOp
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <raft/distance/detail/fused_distance_nn/custom_epilogue_with_broadcast.h>
#include <raft/distance/detail/fused_distance_nn/predicated_tile_iterator_normvec_smem.h>
#include <raft/distance/detail/fused_distance_nn/predicated_tile_iterator_reduced_vec.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <typename Shape,
          typename WarpMmaTensorOp,
          int PartitionsK,
          typename ElementOutput,
          typename ElementTensor,
          typename ElementVector,
          typename OutputOp,
          typename LayoutT,
          int ElementsPerAccess,
          bool ScatterD = false>
struct FusedDistanceNNEpilogue {
  /// Use defaults related to the existing epilogue
  using Base =
    DefaultEpilogueTensorOp<Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess>;

  //
  // Stores the result z = (y = GEMM(A, B, C), broadcast)
  //
  // Iterator that loads the row-norm vector through shared memory.
  using RowNormTileIterator = cutlass::epilogue::threadblock::
    PredicatedTileIteratorNormVecSmem<typename Base::OutputTileThreadMap, ElementOutput, LayoutT>;

  //
  // Additional tensor tile iterator - stores t = Elementwise(z)
  //
  // Output iterator performing the per-row reduction of the distance tile; the
  // user-defined epilogue params are forwarded via OutputOp::Params.
  using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorReducedVec<
    typename Base::OutputTileThreadMap,
    ElementTensor,
    LayoutT,
    typename OutputOp::Params>;

  /// Define the epilogue, wiring the two custom iterators into the
  /// broadcast-capable epilogue along with the Base-provided warp iterators.
  using Epilogue = cutlass::epilogue::threadblock::EpilogueWithBroadcastCustom<
    Shape,
    WarpMmaTensorOp,
    PartitionsK,
    RowNormTileIterator,
    OutputTileIterator,
    ElementVector,
    typename Base::AccumulatorFragmentIterator,
    typename Base::WarpTileIterator,
    typename Base::SharedLoadIterator,
    OutputOp,
    typename Base::Padding,
    Base::kFragmentsPerIteration>;
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/persistent_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Problem visitor for grouped GEMMs
This file contains heavily customized version of GemmGrouped from CUTLASS 2.10.0
(https://github.com/NVIDIA/cutlass/blob/v2.10.0/include/cutlass/gemm/kernel/gemm_grouped.h)
Changes:
- adds support for only single problem size to be launched persistently
where each threablock processes more than one tile of the same problem.
*/
#pragma once
#include <cutlass/complex.h>
#include <cutlass/cutlass.h>
#include <cutlass/fast_math.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/semaphore.h>
#include <cutlass/gemm/kernel/gemm_grouped_problem_visitor.h>
#include <cutlass/gemm/kernel/gemm_transpose_operands.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/trace.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate
typename Epilogue_, ///! Epilogue
typename ThreadblockSwizzle_, ///! Threadblock swizzling function
GroupScheduleMode GroupScheduleMode_, ///! Type of scheduling to perform
bool Transposed = false>
struct FusedDistanceNNPersistent {
public:
using Mma = Mma_;
using Epilogue = Epilogue_;
using EpilogueOutputOp = typename Epilogue::OutputOp;
using ThreadblockSwizzle = ThreadblockSwizzle_;
static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
static bool const kTransposed = Transposed;
// Optional transpose
using MapArguments = kernel::detail::MapArguments<typename Mma::IteratorA::Element,
typename Mma::IteratorA::Layout,
Mma::kTransformA,
Mma::IteratorA::AccessType::kElements,
typename Mma::IteratorB::Element,
typename Mma::IteratorB::Layout,
Mma::kTransformB,
Mma::IteratorB::AccessType::kElements,
typename Mma::LayoutC,
kTransposed>;
// Public-facing type definitions related to operand element type, layout, and complex conjugate
// operation. Must interact with the 'kTransposed' notion.
using ElementA = typename MapArguments::ElementA;
using LayoutA = typename MapArguments::LayoutA;
using ElementB = typename MapArguments::ElementB;
using LayoutB = typename MapArguments::LayoutB;
using ElementC = typename Epilogue::OutputTileIterator::Element;
using LayoutC = typename MapArguments::LayoutC;
static ComplexTransform const kTransformA = MapArguments::kTransformA;
static ComplexTransform const kTransformB = MapArguments::kTransformB;
// Type definitions about the mainloop.
using Operator = typename Mma::Operator;
using OperatorClass = typename Mma::Operator::OperatorClass;
using ThreadblockShape = typename Mma::Shape;
using WarpShape = typename Mma::Operator::Shape;
using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
using ArchTag = typename Mma::ArchTag;
static int const kStages = Mma::kStages;
static int const kAlignmentA = MapArguments::kAlignmentA;
static int const kAlignmentB = MapArguments::kAlignmentB;
static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
/// Warp count (concept: GemmShape)
using WarpCount = typename Mma::WarpCount;
static int const kThreadCount = 32 * WarpCount::kCount;
using ProblemVisitor = GemmGroupedProblemVisitor<ThreadblockShape,
kGroupScheduleMode,
kThreadCount,
kThreadCount,
kTransposed>;
//
// Structures
//
struct temp_problem_visitor {
int problem_count;
CUTLASS_HOST_DEVICE temp_problem_visitor() : problem_count(0){};
CUTLASS_HOST_DEVICE temp_problem_visitor(int problem_count_) : problem_count(problem_count_){};
};
  /// Argument structure — host-side inputs describing the single persistent
  /// GEMM problem, its operand pointers, and leading dimensions.
  struct Arguments {
    //
    // Data members
    //
    GemmCoord problem_sizes;               // extents of the single (persistent) problem
    temp_problem_visitor problem_visitor;  // carries the problem count to the kernel
    // NOTE(review): this member is not written by either ctor (the default
    // ctor's `problem_count(0)` is commented out below); the authoritative
    // count lives in problem_visitor.problem_count — confirm before relying
    // on this field.
    int problem_count;
    int threadblock_count;  // number of persistent threadblocks to launch
    typename EpilogueOutputOp::Params output_op;
    void const* ptr_A;
    void const* ptr_B;
    void const* ptr_C;
    void* ptr_Vector;
    void* ptr_Tensor;
    typename LayoutA::Stride::Index lda;
    typename LayoutB::Stride::Index ldb;
    typename LayoutC::Stride::Index ldc;
    typename LayoutC::Stride::Index ldt;
    // Only used by device-level operator
    GemmCoord* host_problem_sizes;

    //
    // Methods
    //

    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments()
      : // problem_count(0),
        threadblock_count(0),
        ptr_A(nullptr),
        ptr_B(nullptr),
        ptr_C(nullptr),
        ptr_Vector(nullptr),
        ptr_Tensor(nullptr),
        lda(0),
        ldb(0),
        ldc(0),
        ldt(0),
        host_problem_sizes(nullptr)
    {
    }

    /// Ctor
    CUTLASS_HOST_DEVICE
    Arguments(GemmCoord problem_sizes,
              int problem_count,
              int threadblock_count,
              typename EpilogueOutputOp::Params output_op,
              void const* ptr_A,
              void const* ptr_B,
              void const* ptr_C,
              void* ptr_Vector,
              void* ptr_Tensor,
              typename LayoutA::Stride::Index lda,
              typename LayoutB::Stride::Index ldb,
              typename LayoutC::Stride::Index ldc,
              typename LayoutC::Stride::Index ldt,
              GemmCoord* host_problem_sizes = nullptr)
      : problem_sizes(problem_sizes),
        threadblock_count(threadblock_count),
        output_op(output_op),
        ptr_A(ptr_A),
        ptr_B(ptr_B),
        ptr_C(ptr_C),
        ptr_Vector(ptr_Vector),
        ptr_Tensor(ptr_Tensor),
        lda(lda),
        ldb(ldb),
        ldc(ldc),
        ldt(ldt),
        host_problem_sizes(host_problem_sizes)
    {
      // The count is stored on the visitor, which is what the kernel reads.
      problem_visitor.problem_count = problem_count;
    }
  };
  //
  // Structure for precomputing values in host memory and passing to kernels
  //

  /// Parameters structure — device-visible mirror of Arguments with the
  /// iterator parameter objects precomputed from the leading dimensions.
  struct Params {
    temp_problem_visitor problem_visitor;
    int threadblock_count;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorB::Params params_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::TensorTileIterator::Params params_Tensor;
    typename EpilogueOutputOp::Params output_op;
    void* ptr_A;
    void* ptr_B;
    void* ptr_C;
    void* ptr_Vector;
    void* ptr_Tensor;
    GemmCoord problem_size;
    typename LayoutA::Stride::Index lda;
    typename LayoutB::Stride::Index ldb;
    typename LayoutC::Stride::Index ldc;
    typename LayoutC::Stride::Index ldt;

    //
    // Methods
    //

    CUTLASS_HOST_DEVICE
    Params()
      : params_A(0),
        params_B(0),
        params_C(0),
        ptr_A(nullptr),
        ptr_B(nullptr),
        ptr_C(nullptr),
        ptr_Vector(nullptr),
        ptr_Tensor(nullptr),
        lda(0),
        ldb(0),
        ldc(0),
        ldt(0)
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Arguments const& args, void* workspace = nullptr, int tile_count = 0)
      : problem_size(args.problem_sizes),
        threadblock_count(args.threadblock_count),
        output_op(args.output_op),
        params_A(args.lda),
        params_B(args.ldb),
        params_C(args.ldc),
        // Here we pass additional user args via args.output_op
        // to the reduction output tile iterator
        params_Tensor(args.ldt, args.output_op),
        ptr_A(const_cast<void*>(args.ptr_A)),
        ptr_B(const_cast<void*>(args.ptr_B)),
        ptr_C(const_cast<void*>(args.ptr_C)),
        ptr_Vector(args.ptr_Vector),
        ptr_Tensor(args.ptr_Tensor),
        lda(args.lda),
        ldb(args.ldb),
        ldc(args.ldc),
        ldt(args.ldt)
    {
      problem_visitor.problem_count = args.problem_visitor.problem_count;
    }

    /// Refreshes pointers/extents from new Arguments.
    /// NOTE(review): unlike the ctor, this does NOT rebuild params_A/B/C or
    /// params_Tensor from the new leading dimensions — confirm callers only
    /// update with unchanged strides.
    CUTLASS_HOST_DEVICE
    void update(Arguments const& args, void* workspace = nullptr, int tile_count = 0)
    {
      threadblock_count = args.threadblock_count;
      output_op         = args.output_op;
      ptr_A             = const_cast<void*>(args.ptr_A);
      ptr_B             = const_cast<void*>(args.ptr_B);
      ptr_C             = const_cast<void*>(args.ptr_C);
      ptr_Vector        = args.ptr_Vector;
      ptr_Tensor        = args.ptr_Tensor;
      lda               = args.lda;
      ldb               = args.ldb;
      ldc               = args.ldc;
      ldt               = args.ldt;
      problem_size      = args.problem_sizes;
    }
  };
  /// Shared memory storage structure
  struct SharedStorage {
    // Mainloop and epilogue storage are never live simultaneously, so they
    // share the same shared-memory region via a union.
    union {
      typename Mma::SharedStorage main_loop;
      typename Epilogue::SharedStorage epilogue;
    } kernel;
    // Persistent (non-unioned) buffers that must survive across tiles:
    // the reduced per-row values and the cached row norms.
    typename Epilogue::TensorTileIterator::SharedStorage reduced_store;
    typename Epilogue::OutputTileIterator::SharedStorage rownorm_store;
  };
public:
//
// Methods
//
  /// Default-constructed kernel functor; all state arrives via Params.
  CUTLASS_DEVICE
  FusedDistanceNNPersistent() {}
  /// Determines whether kernel satisfies alignment
  /// (this specialization performs no checks and always reports success).
  static Status can_implement(cutlass::gemm::GemmCoord const& problem_size)
  {
    return Status::kSuccess;
  }

  /// Argument-based overload; likewise performs no checks.
  static Status can_implement(Arguments const& args) { return Status::kSuccess; }
  /// Extra device workspace required beyond Params (none for this kernel).
  static size_t get_extra_workspace_size(Arguments const& args,
                                         cutlass::gemm::GemmCoord const& grid_tiled_shape)
  {
    return 0;
  }
CUTLASS_DEVICE
static uint32_t tile_count(const cutlass::MatrixCoord& grid)
{
return grid.row() * grid.column();
}
/// Get the grid shape: number of threadblock tiles in M and N
/// (ceiling division of the problem extents by the threadblock tile shape).
CUTLASS_DEVICE
static cutlass::MatrixCoord grid_shape(const cutlass::gemm::GemmCoord& problem)
{
  const int tiles_m = (problem.m() + ThreadblockShape::kM - 1) / ThreadblockShape::kM;
  const int tiles_n = (problem.n() + ThreadblockShape::kN - 1) / ThreadblockShape::kN;
  return cutlass::MatrixCoord(tiles_m, tiles_n);
}
/// Executes one GEMM
/// Persistent-kernel entry point: each CTA owns a contiguous chunk of output
/// tiles and loops over them, running mainloop + fused epilogue per tile.
/// Requires SM80+ (guarded below); on older architectures this is a no-op.
CUTLASS_DEVICE
void operator()(Params const& params, SharedStorage& shared_storage)
{
#if __CUDA_ARCH__ >= 800
  //
  // These types shadow the type-level definitions and support the ability to implement
  // a 'transposed' GEMM that computes the transposed problems.
  //
  using ElementA = typename Mma::IteratorA::Element;
  using LayoutA  = typename Mma::IteratorA::Layout;
  using ElementB = typename Mma::IteratorB::Element;
  using LayoutB  = typename Mma::IteratorB::Layout;
  using ElementC = typename Epilogue::OutputTileIterator::Element;
  using LayoutC  = typename Epilogue::OutputTileIterator::Layout;

  const GemmCoord& problem_size = params.problem_size;
  const auto grid_shape_        = grid_shape(problem_size);
  // Ceil-div: number of tiles each CTA processes.
  const uint32_t problem_chunk = (tile_count(grid_shape_) - 1 + gridDim.x) / gridDim.x;
  // NOTE(review): for the last CTA this end bound may exceed the total tile
  // count; out-of-range tiles appear to be handled by the iterators'
  // extent-based predication — TODO confirm.
  const uint32_t problem_chunk_end = blockIdx.x * problem_chunk + problem_chunk;
  // Element coordinates of this CTA's first tile.
  typename LayoutB::Index column =
    ((blockIdx.x * problem_chunk) % grid_shape_.column()) * Mma::Shape::kN;
  typename LayoutB::Index row =
    ((blockIdx.x * problem_chunk) / grid_shape_.column()) * Mma::Shape::kM;
  // When the chunk does NOT start at column 0, pre-populate the shared-memory
  // buffers here; when it starts at column 0 the epilogue iterator
  // constructors perform this initialization themselves (see the
  // `threadblock_offset.column() == 0` path in the tile iterators).
  if (column) {
    shared_storage.reduced_store.initSmem(params.output_op);
    shared_storage.rownorm_store.initSmem(params.ptr_C, problem_size.m(), row, sizeof(ElementC));
  }

  // Outer 'persistent' loop to iterate over tiles
  for (uint32_t tile_idx = blockIdx.x * problem_chunk; tile_idx < problem_chunk_end; tile_idx++) {
    const auto grid_shape_ = grid_shape(problem_size);
    // Element offset of this output tile within the problem.
    cutlass::MatrixCoord threadblock_offset(
      int(tile_idx / grid_shape_.column()) * Mma::Shape::kM,
      int(tile_idx % grid_shape_.column()) * Mma::Shape::kN);

    const bool isNextTile = ((tile_idx + 1) < problem_chunk_end);
    // Row changes when this tile reaches (or passes) the last column of tiles.
    const bool doesRowChange =
      ((threadblock_offset.column() + Mma::Shape::kN) >= problem_size.n());
    // Flush the reduction to global memory at the end of a row of tiles or at
    // the CTA's final tile.
    const bool do_gmem_reduce = (doesRowChange || !isNextTile) ? true : false;

    ElementA* ptr_A = static_cast<ElementA*>(params.ptr_A);
    ElementB* ptr_B = static_cast<ElementB*>(params.ptr_B);

    // Compute initial location in logical coordinates
    cutlass::MatrixCoord tb_offset_A{threadblock_offset.row(), 0};
    cutlass::MatrixCoord tb_offset_B{0, threadblock_offset.column()};

    // Compute position within threadblock
    int thread_idx = threadIdx.x;

    // Construct iterators to A and B operands
    typename Mma::IteratorA iterator_A(
      params.params_A, ptr_A, {problem_size.m(), problem_size.k()}, thread_idx, tb_offset_A);
    typename Mma::IteratorB iterator_B(
      params.params_B, ptr_B, {problem_size.k(), problem_size.n()}, thread_idx, tb_offset_B);

    // Broadcast the warp_id computed by lane 0 to ensure dependent code
    // is compiled as warp-uniform.
    int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
    int lane_idx = threadIdx.x % 32;

    //
    // Matrix multiply phase
    //

    // Construct thread-scoped matrix multiply
    Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);

    typename Mma::FragmentC accumulators;
    accumulators.clear();

    // Compute threadblock-scoped matrix multiply-add
    int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;

    // Wait for all threads to finish their epilogue phases from the previous tile.
    //__syncthreads();

    // Compute threadblock-scoped matrix multiply-add
    mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);

    //
    // Epilogue
    //

    EpilogueOutputOp output_op(params.output_op);

    ElementC* ptr_C = static_cast<ElementC*>(params.ptr_C);
    typename Epilogue::ElementTensor* ptr_Tensor =
      static_cast<typename Epilogue::ElementTensor*>(params.ptr_Tensor);

    // Define the reduction output pointer and move to the appropriate place
    typename Epilogue::ElementVector* ptr_Vector =
      static_cast<typename Epilogue::ElementVector*>(params.ptr_Vector);

    // Tile iterator loading from source tensor.
    typename Epilogue::OutputTileIterator iterator_rownorm(shared_storage.rownorm_store,
                                                           params.params_C,
                                                           ptr_C,
                                                           problem_size.mn(),
                                                           thread_idx,
                                                           threadblock_offset);

    // Additional tensor to load from
    typename Epilogue::TensorTileIterator tensor_iterator(shared_storage.reduced_store,
                                                          params.params_Tensor,
                                                          // Only the final block outputs Tensor
                                                          ptr_Tensor,
                                                          problem_size.mn(),
                                                          thread_idx,
                                                          do_gmem_reduce,
                                                          threadblock_offset);

    Epilogue epilogue(shared_storage.kernel.epilogue, thread_idx, warp_idx, lane_idx);

    // Execute the epilogue operator to update the destination tensor.
    // Move to appropriate location for this output tile
    if (ptr_Vector) { ptr_Vector += threadblock_offset.column(); }

    // Execute the epilogue operator to update the destination tensor.
    epilogue(output_op,
             ptr_Vector,
             // iterator_D,
             accumulators,
             iterator_rownorm,
             tensor_iterator,
             problem_size.mn(),
             threadblock_offset);
  }
#endif
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/predicated_tile_iterator_normvec_smem.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- Only the row index is used to load the data in load_with_byte_offset().
This way the same normalization data is used across all columns in a row.
*/
#pragma once
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <raft/util/device_loads_stores.cuh>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
          typename Element_,   ///< Element data type
          typename Layout_,    ///< Layout of the norm vector source tensor
          bool ScatterD     = false, ///< Scatter D operand or not
          bool UseCUDAStore = false>
class PredicatedTileIteratorNormVecSmem {
 public:
  using ThreadMap = ThreadMap_;
  using Shape     = typename ThreadMap::Shape;

  using Element = Element_;

  using Layout         = Layout_;
  using TensorRef      = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index       = typename Layout::Index;
  using LongIndex   = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads           = ThreadMap::kThreads;
  static int const kIterations        = ThreadMap::Count::kTile;

  // Total number of rows a threadblock covers across the full output tile;
  // this is the extent of the shared-memory row-norm cache below.
  static int const total_rows = ThreadMap::kWarpCount * ThreadMap::Iterations::kRow *
                                ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                                ThreadMap::Count::kTile * ThreadMap::Delta::kRow;

  static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
  static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
  static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
  static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");

  /// Fragment object
  using Fragment = Array<Element,
                         ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup *
                           ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;

  /// Memory access size
  using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;

  //
  // Parameters struct
  //

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    CUTLASS_HOST_DEVICE
    Params() {}

    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          // Stride in bytes per access-sized unit.
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };

  /// Mask object
  struct Mask {
    static int const kCount = ThreadMap::Iterations::kColumn;

    /// Predicate state
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< Enables all accesses guarded by mask
    // NOTE(review): this is CUTLASS_DEVICE while the constructor calling it is
    // CUTLASS_HOST_DEVICE — presumably the constructor is only ever
    // instantiated on the device path; confirm if host-side use is added.
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };

  /// Shared storage allocation needed by the predicated tile
  // iterator for storing rowNorm chunk.
  struct SharedStorage {
    //
    // Type definitions
    //
    using Shape = MatrixShape<total_rows, 1>;

    /// Shape of the shared memory allocation
    using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>;

    //
    // Data members
    //
    // Methods
    //
    AlignedBuffer<Element, StorageShape::kCount> storage;

    CUTLASS_DEVICE
    Element* data() { return storage.data(); }

    SharedStorage() {}

    /// Stages one element per row from global memory into the shared-memory
    /// cache, predicated on the row being within `num_rows`.
    CUTLASS_DEVICE
    void initSmem(void* pointer,
                  const Index& num_rows,
                  const Index& tb_row_offset,
                  const LongIndex& stride)
    {
      Element* shared_elem_arr = data();
      uint8_t* first_tile_byte_pointer_ =
        reinterpret_cast<uint8_t*>(pointer) + LongIndex(tb_row_offset) * LongIndex(stride);
      const auto gmem_ptr = reinterpret_cast<Element*>(first_tile_byte_pointer_);

      // Block-strided copy: each thread handles rows tid, tid+blockDim.x, ...
      // NOTE(review): cp_async_wait<0> inside the loop serializes each copy;
      // presumably acceptable here, but waiting once after the loop would
      // allow the copies to overlap — confirm before changing.
      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        bool guard = (tb_row_offset + row) < num_rows;
        cutlass::arch::cp_async<sizeof(Element)>(shared_elem_arr + row, gmem_ptr + row, guard);
        cutlass::arch::cp_async_wait<0>();
      }
    }
  };

 private:
  //
  // Data members
  //

  /// Parameters structure containing reference and precomputed state.
  PredicatedTileIteratorParams params_;

  /// Byte-level pointer
  uint8_t* byte_pointer_;

  /// Array of boolean values to contain steady-state predicates
  Mask mask_;

  /// Extent of the matrix tile in rows
  Index extent_row_;

  /// Extent of the matrix tile in columns
  Index extent_column_;

  /// A thread's starting row position (assuming steady-state predicates have been computed)
  Index thread_start_row_;

  /// A thread's starting column
  Index thread_start_column_;

  /// Internal state counter
  int state_[3];

  /// Scatter indices
  int const* indices_;

  //
  // Static asserts about internal strides
  //

  static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");

 private:
  //
  // Methods
  //

 protected:
  /// Shared-memory cache of the row-norm chunk for this threadblock.
  SharedStorage& shared_storage_;

 public:
  //
  // Methods
  //

  /// Constructor
  /// Computes per-thread offsets/predicates and, when this tile starts at
  /// column 0, populates the shared-memory row-norm cache from `pointer`.
  CUTLASS_DEVICE
  PredicatedTileIteratorNormVecSmem(SharedStorage& shared_storage,
                                    PredicatedTileIteratorParams const& params,
                                    Element* pointer,
                                    TensorCoord extent,
                                    int thread_idx,
                                    TensorCoord& threadblock_offset,
                                    int const* indices = nullptr)
    : params_(params), indices_(indices), shared_storage_(shared_storage)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;

    extent_row_    = extent.row();
    extent_column_ = extent.column();

    thread_start_row_    = thread_offset.row();
    thread_start_column_ = thread_offset.column();

    // Initialize predicates
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
      mask_.predicates[c] =
        ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
    }

    // Null pointer performs no accesses
    if (!pointer) {
      mask_.clear();
      return;
    }

    if (ScatterD && !indices) { mask_.clear(); }

    // Initialize pointer
    byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                    LongIndex(thread_offset.row()) * LongIndex(params_.stride);

    if (ScatterD) {
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }

    // Only the tiles at the start of a row of tiles fill the cache; later
    // tiles in the same row reuse it (see initSmem call sites in the kernel).
    if (threadblock_offset.column() == 0) {
      shared_storage_.initSmem(pointer, extent_row_, threadblock_offset.row(), params_.stride);
    }

    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }

  /// Adds a pointer offset in units of Element
  CUTLASS_HOST_DEVICE
  void add_pointer_offset(LongIndex pointer_offset)
  {
    byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
  }

  /// Loads a fragment from memory
  /// Reads come from the shared-memory cache, not global memory; only the row
  /// index selects the value, which is then broadcast across the access width
  /// so every column of a row sees the same norm value.
  CUTLASS_DEVICE
  void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);

    Element* shared_elem_arr = shared_storage_.data();

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          // Wrap into the cache; thread_start_row_ advances past total_rows
          // as operator++ walks tiles, while the cached chunk stays fixed.
          int iter_row = ((row_offset + thread_start_row_) % total_rows);
          Element val  = shared_elem_arr[iter_row];

          CUTLASS_PRAGMA_UNROLL
          for (int i = 0; i < kElementsPerAccess; ++i) {
            (*frag_ptr)[frag_row_idx + i] = val;
          }
        }
      }
    }
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }

  /// Per-thread starting coordinate within the output tile.
  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }

  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }

  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }

  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }

  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }

  /// Advances to the next position to load or store
  /// Walks the row -> group -> cluster -> tile hierarchy of the thread map,
  /// updating the byte pointer and the thread's logical starting row.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorNormVecSmem& operator++()
  {
    ++state_[0];

    if (!ScatterD) { byte_pointer_ += params_.advance_row; }

    thread_start_row_ += ThreadMap::Shape::kRow;

    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;

      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;

      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;

        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;

        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }

    return *this;
  }

  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }

  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }

  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }

  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/fused_distance_nn/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/kernel/default_gemm_universal.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <raft/distance/detail/fused_distance_nn/epilogue.cuh>
#include <raft/distance/detail/fused_distance_nn/persistent_gemm.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*
* This configuration is used for float inputs with veclen(kAlignmentA/B) = 2 or 4,
* ideal threadblock tile shape is 32x256x16 for such cases as there is no
* registers spills for it.
*
*/
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Alignment (vector length of accesses) for A matrix operand
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Alignment (vector length of accesses) for B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct FusedDistanceNNGemm {
  // This struct is specialized for fp32/3xTF32
  /// Threadblock-level tile size (concept: GemmShape)
  // <- threadblock tile M = 32, N = 256, K = 16
  // this is more performant but note that for veclen = 1
  // this shape has register spills
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 16>;

  // <- threadblock tile M = 32, N = 128, K = 16
  // this shape has high occupancy but less perf
  // this is less performant but this shape has *no* register spills
  // for any veclens(1, 2, 4)
  // using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;

  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  // <- warp tile M = 32, N = 64, K = 16
  // this is more performant for veclen 2,4.
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;

  // this shape has high occupancy but less perf used for 32x128x16
  // using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;

  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op
  // <- MMA Op tile M = 16, N = 8, K = 4
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>;

  /// Operation performed by GEMM (3xTF32 emulated fp32 math)
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // using Operator = cutlass::arch::OpMultiplyAdd; // this runs only 1xTF32 for float inputs

  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;

  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;

  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;

  // Layout of the norm vector matches the input layout.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;

  // Base CUTLASS universal GEMM kernel from which Mma and the default
  // epilogue shape/config are extracted.
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentA,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentB,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;

  // Replace epilogue with the fused distance + nearest-neighbor epilogue.
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;

  // Compose the GEMM kernel
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
/*
* This configuration is used for float inputs with veclen(kAlignmentA/B) = 1,
* ideal threadblock tile shape is 32x128x16 for such cases as there is no
* registers spills for it.
*
*/
template <
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct FusedDistanceNNGemm<float, /// Element type for A matrix operand
                           1,     /// Alignment (veclen = 1) for A matrix operand
                           float, /// Element type for B matrix operand
                           1,     /// Alignment (veclen = 1) for B matrix operand
                           ElementC_,
                           ElementAccumulator,
                           EpilogueOutputOp,
                           Stages,
                           isRowMajor> {
  // This struct is specialized for fp32/3xTF32 with unit-alignment inputs;
  // it uses a smaller threadblock tile to avoid register spills at veclen 1.
  using ElementA_ = float;
  using ElementB_ = float;

  /// Threadblock-level tile size (concept: GemmShape)
  // <- threadblock tile M = 32, N = 128, K = 16
  // this shape has high occupancy and no register spills for veclen = 1.
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;

  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  // <- warp tile M = 32, N = 32, K = 16
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;

  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op
  // <- MMA Op tile M = 16, N = 8, K = 4
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>;

  /// Operation performed by GEMM (3xTF32 emulated fp32 math)
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // using Operator = cutlass::arch::OpMultiplyAdd; // this runs only 1xTF32 for float inputs

  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;

  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;

  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;

  // Layout of the norm vector matches the input layout.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;

  // Base CUTLASS universal GEMM kernel (alignment 1 for both operands).
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;

  // Replace epilogue with the fused distance + nearest-neighbor epilogue.
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;

  // Compose the GEMM kernel
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
template <
  /// Alignment (vector length) for A matrix operand
  int kAlignmentA,
  /// Alignment (vector length) for B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct FusedDistanceNNGemm<double,
                           kAlignmentA,
                           double,
                           kAlignmentB,
                           ElementC_,
                           ElementAccumulator,
                           EpilogueOutputOp,
                           Stages,
                           isRowMajor> {
  // This struct is specialized for fp64.
  // Threadblock-level tile size (concept: GemmShape)
  // <- threadblock tile M = 64, N = 64, K = 16
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
  // using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  // <- warp tile M = 32, N = 32, K = 16
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  // using WarpShape = cutlass::gemm::GemmShape<16, 32, 16>;
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op (fp64 tensor op shape)
  using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;

  // Operation performed by GEMM (true fp64 multiply-add)
  using Operator = cutlass::arch::OpMultiplyAdd;
  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;

  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;

  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;

  // Layout of the norm vector matches the input layout.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;

  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;

  // Base CUTLASS universal GEMM kernel.
  // NOTE(review): kAlignmentA/kAlignmentB template params are not forwarded
  // here — alignment is hard-coded to 1 for fp64; presumably intentional,
  // confirm before forwarding them.
  using GemmBase = typename DefaultGemmUniversal<double,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 double,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;

  // Replace epilogue with the fused distance + nearest-neighbor epilogue.
  // (Uses ElementC_ for the accumulator/compute slots, unlike the fp32 paths.)
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementC_,
    typename EpilogueOutputOp::ElementT,
    ElementC_,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;

  // Compose the GEMM kernel
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass | 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <raft/distance/detail/pairwise_matrix/dispatch_layout.cuh> // dispatch_layout
#include <raft/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
namespace raft::distance::detail {
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT> pairwise_matrix_sm60_get_wrapper(
  OpT distance_op,
  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
  SM_compat_t sm_compat_range)
{
  // Widest vector load width (1, 2 or 4 elements) supported by the alignment
  // of the inputs.
  int vec_len = determine_vec_len(params);

  // Invoked by dispatch_layout below with row_major and vec_len_aligned as
  // std::integral_constant<bool, ..> / std::integral_constant<int, ..>.
  // Produces the kernel wrapper (kernel function pointer plus grid size,
  // block size and shared memory size) for those compile-time constants.
  auto make_wrapper = [&](auto row_major, auto vec_len_aligned) {
    // To keep compile times in check, only specialize on vec_len > 1 when the
    // inner loop is relatively cheap (< 5 flops).
    constexpr int vec_len_op = distance_op.expensive_inner_loop ? 1 : vec_len_aligned();

    // Cap loads at 16 bytes; this rules out the unsupported combination of
    // double with vec_len = 4.
    constexpr int vec_len = std::min(vec_len_op, static_cast<int>(16 / sizeof(DataT)));

    using RowPolicy = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;
    using ColPolicy = typename raft::linalg::Policy4x4<DataT, vec_len>::ColPolicy;
    using Policy    = typename std::conditional<row_major(), RowPolicy, ColPolicy>::type;

    return make_pairwise_matrix_sm60_wrapper<Policy, row_major()>(
      distance_op, params, sm_compat_range);
  };

  // dispatch_layout converts the run-time values params.is_row_major and
  // vec_len into compile-time constants and calls make_wrapper with them.
  return dispatch_layout(params.is_row_major, vec_len, make_wrapper);
}
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
void pairwise_matrix_sm60_dispatch(OpT distance_op,
                                   pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
                                   SM_compat_t sm_compat_range,
                                   cudaStream_t stream)
{
  // Build the launch wrapper for the current layout/alignment, then launch
  // the kernel on the given stream.
  pairwise_matrix_sm60_get_wrapper(distance_op, params, sm_compat_range)
    .launch(distance_op, params, stream);
}
} // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch_layout.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <cstdint> // size_t
#include <raft/core/error.hpp> // RAFT_EXPECTS
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <type_traits> // std::integral_constant
namespace raft::distance::detail {
/**
* @brief: Computes minimal common alignment of the rows in a 2D array in bytes
*
* The 2D matrix `x` is assumed to be row-major. This function computes the
* minimal alignment in bytes of the first elements of each row.
* Output can be 16, 8, 4, 2, 1.
*
* @param x Base pointer of row-major input matrix
* @param stride Stride in number of element between consecutive rows.
*/
template <typename DataT>
size_t alignment_of_2d_array(const DataT* x, size_t stride)
{
  auto base           = reinterpret_cast<uintptr_t>(x);
  size_t stride_bytes = sizeof(DataT) * stride;

  // Try alignments from largest to smallest. The loop stops at 2 instead of
  // counting down to 0: the previous `align >= 0` bound let the loop variable
  // reach 0, leaving a (dead, but UB-if-ever-reached) modulo-by-zero path.
  for (int align = 16; align > 1; align /= 2) {
    bool base_aligned   = base % align == 0;
    bool stride_aligned = stride_bytes % align == 0;
    if (base_aligned && stride_aligned) { return align; }
  }
  // Every address is 1-byte aligned.
  return 1;
}
/**
* @brief: Computes the vec_len parameter kernel policy parameter
*
* @param params Kernel parameters
*/
template <typename IdxT, typename DataT, typename OutT, typename FinOpT>
int determine_vec_len(pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params)
{
  // The usable vector width is limited by the weaker alignment of the two
  // input matrices.
  size_t align_x = alignment_of_2d_array(params.x, params.ldx);
  size_t align_y = alignment_of_2d_array(params.y, params.ldy);
  // Use std::min explicitly (from <algorithm>, included above) rather than an
  // unqualified `min`, which resolved to CUDA's global-namespace overload in
  // host code and is not portable.
  size_t byte_alignment = std::min(align_x, align_y);

  // Since alignment is in bytes, it could be smaller than sizeof(DataT).
  // Handle this (unlikely) case here.
  RAFT_EXPECTS(sizeof(DataT) <= byte_alignment,
               "Input matrix must be aligned to size of elements.");

  // Compute number of elements that can be loaded in one instruction
  // without causing misalignent errors.
  int vec_len_aligned = (byte_alignment % sizeof(DataT) == 0) ? byte_alignment / sizeof(DataT) : 1;

  // In the future, pairwise_matrix might support `int8_t` input. In that case,
  // byte_alignment / sizeof(DataT) might exceed 4. We maximize at 4 here, to
  // prevent adding more cases in dispatch_layout below (which are expensive to
  // compile).
  vec_len_aligned = std::min(vec_len_aligned, 4);

  return vec_len_aligned;
}
template <int n>
using vec_len_constant = std::integral_constant<int, n>;

/**
 * @brief: Converts run-time arguments to compile-time arguments
 *
 * Maps the run-time values of row_major and vec_len to
 * std::integral_constant tags and invokes `f` with them. This is equivalent
 * to copy-pasting `f`'s body into each branch.
 *
 * @tparam F Type of lambda f.
 * @param row_major Boolean indicating whether input arrays have row-major layout.
 * @param vec_len Integer value 1, 2, or 4 specifying the Veclen template parameter of
 * the KernelPolicy.
 * @param f Lambda that takes two std::integral_constant parameters representing
 * row_major and vec_len.
 */
template <typename F>
auto dispatch_layout(bool row_major, int vec_len, F&& f)
{
  // Bind vec_len to a compile-time constant for a fixed layout tag, then
  // invoke the user-provided functor. Unrecognized vec_len values fall back
  // to 1 (always valid).
  auto call_with_vec_len = [&](auto layout_tag) {
    switch (vec_len) {
      case 4: return f(layout_tag, vec_len_constant<4>());
      case 2: return f(layout_tag, vec_len_constant<2>());
      default: return f(layout_tag, vec_len_constant<1>());
    }
  };
  if (row_major) { return call_with_vec_len(std::true_type()); }
  return call_with_vec_len(std::false_type());
}
}; // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "dispatch-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "dispatch-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch-ext.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/distance_ops/cutlass.cuh> // ops::has_cutlass_op
#include <raft/distance/detail/kernels/rbf_fin_op.cuh> // rbf_fin_op
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::distance::detail {
template <typename OpT,
typename DataT,
typename AccT,
typename OutT,
typename FinOpT,
typename IdxT = int>
void pairwise_matrix_dispatch(OpT distance_op,
IdxT m,
IdxT n,
IdxT k,
const DataT* x,
const DataT* y,
const DataT* x_norm,
const DataT* y_norm,
OutT* out,
FinOpT fin_op,
cudaStream_t stream,
bool is_row_major) RAFT_EXPLICIT;
}; // namespace raft::distance::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
extern template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
cudaStream_t stream, \
bool is_row_major)
/*
* Hierarchy of instantiations:
*
* This file defines extern template instantiations of the distance kernels. The
* instantiation of the public API is handled in raft/distance/distance-ext.cuh.
*
* After adding an instance here, make sure to also add the instance there.
*/
// The following two instances are used in the RBF kernel object. Note the use of int64_t for the
// index type.
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op,
float,
float,
float,
raft::distance::kernels::detail::rbf_fin_op<float>,
int64_t);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op,
double,
double,
double,
raft::distance::kernels::detail::rbf_fin_op<double>,
int64_t);
// Rest of instances
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::canberra_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::canberra_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::correlation_distance_op,
float,
float,
float,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::correlation_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::cosine_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::cosine_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::hamming_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::hamming_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::hellinger_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::hellinger_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::jensen_shannon_distance_op,
float,
float,
float,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::jensen_shannon_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::kl_divergence_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::kl_divergence_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l1_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l1_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_exp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_exp_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_unexp_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l_inf_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l_inf_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::lp_unexp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::lp_unexp_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::russel_rao_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::russel_rao_distance_op,
double,
double,
double,
raft::identity_op,
int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch-inl.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/* This file has two responsibilities:
*
* 1. Dispatch to the correct implementation of a kernel based on the
* architecture of the device on which the kernel will be launched. For
* instance, the cosine distance has a CUTLASS-based implementation that can
* be used on SM80+ and the normal implementation that is used on older
* architectures.
*
* 2. Provide concise function templates that can be instantiated in
* src/distance/detail/pairwise_matrix/. Previously,
* raft::distance::detail::distance was instantiated. The function
* necessarily required a large set of include files, which slowed down the
* build. The raft::distance::detail::pairwise_matrix_arch_dispatch functions
* do not require as large an include files set, which speeds up the build.
*/
#include <raft/distance/detail/distance_ops/cutlass.cuh> // ops::has_cutlass_op
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh> // dispatch_sm60
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/util/arch.cuh> // raft::util::arch::SM_*
// NOTE: to minimize compile times, we do not include dispatch_sm80.cuh.
// Including dispatch_sm80.cuh can slow down compile times (due to CUTLASS).
// Therefore, it is the including file's responsibility to include the correct
// dispatch_smXX.cuh headers, as is done in raft/distance/detail/distance.cuh
// and src/distance/detail/pairwise_matrix/dispatch_*.cu.
namespace raft::distance::detail {
// This forward-declaration ensures that we do not need to include
// dispatch_sm80.cuh if we are not calling it in practice. This makes compiling
// all the non-CUTLASS based distance instantiations faster. For CUTLASS-based
// distances, dispatch_sm80.cuh has to be included by the file including this
// file.
template <typename OpT,
typename IdxT,
typename DataT,
typename OutT,
typename FinOpT,
typename SM_compat_t>
void pairwise_matrix_sm80_dispatch(OpT,
pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>,
SM_compat_t,
cudaStream_t);
/** @brief Compute the m x n pairwise distance matrix and dispatch to the
 * kernel implementation appropriate for the current device architecture.
 *
 * @param distance_op Distance operation (one of ops::*)
 * @param m Number of rows of x (and of out)
 * @param n Number of rows of y (and columns of out)
 * @param k Number of columns of x and y
 * @param x First input matrix, m x k
 * @param y Second input matrix, n x k
 * @param x_norm Row norms of x (read only by distance ops that use norms)
 * @param y_norm Row norms of y (read only by distance ops that use norms)
 * @param out Output matrix, m x n
 * @param fin_op Final element-wise operation applied to each output element
 * @param stream CUDA stream to launch on
 * @param is_row_major Layout of x, y and out
 */
template <typename OpT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename FinOpT,
          typename IdxT = int>
void pairwise_matrix_dispatch(OpT distance_op,
                              IdxT m,
                              IdxT n,
                              IdxT k,
                              const DataT* x,
                              const DataT* y,
                              const DataT* x_norm,
                              const DataT* y_norm,
                              OutT* out,
                              FinOpT fin_op,
                              cudaStream_t stream,
                              bool is_row_major)
{
  // Create kernel parameter struct. Flip x and y if column major.
  IdxT ldx    = is_row_major ? k : m;
  IdxT ldy    = is_row_major ? k : n;
  IdxT ld_out = is_row_major ? n : m;

  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params{
    m, n, k, ldx, ldy, ld_out, x, y, x_norm, y_norm, out, fin_op, is_row_major};

  if (!params.is_row_major) { params.flip_x_and_y(); }

  // Dispatch rule:
  // - execute CUTLASS-based kernel on SM_80 and above
  // - execute normal kernel below SM_80
  namespace arch = raft::util::arch;

  // Compile-time check: if the op has no CUTLASS implementation, the SM80
  // branch (and its heavy includes) is never instantiated.
  constexpr bool cutlass_op_unavailable = !ops::has_cutlass_op<OpT>();

  if constexpr (cutlass_op_unavailable) {
    // Always execute legacy kernels when no cutlass op is available
    auto any_range = arch::SM_range(arch::SM_min(), arch::SM_future());
    pairwise_matrix_sm60_dispatch(distance_op, params, any_range, stream);
  } else {
    auto cutlass_range = arch::SM_range(arch::SM_80(), arch::SM_future());
    auto legacy_range  = arch::SM_range(arch::SM_min(), arch::SM_80());

    // Get pointer to SM60 kernel to determine the best compute architecture
    // out of all for which the kernel was compiled for that matches closely
    // to the current device. Other methods to determine the architecture (that do not
    // require a pointer) can be error prone. See:
    // https://github.com/NVIDIA/cub/issues/545
    auto sm60_wrapper = pairwise_matrix_sm60_get_wrapper(distance_op, params, legacy_range);
    void* kernel_ptr  = reinterpret_cast<void*>(sm60_wrapper.kernel_ptr);
    auto runtime_arch = arch::kernel_virtual_arch(kernel_ptr);

    if (cutlass_range.contains(runtime_arch)) {
      // If device is SM_80 or later, use CUTLASS-based kernel.
      pairwise_matrix_sm80_dispatch(distance_op, params, cutlass_range, stream);
    } else {
      // Reuse kernel wrapper that we obtained above. This avoids performing the
      // dispatch twice.
      sm60_wrapper.launch(distance_op, params, stream);
    }
  }
}
}; // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/params.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <utility>  // std::swap
namespace raft::distance::detail {
/**
 * @brief Parameter bundle passed to the pairwise-matrix kernels.
 *
 * Layout conventions follow pairwise_matrix_dispatch: x is m x k, y is n x k,
 * out is m x n; the leading dimensions depend on is_row_major.
 */
template <typename IdxT, typename DataT, typename OutT, typename FinOpT>
struct pairwise_matrix_params {
  IdxT m;       // Number of rows of x (and of out)
  IdxT n;       // Number of rows of y (and columns of out)
  IdxT k;       // Number of columns of x and y
  IdxT ldx;     // Leading dimension of x
  IdxT ldy;     // Leading dimension of y
  IdxT ld_out;  // Leading dimension of out
  const DataT* x;       // First input matrix
  const DataT* y;       // Second input matrix
  const DataT* x_norm;  // Row norms of x; read only by distance ops that use norms
  const DataT* y_norm;  // Row norms of y; read only by distance ops that use norms
  OutT* out;            // Output distance matrix
  FinOpT fin_op;        // Final element-wise operation applied to the output
  bool is_row_major;    // Layout of x, y, and out

  /// @brief: Flips the x and y input and corresponding sizes
  void flip_x_and_y()
  {
    // Flip m, n; ldx, ldy; x, y; x_norm, y_norm.
    // Note: std::swap requires <utility>.
    std::swap(m, n);
    std::swap(ldx, ldy);
    std::swap(x, y);
    std::swap(x_norm, y_norm);
  }
};
} // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/dispatch_sm80.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <raft/distance/detail/pairwise_distance_cutlass_base.cuh> // cutlassDistanceKernel
#include <raft/distance/detail/pairwise_matrix/dispatch_layout.cuh> // dispatch_layout
namespace raft::distance::detail {
/** @brief Launch the CUTLASS-based pairwise distance kernel (SM80 path).
 *
 * Determines the widest vector load width permitted by the input alignment,
 * converts the run-time layout/vec_len values into compile-time constants,
 * and invokes cutlassDistanceKernel accordingly.
 *
 * @param distance_op Distance operation; callers only take this path when
 *                    ops::has_cutlass_op<OpT>() holds (see dispatch-inl.cuh)
 * @param params Kernel parameters (matrices, sizes, leading dims, final op)
 * @param sm_compat_range SM architecture range the kernel is compiled for
 * @param stream CUDA stream to launch on
 */
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
void pairwise_matrix_sm80_dispatch(OpT distance_op,
                                   pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
                                   SM_compat_t sm_compat_range,
                                   cudaStream_t stream)
{
  // Widest vector load width (1, 2, or 4 elements) supported by the
  // alignment of x and y.
  int vec_len = determine_vec_len(params);

  // f takes compile-time constants row_major and vec_len aligned and runs the
  // corresponding cutlass launch code.
  auto f = [&](auto row_major, auto vec_len_aligned) {
    // row_major and vec_len are std::integral_constants of type bool and int
    // respectively.

    // Prevent double, vec_len=4 combination (this is not supported)
    constexpr int vec_len = std::min(vec_len_aligned(), static_cast<int>(16 / sizeof(DataT)));

    // The accumulator type is dictated by the distance op.
    using AccT = typename OpT::AccT;
    cutlassDistanceKernel<DataT, AccT, OutT, IdxT, vec_len, FinOpT, OpT, row_major()>(params.x,
                                                                                     params.y,
                                                                                     params.x_norm,
                                                                                     params.y_norm,
                                                                                     params.m,
                                                                                     params.n,
                                                                                     params.k,
                                                                                     params.ldx,
                                                                                     params.ldy,
                                                                                     params.ld_out,
                                                                                     params.out,
                                                                                     params.fin_op,
                                                                                     distance_op,
                                                                                     stream);
  };

  // Dispatch_layout calls f with appropriate compile time constants based on
  // the runtime values of params.is_row_major and vec_len.
  dispatch_layout(params.is_row_major, vec_len, f);
}
}; // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/pairwise_matrix/kernel_sm60.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert> // assert
#include <raft/core/operators.hpp> // raft::void_op
#include <raft/distance/detail/pairwise_distance_base.cuh> // PairwiseDistances
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace raft::distance::detail {
/** @brief Pairwise distance kernel (pre-SM80 path).
 *
 * Thin wrapper around PairwiseDistances: hands it a dynamic shared-memory
 * buffer, the distance op, and params.fin_op, then runs it.
 *
 * Launch requirements (see make_pairwise_matrix_sm60_wrapper):
 * - Policy::Nthreads threads per block (enforced by __launch_bounds__)
 * - dynamic shared memory of OpT::shared_mem_size<Policy>() bytes
 */
template <typename Policy,
          bool row_major,
          typename SM_compat_t,
          typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL
  pairwise_matrix_kernel(OpT distance_op, pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params)
{
  // Early exit to minimize the size of the kernel when it is not supposed to be compiled.
  // (sm_compat_range is a compile-time constant; the whole body compiles away
  // for architectures outside the range. The assert flags an unexpected
  // launch on such an architecture.)
  constexpr SM_compat_t sm_compat_range{};
  if constexpr (!sm_compat_range.contains(raft::util::arch::SM_compute_arch())) {
    assert(false);
    return;
  }

  // Dynamic shared memory buffer; size supplied at launch.
  extern __shared__ char smem[];

  // The epilog is already provided by distance_op. Do not provide additional
  // epilogs.
  auto epilog_op = raft::void_op();
  // No support for row_epilog_op.
  auto row_epilog_op = raft::void_op();

  // Always write output
  constexpr bool write_out = true;
  constexpr bool use_norms = distance_op.use_norms;
  PairwiseDistances<DataT,
                    OutT,
                    IdxT,
                    Policy,
                    decltype(distance_op),
                    decltype(epilog_op),
                    decltype(params.fin_op),
                    decltype(row_epilog_op),
                    row_major,
                    write_out>
    obj(params.x,
        params.y,
        params.m,
        params.n,
        params.k,
        params.ldx,
        params.ldy,
        params.ld_out,
        params.x_norm,
        params.y_norm,
        params.out,
        smem,
        distance_op,
        epilog_op,
        params.fin_op,
        row_epilog_op);
  obj.run();
}
// The type of a pointer to the pairwise matrix kernel. The following template
// arguments are type-erased:
//
// - The kernel policy
// - row_major
// - SM_compat_t
template <typename OpT, typename IdxT, typename DataT, typename OutT, typename FinOpT>
using pairwise_matrix_kernel_t = void (*)(OpT, pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>);

// A wrapper for the pairwise matrix kernel launch. Includes kernel launch
// parameters.
template <typename OpT, typename IdxT, typename DataT, typename OutT, typename FinOpT>
struct pairwise_matrix_sm60_wrapper {
  dim3 grid;      // Grid dimensions for the launch
  dim3 block;     // Block dimensions (Policy::Nthreads in the factory below)
  int smem_size;  // Dynamic shared memory size in bytes
  // Type-erased pointer to the fully specialized kernel.
  pairwise_matrix_kernel_t<OpT, IdxT, DataT, OutT, FinOpT> kernel_ptr;

  /** @brief Launch the wrapped kernel on the given stream.
   *
   * @param distance_op Distance operation
   * @param params Kernel parameters
   * @param stream CUDA stream to launch on
   */
  void launch(OpT distance_op,
              pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
              cudaStream_t stream)
  {
    kernel_ptr<<<grid, block, smem_size, stream>>>(distance_op, params);
    // Surface launch-configuration errors immediately.
    RAFT_CUDA_TRY(cudaGetLastError());
  }
};
/** @brief: Create kernel launch wrapper for pairwise matrix kernel
 *
 * Type-erases the kernel execution policy, row_major flag, and SM
 * compatibility range into a pairwise_matrix_sm60_wrapper holding the kernel
 * pointer and its launch configuration.
 *
 * @tparam Policy: Kernel execution policy
 * @tparam row_major: Indicates whether input matrices are row major
 * @tparam OpT: Type of distance operation
 * @tparam IdxT: Index type
 * @tparam DataT: Data type
 * @tparam OutT: Output data type
 * @tparam FinOpT: Final operation type
 * @tparam SM_compat_t: Type of the SM architecture compatibility
 *
 * @param distance_op: Distance operation
 * @param params: Parameters
 * @param sm_compat_range: Which SM architectures to compile for.
 */
template <typename Policy,
          bool row_major,
          typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT> make_pairwise_matrix_sm60_wrapper(
  OpT distance_op,
  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
  SM_compat_t sm_compat_range)
{
  // Shared memory requirement of the kernel for this policy. The ::template
  // disambiguator is required for the dependent name (see:
  // https://en.cppreference.com/w/cpp/language/dependent_name)
  int shared_mem_bytes = OpT::template shared_mem_size<Policy>();

  // Function pointer to the fully specialized kernel.
  auto kernel_fp =
    pairwise_matrix_kernel<Policy, row_major, SM_compat_t, OpT, IdxT, DataT, OutT, FinOpT>;

  dim3 block_dim(Policy::Nthreads);
  dim3 grid_dim = launchConfigGenerator<Policy>(params.m, params.n, shared_mem_bytes, kernel_fp);

  return pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT>{
    grid_dim, block_dim, shared_mem_bytes, kernel_fp};
}
}; // namespace raft::distance::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/kernels/kernel_matrices.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "gram_matrix.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/detail/kernels/rbf_fin_op.cuh>
#include <raft/distance/distance.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/sparse/linalg/norm.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft::distance::kernels::detail {
/** Epiloge function for polynomial kernel without padding.
* Calculates output = (gain*in + offset)^exponent
* @param inout device vector in column major format, size [len]
* @param len array length
* @param exponent
* @param gain
* @param offset
*/
template <typename math_t, typename exp_t>
RAFT_KERNEL polynomial_kernel_nopad(
  math_t* inout, size_t len, exp_t exponent, math_t gain, math_t offset)
{
  // In-place grid-stride loop: inout[i] = (gain * inout[i] + offset)^exponent.
  const size_t grid_stride = blockDim.x * gridDim.x;
  for (size_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += grid_stride) {
    inout[idx] = pow(gain * inout[idx] + offset, exponent);
  }
}
/** Epiloge function for polynomial kernel with padding.
* Calculates output = (gain*input + offset)^exponent
* @param inout device vector in column major format, size [ld * cols]
* @param ld leading dimension of the inout buffer
* @param rows number of rows (rows <= ld)
* @param cols number of columns
* @param exponent
* @param gain
* @param offset
*/
template <typename math_t, typename exp_t>
RAFT_KERNEL polynomial_kernel(
  math_t* inout, int ld, int rows, int cols, exp_t exponent, math_t gain, math_t offset)
{
  // 2D grid-stride loops over the column-major [ld * cols] buffer:
  // the y dimension walks columns, the x dimension walks rows in a column.
  for (size_t col = threadIdx.y + blockIdx.y * blockDim.y; col < cols;
       col += blockDim.y * gridDim.y) {
    for (size_t row = threadIdx.x + blockIdx.x * blockDim.x; row < rows;
         row += blockDim.x * gridDim.x) {
      const size_t idx = row + col * ld;
      inout[idx]       = pow(gain * inout[idx] + offset, exponent);
    }
  }
}
/** Epilogue function for tanh kernel without padding.
 * Calculates output = tanh(gain*input + offset)
 *
 * Launched as a 1D grid; the grid-stride loop makes any launch size cover all
 * len elements.
 * @param inout device vector, size [len]
 * @param len length of the input vector
 * @param gain multiplier applied to each element before the offset is added
 * @param offset additive constant applied before tanh
 */
template <typename math_t>
RAFT_KERNEL tanh_kernel_nopad(math_t* inout, size_t len, math_t gain, math_t offset)
{
  // Grid-stride loop: each thread processes elements tid, tid + gridSize, ...
  for (size_t tid = threadIdx.x + blockIdx.x * blockDim.x; tid < len;
       tid += blockDim.x * gridDim.x) {
    inout[tid] = tanh(gain * inout[tid] + offset);
  }
}
/** Epilogue function for tanh kernel with padding.
 * Calculates output = tanh(gain*input + offset)
 *
 * (Header previously said "without padding"; this is the ld-aware, padded
 * variant.) Launched on a 2D grid; both loops are grid-stride, so any grid
 * covers the whole rows x cols matrix.
 * @param inout device vector in column major format, size [ld * cols]
 * @param ld leading dimension of the inout buffer
 * @param rows number of rows (rows <= ld)
 * @param cols number of columns
 * @param gain multiplier applied to each element before the offset is added
 * @param offset additive constant applied before tanh
 */
template <typename math_t>
RAFT_KERNEL tanh_kernel(math_t* inout, int ld, int rows, int cols, math_t gain, math_t offset)
{
  // Grid-stride loops: y walks columns, x walks rows within a column.
  for (size_t tidy = threadIdx.y + blockIdx.y * blockDim.y; tidy < cols;
       tidy += blockDim.y * gridDim.y)
    for (size_t tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < rows;
         tidx += blockDim.x * gridDim.x) {
      inout[tidx + tidy * ld] = tanh(gain * inout[tidx + tidy * ld] + offset);
    }
}
/** Epilogue function for rbf kernel using expansion.
 *
 * Calculates output_ij = exp(-gain * (norm_x_i + norm_y_j - 2*input_ij));
 *
 * Intended usage
 * - input is the product of two matrices X and Y input_ij = sum_k X_ik * Y_jk
 * - norm_x_i = l2_norm(x_i), where x_i is the i-th row of matrix X
 * - norm_y_j = l2_norm(y_j), where y_j is the j-th row of matrix Y
 *
 * @param inout device vector in column major format, size [ld * cols]
 * @param ld leading dimension of the inout buffer
 * @param rows number of rows (rows <= ld)
 * @param cols number of columns
 * @param norm_x l2-norm of X's rows
 * @param norm_y l2-norm of Y's rows
 * @param gain multiplier of the expanded squared distance
 */
template <typename math_t>
RAFT_KERNEL rbf_kernel_expanded(
  math_t* inout, int ld, int rows, int cols, math_t* norm_x, math_t* norm_y, math_t gain)
{
  // Grid-stride loops: y walks columns, x walks rows within a column.
  for (size_t tidy = threadIdx.y + blockIdx.y * blockDim.y; tidy < cols;
       tidy += blockDim.y * gridDim.y) {
    math_t norm_y_val = norm_y[tidy];  // hoisted: constant along a column
    for (size_t tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < rows;
         tidx += blockDim.x * gridDim.x) {
      // Negate in math_t. The previous code multiplied by the double literal
      // -1.0, which promoted the whole expression (and the exp call) to double
      // for math_t == float, costing double-precision throughput on the GPU.
      inout[tidx + tidy * ld] =
        exp(-gain * (norm_x[tidx] + norm_y_val - inout[tidx + tidy * ld] * 2));
    }
  }
}
namespace {
// Picks a (grid, block) launch configuration for the 2D element-wise epilogue
// kernels above. Those kernels use grid-stride loops in both dimensions, so
// the grid only needs to be non-empty, not an exact tiling of the matrix.
std::tuple<dim3, dim3> generateLaunchConfig2dElementwiseOp(int n1, int n2)
{
  // 32 x 4 = 128 threads per block; x is the fast (contiguous) dimension.
  dim3 block_shape = dim3(32, 4);
  const int num_blocks_x = raft::ceildiv(n1, 32);
  // NOTE(review): the y divisor is 32 although blockDim.y is 4, so each
  // y-thread covers up to 8 elements via the grid-stride loop — presumably
  // intentional to bound grid size; confirm. The cap (1 << 16) - 1 = 65535
  // respects the CUDA grid-dimension limit for y.
  const int num_blocks_y = std::min(raft::ceildiv(n2, 32), (1 << 16) - 1);
  dim3 grid_shape = dim3(num_blocks_x, num_blocks_y);
  return std::make_tuple(grid_shape, block_shape);
}
}  // namespace
/**
 * Create a kernel matrix using polynomial kernel function.
 */
template <typename math_t, typename exp_t>
class PolynomialKernel : public GramMatrixBase<math_t> {
  exp_t exponent;
  math_t gain;
  math_t offset;

  /** Apply the polynomial epilogue (gain*z + offset)^exponent in place.
   *
   * @param inout device buffer holding the linear Gram matrix, ld-strided
   * @param ld leading dimension of inout
   * @param rows number of rows of the output matrix
   * @param cols number of columns of the output matrix
   * @param is_row_major layout of inout
   * @param stream cuda stream for the kernel launch
   */
  void applyKernel(
    math_t* inout, int ld, int rows, int cols, bool is_row_major, cudaStream_t stream)
  {
    const int n_minor = is_row_major ? cols : rows;
    if (ld == n_minor) {
      // Contiguous buffer: use the flat 1D kernel. Cast to size_t *before*
      // multiplying so rows * cols cannot overflow int for very large
      // matrices (the grid-size expression already used size_t, but the
      // kernel argument previously multiplied in int).
      polynomial_kernel_nopad<<<raft::ceildiv<size_t>((size_t)rows * cols, 128), 128, 0, stream>>>(
        inout, (size_t)rows * cols, exponent, gain, offset);
    } else {
      // Padded buffer: the 2D kernel honors the leading dimension.
      int n1 = is_row_major ? cols : rows;
      int n2 = is_row_major ? rows : cols;
      auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
      polynomial_kernel<<<grid_shape, block_shape, 0, stream>>>(
        inout, ld, n1, n2, exponent, gain, offset);
    }
    // Surface launch-configuration errors without clearing the error state.
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

 public:
  /**
   * Constructs a polynomial kernel object.
   * It evaluates the kernel matrix using the following formula:
   * K_ij = (gain*<x1_i, x2_k> + offset)^exponent
   *
   * @tparam math_t floating point type
   * @tparam exp_t type of exponent
   * @param exponent exponent of the polynomial
   * @param gain multiplier of the dot product
   * @param offset additive constant
   */
  PolynomialKernel(exp_t exponent, math_t gain, math_t offset)
    : GramMatrixBase<math_t>(), exponent(exponent), gain(gain), offset(offset)
  {
  }

  [[deprecated]] PolynomialKernel(exp_t exponent, math_t gain, math_t offset, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), exponent(exponent), gain(gain), offset(offset)
  {
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                dense_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    // First compute the plain dot products, then apply the epilogue in place.
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                csr_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate the Gram matrix using the legacy interface.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] void evaluate(const math_t* x1,
                               int n1,
                               int n_cols,
                               const math_t* x2,
                               int n2,
                               math_t* out,
                               bool is_row_major,
                               cudaStream_t stream,
                               int ld1,
                               int ld2,
                               int ld_out)
  {
    ASSERT(GramMatrixBase<math_t>::legacy_interface,
           "Legacy interface can only be used with legacy ctor.");
    GramMatrixBase<math_t>::linear(
      x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
    applyKernel(out, ld_out, n1, n2, is_row_major, stream);
  }
};
/**
 * Create a kernel matrix using tanh kernel function.
 */
template <typename math_t>
class TanhKernel : public GramMatrixBase<math_t> {
  math_t gain, offset;

  /** Apply the tanh epilogue tanh(gain*z + offset) in place.
   *
   * @param inout device buffer holding the linear Gram matrix, ld-strided
   * @param ld leading dimension of inout
   * @param rows number of rows of the output matrix
   * @param cols number of columns of the output matrix
   * @param is_row_major layout of inout
   * @param stream cuda stream for the kernel launch
   */
  void applyKernel(
    math_t* inout, int ld, int rows, int cols, bool is_row_major, cudaStream_t stream)
  {
    const int n_minor = is_row_major ? cols : rows;
    if (ld == n_minor) {
      // Contiguous buffer: use the flat 1D kernel. Cast to size_t *before*
      // multiplying so rows * cols cannot overflow int for very large
      // matrices (the grid-size expression already used size_t, but the
      // kernel argument previously multiplied in int).
      tanh_kernel_nopad<<<raft::ceildiv<size_t>((size_t)rows * cols, 128), 128, 0, stream>>>(
        inout, (size_t)rows * cols, gain, offset);
    } else {
      // Padded buffer: the 2D kernel honors the leading dimension.
      int n1 = is_row_major ? cols : rows;
      int n2 = is_row_major ? rows : cols;
      auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
      tanh_kernel<<<grid_shape, block_shape, 0, stream>>>(inout, ld, n1, n2, gain, offset);
    }
    // Surface launch-configuration errors without clearing the error state.
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

 public:
  /**
   * Constructs a tanh kernel object.
   * It evaluates the kernel matrix using the following formula:
   * K_ij = tanh(gain*<x1_i, x2_k> + offset)
   *
   * @tparam math_t floating point type
   * @param gain multiplier of the dot product
   * @param offset additive constant
   */
  TanhKernel(math_t gain, math_t offset) : GramMatrixBase<math_t>(), gain(gain), offset(offset) {}

  [[deprecated]] TanhKernel(math_t gain, math_t offset, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), gain(gain), offset(offset)
  {
  }

  /** Evaluate kernel matrix using tanh kernel.
   *
   * output_[i + k*n1] = tanh(gain*<x1_i, x2_k> + offset),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                dense_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    // First compute the plain dot products, then apply the epilogue in place.
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using tanh kernel.
   *
   * output_[i + k*n1] = tanh(gain*<x1_i, x2_k> + offset),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using tanh kernel.
   *
   * output_[i + k*n1] = tanh(gain*<x1_i, x2_k> + offset),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                csr_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate the Gram matrix using the legacy interface.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] void evaluate(const math_t* x1,
                               int n1,
                               int n_cols,
                               const math_t* x2,
                               int n2,
                               math_t* out,
                               bool is_row_major,
                               cudaStream_t stream,
                               int ld1,
                               int ld2,
                               int ld_out)
  {
    ASSERT(GramMatrixBase<math_t>::legacy_interface,
           "Legacy interface can only be used with legacy ctor.");
    GramMatrixBase<math_t>::linear(
      x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
    applyKernel(out, ld_out, n1, n2, is_row_major, stream);
  }
};
/**
 * Create a kernel matrix using RBF kernel function.
 */
template <typename math_t>
class RBFKernel : public GramMatrixBase<math_t> {
  math_t gain;

  /** Apply the RBF epilogue exp(-gain*(norm_x_i + norm_y_j - 2*z_ij)) in place.
   *
   * @param inout device buffer holding the dot-product matrix, ld-strided
   * @param ld leading dimension of inout
   * @param rows number of rows of the output matrix
   * @param cols number of columns of the output matrix
   * @param norm_x1 L2-norms of x1's rows
   * @param norm_x2 L2-norms of x2's rows
   * @param is_row_major layout of inout
   * @param stream cuda stream for the kernel launch
   */
  void applyKernel(math_t* inout,
                   int ld,
                   int rows,
                   int cols,
                   math_t* norm_x1,
                   math_t* norm_x2,
                   bool is_row_major,
                   cudaStream_t stream)
  {
    // The epilogue kernel is column-major oriented; for row-major output the
    // roles of the two dimensions (and their norm vectors) are swapped.
    int n1          = is_row_major ? cols : rows;
    int n2          = is_row_major ? rows : cols;
    math_t* norm_n1 = is_row_major ? norm_x2 : norm_x1;
    math_t* norm_n2 = is_row_major ? norm_x1 : norm_x2;
    auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
    rbf_kernel_expanded<<<grid_shape, block_shape, 0, stream>>>(
      inout, ld, n1, n2, norm_n1, norm_n2, gain);
    // Surface launch-configuration errors without clearing the error state
    // (the Polynomial/Tanh applyKernel siblings already do this check).
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

 public:
  /**
   * Constructs a RBF kernel object.
   * It evaluates the kernel matrix using the following formula:
   * K_ij = exp(-gain*|x1_i- x2_k|^2)
   *
   * @tparam math_t floating point type
   * @param gain multiplier of the squared Euclidean distance
   */
  RBFKernel(math_t gain) : GramMatrixBase<math_t>(), gain(gain) {}

  [[deprecated]] RBFKernel(math_t gain, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), gain(gain)
  {
  }

  /** Compute the L2-norm of each row of a dense matrix into target. */
  void matrixRowNormL2(raft::resources const& handle,
                       dense_input_matrix_view_t<math_t> matrix,
                       math_t* target)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(matrix);
    int minor         = is_row_major ? matrix.extent(1) : matrix.extent(0);
    int ld            = is_row_major ? matrix.stride(0) : matrix.stride(1);
    // rowNorm assumes a contiguous minor dimension; padded inputs are rejected.
    ASSERT(ld == minor, "RBF Kernel lazy rowNorm compute does not support ld parameter");
    raft::linalg::rowNorm(target,
                          matrix.data_handle(),
                          matrix.extent(1),
                          matrix.extent(0),
                          raft::linalg::NormType::L2Norm,
                          is_row_major,
                          resource::get_cuda_stream(handle));
  }

  /** Compute the L2-norm of each row of a CSR matrix into target. */
  void matrixRowNormL2(raft::resources const& handle,
                       csr_input_matrix_view_t<math_t> matrix,
                       math_t* target)
  {
    auto matrix_structure = matrix.structure_view();
    raft::sparse::linalg::rowNormCsr(handle,
                                     matrix_structure.get_indptr().data(),
                                     matrix.get_elements().data(),
                                     matrix_structure.get_nnz(),
                                     matrix_structure.get_n_rows(),
                                     target,
                                     raft::linalg::NormType::L2Norm);
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                dense_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute the row norms if the caller did not provide them; the
    // temporaries must outlive applyKernel, hence they live in this scope.
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.extent(0), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.extent(0), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }
    // Compute the dot products, then expand to L2 distances in the epilogue:
    // |a-b|^2 = |a|^2 + |b|^2 - 2<a,b>.
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute the row norms if the caller did not provide them.
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.structure_view().get_n_rows(), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.extent(0), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }
    // Compute the dot products, then expand to L2 distances in the epilogue.
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                csr_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute the row norms if the caller did not provide them.
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.structure_view().get_n_rows(), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.structure_view().get_n_rows(), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }
    // Compute the dot products, then expand to L2 distances in the epilogue.
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate the Gram matrix using the legacy interface.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] void evaluate(const math_t* x1,
                               int n1,
                               int n_cols,
                               const math_t* x2,
                               int n2,
                               math_t* out,
                               bool is_row_major,
                               cudaStream_t stream,
                               int ld1,
                               int ld2,
                               int ld_out)
  {
    ASSERT(GramMatrixBase<math_t>::legacy_interface,
           "Legacy interface can only be used with legacy ctor.");
    // The fused distance path below requires contiguous (unpadded) buffers.
    int minor1    = is_row_major ? n_cols : n1;
    int minor2    = is_row_major ? n_cols : n2;
    int minor_out = is_row_major ? n2 : n1;
    ASSERT(ld1 == minor1, "RBF Kernel distance does not support ld1 parameter");
    ASSERT(ld2 == minor2, "RBF Kernel distance does not support ld2 parameter");
    ASSERT(ld_out == minor_out, "RBF Kernel distance does not support ld_out parameter");
    math_t gain   = this->gain;
    using index_t = int64_t;
    // rbf_fin_op applies exp(-gain * d) to each unexpanded L2 distance.
    rbf_fin_op fin_op{gain};
    raft::resources handle;
    resource::set_cuda_stream(handle, stream);
    raft::distance::distance<raft::distance::DistanceType::L2Unexpanded,
                             math_t,
                             math_t,
                             math_t,
                             decltype(fin_op),
                             index_t>(handle,
                                      const_cast<math_t*>(x1),
                                      const_cast<math_t*>(x2),
                                      out,
                                      n1,
                                      n2,
                                      n_cols,
                                      NULL,
                                      0,
                                      fin_op,
                                      is_row_major);
  }
};
}; // end namespace raft::distance::kernels::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/kernels/rbf_fin_op.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
* This file defines rbf_fin_op, which is used in GramMatrixBase.
*
* This struct has been moved to a separate file, so that it is cheap to include
* in distance/distance-ext.cuh, where an instance of raft::distance::distance
* with the rbf_fin_op is instantiated.
*
*/
#include <raft/core/math.hpp> // raft::exp
#include <raft/util/cuda_dev_essentials.cuh> // HD
namespace raft::distance::kernels::detail {
/** @brief: Final op for Gram matrix with RBF kernel.
 *
 * Calculates output = e^(-gain * in)
 *
 * Passed as the final element-wise operation to raft::distance::distance so
 * that (squared) distances are turned into RBF kernel values in one pass.
 */
template <typename OutT>
struct rbf_fin_op {
  OutT gain;  // RBF gamma; scales the incoming distance before negation

  explicit HD rbf_fin_op(OutT gain_) noexcept : gain(gain_) {}

  // The variadic tail absorbs whatever extra arguments the distance epilogue
  // passes; only the distance value d_val is used.
  template <typename... Args>
  HDI OutT operator()(OutT d_val, Args... unused_args)
  {
    return raft::exp(-gain * d_val);
  }
};  // struct rbf_fin_op
} // namespace raft::distance::kernels::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/kernels/kernel_factory.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "gram_matrix.cuh"
#include "kernel_matrices.cuh"
#include <raft/distance/distance_types.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::distance::kernels::detail {
template <typename math_t>
class KernelFactory {
 public:
  /** Instantiate the Gram-matrix operator described by @p params.
   *
   * @param params kernel type and its (untemplated) parameters
   * @return heap-allocated kernel object; ownership passes to the caller
   * @throws raft::exception for an unrecognized kernel type
   */
  static GramMatrixBase<math_t>* create(KernelParams params)
  {
    // KernelParams is not templated; convert its parameters to math_t once.
    const math_t coef0 = params.coef0;
    const math_t gamma = params.gamma;
    switch (params.kernel) {
      case LINEAR: return new GramMatrixBase<math_t>();
      case POLYNOMIAL: return new PolynomialKernel<math_t, int>(params.degree, gamma, coef0);
      case TANH: return new TanhKernel<math_t>(gamma, coef0);
      case RBF: return new RBFKernel<math_t>(gamma);
      default: throw raft::exception("Kernel not implemented");
    }
  }

  /** Legacy variant taking an explicit cuBLAS handle; caller owns the result. */
  [[deprecated]] static GramMatrixBase<math_t>* create(KernelParams params, cublasHandle_t handle)
  {
    // KernelParams is not templated; convert its parameters to math_t once.
    const math_t coef0 = params.coef0;
    const math_t gamma = params.gamma;
    switch (params.kernel) {
      case LINEAR: return new GramMatrixBase<math_t>(handle);
      case POLYNOMIAL:
        return new PolynomialKernel<math_t, int>(params.degree, gamma, coef0, handle);
      case TANH: return new TanhKernel<math_t>(gamma, coef0, handle);
      case RBF: return new RBFKernel<math_t>(gamma, handle);
      default: throw raft::exception("Kernel not implemented");
    }
  }
};
}; // end namespace raft::distance::kernels::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/distance/detail/kernels/gram_matrix.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_csr_matrix.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
// #include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/sparse/distance/distance.cuh>
#include <raft/sparse/linalg/spmm.cuh>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/gemm.cuh>
namespace raft::distance::kernels::detail {
template <typename math_t>
using dense_input_matrix_view_t = raft::device_matrix_view<const math_t, int, layout_stride>;
template <typename math_t>
using dense_output_matrix_view_t = raft::device_matrix_view<math_t, int, layout_stride>;
template <typename math_t>
using csr_input_matrix_view_t = raft::device_csr_matrix_view<const math_t, int, int, int>;
/**
* Base class for general Gram matrices
 * A Gram matrix is the Hermitian matrix of inner products G_ik = <x_i, x_k>
* Here, the inner product is evaluated for all elements from vectors sets X1,
* and X2.
*
* To be more precise, on exit the output buffer will store:
* - if is_row_major == true: out[j+k*n1] = <x1_j, x2_k>,
* - if is_row_major == false: out[j*n2 + k] = <x1_j, x2_k>,
* where x1_j is the j-th vector from the x1 set and x2_k is the k-th vector
* from the x2 set.
*/
template <typename math_t>
class GramMatrixBase {
protected:
cublasHandle_t cublas_handle;
bool legacy_interface;
public:
GramMatrixBase() : legacy_interface(false){};
[[deprecated]] GramMatrixBase(cublasHandle_t cublas_handle)
: cublas_handle(cublas_handle), legacy_interface(true){};
virtual ~GramMatrixBase(){};
/** Convenience function to evaluate the Gram matrix for two vector sets.
* Vector sets are provided in Matrix format
*
* @param [in] handle raft handle
* @param [in] x1 dense device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
* @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
*/
void operator()(raft::resources const& handle,
dense_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1 = nullptr,
math_t* norm_x2 = nullptr)
{
evaluate(handle, x1, x2, out, norm_x1, norm_x2);
}
/** Convenience function to evaluate the Gram matrix for two vector sets.
* Vector sets are provided in Matrix format
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
* @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
*/
void operator()(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1 = nullptr,
math_t* norm_x2 = nullptr)
{
evaluate(handle, x1, x2, out, norm_x1, norm_x2);
}
/** Convenience function to evaluate the Gram matrix for two vector sets.
* Vector sets are provided in Matrix format
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 csr device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
* @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
*/
void operator()(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
csr_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1 = nullptr,
math_t* norm_x2 = nullptr)
{
evaluate(handle, x1, x2, out, norm_x1, norm_x2);
}
// unfortunately, 'evaluate' cannot be templatized as it needs to be virtual
/** Evaluate the Gram matrix for two vector sets using simple dot product.
*
* @param [in] handle raft handle
* @param [in] x1 dense device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
virtual void evaluate(raft::resources const& handle,
dense_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
linear(handle, x1, x2, out);
}
/** Evaluate the Gram matrix for two vector sets using simple dot product.
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
virtual void evaluate(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
linear(handle, x1, x2, out);
}
/** Evaluate the Gram matrix for two vector sets using simple dot product.
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 csr device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
virtual void evaluate(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
csr_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
linear(handle, x1, x2, out);
}
/** Evaluate the Gram matrix for two vector sets using simple dot product.
*
* @param [in] x1 device array of vectors, size [n1*n_cols]
* @param [in] n1 number vectors in x1
* @param [in] n_cols number of columns (features) in x1 and x2
* @param [in] x2 device array of vectors, size [n2*n_cols]
* @param [in] n2 number vectors in x2
* @param [out] out device buffer to store the Gram matrix, size [n1*n2]
* @param [in] is_row_major whether the input and output matrices are in row
* major format
* @param [in] stream cuda stream
* @param ld1 leading dimension of x1 (usually it is n1)
* @param ld2 leading dimension of x2 (usually it is n2)
* @param ld_out leading dimension of out (usually it is n1)
*/
[[deprecated]] virtual void evaluate(const math_t* x1,
int n1,
int n_cols,
const math_t* x2,
int n2,
math_t* out,
bool is_row_major,
cudaStream_t stream,
int ld1,
int ld2,
int ld_out)
{
linear(x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
}
/** Convenience function to evaluate the Gram matrix for two vector sets.
*
* @param [in] x1 device array of vectors, size [n1*n_cols]
* @param [in] n1 number vectors in x1
* @param [in] n_cols number of columns (features) in x1 and x2
* @param [in] x2 device array of vectors, size [n2*n_cols]
* @param [in] n2 number vectors in x2
* @param [out] out device buffer to store the Gram matrix, size [n1*n2]
* @param [in] is_row_major whether the input and output matrices are in row
* major format
* @param [in] stream cuda stream
* @param ld1 leading dimension of x1
* @param ld2 leading dimension of x2
* @param ld_out leading dimension of out
*/
[[deprecated]] void operator()(const math_t* x1,
int n1,
int n_cols,
const math_t* x2,
int n2,
math_t* out,
bool is_row_major,
cudaStream_t stream,
int ld1 = 0,
int ld2 = 0,
int ld_out = 0)
{
ASSERT(legacy_interface, "Legacy interface can only be used with legacy ctor.");
if (ld1 <= 0) { ld1 = is_row_major ? n_cols : n1; }
if (ld2 <= 0) { ld2 = is_row_major ? n_cols : n2; }
if (ld_out <= 0) { ld_out = is_row_major ? n2 : n1; }
evaluate(x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
}
protected:
  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out_ij = <x1_i, x2_j>   (i.e. out = x1 * x2^T as a matrix product)
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1
   * @param ld2 leading dimension of x2
   * @param ld_out leading dimension of out
   */
  [[deprecated]] void linear(const math_t* x1,
                             int n1,
                             int n_cols,
                             const math_t* x2,
                             int n2,
                             math_t* out,
                             bool is_row_major,
                             cudaStream_t stream,
                             int ld1,
                             int ld2,
                             int ld_out)
  {
    math_t alpha = 1.0;
    math_t beta  = 0.0;
    if (is_row_major) {
      // cuBLAS is column-major: a row-major buffer read column-major is the
      // transpose of the logical matrix. Computing out^T = x2 * x1^T in
      // cuBLAS terms stores out = x1 * x2^T in the row-major buffer.
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                       CUBLAS_OP_T,
                                                       CUBLAS_OP_N,
                                                       n2,
                                                       n1,
                                                       n_cols,
                                                       &alpha,
                                                       x2,
                                                       ld2,
                                                       x1,
                                                       ld1,
                                                       &beta,
                                                       out,
                                                       ld_out,
                                                       stream));
    } else {
      // Column-major buffers match cuBLAS' native layout directly:
      // out = x1 * x2^T with m=n1, n=n2, k=n_cols.
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_T,
                                                       n1,
                                                       n2,
                                                       n_cols,
                                                       &alpha,
                                                       x1,
                                                       ld1,
                                                       x2,
                                                       ld2,
                                                       &beta,
                                                       out,
                                                       ld_out,
                                                       stream));
    }
  }
protected:
bool get_is_row_major(dense_output_matrix_view_t<math_t> matrix)
{
return (matrix.stride(1) == 1);
}
bool get_is_row_major(dense_input_matrix_view_t<math_t> matrix)
{
return (matrix.stride(1) == 1);
}
bool get_is_col_major(dense_output_matrix_view_t<math_t> matrix)
{
return (matrix.stride(0) == 1);
}
bool get_is_col_major(dense_input_matrix_view_t<math_t> matrix)
{
return (matrix.stride(0) == 1);
}
  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out_ij = <x1_i, x2_j>   (i.e. out = x1 * x2^T as a matrix product)
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              dense_input_matrix_view_t<math_t> x1,
              dense_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check is_row_major consistency: all three views must share one layout
    // (a view can satisfy both predicates when one extent is 1)
    bool is_row_major = get_is_row_major(x1) && get_is_row_major(x2) && get_is_row_major(out);
    bool is_col_major = get_is_col_major(x1) && get_is_col_major(x2) && get_is_col_major(out);
    ASSERT(is_row_major || is_col_major,
           "GramMatrix leading dimensions for x1, x2 and out do not match");

    // check dimensions against the output shape [n1, n2]
    int n1     = out.extent(0);
    int n2     = out.extent(1);
    int n_cols = x1.extent(1);
    ASSERT(x1.extent(0) == n1, "GramMatrix input matrix dimensions for x1 and out do not match");
    ASSERT(x2.extent(0) == n2, "GramMatrix input matrix dimensions for x2 and out do not match");
    ASSERT(x2.extent(1) == n_cols, "GramMatrix input matrix dimensions for x1 and x2 do not match");

    // extract major stride (the leading dimension passed to gemm)
    int ld1    = is_row_major ? x1.stride(0) : x1.stride(1);
    int ld2    = is_row_major ? x2.stride(0) : x2.stride(1);
    int ld_out = is_row_major ? out.stride(0) : out.stride(1);

    math_t alpha = 1.0;
    math_t beta  = 0.0;
    if (is_row_major) {
      // gemm is column-major; a row-major buffer read column-major is the
      // transpose. Computing out^T = x2 * x1^T stores out = x1 * x2^T
      // in the row-major output buffer.
      // #TODO: Use mdspan-based API when stride-capable
      // https://github.com/rapidsai/raft/issues/875
      raft::linalg::gemm(handle,
                         true,
                         false,
                         n2,
                         n1,
                         n_cols,
                         &alpha,
                         x2.data_handle(),
                         ld2,
                         x1.data_handle(),
                         ld1,
                         &beta,
                         out.data_handle(),
                         ld_out,
                         resource::get_cuda_stream(handle));
    } else {
      // Column-major layout matches gemm's native convention:
      // out = x1 * x2^T with m=n1, n=n2, k=n_cols.
      // #TODO: Use mdspan-based API when stride-capable
      // https://github.com/rapidsai/raft/issues/875
      raft::linalg::gemm(handle,
                         false,
                         true,
                         n1,
                         n2,
                         n_cols,
                         &alpha,
                         x1.data_handle(),
                         ld1,
                         x2.data_handle(),
                         ld2,
                         &beta,
                         out.data_handle(),
                         ld_out,
                         resource::get_cuda_stream(handle));
    }
  }
  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out_ij = <x1_i, x2_j>   (i.e. out = x1 * x2^T as a matrix product)
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              csr_input_matrix_view_t<math_t> x1,
              dense_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check is_row_major consistency (only the dense views have a layout;
    // x1 is CSR and therefore layout-agnostic here)
    bool is_row_major = get_is_row_major(x2) && get_is_row_major(out);
    bool is_col_major = get_is_col_major(x2) && get_is_col_major(out);
    ASSERT(is_row_major || is_col_major,
           "GramMatrix leading dimensions for x2 and out do not match");

    // check dimensions: CSR shape comes from the structure view
    auto x1_structure = x1.structure_view();
    ASSERT(x1_structure.get_n_rows() == out.extent(0),
           "GramMatrix input matrix dimensions for x1 and out do not match");
    ASSERT(x2.extent(0) == out.extent(1),
           "GramMatrix input matrix dimensions for x2 and out do not match");
    ASSERT(x2.extent(1) == x1_structure.get_n_cols(),
           "GramMatrix input matrix dimensions for x1 and x2 do not match");

    math_t alpha = 1.0;
    math_t beta  = 0.0;

    // out = 1 * (x1 * x2^T) + 0 * out  (trans_x1=false, trans_x2=true)
    raft::sparse::linalg::spmm(handle, false, true, &alpha, x1, x2, &beta, out);
  }
  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out_ij = <x1_i, x2_j>   (i.e. out = x1 * x2^T as a matrix product)
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              csr_input_matrix_view_t<math_t> x1,
              csr_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check layout consistency (w.r.t. strides a matrix might be both row & col major);
    // the sparse pairwise distance path cannot honor a padded leading dimension,
    // so the output must be tightly packed in either layout
    bool is_row_major_nopad = get_is_row_major(out) && out.stride(0) == out.extent(1);
    bool is_col_major_nopad = get_is_col_major(out) && out.stride(1) == out.extent(0);

    ASSERT(is_row_major_nopad || is_col_major_nopad,
           "Sparse linear Kernel distance does not support ld_out parameter");

    // switch a,b based on is_row_major: a col-major [n1, n2] buffer is the
    // same bytes as a row-major [n2, n1] buffer, so swap x1/x2 and compute
    // the transposed Gram matrix into the reinterpreted view
    if (is_col_major_nopad) {
      auto out_row_major = raft::make_device_matrix_view<math_t, int, raft::row_major>(
        out.data_handle(), out.extent(1), out.extent(0));
      raft::sparse::distance::pairwise_distance(
        handle, x2, x1, out_row_major, raft::distance::DistanceType::InnerProduct, 0.0);
    } else {
      auto out_row_major = raft::make_device_matrix_view<math_t, int, raft::row_major>(
        out.data_handle(), out.extent(0), out.extent(1));
      raft::sparse::distance::pairwise_distance(
        handle, x1, x2, out_row_major, raft::distance::DistanceType::InnerProduct, 0.0);
    }
  }
};
}; // end namespace raft::distance::kernels::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/specializations/distance.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/distance | rapidsai_public_repos/raft/cpp/include/raft/distance/specializations/fused_l2_nn_min.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ball_cover-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // uint32_t
#include <raft/distance/distance_types.hpp> // raft::distance::DistanceType
#include <raft/neighbors/ball_cover_types.hpp> // BallCoverIndex
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::neighbors::ball_cover {

// NOTE: declarations only. RAFT_EXPLICIT marks the definitions as provided by
// explicit template instantiations (see the extern template list below); this
// header is active only under RAFT_EXPLICIT_INSTANTIATE_ONLY.

// Build the ball-cover index structure in-place on `index`.
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void build_index(raft::resources const& handle,
                 BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index) RAFT_EXPLICIT;

// k-NN query of the index against its own dataset (raw-pointer outputs).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   int_t k,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight                = 1.0) RAFT_EXPLICIT;

// k-NN query of the index against its own dataset (mdspan outputs).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,
                   raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,
                   int_t k,
                   bool perform_post_filtering = true,
                   float weight                = 1.0) RAFT_EXPLICIT;

// k-NN query for an external query set (raw-pointer interface).
template <typename idx_t, typename value_t, typename int_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t>& index,
               int_t k,
               const value_t* query,
               int_t n_query_pts,
               idx_t* inds,
               value_t* dists,
               bool perform_post_filtering = true,
               float weight                = 1.0) RAFT_EXPLICIT;

// k-NN query for an external query set (mdspan interface).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
               raft::device_matrix_view<const value_t, matrix_idx_t, row_major> query,
               raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,
               raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,
               int_t k,
               bool perform_post_filtering = true,
               float weight                = 1.0) RAFT_EXPLICIT;

}  // namespace raft::neighbors::ball_cover
// extern template declarations matching the RAFT_EXPLICIT prototypes above;
// the actual instantiations are compiled into the libraft binary.
// (Comments must stay outside the macro body: a '//' comment would swallow
// the trailing '\' line continuations.)
#define instantiate_raft_neighbors_ball_cover(idx_t, value_t, int_t, matrix_idx_t)                \
  extern template void                                                                            \
  raft::neighbors::ball_cover::build_index<idx_t, value_t, int_t, matrix_idx_t>(                  \
    raft::resources const& handle,                                                                \
    raft::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index);     \
                                                                                                  \
  extern template void                                                                            \
  raft::neighbors::ball_cover::all_knn_query<idx_t, value_t, int_t, matrix_idx_t>(                \
    raft::resources const& handle,                                                                \
    raft::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,      \
    int_t k,                                                                                      \
    idx_t* inds,                                                                                  \
    value_t* dists,                                                                               \
    bool perform_post_filtering,                                                                  \
    float weight);                                                                                \
                                                                                                  \
  extern template void                                                                            \
  raft::neighbors::ball_cover::all_knn_query<idx_t, value_t, int_t, matrix_idx_t>(                \
    raft::resources const& handle,                                                                \
    raft::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,      \
    raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,                                \
    raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,                             \
    int_t k,                                                                                      \
    bool perform_post_filtering,                                                                  \
    float weight);                                                                                \
                                                                                                  \
  extern template void raft::neighbors::ball_cover::knn_query<idx_t, value_t, int_t>(             \
    raft::resources const& handle,                                                                \
    const raft::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t>& index,              \
    int_t k,                                                                                     \
    const value_t* query,                                                                         \
    int_t n_query_pts,                                                                            \
    idx_t* inds,                                                                                  \
    value_t* dists,                                                                               \
    bool perform_post_filtering,                                                                  \
    float weight);                                                                                \
                                                                                                  \
  extern template void                                                                            \
  raft::neighbors::ball_cover::knn_query<idx_t, value_t, int_t, matrix_idx_t>(                    \
    raft::resources const& handle,                                                                \
    const raft::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index, \
    raft::device_matrix_view<const value_t, matrix_idx_t, row_major> query,                       \
    raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,                                \
    raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,                             \
    int_t k,                                                                                      \
    bool perform_post_filtering,                                                                  \
    float weight);

// Instantiations shipped with libraft:
instantiate_raft_neighbors_ball_cover(int64_t, float, uint32_t, uint32_t);

#undef instantiate_raft_neighbors_ball_cover
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/refine-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/detail/refine.cuh>
#include <raft/spatial/knn/detail/ann_utils.cuh>
namespace raft::neighbors {
/**
* @defgroup ann_refine Approximate Nearest Neighbors Refinement
* @{
*/
/**
 * @brief Refine nearest neighbor search.
 *
 * Refinement is an operation that follows an approximate NN search. The approximate search has
 * already selected n_candidates neighbor candidates for each query. We narrow it down to k
 * neighbors. For each query, we calculate the exact distance between the query and its
 * n_candidates neighbor candidate, and select the k nearest ones.
 *
 * The k nearest neighbors and distances are returned.
 *
 * Example usage
 * @code{.cpp}
 *   using namespace raft::neighbors;
 *   // use default index parameters
 *   ivf_pq::index_params index_params;
 *   // create and fill the index from a [N, D] dataset
 *   auto index = ivf_pq::build(handle, index_params, dataset, N, D);
 *   // use default search parameters
 *   ivf_pq::search_params search_params;
 *   // search m = 4 * k nearest neighbours for each of the N queries
 *   ivf_pq::search(handle, search_params, index, queries, N, 4 * k, neighbor_candidates,
 *                  out_dists_tmp);
 *   // refine it to the k nearest one
 *   refine(handle, dataset, queries, neighbor_candidates, out_indices, out_dists,
 *          index.metric());
 * @endcode
 *
 *
 * @param[in] handle the raft handle
 * @param[in] dataset device matrix that stores the dataset [n_rows, dims]
 * @param[in] queries device matrix of the queries [n_queris, dims]
 * @param[in] neighbor_candidates indices of candidate vectors [n_queries, n_candidates], where
 *   n_candidates >= k
 * @param[out] indices device matrix that stores the refined indices [n_queries, k]
 * @param[out] distances device matrix that stores the refined distances [n_queries, k]
 * @param[in] metric distance metric to use. Euclidean (L2) is used by default
 */
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::device_matrix_view<const data_t, matrix_idx, row_major> dataset,
            raft::device_matrix_view<const data_t, matrix_idx, row_major> queries,
            raft::device_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates,
            raft::device_matrix_view<idx_t, matrix_idx, row_major> indices,
            raft::device_matrix_view<distance_t, matrix_idx, row_major> distances,
            distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
{
  // All buffers live in device memory; forward to the device implementation.
  raft::neighbors::detail::refine_device(
    handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
/** Same as above, but all input and out data is in host memory.
 * @param[in] handle the raft handle
 * @param[in] dataset host matrix that stores the dataset [n_rows, dims]
 * @param[in] queries host matrix of the queries [n_queris, dims]
 * @param[in] neighbor_candidates host matrix with indices of candidate vectors [n_queries,
 * n_candidates], where n_candidates >= k
 * @param[out] indices host matrix that stores the refined indices [n_queries, k]
 * @param[out] distances host matrix that stores the refined distances [n_queries, k]
 * @param[in] metric distance metric to use. Euclidean (L2) is used by default
 */
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::host_matrix_view<const data_t, matrix_idx, row_major> dataset,
            raft::host_matrix_view<const data_t, matrix_idx, row_major> queries,
            raft::host_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates,
            raft::host_matrix_view<idx_t, matrix_idx, row_major> indices,
            raft::host_matrix_view<distance_t, matrix_idx, row_major> distances,
            distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
{
  // Host-side path: note the handle is not forwarded to the detail call.
  raft::neighbors::detail::refine_host(
    dataset, queries, neighbor_candidates, indices, distances, metric);
}
/** @} */ // end group ann_refine
} // namespace raft::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/detail/ivf_pq_build.cuh>
#include <raft/neighbors/detail/ivf_pq_search.cuh>
#include <raft/neighbors/ivf_pq_serialize.cuh>
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resources.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <memory> // shared_ptr
namespace raft::neighbors::ivf_pq {
/**
* @defgroup ivf_pq IVF PQ Algorithm
* @{
*/
/**
 * @brief Build the index from the dataset for efficient search.
 *
 * NB: Currently, the following distance metrics are supported:
 * - L2Expanded
 * - L2Unexpanded
 * - InnerProduct
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[in] params configure the index building
 * @param[in] dataset a device matrix view to a row-major matrix [n_rows, dim]
 *
 * @return the constructed ivf-pq index
 */
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& handle,
                  const index_params& params,
                  raft::device_matrix_view<const T, IdxT, row_major> dataset)
{
  // Unpack the mdspan into the pointer-based builder: extent(0) rows of
  // extent(1) features each.
  return detail::build(
    handle, params, dataset.data_handle(), dataset.extent(0), dataset.extent(1));
}
/**
 * @brief Extend the index with the new data.
 *
 * Returns a new index containing the original data plus `new_vectors`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[in] new_vectors a device matrix view to a row-major matrix [n_rows, idx.dim()]
 * @param[in] new_indices a device vector view to a vector of indices [n_rows].
 *    If the original index is empty (`idx.size() == 0`), you can pass `std::nullopt`
 *    here to imply a continuous range `[0...n_rows)`.
 * @param[inout] idx
 */
template <typename T, typename IdxT>
index<IdxT> extend(raft::resources const& handle,
                   raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
                   std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
                   const index<IdxT>& idx)
{
  ASSERT(new_vectors.extent(1) == idx.dim(),
         "new_vectors should have the same dimension as the index");

  const IdxT n_rows = new_vectors.extent(0);
  // nullptr tells the detail layer to auto-assign indices [0..n_rows).
  const IdxT* indices_ptr = nullptr;
  if (new_indices.has_value()) {
    ASSERT(n_rows == new_indices.value().extent(0),
           "new_vectors and new_indices have different number of rows");
    indices_ptr = new_indices.value().data_handle();
  }
  return detail::extend(handle, idx, new_vectors.data_handle(), indices_ptr, n_rows);
}
/**
 * @brief Extend the index with the new data.
 *
 * In-place variant: `*idx` is replaced by the extended index.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[in] new_vectors a device matrix view to a row-major matrix [n_rows, idx.dim()]
 * @param[in] new_indices a device vector view to a vector of indices [n_rows].
 *    If the original index is empty (`idx.size() == 0`), you can pass `std::nullopt`
 *    here to imply a continuous range `[0...n_rows)`.
 * @param[inout] idx
 */
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
            std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
            index<IdxT>* idx)
{
  ASSERT(new_vectors.extent(1) == idx->dim(),
         "new_vectors should have the same dimension as the index");

  const IdxT n_rows = new_vectors.extent(0);
  // nullptr tells the detail layer to auto-assign indices [0..n_rows).
  const IdxT* indices_ptr = nullptr;
  if (new_indices.has_value()) {
    ASSERT(n_rows == new_indices.value().extent(0),
           "new_vectors and new_indices have different number of rows");
    indices_ptr = new_indices.value().data_handle();
  }
  *idx = detail::extend(handle, *idx, new_vectors.data_handle(), indices_ptr, n_rows);
}
/**
 * @brief Search ANN using the constructed index with the given filter.
 *
 * See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
 *
 * Note, this function requires a temporary buffer to store intermediate results between cuda kernel
 * calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
 * pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
 * eliminate entirely allocations happening within `search`.
 * The exact size of the temporary buffer depends on multiple factors and is an implementation
 * detail. However, you can safely specify a small initial size for the memory pool, so that only a
 * few allocations happen to grow it during the first invocations of the `search`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices
 * @tparam IvfSampleFilterT Device filter function, with the signature
 *         `(uint32_t query_ix, uint32 cluster_ix, uint32_t sample_ix) -> bool` or
 *         `(uint32_t query_ix, uint32 sample_ix) -> bool`
 *
 * @param[in] handle
 * @param[in] params configure the search
 * @param[in] idx ivf-pq constructed index
 * @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
 * @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
 *   [n_queries, k]
 * @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
 *   k]
 * @param[in] sample_filter a device filter function that greenlights samples for a given query.
 */
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<IdxT>& idx,
                           raft::device_matrix_view<const T, uint32_t, row_major> queries,
                           raft::device_matrix_view<IdxT, uint32_t, row_major> neighbors,
                           raft::device_matrix_view<float, uint32_t, row_major> distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  // Validate that all three matrices agree on n_queries and k, and that the
  // query dimensionality matches the index.
  RAFT_EXPECTS(
    queries.extent(0) == neighbors.extent(0) && queries.extent(0) == distances.extent(0),
    "Number of rows in output neighbors and distances matrices must equal the number of queries.");

  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1),
               "Number of columns in output neighbors and distances matrices must equal k");

  RAFT_EXPECTS(queries.extent(1) == idx.dim(),
               "Number of query dimensions should equal number of dimensions in the index.");

  const std::uint32_t n_queries   = queries.extent(0);
  const std::uint32_t n_neighbors = neighbors.extent(1);

  detail::search(handle,
                 params,
                 idx,
                 queries.data_handle(),
                 n_queries,
                 n_neighbors,
                 neighbors.data_handle(),
                 distances.data_handle(),
                 sample_filter);
}
/**
 * @brief Search ANN using the constructed index.
 *
 * See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
 *
 * Note, this function requires a temporary buffer to store intermediate results between cuda kernel
 * calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
 * pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
 * eliminate entirely allocations happening within `search`.
 * The exact size of the temporary buffer depends on multiple factors and is an implementation
 * detail. However, you can safely specify a small initial size for the memory pool, so that only a
 * few allocations happen to grow it during the first invocations of the `search`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices
 *
 * @param[in] handle
 * @param[in] params configure the search
 * @param[in] idx ivf-pq constructed index
 * @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
 * @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
 *   [n_queries, k]
 * @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
 *   k]
 */
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<IdxT>& idx,
            raft::device_matrix_view<const T, uint32_t, row_major> queries,
            raft::device_matrix_view<IdxT, uint32_t, row_major> neighbors,
            raft::device_matrix_view<float, uint32_t, row_major> distances)
{
  // Unfiltered search is just filtered search with the pass-everything filter.
  raft::neighbors::filtering::none_ivf_sample_filter accept_all{};
  search_with_filtering(handle, params, idx, queries, neighbors, distances, accept_all);
}
/** @} */ // end group ivf_pq
/**
 * @brief Build the index from the dataset for efficient search.
 *
 * NB: Currently, the following distance metrics are supported:
 * - L2Expanded
 * - L2Unexpanded
 * - InnerProduct
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors;
 *   // use default index parameters
 *   ivf_pq::index_params index_params;
 *   // create and fill the index from a [N, D] dataset
 *   auto index = ivf_pq::build(handle, index_params, dataset, N, D);
 *   // use default search parameters
 *   ivf_pq::search_params search_params;
 *   // search K nearest neighbours for each of the N queries
 *   ivf_pq::search(handle, search_params, index, queries, N, K, out_inds, out_dists);
 * @endcode
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[in] params configure the index building
 * @param[in] dataset a device/host pointer to a row-major matrix [n_rows, dim]
 * @param[in] n_rows the number of samples
 * @param[in] dim the dimensionality of the data
 *
 * @return the constructed ivf-pq index
 */
template <typename T, typename IdxT = uint32_t>
auto build(raft::resources const& handle,
           const index_params& params,
           const T* dataset,
           IdxT n_rows,
           uint32_t dim) -> index<IdxT>
{
  // Raw-pointer entry point; the heavy lifting happens in the detail layer.
  return detail::build(handle, params, dataset, n_rows, dim);
}
/**
 * @brief Build a new index containing the data of the original plus new extra vectors.
 *
 * Implementation note:
 *    The new data is clustered according to existing kmeans clusters, the cluster
 *    centers are unchanged.
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors;
 *   ivf_pq::index_params index_params;
 *   index_params.add_data_on_build = false;      // don't populate index on build
 *   index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
 *   // train the index from a [N, D] dataset
 *   auto index_empty = ivf_pq::build(handle, index_params, dataset, N, D);
 *   // fill the index with the data
 *   auto index = ivf_pq::extend(handle, index_empty, dataset, nullptr, N);
 * @endcode
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[inout] idx original index
 * @param[in] new_vectors a device/host pointer to a row-major matrix [n_rows, idx.dim()]
 * @param[in] new_indices a device/host pointer to a vector of indices [n_rows].
 *    If the original index is empty (`idx.size() == 0`), you can pass `nullptr`
 *    here to imply a continuous range `[0...n_rows)`.
 * @param[in] n_rows the number of samples
 *
 * @return the constructed extended ivf-pq index
 */
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            const index<IdxT>& idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) -> index<IdxT>
{
  // Raw-pointer entry point; validation and clustering happen in the detail layer.
  return detail::extend(handle, idx, new_vectors, new_indices, n_rows);
}
/**
 * @brief Extend the index with the new data.
 *
 * In-place variant of the raw-pointer extend: `*idx` is updated directly.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 *
 * @param[in] handle
 * @param[inout] idx
 * @param[in] new_vectors a device/host pointer to a row-major matrix [n_rows, idx.dim()]
 * @param[in] new_indices a device/host pointer to a vector of indices [n_rows].
 *    If the original index is empty (`idx.size() == 0`), you can pass `nullptr`
 *    here to imply a continuous range `[0...n_rows)`.
 * @param[in] n_rows the number of samples
 */
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            index<IdxT>* idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows)
{
  // Delegates to the mutating detail overload (takes the index by pointer).
  detail::extend(handle, idx, new_vectors, new_indices, n_rows);
}
/**
 * @brief Search ANN using the constructed index with the given filter.
 *
 * See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
 *
 * Note, this function requires a temporary buffer to store intermediate results between cuda kernel
 * calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
 * pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
 * eliminate entirely allocations happening within `search`:
 * @code{.cpp}
 *   ...
 *   // use default search parameters
 *   ivf_pq::search_params search_params;
 *   filtering::none_ivf_sample_filter filter;
 *   // Use the same allocator across multiple searches to reduce the number of
 *   // cuda memory allocations
 *   ivf_pq::search_with_filtering(
 *     handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, filter);
 *   ivf_pq::search_with_filtering(
 *     handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, filter);
 *   ivf_pq::search_with_filtering(
 *     handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, filter);
 *   ...
 * @endcode
 * The exact size of the temporary buffer depends on multiple factors and is an implementation
 * detail. However, you can safely specify a small initial size for the memory pool, so that only a
 * few allocations happen to grow it during the first invocations of the `search`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices
 * @tparam IvfSampleFilterT Device filter function, with the signature
 *         `(uint32_t query_ix, uint32 cluster_ix, uint32_t sample_ix) -> bool` or
 *         `(uint32_t query_ix, uint32 sample_ix) -> bool`
 *
 * @param[in] handle
 * @param[in] params configure the search
 * @param[in] idx ivf-pq constructed index
 * @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
 * @param[in] n_queries the batch size
 * @param[in] k the number of neighbors to find for each query.
 * @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
 * [n_queries, k]
 * @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
 * @param[in] sample_filter a device filter function that greenlights samples for a given query
 */
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<IdxT>& idx,
                           const T* queries,
                           uint32_t n_queries,
                           uint32_t k,
                           IdxT* neighbors,
                           float* distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  // Forward directly to the detail implementation.
  detail::search(handle, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
}
/**
 * This function is deprecated and will be removed in a future.
 * Please drop the `mr` argument and use `raft::resource::set_workspace_resource` instead.
 */
template <typename T, typename IdxT, typename IvfSampleFilterT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search_with_filtering(raft::resources const& handle,
                      const search_params& params,
                      const index<IdxT>& idx,
                      const T* queries,
                      uint32_t n_queries,
                      uint32_t k,
                      IdxT* neighbors,
                      float* distances,
                      rmm::mr::device_memory_resource* mr,
                      IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  if (mr != nullptr) {
    // Shallow copy of the resource with the automatic lifespan:
    // change the workspace resource temporarily.
    // `void_op` acts as a no-op deleter, so the shared_ptr wraps `mr` without
    // taking ownership — the caller remains responsible for its lifetime.
    raft::resources res_local(handle);
    resource::set_workspace_resource(
      res_local, std::shared_ptr<rmm::mr::device_memory_resource>{mr, void_op{}});
    return search_with_filtering(
      res_local, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
  } else {
    // No custom resource given: forward to the non-deprecated overload as-is.
    return search_with_filtering(
      handle, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
  }
}
/**
 * @brief Search ANN using the constructed index (no filtering).
 *
 * See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
 *
 * Note, this function requires a temporary buffer to store intermediate results between cuda kernel
 * calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
 * pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
 * eliminate entirely allocations happening within `search`:
 * @code{.cpp}
 *   ...
 *   // Create a pooling memory resource with a pre-defined initial size.
 *   rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> mr(
 *     rmm::mr::get_current_device_resource(), 1024 * 1024);
 *   // use default search parameters
 *   ivf_pq::search_params search_params;
 *   // Use the same allocator across multiple searches to reduce the number of
 *   // cuda memory allocations
 *   ivf_pq::search(handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, &mr);
 *   ivf_pq::search(handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, &mr);
 *   ivf_pq::search(handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, &mr);
 *   ...
 * @endcode
 * The exact size of the temporary buffer depends on multiple factors and is an implementation
 * detail. However, you can safely specify a small initial size for the memory pool, so that only a
 * few allocations happen to grow it during the first invocations of the `search`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices
 *
 * @param[in] handle
 * @param[in] params configure the search
 * @param[in] idx ivf-pq constructed index
 * @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
 * @param[in] n_queries the batch size
 * @param[in] k the number of neighbors to find for each query.
 * @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
 * [n_queries, k]
 * @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
 */
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<IdxT>& idx,
            const T* queries,
            uint32_t n_queries,
            uint32_t k,
            IdxT* neighbors,
            float* distances)
{
  // An unfiltered search is simply a filtered search with a pass-through
  // filter that accepts every sample.
  raft::neighbors::filtering::none_ivf_sample_filter accept_all{};
  return search_with_filtering(
    handle, params, idx, queries, n_queries, k, neighbors, distances, accept_all);
}
/**
 * This function is deprecated and will be removed in a future.
 * Please drop the `mr` argument and use `raft::resource::set_workspace_resource` instead.
 */
template <typename T, typename IdxT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search(raft::resources const& handle,
       const search_params& params,
       const index<IdxT>& idx,
       const T* queries,
       uint32_t n_queries,
       uint32_t k,
       IdxT* neighbors,
       float* distances,
       rmm::mr::device_memory_resource* mr)
{
  // Forward to the deprecated filtered overload with a pass-through filter;
  // that overload handles the temporary workspace-resource override for `mr`.
  raft::neighbors::filtering::none_ivf_sample_filter accept_all{};
  return search_with_filtering(
    handle, params, idx, queries, n_queries, k, neighbors, distances, mr, accept_all);
}
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ball_cover-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __BALL_COVER_H
#define __BALL_COVER_H
#pragma once
#include <cstdint>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/ball_cover_types.hpp>
#include <raft/spatial/knn/detail/ball_cover.cuh>
#include <raft/spatial/knn/detail/ball_cover/common.cuh>
#include <thrust/transform.h>
namespace raft::neighbors::ball_cover {
/**
* @defgroup random_ball_cover Random Ball Cover algorithm
* @{
*/
/**
 * Builds and populates a previously unbuilt BallCoverIndex
 *
 * Usage example:
 * @code{.cpp}
 *
 *  #include <raft/core/resources.hpp>
 *  #include <raft/neighbors/ball_cover.cuh>
 *  #include <raft/distance/distance_types.hpp>
 *  using namespace raft::neighbors;
 *
 *  raft::resources handle;
 *  ...
 *  // Note: only Haversine, L2SqrtExpanded, and L2SqrtUnexpanded are supported.
 *  auto metric = raft::distance::DistanceType::L2SqrtExpanded;
 *  BallCoverIndex index(handle, X, metric);
 *
 *  ball_cover::build_index(handle, index);
 * @endcode
 *
 * @tparam idx_t knn index type
 * @tparam value_t knn value type
 * @tparam int_t integral type for knn params
 * @tparam matrix_idx_t matrix indexing type
 * @param[in] handle library resource management handle
 * @param[inout] index an empty (and not previous built) instance of BallCoverIndex
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void build_index(raft::resources const& handle,
                 BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Dispatch on the metric: only Haversine and the two sqrt-L2 variants are
  // supported by the underlying ball-cover implementation.
  if (index.metric == raft::distance::DistanceType::Haversine) {
    raft::spatial::knn::detail::rbc_build_index(
      handle, index, spatial::knn::detail::HaversineFunc<value_t, int_t>());
  } else if (index.metric == raft::distance::DistanceType::L2SqrtExpanded ||
             index.metric == raft::distance::DistanceType::L2SqrtUnexpanded) {
    raft::spatial::knn::detail::rbc_build_index(
      handle, index, spatial::knn::detail::EuclideanFunc<value_t, int_t>());
  } else {
    // Fixed message: was "Metric not support"; now consistent with the
    // identical dispatchers elsewhere in this file.
    RAFT_FAIL("Metric not supported");
  }
  index.set_index_trained();
}
/** @} */ // end group random_ball_cover
/**
 * Performs a faster exact knn in metric spaces using the triangle
 * inequality with a number of landmark points to reduce the
 * number of distance computations from O(n^2) to O(sqrt(n)). This
 * performs an all neighbors knn, which can reuse memory when
 * the index and query are the same array. This function will
 * build the index and assumes rbc_build_index() has not already
 * been called.
 * @tparam idx_t knn index type
 * @tparam value_t knn distance type
 * @tparam int_t type for integers, such as number of rows/cols
 * @tparam matrix_idx_t matrix indexing type
 * @param[in] handle raft handle for resource management
 * @param[inout] index ball cover index which has not yet been built
 * @param[in] k number of nearest neighbors to find
 * @param[out] inds output knn indices
 * @param[out] dists output knn distances
 * @param[in] perform_post_filtering if this is false, only the closest k landmarks
 *            are considered (which will return approximate results).
 * @param[in] weight a weight for overlap between the closest landmark and
 *            the radius of other landmarks when pruning distances.
 *            Setting this value below 1 can effectively turn off
 *            computing distances against many other balls, enabling
 *            approximate nearest neighbors. Recall can be adjusted
 *            based on how many relevant balls are ignored. Note that
 *            many datasets can still have great recall even by only
 *            looking in the closest landmark.
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   int_t k,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Run the all-neighbors query with the distance functor matching the metric.
  auto run_query = [&](auto&& dist_func) {
    raft::spatial::knn::detail::rbc_all_knn_query(
      handle, index, k, inds, dists, dist_func, perform_post_filtering, weight);
  };
  switch (index.metric) {
    case raft::distance::DistanceType::Haversine:
      run_query(spatial::knn::detail::HaversineFunc<value_t, int_t>());
      break;
    case raft::distance::DistanceType::L2SqrtExpanded:
    case raft::distance::DistanceType::L2SqrtUnexpanded:
      run_query(spatial::knn::detail::EuclideanFunc<value_t, int_t>());
      break;
    default: RAFT_FAIL("Metric not supported");
  }
  // This overload builds the index as part of the query, so mark it trained.
  index.set_index_trained();
}
/**
* @ingroup random_ball_cover
* @{
*/
/**
 * Performs a faster exact knn in metric spaces using the triangle
 * inequality with a number of landmark points to reduce the
 * number of distance computations from O(n^2) to O(sqrt(n)). This
 * performs an all neighbors knn, which can reuse memory when
 * the index and query are the same array. This function will
 * build the index and assumes rbc_build_index() has not already
 * been called.
 *
 * Usage example:
 * @code{.cpp}
 *
 *  #include <raft/core/resources.hpp>
 *  #include <raft/neighbors/ball_cover.cuh>
 *  #include <raft/distance/distance_types.hpp>
 *  using namespace raft::neighbors;
 *
 *  raft::resources handle;
 *  ...
 *  // Note: only Haversine, L2SqrtExpanded, and L2SqrtUnexpanded are supported.
 *  auto metric = raft::distance::DistanceType::L2SqrtExpanded;
 *
 *  // Construct a ball cover index
 *  BallCoverIndex index(handle, X, metric);
 *
 *  // Perform all neighbors knn query
 *  ball_cover::all_knn_query(handle, index, inds, dists, k);
 * @endcode
 *
 * @tparam idx_t knn index type
 * @tparam value_t knn distance type
 * @tparam int_t type for integers, such as number of rows/cols
 * @tparam matrix_idx_t matrix indexing type
 *
 * @param[in] handle raft handle for resource management
 * @param[inout] index ball cover index which has not yet been built
 * @param[out] inds output knn indices
 * @param[out] dists output knn distances
 * @param[in] k number of nearest neighbors to find
 * @param[in] perform_post_filtering if this is false, only the closest k landmarks
 *            are considered (which will return approximate results).
 * @param[in] weight a weight for overlap between the closest landmark and
 *            the radius of other landmarks when pruning distances.
 *            Setting this value below 1 can effectively turn off
 *            computing distances against many other balls, enabling
 *            approximate nearest neighbors. Recall can be adjusted
 *            based on how many relevant balls are ignored. Note that
 *            many datasets can still have great recall even by only
 *            looking in the closest landmark.
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,
                   raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,
                   int_t k,
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  // Validate the output extents against k and the indexed dataset before
  // forwarding to the raw-pointer overload (which performs metric dispatch).
  const auto k_extent = static_cast<matrix_idx_t>(k);
  RAFT_EXPECTS(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  RAFT_EXPECTS(k <= index.m,
               "k must be less than or equal to the number of data points in the index");
  RAFT_EXPECTS(inds.extent(1) == dists.extent(1) && dists.extent(1) == k_extent,
               "Number of columns in output indices and distances matrices must be equal to k");
  RAFT_EXPECTS(inds.extent(0) == dists.extent(0) && dists.extent(0) == index.get_X().extent(0),
               "Number of rows in output indices and distances matrices must equal number of rows "
               "in index matrix.");
  all_knn_query(
    handle, index, k, inds.data_handle(), dists.data_handle(), perform_post_filtering, weight);
}
/** @} */
/**
 * Performs a faster exact knn in metric spaces using the triangle
 * inequality with a number of landmark points to reduce the
 * number of distance computations from O(n^2) to O(sqrt(n)). This
 * function does not build the index and assumes rbc_build_index() has
 * already been called. Use this function when the index and
 * query arrays are different, otherwise use rbc_all_knn_query().
 * @tparam idx_t index type
 * @tparam value_t distances type
 * @tparam int_t integer type for size info
 * @param[in] handle raft handle for resource management
 * @param[in] index a previously built ball cover index
 * @param[in] k number of nearest neighbors to find
 * @param[in] query device pointer to the query points, row-major [n_query_pts, index.n]
 * @param[in] n_query_pts number of query points
 * @param[out] inds output knn indices
 * @param[out] dists output knn distances
 * @param[in] perform_post_filtering if this is false, only the closest k landmarks
 *            are considered (which will return approximate results).
 * @param[in] weight a weight for overlap between the closest landmark and
 *            the radius of other landmarks when pruning distances.
 *            Setting this value below 1 can effectively turn off
 *            computing distances against many other balls, enabling
 *            approximate nearest neighbors. Recall can be adjusted
 *            based on how many relevant balls are ignored. Note that
 *            many datasets can still have great recall even by only
 *            looking in the closest landmark.
 */
template <typename idx_t, typename value_t, typename int_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t>& index,
               int_t k,
               const value_t* query,
               int_t n_query_pts,
               idx_t* inds,
               value_t* dists,
               bool perform_post_filtering = true,
               float weight = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Run the query with the distance functor matching the index metric.
  auto run_query = [&](auto&& dist_func) {
    raft::spatial::knn::detail::rbc_knn_query(handle,
                                              index,
                                              k,
                                              query,
                                              n_query_pts,
                                              inds,
                                              dists,
                                              dist_func,
                                              perform_post_filtering,
                                              weight);
  };
  switch (index.metric) {
    case raft::distance::DistanceType::Haversine:
      run_query(spatial::knn::detail::HaversineFunc<value_t, int_t>());
      break;
    case raft::distance::DistanceType::L2SqrtExpanded:
    case raft::distance::DistanceType::L2SqrtUnexpanded:
      run_query(spatial::knn::detail::EuclideanFunc<value_t, int_t>());
      break;
    default: RAFT_FAIL("Metric not supported");
  }
}
/**
* @ingroup random_ball_cover
* @{
*/
/**
 * Performs a faster exact knn in metric spaces using the triangle
 * inequality with a number of landmark points to reduce the
 * number of distance computations from O(n^2) to O(sqrt(n)). This
 * function does not build the index and assumes rbc_build_index() has
 * already been called. Use this function when the index and
 * query arrays are different, otherwise use rbc_all_knn_query().
 *
 * Usage example:
 * @code{.cpp}
 *
 *  #include <raft/core/resources.hpp>
 *  #include <raft/neighbors/ball_cover.cuh>
 *  #include <raft/distance/distance_types.hpp>
 *  using namespace raft::neighbors;
 *
 *  raft::resources handle;
 *  ...
 *  // Note: only Haversine, L2SqrtExpanded, and L2SqrtUnexpanded are supported.
 *  auto metric = raft::distance::DistanceType::L2SqrtExpanded;
 *
 *  // Build a ball cover index
 *  BallCoverIndex index(handle, X, metric);
 *  ball_cover::build_index(handle, index);
 *
 *  // Perform all neighbors knn query
 *  ball_cover::knn_query(handle, index, inds, dists, k);
 * @endcode
 *
 * @tparam idx_t index type
 * @tparam value_t distances type
 * @tparam int_t integer type for size info
 * @tparam matrix_idx_t matrix indexing type
 * @param[in] handle raft handle for resource management
 * @param[in] index a previously built ball cover index
 * @param[in] query device matrix containing query data points
 * @param[out] inds output knn indices
 * @param[out] dists output knn distances
 * @param[in] k number of nearest neighbors to find
 * @param[in] perform_post_filtering if this is false, only the closest k landmarks
 *            are considered (which will return approximate results).
 * @param[in] weight a weight for overlap between the closest landmark and
 *            the radius of other landmarks when pruning distances.
 *            Setting this value below 1 can effectively turn off
 *            computing distances against many other balls, enabling
 *            approximate nearest neighbors. Recall can be adjusted
 *            based on how many relevant balls are ignored. Note that
 *            many datasets can still have great recall even by only
 *            looking in the closest landmark.
 */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
               raft::device_matrix_view<const value_t, matrix_idx_t, row_major> query,
               raft::device_matrix_view<idx_t, matrix_idx_t, row_major> inds,
               raft::device_matrix_view<value_t, matrix_idx_t, row_major> dists,
               int_t k,
               bool perform_post_filtering = true,
               float weight = 1.0)
{
  RAFT_EXPECTS(k <= index.m,
               "k must be less than or equal to the number of data points in the index");
  // Fix: the extents are of type `matrix_idx_t`, so `k` must be cast to the
  // same type for the comparison (previously cast to `idx_t`, inconsistent
  // with the equivalent check in `all_knn_query`).
  RAFT_EXPECTS(inds.extent(1) == dists.extent(1) &&
                 dists.extent(1) == static_cast<matrix_idx_t>(k),
               "Number of columns in output indices and distances matrices must be equal to k");
  RAFT_EXPECTS(inds.extent(0) == dists.extent(0) && dists.extent(0) == query.extent(0),
               "Number of rows in output indices and distances matrices must equal number of rows "
               "in search matrix.");
  RAFT_EXPECTS(query.extent(1) == index.get_X().extent(1),
               "Number of columns in query and index matrices must match.");
  // Forward to the raw-pointer overload, which performs the metric dispatch.
  knn_query(handle,
            index,
            k,
            query.data_handle(),
            query.extent(0),
            inds.data_handle(),
            dists.data_handle(),
            perform_post_filtering,
            weight);
}
/** @} */
// TODO: implement functions for:
// 4. rbc_eps_neigh() - given a populated index, perform query against different query array
// 5. rbc_all_eps_neigh() - populate a BallCoverIndex and query against training data
} // namespace raft::neighbors::ball_cover
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/sample_filter_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>

#include <raft/core/detail/macros.hpp>
namespace raft::neighbors::filtering {
/* A filter that filters nothing. This is the default behavior. */
struct none_ivf_sample_filter {
  /** Unconditionally accepts every (query, cluster, sample) triple. */
  inline _RAFT_HOST_DEVICE bool operator()(
    // query index
    const uint32_t query_ix,
    // the current inverted list index
    const uint32_t cluster_ix,
    // the index of the current sample inside the current inverted list
    const uint32_t sample_ix) const
  {
    return true;
  }
};
/* A filter that filters nothing. This is the default behavior. */
struct none_cagra_sample_filter {
  /** Unconditionally accepts every (query, sample) pair. */
  inline _RAFT_HOST_DEVICE bool operator()(
    // query index
    const uint32_t query_ix,
    // the index of the current sample
    const uint32_t sample_ix) const
  {
    return true;
  }
};
/**
 * Compile-time detection of the filter call signature.
 *
 * `takes_three_args<filter_t>::value` is true when `filter_t::operator()` is
 * invocable as `f(uint32_t, uint32_t, uint32_t)` — i.e. the
 * (query_ix, cluster_ix, sample_ix) form — and false otherwise (e.g. for the
 * two-argument (query_ix, sample_ix) form). Used by `ivf_to_sample_filter`
 * to decide whether the cluster/sample pair must first be translated into a
 * dataset sample index.
 *
 * Note: requires `<type_traits>` (std::false_type, std::void_t) and
 * `<utility>` (std::declval); previously the file relied on these being
 * pulled in transitively.
 */
template <typename filter_t, typename = void>
struct takes_three_args : std::false_type {};
template <typename filter_t>
struct takes_three_args<
  filter_t,
  std::void_t<decltype(std::declval<filter_t>()(uint32_t{}, uint32_t{}, uint32_t{}))>>
  : std::true_type {};
/**
 * @brief Filter used to convert the cluster index and sample index
 * of an IVF search into a sample index. This can be used as an
 * intermediate filter.
 *
 * @tparam index_t Indexing type
 * @tparam filter_t type of the wrapped ("next") filter
 */
template <typename index_t, typename filter_t>
struct ivf_to_sample_filter {
  // Per-cluster arrays mapping an in-cluster position to a dataset sample index.
  const index_t* const* inds_ptrs_;
  // The user-provided filter that the call is forwarded to.
  const filter_t next_filter_;

  ivf_to_sample_filter(const index_t* const* inds_ptrs, const filter_t next_filter)
    : inds_ptrs_{inds_ptrs}, next_filter_{next_filter}
  {
  }

  /** If the original filter takes three arguments, then don't modify the arguments.
   * If the original filter takes two arguments, then we are using `inds_ptrs_` to obtain the
   * sample index.
   */
  inline _RAFT_HOST_DEVICE bool operator()(
    // query index
    const uint32_t query_ix,
    // the current inverted list index
    const uint32_t cluster_ix,
    // the index of the current sample inside the current inverted list
    const uint32_t sample_ix) const
  {
    // Dispatch at compile time on the wrapped filter's arity.
    if constexpr (takes_three_args<filter_t>::value) {
      return next_filter_(query_ix, cluster_ix, sample_ix);
    } else {
      return next_filter_(query_ix, inds_ptrs_[cluster_ix][sample_ix]);
    }
  }
};
/**
* If the filtering depends on the index of a sample, then the following
* filter template can be used:
*
* template <typename IdxT>
* struct index_ivf_sample_filter {
* using index_type = IdxT;
*
* const index_type* const* inds_ptr = nullptr;
*
* index_ivf_sample_filter() {}
* index_ivf_sample_filter(const index_type* const* _inds_ptr)
* : inds_ptr{_inds_ptr} {}
* index_ivf_sample_filter(const index_ivf_sample_filter&) = default;
* index_ivf_sample_filter(index_ivf_sample_filter&&) = default;
* index_ivf_sample_filter& operator=(const index_ivf_sample_filter&) = default;
* index_ivf_sample_filter& operator=(index_ivf_sample_filter&&) = default;
*
* inline _RAFT_HOST_DEVICE bool operator()(
* const uint32_t query_ix,
* const uint32_t cluster_ix,
* const uint32_t sample_ix) const {
* index_type database_idx = inds_ptr[cluster_ix][sample_ix];
*
* // return true or false, depending on the database_idx
* return true;
* }
* };
*
* Initialize it as:
* using filter_type = index_ivf_sample_filter<idx_t>;
* filter_type filter(raft_ivfpq_index.inds_ptrs().data_handle());
*
* Use it as:
* raft::neighbors::ivf_pq::search_with_filtering<data_t, idx_t, filter_type>(
* ...regular parameters here...,
* filter
* );
*
* Another example would be the following filter that greenlights samples according
* to a contiguous bit mask vector.
*
* template <typename IdxT>
* struct bitmask_ivf_sample_filter {
* using index_type = IdxT;
*
* const index_type* const* inds_ptr = nullptr;
* const uint64_t* const bit_mask_ptr = nullptr;
* const int64_t bit_mask_stride_64 = 0;
*
* bitmask_ivf_sample_filter() {}
* bitmask_ivf_sample_filter(
* const index_type* const* _inds_ptr,
* const uint64_t* const _bit_mask_ptr,
* const int64_t _bit_mask_stride_64)
* : inds_ptr{_inds_ptr},
* bit_mask_ptr{_bit_mask_ptr},
* bit_mask_stride_64{_bit_mask_stride_64} {}
* bitmask_ivf_sample_filter(const bitmask_ivf_sample_filter&) = default;
* bitmask_ivf_sample_filter(bitmask_ivf_sample_filter&&) = default;
* bitmask_ivf_sample_filter& operator=(const bitmask_ivf_sample_filter&) = default;
* bitmask_ivf_sample_filter& operator=(bitmask_ivf_sample_filter&&) = default;
*
* inline _RAFT_HOST_DEVICE bool operator()(
* const uint32_t query_ix,
* const uint32_t cluster_ix,
* const uint32_t sample_ix) const {
* const index_type database_idx = inds_ptr[cluster_ix][sample_ix];
* const uint64_t bit_mask_element =
* bit_mask_ptr[query_ix * bit_mask_stride_64 + database_idx / 64];
* const uint64_t masked_bool =
* bit_mask_element & (1ULL << (uint64_t)(database_idx % 64));
* const bool is_bit_set = (masked_bool != 0);
*
* return is_bit_set;
* }
* };
*/
} // namespace raft::neighbors::filtering
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/nn_descent.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/nn_descent.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
namespace raft::neighbors::experimental::nn_descent {
/**
* @defgroup nn-descent CUDA gradient descent nearest neighbor
* @{
*/
/**
 * @brief Build nn-descent Index with dataset in device memory
 *
 * The following distance metrics are supported:
 * - L2
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors::experimental;
 *   // use default index parameters
 *   nn_descent::index_params index_params;
 *   // create and fill the index from a [N, D] raft::device_matrix_view dataset
 *   auto index = nn_descent::build(res, index_params, dataset);
 *   // index.graph() provides a raft::host_matrix_view of an
 *   // all-neighbors knn graph of dimensions [N, k] of the input
 *   // dataset
 * @endcode
 *
 * @tparam T data-type of the input dataset
 * @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
 * @param[in] params an instance of nn_descent::index_params that are parameters
 *               to run the nn-descent algorithm
 * @param[in] dataset raft::device_matrix_view input dataset expected to be located
 *                in device memory
 * @return index<IdxT> index containing all-neighbors knn graph in host memory
 */
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& res,
                  index_params const& params,
                  raft::device_matrix_view<const T, int64_t, row_major> dataset)
{
  return detail::build<T, IdxT>(res, params, dataset);
}
/**
 * @brief Build nn-descent Index with dataset in device memory
 *
 * The following distance metrics are supported:
 * - L2
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors::experimental;
 *   // use default index parameters
 *   nn_descent::index_params index_params;
 *   // allocate a [N, k] graph and fill the index from a [N, D]
 *   // raft::device_matrix_view dataset
 *   auto knn_graph = raft::make_host_matrix<uint32_t, int64_t>(N, k);
 *   auto index = nn_descent::index{res, knn_graph.view()};
 *   nn_descent::build(res, index_params, dataset, index);
 *   // index.graph() provides a raft::host_matrix_view of an
 *   // all-neighbors knn graph of dimensions [N, k] of the input
 *   // dataset
 * @endcode
 *
 * @tparam T data-type of the input dataset
 * @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
 * @param[in] params an instance of nn_descent::index_params that are parameters
 *               to run the nn-descent algorithm
 * @param[in] dataset raft::device_matrix_view input dataset expected to be located
 *                in device memory
 * @param[out] idx raft::neighbors::experimental::nn_descent::index containing all-neighbors knn
 * graph in host memory
 */
template <typename T, typename IdxT = uint32_t>
void build(raft::resources const& res,
           index_params const& params,
           raft::device_matrix_view<const T, int64_t, row_major> dataset,
           index<IdxT>& idx)
{
  detail::build<T, IdxT>(res, params, dataset, idx);
}
/**
 * @brief Build nn-descent Index with dataset in host memory
 *
 * The following distance metrics are supported:
 * - L2
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors::experimental;
 *   // use default index parameters
 *   nn_descent::index_params index_params;
 *   // create and fill the index from a [N, D] raft::host_matrix_view dataset
 *   auto index = nn_descent::build(res, index_params, dataset);
 *   // index.graph() provides a raft::host_matrix_view of an
 *   // all-neighbors knn graph of dimensions [N, k] of the input
 *   // dataset
 * @endcode
 *
 * @tparam T data-type of the input dataset
 * @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
 * @param[in] params an instance of nn_descent::index_params that are parameters
 *               to run the nn-descent algorithm
 * @param[in] dataset raft::host_matrix_view input dataset expected to be located
 *                in host memory
 * @return index<IdxT> index containing all-neighbors knn graph in host memory
 */
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& res,
                  index_params const& params,
                  raft::host_matrix_view<const T, int64_t, row_major> dataset)
{
  return detail::build<T, IdxT>(res, params, dataset);
}
/**
 * @brief Build nn-descent Index with dataset in host memory
 *
 * The following distance metrics are supported:
 * - L2
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors::experimental;
 *   // use default index parameters
 *   nn_descent::index_params index_params;
 *   // allocate a [N, k] graph and fill the index from a [N, D]
 *   // raft::host_matrix_view dataset
 *   auto knn_graph = raft::make_host_matrix<uint32_t, int64_t>(N, k);
 *   auto index = nn_descent::index{res, knn_graph.view()};
 *   nn_descent::build(res, index_params, dataset, index);
 *   // index.graph() provides a raft::host_matrix_view of an
 *   // all-neighbors knn graph of dimensions [N, k] of the input
 *   // dataset
 * @endcode
 *
 * @tparam T data-type of the input dataset
 * @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
 * @param[in] params an instance of nn_descent::index_params that are parameters
 *               to run the nn-descent algorithm
 * @param[in] dataset raft::host_matrix_view input dataset expected to be located
 *                in host memory
 * @param[out] idx raft::neighbors::experimental::nn_descent::index containing all-neighbors knn
 * graph in host memory
 */
template <typename T, typename IdxT = uint32_t>
void build(raft::resources const& res,
           index_params const& params,
           raft::host_matrix_view<const T, int64_t, row_major> dataset,
           index<IdxT>& idx)
{
  detail::build<T, IdxT>(res, params, dataset, idx);
}
/** @} */ // end group nn-descent
} // namespace raft::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/ann_types.hpp>
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/util/integer_utils.hpp>

#include <thrust/fill.h>

#include <limits>
#include <memory>
#include <type_traits>
namespace raft::neighbors::ivf_pq {
/**
* @addtogroup ivf_pq
* @{
*/
/** A type for specifying how PQ codebooks are created. */
enum class codebook_gen {  // NOLINT
  /** Create one codebook per PQ subspace (data slice along features). */
  PER_SUBSPACE = 0,  // NOLINT
  /** Create one codebook per first-level (IVF) cluster. */
  PER_CLUSTER = 1,  // NOLINT
};
/** Parameters used to build an IVF-PQ index (extends the common ANN `index_params`). */
struct index_params : ann::index_params {
  /**
   * The number of inverted lists (clusters)
   *
   * Hint: the number of vectors per cluster (`n_rows/n_lists`) should be approximately 1,000 to
   * 10,000.
   */
  uint32_t n_lists = 1024;
  /** The number of iterations searching for kmeans centers (index building). */
  uint32_t kmeans_n_iters = 20;
  /** The fraction of data to use during iterative kmeans building. */
  double kmeans_trainset_fraction = 0.5;
  /**
   * The bit length of the vector element after compression by PQ.
   *
   * Possible values: [4, 5, 6, 7, 8].
   *
   * Hint: the smaller the 'pq_bits', the smaller the index size and the better the search
   * performance, but the lower the recall.
   */
  uint32_t pq_bits = 8;
  /**
   * The dimensionality of the vector after compression by PQ. When zero, an optimal value is
   * selected using a heuristic.
   *
   * NB: `pq_dim  * pq_bits` must be a multiple of 8.
   *
   * Hint: a smaller 'pq_dim' results in a smaller index size and better search performance, but
   * lower recall. If 'pq_bits' is 8, 'pq_dim' can be set to any number, but multiple of 8 are
   * desirable for good performance. If 'pq_bits' is not 8, 'pq_dim' should be a multiple of 8.
   * For good performance, it is desirable that 'pq_dim' is a multiple of 32. Ideally, 'pq_dim'
   * should be also a divisor of the dataset dim.
   */
  uint32_t pq_dim = 0;
  /** How PQ codebooks are created. */
  codebook_gen codebook_kind = codebook_gen::PER_SUBSPACE;
  /**
   * Apply a random rotation matrix on the input data and queries even if `dim % pq_dim == 0`.
   *
   * Note: if `dim` is not multiple of `pq_dim`, a random rotation is always applied to the input
   * data and queries to transform the working space from `dim` to `rot_dim`, which may be slightly
   * larger than the original space and is a multiple of `pq_dim` (`rot_dim % pq_dim == 0`).
   * However, this transform is not necessary when `dim` is multiple of `pq_dim`
   *   (`dim == rot_dim`, hence no need in adding "extra" data columns / features).
   *
   * By default, if `dim == rot_dim`, the rotation transform is initialized with the identity
   * matrix. When `force_random_rotation == true`, a random orthogonal transform matrix is generated
   * regardless of the values of `dim` and `pq_dim`.
   */
  bool force_random_rotation = false;
  /**
   * By default, the algorithm allocates more space than necessary for individual clusters
   * (`list_data`). This allows to amortize the cost of memory allocation and reduce the number of
   * data copies during repeated calls to `extend` (extending the database).
   *
   * The alternative is the conservative allocation behavior; when enabled, the algorithm always
   * allocates the minimum amount of memory required to store the given number of records. Set this
   * flag to `true` if you prefer to use as little GPU memory for the database as possible.
   */
  bool conservative_memory_allocation = false;
};
/** Parameters controlling an IVF-PQ search (extends the common ANN `search_params`). */
struct search_params : ann::search_params {
  /** The number of clusters to search. */
  uint32_t n_probes = 20;
  /**
   * Data type of look up table to be created dynamically at search time.
   *
   * Possible values: [CUDA_R_32F, CUDA_R_16F, CUDA_R_8U]
   *
   * The use of low-precision types reduces the amount of shared memory required at search time, so
   * fast shared memory kernels can be used even for datasets with large dimensionality. Note that
   * the recall is slightly degraded when low-precision type is selected.
   */
  cudaDataType_t lut_dtype = CUDA_R_32F;
  /**
   * Storage data type for distance/similarity computed at search time.
   *
   * Possible values: [CUDA_R_16F, CUDA_R_32F]
   *
   * If the performance limiter at search time is device memory access, selecting FP16 will improve
   * performance slightly.
   */
  cudaDataType_t internal_distance_dtype = CUDA_R_32F;
  /**
   * Preferred fraction of SM's unified memory / L1 cache to be used as shared memory.
   *
   * Possible values: [0.0 - 1.0] as a fraction of the `sharedMemPerMultiprocessor`.
   *
   * One wants to increase the carveout to make sure a good GPU occupancy for the main search
   * kernel, but not to keep it too high to leave some memory to be used as L1 cache. Note, this
   * value is interpreted only as a hint. Moreover, a GPU usually allows only a fixed set of cache
   * configurations, so the provided value is rounded up to the nearest configuration. Refer to the
   * NVIDIA tuning guide for the target GPU architecture.
   *
   * Note, this is a low-level tuning parameter that can have drastic negative effects on the search
   * performance if tweaked incorrectly.
   */
  double preferred_shmem_carveout = 1.0;
};
// Both parameter structs must stay aggregates — presumably so that users can rely on
// brace/designated initialization; verify before adding constructors or virtuals.
static_assert(std::is_aggregate_v<index_params>);
static_assert(std::is_aggregate_v<search_params>);
/** Size of the interleaved group. */
constexpr static uint32_t kIndexGroupSize = 32;
/** Stride of the interleaved group for vectorized loads. */
constexpr static uint32_t kIndexGroupVecLen = 16;
/**
 * Default value returned by `search` when the `n_probes` is too small and top-k is too large.
 * One may encounter it if the combined size of probed clusters is smaller than the requested
 * number of results per query.
 */
template <typename IdxT>
constexpr static IdxT kOutOfBoundsRecord = std::numeric_limits<IdxT>::max();
/**
 * Describes the memory layout of a single IVF-PQ list (cluster).
 *
 * @tparam SizeT type used for size/offset arithmetic
 * @tparam IdxT  type of the source-dataset indices
 */
template <typename SizeT, typename IdxT>
struct list_spec {
  using value_type = uint8_t;
  using index_type = IdxT;
  /** PQ-encoded data stored in the interleaved format:
   *
   *    [ ceildiv(list_size, kIndexGroupSize)
   *    , ceildiv(pq_dim, (kIndexGroupVecLen * 8u) / pq_bits)
   *    , kIndexGroupSize
   *    , kIndexGroupVecLen
   *    ].
   */
  using list_extents =
    extents<SizeT, dynamic_extent, dynamic_extent, kIndexGroupSize, kIndexGroupVecLen>;
  SizeT align_max;
  SizeT align_min;
  uint32_t pq_bits;
  uint32_t pq_dim;
  // NB: initializers are listed in member declaration order (the order C++ actually runs
  // them), which avoids -Wreorder warnings; there are no cross-member dependencies here.
  constexpr list_spec(uint32_t pq_bits, uint32_t pq_dim, bool conservative_memory_allocation)
    : align_max(conservative_memory_allocation ? kIndexGroupSize : 1024),
      align_min(kIndexGroupSize),
      pq_bits(pq_bits),
      pq_dim(pq_dim)
  {
  }
  // Allow casting between different size-types (for safer size and offset calculations)
  template <typename OtherSizeT>
  constexpr explicit list_spec(const list_spec<OtherSizeT, IdxT>& other_spec)
    : align_max{other_spec.align_max},
      align_min{other_spec.align_min},
      pq_bits{other_spec.pq_bits},
      pq_dim{other_spec.pq_dim}
  {
  }
  /** Determine the extents of an array enough to hold a given amount of data. */
  constexpr auto make_list_extents(SizeT n_rows) const -> list_extents
  {
    // how many elems of pq_dim fit into one kIndexGroupVecLen-byte chunk
    auto pq_chunk = (kIndexGroupVecLen * 8u) / pq_bits;
    return make_extents<SizeT>(div_rounding_up_safe<SizeT>(n_rows, kIndexGroupSize),
                               div_rounding_up_safe<SizeT>(pq_dim, pq_chunk),
                               kIndexGroupSize,
                               kIndexGroupVecLen);
  }
};
/** The data of a single IVF list (cluster), laid out according to `list_spec`. */
template <typename IdxT, typename SizeT = uint32_t>
using list_data = ivf::list<list_spec, SizeT, IdxT>;
/**
* @brief IVF-PQ index.
*
* In the IVF-PQ index, a database vector y is approximated with two level quantization:
*
* y = Q_1(y) + Q_2(y - Q_1(y))
*
* The first level quantizer (Q_1), maps the vector y to the nearest cluster center. The number of
* clusters is n_lists.
*
* The second quantizer encodes the residual, and it is defined as a product quantizer [1].
*
* A product quantizer encodes a `dim` dimensional vector with a `pq_dim` dimensional vector.
* First we split the input vector into `pq_dim` subvectors (denoted by u), where each u vector
* contains `pq_len` distinct components of y
*
* y_1, y_2, ... y_{pq_len}, y_{pq_len+1}, ... y_{2*pq_len}, ... y_{dim-pq_len+1} ... y_{dim}
* \___________________/ \____________________________/ \______________________/
* u_1 u_2 u_{pq_dim}
*
 * Then each subvector is encoded with a separate quantizer q_i, and the results are concatenated
*
 *   Q_2(y) = q_1(u_1),q_2(u_2),...,q_{pq_dim}(u_{pq_dim})
*
* Each quantizer q_i outputs a code with pq_bit bits. The second level quantizers are also defined
* by k-means clustering in the corresponding sub-space: the reproduction values are the centroids,
* and the set of reproduction values is the codebook.
*
* When the data dimensionality `dim` is not multiple of `pq_dim`, the feature space is transformed
* using a random orthogonal matrix to have `rot_dim = pq_dim * pq_len` dimensions
* (`rot_dim >= dim`).
*
* The second-level quantizers are trained either for each subspace or for each cluster:
* (a) codebook_gen::PER_SUBSPACE:
* creates `pq_dim` second-level quantizers - one for each slice of the data along features;
* (b) codebook_gen::PER_CLUSTER:
* creates `n_lists` second-level quantizers - one for each first-level cluster.
* In either case, the centroids are again found using k-means clustering interpreting the data as
* having pq_len dimensions.
*
* [1] Product quantization for nearest neighbor search Herve Jegou, Matthijs Douze, Cordelia Schmid
*
* @tparam IdxT type of the indices in the source dataset
*
*/
template <typename IdxT>
struct index : ann::index {
  static_assert(!raft::is_narrowing_v<uint32_t, IdxT>,
                "IdxT must be able to represent all values of uint32_t");

 public:
  /** Total length of the index. */
  [[nodiscard]] constexpr inline auto size() const noexcept -> IdxT
  {
    return accum_sorted_sizes_(n_lists());
  }
  /** Dimensionality of the input data. */
  [[nodiscard]] constexpr inline auto dim() const noexcept -> uint32_t { return dim_; }
  /**
   * Dimensionality of the cluster centers:
   * input data dim extended with vector norms and padded to 8 elems.
   */
  [[nodiscard]] constexpr inline auto dim_ext() const noexcept -> uint32_t
  {
    return raft::round_up_safe(dim() + 1, 8u);
  }
  /**
   * Dimensionality of the data after transforming it for PQ processing
   * (rotated and augmented to be a multiple of `pq_dim`).
   */
  [[nodiscard]] constexpr inline auto rot_dim() const noexcept -> uint32_t
  {
    return pq_len() * pq_dim();
  }
  /** The bit length of an encoded vector element after compression by PQ. */
  [[nodiscard]] constexpr inline auto pq_bits() const noexcept -> uint32_t { return pq_bits_; }
  /** The dimensionality of an encoded vector after compression by PQ. */
  [[nodiscard]] constexpr inline auto pq_dim() const noexcept -> uint32_t { return pq_dim_; }
  /** Dimensionality of a subspace, i.e. the number of vector components mapped to a subspace */
  [[nodiscard]] constexpr inline auto pq_len() const noexcept -> uint32_t
  {
    return raft::div_rounding_up_unsafe(dim(), pq_dim());
  }
  /** The number of vectors in a PQ codebook (`1 << pq_bits`). */
  [[nodiscard]] constexpr inline auto pq_book_size() const noexcept -> uint32_t
  {
    return 1 << pq_bits();
  }
  /** Distance metric used for clustering. */
  [[nodiscard]] constexpr inline auto metric() const noexcept -> raft::distance::DistanceType
  {
    return metric_;
  }
  /** How PQ codebooks are created. */
  [[nodiscard]] constexpr inline auto codebook_kind() const noexcept -> codebook_gen
  {
    return codebook_kind_;
  }
  /** Number of clusters/inverted lists (first level quantization). */
  [[nodiscard]] constexpr inline auto n_lists() const noexcept -> uint32_t { return lists_.size(); }
  /**
   * Whether to use conservative memory allocation when extending the list (cluster) data
   * (see index_params.conservative_memory_allocation).
   */
  [[nodiscard]] constexpr inline auto conservative_memory_allocation() const noexcept -> bool
  {
    return conservative_memory_allocation_;
  }

  // Don't allow copying the index for performance reasons (try avoiding copying data)
  index(const index&)                    = delete;
  index(index&&)                         = default;
  auto operator=(const index&) -> index& = delete;
  auto operator=(index&&) -> index&      = default;
  ~index()                               = default;

  /** Construct an empty index. It needs to be trained and then populated. */
  // NB: the initializers below follow the member declaration order — the order in which C++
  // actually runs them. In particular, `lists_` must be initialized before `pq_centers_`,
  // because `make_pq_centers_extents()` may read `n_lists()` (i.e. `lists_.size()`).
  index(raft::resources const& handle,
        raft::distance::DistanceType metric,
        codebook_gen codebook_kind,
        uint32_t n_lists,
        uint32_t dim,
        uint32_t pq_bits                    = 8,
        uint32_t pq_dim                     = 0,
        bool conservative_memory_allocation = false)
    : ann::index(),
      metric_(metric),
      codebook_kind_(codebook_kind),
      dim_(dim),
      pq_bits_(pq_bits),
      pq_dim_(pq_dim == 0 ? calculate_pq_dim(dim) : pq_dim),
      conservative_memory_allocation_(conservative_memory_allocation),
      lists_{n_lists},
      list_sizes_{make_device_vector<uint32_t, uint32_t>(handle, n_lists)},
      pq_centers_{make_device_mdarray<float>(handle, make_pq_centers_extents())},
      centers_{make_device_matrix<float, uint32_t>(handle, n_lists, this->dim_ext())},
      centers_rot_{make_device_matrix<float, uint32_t>(handle, n_lists, this->rot_dim())},
      rotation_matrix_{make_device_matrix<float, uint32_t>(handle, this->rot_dim(), this->dim())},
      data_ptrs_{make_device_vector<uint8_t*, uint32_t>(handle, n_lists)},
      inds_ptrs_{make_device_vector<IdxT*, uint32_t>(handle, n_lists)},
      accum_sorted_sizes_{make_host_vector<IdxT, uint32_t>(n_lists + 1)}
  {
    check_consistency();
    accum_sorted_sizes_(n_lists) = 0;
  }

  /** Construct an empty index. It needs to be trained and then populated. */
  index(raft::resources const& handle, const index_params& params, uint32_t dim)
    : index(handle,
            params.metric,
            params.codebook_kind,
            params.n_lists,
            dim,
            params.pq_bits,
            params.pq_dim,
            params.conservative_memory_allocation)
  {
  }

  using pq_centers_extents =
    std::experimental::extents<uint32_t, dynamic_extent, dynamic_extent, dynamic_extent>;
  /**
   * PQ cluster centers
   *
   *   - codebook_gen::PER_SUBSPACE: [pq_dim , pq_len, pq_book_size]
   *   - codebook_gen::PER_CLUSTER:  [n_lists, pq_len, pq_book_size]
   */
  inline auto pq_centers() noexcept -> device_mdspan<float, pq_centers_extents, row_major>
  {
    return pq_centers_.view();
  }
  [[nodiscard]] inline auto pq_centers() const noexcept
    -> device_mdspan<const float, pq_centers_extents, row_major>
  {
    return pq_centers_.view();
  }

  /** Lists' data and indices. */
  inline auto lists() noexcept -> std::vector<std::shared_ptr<list_data<IdxT>>>& { return lists_; }
  [[nodiscard]] inline auto lists() const noexcept
    -> const std::vector<std::shared_ptr<list_data<IdxT>>>&
  {
    return lists_;
  }

  /** Pointers to the inverted lists (clusters) data  [n_lists]. */
  inline auto data_ptrs() noexcept -> device_vector_view<uint8_t*, uint32_t, row_major>
  {
    return data_ptrs_.view();
  }
  [[nodiscard]] inline auto data_ptrs() const noexcept
    -> device_vector_view<const uint8_t* const, uint32_t, row_major>
  {
    return make_mdspan<const uint8_t* const, uint32_t, row_major, false, true>(
      data_ptrs_.data_handle(), data_ptrs_.extents());
  }

  /** Pointers to the inverted lists (clusters) indices  [n_lists]. */
  inline auto inds_ptrs() noexcept -> device_vector_view<IdxT*, uint32_t, row_major>
  {
    return inds_ptrs_.view();
  }
  [[nodiscard]] inline auto inds_ptrs() const noexcept
    -> device_vector_view<const IdxT* const, uint32_t, row_major>
  {
    return make_mdspan<const IdxT* const, uint32_t, row_major, false, true>(
      inds_ptrs_.data_handle(), inds_ptrs_.extents());
  }

  /** The transform matrix (original space -> rotated padded space) [rot_dim, dim] */
  inline auto rotation_matrix() noexcept -> device_matrix_view<float, uint32_t, row_major>
  {
    return rotation_matrix_.view();
  }
  [[nodiscard]] inline auto rotation_matrix() const noexcept
    -> device_matrix_view<const float, uint32_t, row_major>
  {
    return rotation_matrix_.view();
  }

  /**
   * Accumulated list sizes, sorted in descending order [n_lists + 1].
   * The last value contains the total length of the index.
   * The value at index zero is always zero.
   *
   * That is, the content of this span is as if the `list_sizes` was sorted and then accumulated.
   *
   * This span is used during search to estimate the maximum size of the workspace.
   */
  inline auto accum_sorted_sizes() noexcept -> host_vector_view<IdxT, uint32_t, row_major>
  {
    return accum_sorted_sizes_.view();
  }
  [[nodiscard]] inline auto accum_sorted_sizes() const noexcept
    -> host_vector_view<const IdxT, uint32_t, row_major>
  {
    return accum_sorted_sizes_.view();
  }

  /** Sizes of the lists [n_lists]. */
  inline auto list_sizes() noexcept -> device_vector_view<uint32_t, uint32_t, row_major>
  {
    return list_sizes_.view();
  }
  [[nodiscard]] inline auto list_sizes() const noexcept
    -> device_vector_view<const uint32_t, uint32_t, row_major>
  {
    return list_sizes_.view();
  }

  /** Cluster centers corresponding to the lists in the original space [n_lists, dim_ext] */
  inline auto centers() noexcept -> device_matrix_view<float, uint32_t, row_major>
  {
    return centers_.view();
  }
  [[nodiscard]] inline auto centers() const noexcept
    -> device_matrix_view<const float, uint32_t, row_major>
  {
    return centers_.view();
  }

  /** Cluster centers corresponding to the lists in the rotated space [n_lists, rot_dim] */
  inline auto centers_rot() noexcept -> device_matrix_view<float, uint32_t, row_major>
  {
    return centers_rot_.view();
  }
  [[nodiscard]] inline auto centers_rot() const noexcept
    -> device_matrix_view<const float, uint32_t, row_major>
  {
    return centers_rot_.view();
  }

  /** fetch size of a particular IVF list in bytes using the list extents.
   * Usage example:
   * @code{.cpp}
   *   raft::resources res;
   *   // use default index params
   *   ivf_pq::index_params index_params;
   *   // extend the IVF lists while building the index
   *   index_params.add_data_on_build = true;
   *   // create and fill the index from a [N, D] dataset
   *   auto index = raft::neighbors::ivf_pq::build<int64_t>(res, index_params, dataset, N, D);
   *   // Fetch the size of the fourth list
   *   uint32_t size = index.get_list_size_in_bytes(3);
   * @endcode
   *
   * @param[in] label list ID
   */
  inline auto get_list_size_in_bytes(uint32_t label) -> uint32_t
  {
    RAFT_EXPECTS(label < this->n_lists(),
                 "Expected label to be less than number of lists in the index");
    // NB: take a reference; copying the mdarray here would duplicate the list's device memory
    // just to query its size (and would also shadow the `list_data` alias above).
    const auto& list_data_ref = this->lists()[label]->data;
    return list_data_ref.size();
  }

 private:
  raft::distance::DistanceType metric_;
  codebook_gen codebook_kind_;
  uint32_t dim_;
  uint32_t pq_bits_;
  uint32_t pq_dim_;
  bool conservative_memory_allocation_;

  // Primary data members
  std::vector<std::shared_ptr<list_data<IdxT>>> lists_;
  device_vector<uint32_t, uint32_t, row_major> list_sizes_;
  device_mdarray<float, pq_centers_extents, row_major> pq_centers_;
  device_matrix<float, uint32_t, row_major> centers_;
  device_matrix<float, uint32_t, row_major> centers_rot_;
  device_matrix<float, uint32_t, row_major> rotation_matrix_;

  // Computed members for accelerating search.
  device_vector<uint8_t*, uint32_t, row_major> data_ptrs_;
  device_vector<IdxT*, uint32_t, row_major> inds_ptrs_;
  host_vector<IdxT, uint32_t, row_major> accum_sorted_sizes_;

  /** Throw an error if the index content is inconsistent. */
  void check_consistency()
  {
    RAFT_EXPECTS(pq_bits() >= 4 && pq_bits() <= 8,
                 "`pq_bits` must be within closed range [4,8], but got %u.",
                 pq_bits());
    RAFT_EXPECTS((pq_bits() * pq_dim()) % 8 == 0,
                 "`pq_bits * pq_dim` must be a multiple of 8, but got %u * %u = %u.",
                 pq_bits(),
                 pq_dim(),
                 pq_bits() * pq_dim());
  }

  auto make_pq_centers_extents() -> pq_centers_extents
  {
    switch (codebook_kind()) {
      case codebook_gen::PER_SUBSPACE:
        return make_extents<uint32_t>(pq_dim(), pq_len(), pq_book_size());
      case codebook_gen::PER_CLUSTER:
        return make_extents<uint32_t>(n_lists(), pq_len(), pq_book_size());
      default: RAFT_FAIL("Unreachable code");
    }
  }

  static inline auto calculate_pq_dim(uint32_t dim) -> uint32_t
  {
    // If the dimensionality is large enough, we can reduce it to improve performance
    if (dim >= 128) { dim /= 2; }
    // Round it down to 32 to improve performance.
    auto r = raft::round_down_safe<uint32_t>(dim, 32);
    if (r > 0) return r;
    // If the dimensionality is really low, round it to the closest power-of-two
    r = 1;
    while ((r << 1) <= dim) {
      r = r << 1;
    }
    return r;
  }
};
/** @} */
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/cagra.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/cagra/cagra_build.cuh"
#include "detail/cagra/cagra_search.cuh"
#include "detail/cagra/graph_core.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/cagra_types.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace raft::neighbors::cagra {
/**
* @defgroup cagra CUDA ANN Graph-based nearest neighbor search
* @{
*/
/**
* @brief Build a kNN graph using IVF-PQ.
*
* The kNN graph is the first building block for CAGRA index.
*
* The output is a dense matrix that stores the neighbor indices for each point in the dataset.
* Each point has the same number of neighbors.
*
* See [cagra::build](#cagra::build) for an alternative method.
*
* The following distance metrics are supported:
* - L2Expanded
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* ivf_pq::index_params build_params;
 *   ivf_pq::search_params search_params;
* auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
* // create knn graph
* cagra::build_knn_graph(res, dataset, knn_graph.view(), 2, build_params, search_params);
 *   auto optimized_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 64);
* cagra::optimize(res, dataset, knn_graph.view(), optimized_graph.view());
* // Construct an index from dataset and optimized knn_graph
* auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
* optimized_graph.view());
* @endcode
*
* @tparam DataT data element type
* @tparam IdxT type of the dataset vector indices
*
* @param[in] res raft resources
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
* @param[out] knn_graph a host matrix view to store the output knn graph [n_rows, graph_degree]
* @param[in] refine_rate (optional) refinement rate for ivf-pq search
* @param[in] build_params (optional) ivf_pq index building parameters for knn graph
* @param[in] search_params (optional) ivf_pq search parameters
*/
template <typename DataT, typename IdxT, typename accessor>
void build_knn_graph(raft::resources const& res,
                     mdspan<const DataT, matrix_extent<int64_t>, row_major, accessor> dataset,
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph,
                     std::optional<float> refine_rate                   = std::nullopt,
                     std::optional<ivf_pq::index_params> build_params   = std::nullopt,
                     std::optional<ivf_pq::search_params> search_params = std::nullopt)
{
  // The detail implementation works with unsigned indices. IdxT and its unsigned
  // counterpart have the same width, so reinterpreting the graph storage is a pure
  // type-level change.
  using internal_IdxT = std::make_unsigned_t<IdxT>;

  auto graph_view = make_host_matrix_view<internal_IdxT, int64_t>(
    reinterpret_cast<internal_IdxT*>(knn_graph.data_handle()),
    knn_graph.extent(0),
    knn_graph.extent(1));
  auto dataset_view = mdspan<const DataT, matrix_extent<int64_t>, row_major, accessor>(
    dataset.data_handle(), dataset.extent(0), dataset.extent(1));

  cagra::detail::build_knn_graph(
    res, dataset_view, graph_view, refine_rate, build_params, search_params);
}
/**
 * @brief Build a kNN graph using NN-descent.
 *
 * The kNN graph is the first building block for CAGRA index.
 *
 * The output is a dense matrix that stores the neighbor indices for each point in the dataset.
 * Each point has the same number of neighbors.
 *
 * See [cagra::build](#cagra::build) for an alternative method.
 *
 * The following distance metrics are supported:
 * - L2Expanded
 *
 * Usage example:
 * @code{.cpp}
 *   using namespace raft::neighbors;
 *   using namespace raft::neighbors::experimental;
 *   // use default index parameters
 *   nn_descent::index_params build_params;
 *   build_params.graph_degree = 128;
 *   auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
 *   // create knn graph
 *   cagra::build_knn_graph(res, dataset, knn_graph.view(), build_params);
 *   auto optimized_graph = raft::make_host_matrix<IdxT, int64_t>(dataset.extent(0), 64);
 *   cagra::optimize(res, dataset, nn_descent_index.graph.view(), optimized_graph.view());
 *   // Construct an index from dataset and optimized knn_graph
 *   auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
 *                                      optimized_graph.view());
 * @endcode
 *
 * @tparam DataT data element type
 * @tparam IdxT type of the dataset vector indices
 * @tparam accessor host or device accessor_type for the dataset
 * @param[in] res raft::resources is an object managing resources
 * @param[in] dataset input raft::host/device_matrix_view that can be located
 * in host or device memory
 * @param[out] knn_graph a host matrix view to store the output knn graph [n_rows, graph_degree]
 * @param[in] build_params an instance of experimental::nn_descent::index_params that are parameters
 * to run the nn-descent algorithm
 */
template <typename DataT,
          typename IdxT = uint32_t,
          typename accessor =
            host_device_accessor<std::experimental::default_accessor<DataT>, memory_type::device>>
void build_knn_graph(raft::resources const& res,
                     mdspan<const DataT, matrix_extent<int64_t>, row_major, accessor> dataset,
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph,
                     experimental::nn_descent::index_params build_params)
{
  // Thin public wrapper: the nn-descent graph construction lives in cagra::detail.
  detail::build_knn_graph<DataT, IdxT>(res, dataset, knn_graph, build_params);
}
/**
* @brief Sort a KNN graph index.
* Preprocessing step for `cagra::optimize`: If a KNN graph is not built using
* `cagra::build_knn_graph`, then it is necessary to call this function before calling
* `cagra::optimize`. If the graph is built by `cagra::build_knn_graph`, it is already sorted and
* you do not need to call this function.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* cagra::index_params build_params;
* auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
* // build KNN graph not using `cagra::build_knn_graph`
* // build(knn_graph, dataset, ...);
* // sort graph index
* sort_knn_graph(res, dataset.view(), knn_graph.view());
* // optimize graph
* cagra::optimize(res, dataset, knn_graph.view(), optimized_graph.view());
* // Construct an index from dataset and optimized knn_graph
* auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
* optimized_graph.view());
* @endcode
*
* @tparam DataT type of the data in the source dataset
* @tparam IdxT type of the dataset vector indices
*
* @param[in] res raft resources
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
* @param[in,out] knn_graph a matrix view (host or device) of the input knn graph [n_rows,
* knn_graph_degree]
*/
template <typename DataT,
          typename IdxT       = uint32_t,
          typename d_accessor =
            host_device_accessor<std::experimental::default_accessor<DataT>, memory_type::device>,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void sort_knn_graph(raft::resources const& res,
                    mdspan<const DataT, matrix_extent<int64_t>, row_major, d_accessor> dataset,
                    mdspan<IdxT, matrix_extent<int64_t>, row_major, g_accessor> knn_graph)
{
  // The detail implementation expects unsigned indices; the unsigned counterpart of IdxT
  // has the same width, so the graph storage can simply be reinterpreted.
  using internal_IdxT = std::make_unsigned_t<IdxT>;
  using g_accessor_internal =
    host_device_accessor<std::experimental::default_accessor<internal_IdxT>, g_accessor::mem_type>;

  auto graph_view = mdspan<internal_IdxT, matrix_extent<int64_t>, row_major, g_accessor_internal>(
    reinterpret_cast<internal_IdxT*>(knn_graph.data_handle()),
    knn_graph.extent(0),
    knn_graph.extent(1));
  auto dataset_view = mdspan<const DataT, matrix_extent<int64_t>, row_major, d_accessor>(
    dataset.data_handle(), dataset.extent(0), dataset.extent(1));

  cagra::detail::graph::sort_knn_graph(res, dataset_view, graph_view);
}
/**
* @brief Prune a KNN graph.
*
* Decrease the number of neighbors for each node.
*
* See [cagra::build_knn_graph](#cagra::build_knn_graph) for usage example
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resources
* @param[in] knn_graph a matrix view (host or device) of the input knn graph [n_rows,
* knn_graph_degree]
* @param[out] new_graph a host matrix view of the optimized knn graph [n_rows, graph_degree]
*/
template <typename IdxT = uint32_t,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void optimize(raft::resources const& res,
              mdspan<IdxT, matrix_extent<int64_t>, row_major, g_accessor> knn_graph,
              raft::host_matrix_view<IdxT, int64_t, row_major> new_graph)
{
  // Thin public wrapper: the graph-pruning logic lives in cagra::detail.
  detail::optimize(res, knn_graph, new_graph);
}
/**
* @brief Build the index from the dataset for efficient search.
*
* The build consist of two steps: build an intermediate knn-graph, and optimize it to
* create the final graph. The index_params struct controls the node degree of these
* graphs.
*
* It is required that dataset and the optimized graph fit the GPU memory.
*
* To customize the parameters for knn-graph building and pruning, and to reuse the
* intermediate results, you could build the index in two steps using
* [cagra::build_knn_graph](#cagra::build_knn_graph) and [cagra::optimize](#cagra::optimize).
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* cagra::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = cagra::build(res, index_params, dataset);
* // use default search parameters
* cagra::search_params search_params;
* // search K nearest neighbours
* auto neighbors = raft::make_device_matrix<uint32_t>(res, n_queries, k);
* auto distances = raft::make_device_matrix<float>(res, n_queries, k);
* cagra::search(res, search_params, index, queries, neighbors, distances);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res
* @param[in] params parameters for building the index
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
*
* @return the constructed cagra index
*/
template <typename T,
          typename IdxT = uint32_t,
          typename Accessor =
            host_device_accessor<std::experimental::default_accessor<T>, memory_type::host>>
index<T, IdxT> build(raft::resources const& res,
                     const index_params& params,
                     mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset)
{
  // Thin public wrapper: knn-graph construction + optimization happen in detail::build.
  return detail::build<T, IdxT, Accessor>(res, params, dataset);
}
/**
* @brief Search ANN using the constructed index.
*
* See the [cagra::build](#cagra::build) documentation for a usage example.
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] res raft resources
* @param[in] params configure the search
* @param[in] idx cagra index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
*/
template <typename T, typename IdxT>
void search(raft::resources const& res,
            const search_params& params,
            const index<T, IdxT>& idx,
            raft::device_matrix_view<const T, int64_t, row_major> queries,
            raft::device_matrix_view<IdxT, int64_t, row_major> neighbors,
            raft::device_matrix_view<float, int64_t, row_major> distances)
{
  // Unfiltered search is just filtered search with a pass-through sample filter.
  // Delegating keeps the shape validation and the signed->unsigned index-view
  // plumbing in a single place (search_with_filtering) instead of duplicating
  // that code here; behavior is identical to the previous inline implementation.
  search_with_filtering(res,
                        params,
                        idx,
                        queries,
                        neighbors,
                        distances,
                        raft::neighbors::filtering::none_cagra_sample_filter());
}
/**
* @brief Search ANN using the constructed index with the given sample filter.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* cagra::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = cagra::build(res, index_params, dataset);
* // use default search parameters
* cagra::search_params search_params;
* // create a bitset to filter the search
* auto removed_indices = raft::make_device_vector<IdxT>(res, n_removed_indices);
* raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset(
* res, removed_indices.view(), dataset.extent(0));
* // search K nearest neighbours according to a bitset
* auto neighbors = raft::make_device_matrix<uint32_t>(res, n_queries, k);
* auto distances = raft::make_device_matrix<float>(res, n_queries, k);
* cagra::search_with_filtering(res, search_params, index, queries, neighbors, distances,
* filtering::bitset_filter(removed_indices_bitset.view()));
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam CagraSampleFilterT Device filter function, with the signature
* `(uint32_t query ix, uint32_t sample_ix) -> bool`
*
* @param[in] res raft resources
* @param[in] params configure the search
* @param[in] idx cagra index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
* @param[in] sample_filter a device filter function that greenlights samples for a given query
*/
template <typename T, typename IdxT, typename CagraSampleFilterT>
void search_with_filtering(raft::resources const& res,
                           const search_params& params,
                           const index<T, IdxT>& idx,
                           raft::device_matrix_view<const T, int64_t, row_major> queries,
                           raft::device_matrix_view<IdxT, int64_t, row_major> neighbors,
                           raft::device_matrix_view<float, int64_t, row_major> distances,
                           CagraSampleFilterT sample_filter = CagraSampleFilterT())
{
  // Validate that the output matrices agree with each other and with the queries/index.
  RAFT_EXPECTS(
    queries.extent(0) == neighbors.extent(0) && queries.extent(0) == distances.extent(0),
    "Number of rows in output neighbors and distances matrices must equal the number of queries.");
  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1),
               "Number of columns in output neighbors and distances matrices must equal k");
  RAFT_EXPECTS(queries.extent(1) == idx.dim(),
               "Number of query dimensions should equal number of dimensions in the index.");
  // The internal search implementation indexes with the unsigned counterpart of
  // IdxT. std::make_unsigned preserves the integer width, so reinterpreting the
  // neighbors buffer below reinterprets signedness only, not layout or size.
  using internal_IdxT = typename std::make_unsigned<IdxT>::type;
  auto queries_internal = raft::make_device_matrix_view<const T, int64_t, row_major>(
    queries.data_handle(), queries.extent(0), queries.extent(1));
  auto neighbors_internal = raft::make_device_matrix_view<internal_IdxT, int64_t, row_major>(
    reinterpret_cast<internal_IdxT*>(neighbors.data_handle()),
    neighbors.extent(0),
    neighbors.extent(1));
  auto distances_internal = raft::make_device_matrix_view<float, int64_t, row_major>(
    distances.data_handle(), distances.extent(0), distances.extent(1));
  cagra::detail::search_main<T, internal_IdxT, CagraSampleFilterT, IdxT>(
    res, params, idx, queries_internal, neighbors_internal, distances_internal, sample_filter);
}
/** @} */ // end group cagra
} // namespace raft::neighbors::cagra
// TODO: Remove deprecated experimental namespace in 23.12 release
namespace raft::neighbors::experimental::cagra {
// Backwards-compatibility aliases: CAGRA graduated out of the `experimental`
// namespace. These using-declarations keep pre-move call sites compiling and
// are scheduled for removal (23.12 per the TODO in this file).
using raft::neighbors::cagra::build;
using raft::neighbors::cagra::build_knn_graph;
using raft::neighbors::cagra::optimize;
using raft::neighbors::cagra::search;
using raft::neighbors::cagra::sort_knn_graph;
}  // namespace raft::neighbors::experimental::cagra
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_list.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/serialize.hpp>
#include <raft/util/integer_utils.hpp>
#include <thrust/fill.h>
#include <fstream>
#include <memory>
#include <type_traits>
namespace raft::neighbors::ivf {
/** The data for a single IVF list. */
template <template <typename, typename...> typename SpecT,
          typename SizeT,
          typename... SpecExtraArgs>
list<SpecT, SizeT, SpecExtraArgs...>::list(raft::resources const& res,
                                           const spec_type& spec,
                                           size_type n_rows)
  : size{n_rows}, data{res}, indices{res}
{
  // Capacity policy: lists of at least `align_max` rows are padded up to a
  // multiple of `align_max`; smaller lists get the next power of two of
  // max(n_rows, align_min), clamped to `align_max`.
  auto capacity = round_up_safe<SizeT>(n_rows, spec.align_max);
  if (n_rows < spec.align_max) {
    capacity = bound_by_power_of_two<SizeT>(std::max<SizeT>(n_rows, spec.align_min));
    capacity = std::min<SizeT>(capacity, spec.align_max);
  }
  try {
    data = make_device_mdarray<value_type>(res, spec.make_list_extents(capacity));
    indices = make_device_vector<index_type, SizeT>(res, capacity);
  } catch (std::bad_alloc& e) {
    // Translate allocation failure into a descriptive RAFT error that records
    // both the requested size and the (possibly padded) capacity.
    RAFT_FAIL(
      "ivf::list: failed to allocate a big enough list to hold all data "
      "(requested size: %zu records, selected capacity: %zu records). "
      "Allocator exception: %s",
      size_t(size),
      size_t(capacity),
      e.what());
  }
  // Fill the index buffer with a pre-defined marker for easier debugging
  thrust::fill_n(resource::get_thrust_policy(res),
                 indices.data_handle(),
                 indices.size(),
                 ivf::kInvalidRecord<index_type>);
}
/**
* Resize a list by the given id, so that it can contain the given number of records;
* copy the data if necessary.
*/
template <typename ListT>
void resize_list(raft::resources const& res,
                 std::shared_ptr<ListT>& orig_list, // NOLINT
                 const typename ListT::spec_type& spec,
                 typename ListT::size_type new_used_size,
                 typename ListT::size_type old_used_size)
{
  bool skip_resize = false;
  if (orig_list) {
    if (new_used_size <= orig_list->indices.extent(0)) {
      auto shared_list_size = old_used_size;
      // The CAS both verifies that no other thread has grown the list beyond
      // `old_used_size` and, on success, atomically publishes the new size.
      if (new_used_size <= old_used_size ||
          orig_list->size.compare_exchange_strong(shared_list_size, new_used_size)) {
        // We don't need to resize the list if:
        // 1. The list exists
        // 2. The new size fits in the list
        // 3. The list doesn't grow or no-one else has grown it yet
        skip_resize = true;
      }
    }
  } else {
    // No previous list: there is nothing to carry over.
    old_used_size = 0;
  }
  if (skip_resize) { return; }
  // Allocate a fresh list large enough for the new size and migrate the old content.
  auto new_list = std::make_shared<ListT>(res, spec, new_used_size);
  if (old_used_size > 0) {
    auto copied_data_extents = spec.make_list_extents(old_used_size);
    auto copied_view =
      make_mdspan<typename ListT::value_type, typename ListT::size_type, row_major, false, true>(
        new_list->data.data_handle(), copied_data_extents);
    copy(copied_view.data_handle(),
         orig_list->data.data_handle(),
         copied_view.size(),
         resource::get_cuda_stream(res));
    copy(new_list->indices.data_handle(),
         orig_list->indices.data_handle(),
         old_used_size,
         resource::get_cuda_stream(res));
  }
  // swap the shared pointer content with the new list
  new_list.swap(orig_list);
}
template <typename ListT>
auto serialize_list(const raft::resources& handle,
                    std::ostream& os,
                    const ListT& ld,
                    const typename ListT::spec_type& store_spec,
                    std::optional<typename ListT::size_type> size_override = std::nullopt)
  -> enable_if_valid_list_t<ListT>
{
  using size_type = typename ListT::size_type;
  // The caller may clamp the stored record count; otherwise use the list's own size.
  const size_type n_records = size_override.value_or(ld.size.load());
  serialize_scalar(handle, os, n_records);
  if (n_records == 0) { return; }
  // Stage the device buffers in host memory before streaming them out.
  auto host_data = make_host_mdarray<typename ListT::value_type, size_type, row_major>(
    store_spec.make_list_extents(n_records));
  auto host_inds = make_host_mdarray<typename ListT::index_type, size_type, row_major>(
    make_extents<size_type>(n_records));
  copy(host_data.data_handle(),
       ld.data.data_handle(),
       host_data.size(),
       resource::get_cuda_stream(handle));
  copy(host_inds.data_handle(),
       ld.indices.data_handle(),
       host_inds.size(),
       resource::get_cuda_stream(handle));
  // Make sure both device-to-host copies have landed before reading the host buffers.
  resource::sync_stream(handle);
  serialize_mdspan(handle, os, host_data.view());
  serialize_mdspan(handle, os, host_inds.view());
}
template <typename ListT>
auto serialize_list(const raft::resources& handle,
                    std::ostream& os,
                    const std::shared_ptr<ListT>& ld,
                    const typename ListT::spec_type& store_spec,
                    std::optional<typename ListT::size_type> size_override = std::nullopt)
  -> enable_if_valid_list_t<ListT>
{
  // An absent list is encoded as a bare zero size, so it round-trips cleanly
  // through the deserializing counterpart.
  if (!ld) { return serialize_scalar(handle, os, typename ListT::size_type{0}); }
  return serialize_list<ListT>(handle, os, *ld, store_spec, size_override);
}
template <typename ListT>
auto deserialize_list(const raft::resources& handle,
                      std::istream& is,
                      std::shared_ptr<ListT>& ld,
                      const typename ListT::spec_type& store_spec,
                      const typename ListT::spec_type& device_spec) -> enable_if_valid_list_t<ListT>
{
  using size_type = typename ListT::size_type;
  auto size = deserialize_scalar<size_type>(handle, is);
  // A stored size of zero marks an absent/empty list (see the serializing overloads).
  if (size == 0) { return ld.reset(); }
  // Allocate a fresh device-side list and publish it through the shared pointer.
  std::make_shared<ListT>(handle, device_spec, size).swap(ld);
  auto data_extents = store_spec.make_list_extents(size);
  auto data_array =
    make_host_mdarray<typename ListT::value_type, size_type, row_major>(data_extents);
  auto inds_array = make_host_mdarray<typename ListT::index_type, size_type, row_major>(
    make_extents<size_type>(size));
  deserialize_mdspan(handle, is, data_array.view());
  deserialize_mdspan(handle, is, inds_array.view());
  copy(ld->data.data_handle(),
       data_array.data_handle(),
       data_array.size(),
       resource::get_cuda_stream(handle));
  // NB: copying exactly 'size' indices to leave the rest 'kInvalidRecord' intact.
  copy(
    ld->indices.data_handle(), inds_array.data_handle(), size, resource::get_cuda_stream(handle));
  // Make sure the data is copied from host to device before the host arrays get out of the scope.
  resource::sync_stream(handle);
}
} // namespace raft::neighbors::ivf
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/brute_force-ext.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <optional>
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/core/resources.hpp> // raft::resources
#include <raft/distance/distance_types.hpp> // raft::distance::DistanceType
#include <raft/neighbors/brute_force_types.hpp>
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::neighbors::brute_force {
// NOTE: declarations only (RAFT_EXPLICIT). When RAFT_EXPLICIT_INSTANTIATE_ONLY
// is defined, the definitions come from the precompiled explicit instantiations
// listed later in this header; other template arguments are not supported.
template <typename value_t, typename idx_t>
inline void knn_merge_parts(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, row_major> in_keys,
  raft::device_matrix_view<const idx_t, idx_t, row_major> in_values,
  raft::device_matrix_view<value_t, idx_t, row_major> out_keys,
  raft::device_matrix_view<idx_t, idx_t, row_major> out_values,
  size_t n_samples,
  std::optional<raft::device_vector_view<idx_t, idx_t>> translations = std::nullopt) RAFT_EXPLICIT;
// Build a brute-force index from a dataset (host- or device-resident via Accessor).
template <typename T, typename Accessor>
index<T> build(raft::resources const& res,
               mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset,
               raft::distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
               T metric_arg = 0.0) RAFT_EXPLICIT;
// Search a previously built brute-force index.
template <typename T, typename IdxT>
void search(raft::resources const& res,
            const index<T>& idx,
            raft::device_matrix_view<const T, int64_t, row_major> queries,
            raft::device_matrix_view<IdxT, int64_t, row_major> neighbors,
            raft::device_matrix_view<T, int64_t, row_major> distances) RAFT_EXPLICIT;
// Index-free knn over one or more index matrices.
template <typename idx_t,
          typename value_t,
          typename matrix_idx,
          typename index_layout,
          typename search_layout,
          typename epilogue_op = raft::identity_op>
void knn(raft::resources const& handle,
         std::vector<raft::device_matrix_view<const value_t, matrix_idx, index_layout>> index,
         raft::device_matrix_view<const value_t, matrix_idx, search_layout> search,
         raft::device_matrix_view<idx_t, matrix_idx, row_major> indices,
         raft::device_matrix_view<value_t, matrix_idx, row_major> distances,
         distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
         std::optional<float> metric_arg = std::make_optional<float>(2.0f),
         std::optional<idx_t> global_id_offset = std::nullopt,
         epilogue_op distance_epilogue = raft::identity_op()) RAFT_EXPLICIT;
// Fused L2 distance + k-selection knn.
template <typename value_t, typename idx_t, typename idx_layout, typename query_layout>
void fused_l2_knn(raft::resources const& handle,
                  raft::device_matrix_view<const value_t, idx_t, idx_layout> index,
                  raft::device_matrix_view<const value_t, idx_t, query_layout> query,
                  raft::device_matrix_view<idx_t, idx_t, row_major> out_inds,
                  raft::device_matrix_view<value_t, idx_t, row_major> out_dists,
                  raft::distance::DistanceType metric) RAFT_EXPLICIT;
}  // namespace raft::neighbors::brute_force
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
// No extern template for raft::neighbors::brute_force::knn_merge_parts
#define instantiate_raft_neighbors_brute_force_knn( \
idx_t, value_t, matrix_idx, index_layout, search_layout, epilogue_op) \
extern template void raft::neighbors::brute_force:: \
knn<idx_t, value_t, matrix_idx, index_layout, search_layout, epilogue_op>( \
raft::resources const& handle, \
std::vector<raft::device_matrix_view<const value_t, matrix_idx, index_layout>> index, \
raft::device_matrix_view<const value_t, matrix_idx, search_layout> search, \
raft::device_matrix_view<idx_t, matrix_idx, row_major> indices, \
raft::device_matrix_view<value_t, matrix_idx, row_major> distances, \
raft::distance::DistanceType metric, \
std::optional<float> metric_arg, \
std::optional<idx_t> global_id_offset, \
epilogue_op distance_epilogue);
instantiate_raft_neighbors_brute_force_knn(
int64_t, float, uint32_t, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
int64_t, float, int64_t, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
int, float, int, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
uint32_t, float, uint32_t, raft::row_major, raft::row_major, raft::identity_op);
#undef instantiate_raft_neighbors_brute_force_knn
namespace raft::neighbors::brute_force {
extern template void search<float, int>(
raft::resources const& res,
const raft::neighbors::brute_force::index<float>& idx,
raft::device_matrix_view<const float, int64_t, row_major> queries,
raft::device_matrix_view<int, int64_t, row_major> neighbors,
raft::device_matrix_view<float, int64_t, row_major> distances);
extern template void search<float, int64_t>(
raft::resources const& res,
const raft::neighbors::brute_force::index<float>& idx,
raft::device_matrix_view<const float, int64_t, row_major> queries,
raft::device_matrix_view<int64_t, int64_t, row_major> neighbors,
raft::device_matrix_view<float, int64_t, row_major> distances);
extern template raft::neighbors::brute_force::index<float> build<float>(
raft::resources const& res,
raft::device_matrix_view<const float, int64_t, row_major> dataset,
raft::distance::DistanceType metric,
float metric_arg);
} // namespace raft::neighbors::brute_force
#define instantiate_raft_neighbors_brute_force_fused_l2_knn( \
value_t, idx_t, idx_layout, query_layout) \
extern template void raft::neighbors::brute_force::fused_l2_knn( \
raft::resources const& handle, \
raft::device_matrix_view<const value_t, idx_t, idx_layout> index, \
raft::device_matrix_view<const value_t, idx_t, query_layout> query, \
raft::device_matrix_view<idx_t, idx_t, row_major> out_inds, \
raft::device_matrix_view<value_t, idx_t, row_major> out_dists, \
raft::distance::DistanceType metric);
instantiate_raft_neighbors_brute_force_fused_l2_knn(float,
int64_t,
raft::row_major,
raft::row_major)
#undef instantiate_raft_neighbors_brute_force_fused_l2_knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/refine-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/host_mdspan.hpp> // raft::host_matrix_view
#include <raft/core/resources.hpp> // raft::resources
#include <raft/distance/distance_types.hpp> // raft::distance::DistanceType
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::neighbors {
// NOTE: declarations only (RAFT_EXPLICIT); definitions come from the explicit
// instantiations below when RAFT_EXPLICIT_INSTANTIATE_ONLY is defined.
// Overload for device-resident data (all views are device_matrix_view).
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::device_matrix_view<const data_t, matrix_idx, row_major> dataset,
            raft::device_matrix_view<const data_t, matrix_idx, row_major> queries,
            raft::device_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates,
            raft::device_matrix_view<idx_t, matrix_idx, row_major> indices,
            raft::device_matrix_view<distance_t, matrix_idx, row_major> distances,
            raft::distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
  RAFT_EXPLICIT;
// Overload for host-resident data (all views are host_matrix_view).
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::host_matrix_view<const data_t, matrix_idx, row_major> dataset,
            raft::host_matrix_view<const data_t, matrix_idx, row_major> queries,
            raft::host_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates,
            raft::host_matrix_view<idx_t, matrix_idx, row_major> indices,
            raft::host_matrix_view<distance_t, matrix_idx, row_major> distances,
            raft::distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
  RAFT_EXPLICIT;
}  // namespace raft::neighbors
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_neighbors_refine(idx_t, data_t, distance_t, matrix_idx) \
extern template void raft::neighbors::refine<idx_t, data_t, distance_t, matrix_idx>( \
raft::resources const& handle, \
raft::device_matrix_view<const data_t, matrix_idx, row_major> dataset, \
raft::device_matrix_view<const data_t, matrix_idx, row_major> queries, \
raft::device_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates, \
raft::device_matrix_view<idx_t, matrix_idx, row_major> indices, \
raft::device_matrix_view<distance_t, matrix_idx, row_major> distances, \
raft::distance::DistanceType metric); \
\
extern template void raft::neighbors::refine<idx_t, data_t, distance_t, matrix_idx>( \
raft::resources const& handle, \
raft::host_matrix_view<const data_t, matrix_idx, row_major> dataset, \
raft::host_matrix_view<const data_t, matrix_idx, row_major> queries, \
raft::host_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates, \
raft::host_matrix_view<idx_t, matrix_idx, row_major> indices, \
raft::host_matrix_view<distance_t, matrix_idx, row_major> distances, \
raft::distance::DistanceType metric);
instantiate_raft_neighbors_refine(int64_t, float, float, int64_t);
instantiate_raft_neighbors_refine(int64_t, int8_t, float, int64_t);
instantiate_raft_neighbors_refine(int64_t, uint8_t, float, int64_t);
#undef instantiate_raft_neighbors_refine
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat_serialize.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/ivf_flat_serialize.cuh"
namespace raft::neighbors::ivf_flat {
/**
* \defgroup ivf_flat_serialize IVF-Flat Serialize
* @{
*/
/**
* Write the index to an output stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an output stream
* std::ostream os(std::cout.rdbuf());
* // create an index with `auto index = ivf_flat::build(...);`
* raft::serialize(handle, os, index);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] os output stream
* @param[in] index IVF-Flat index
*
*/
template <typename T, typename IdxT>
void serialize(raft::resources const& handle, std::ostream& os, const index<T, IdxT>& index)
{
  // Thin public wrapper; the serialization format is implemented in detail::serialize.
  detail::serialize(handle, os, index);
}
/**
* Save the index to file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* // create an index with `auto index = ivf_flat::build(...);`
* raft::serialize(handle, filename, index);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] filename the file name for saving the index
* @param[in] index IVF-Flat index
*
*/
template <typename T, typename IdxT>
void serialize(raft::resources const& handle,
               const std::string& filename,
               const index<T, IdxT>& index)
{
  // Thin public wrapper; file handling and format live in detail::serialize.
  detail::serialize(handle, filename, index);
}
/**
* Load index from input stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an input stream
* std::istream is(std::cin.rdbuf());
* using T = float; // data element type
* using IdxT = int; // type of the index
* auto index = raft::deserialize<T, IdxT>(handle, is);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] is input stream
*
* @return raft::neighbors::ivf_flat::index<T, IdxT>
*/
template <typename T, typename IdxT>
index<T, IdxT> deserialize(raft::resources const& handle, std::istream& is)
{
  // Thin public wrapper; the parsing logic lives in detail::deserialize.
  return detail::deserialize<T, IdxT>(handle, is);
}
/**
* Load index from file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* using T = float; // data element type
* using IdxT = int; // type of the index
* auto index = raft::deserialize<T, IdxT>(handle, filename);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] filename the name of the file that stores the index
*
* @return raft::neighbors::ivf_flat::index<T, IdxT>
*/
template <typename T, typename IdxT>
index<T, IdxT> deserialize(raft::resources const& handle, const std::string& filename)
{
  // Thin public wrapper; file handling and parsing live in detail::deserialize.
  return detail::deserialize<T, IdxT>(handle, filename);
}
/**@}*/
} // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "ivf_flat-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "ivf_flat-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq_serialize.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/ivf_pq_serialize.cuh"
namespace raft::neighbors::ivf_pq {
/**
* \defgroup ivf_pq_serialize IVF-PQ Serialize
* @{
*/
/**
* Write the index to an output stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an output stream
* std::ostream os(std::cout.rdbuf());
* // create an index with `auto index = ivf_pq::build(...);`
* raft::serialize(handle, os, index);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] os output stream
* @param[in] index IVF-PQ index
*
*/
template <typename IdxT>
void serialize(raft::resources const& handle, std::ostream& os, const index<IdxT>& index)
{
  // Thin public wrapper; the serialization format is implemented in detail::serialize.
  detail::serialize(handle, os, index);
}
/**
* Save the index to file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* // create an index with `auto index = ivf_pq::build(...);`
* raft::serialize(handle, filename, index);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] filename the file name for saving the index
* @param[in] index IVF-PQ index
*
*/
template <typename IdxT>
void serialize(raft::resources const& handle, const std::string& filename, const index<IdxT>& index)
{
  // Thin public wrapper; file handling and format live in detail::serialize.
  detail::serialize(handle, filename, index);
}
/**
* Load index from input stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an input stream
* std::istream is(std::cin.rdbuf());
* using IdxT = int; // type of the index
* auto index = raft::deserialize<IdxT>(handle, is);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] is input stream
*
* @return raft::neighbors::ivf_pq::index<IdxT>
*/
template <typename IdxT>
index<IdxT> deserialize(raft::resources const& handle, std::istream& is)
{
  // Thin public wrapper; the parsing logic lives in detail::deserialize.
  return detail::deserialize<IdxT>(handle, is);
}
/**
* Load index from file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* using IdxT = int; // type of the index
* auto index = raft::deserialize<IdxT>(handle, filename);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] filename the name of the file that stores the index
*
* @return raft::neighbors::ivf_pq::index<IdxT>
*/
template <typename IdxT>
index<IdxT> deserialize(raft::resources const& handle, const std::string& filename)
{
  // Thin public wrapper; file handling and parsing live in detail::deserialize.
  return detail::deserialize<IdxT>(handle, filename);
}
/**@}*/
} // namespace raft::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EPSILON_NEIGH_H
#define __EPSILON_NEIGH_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/error.hpp>  // RAFT_EXPECTS
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/spatial/knn/detail/epsilon_neighborhood.cuh>
namespace raft::neighbors::epsilon_neighborhood {
/**
* @brief Computes epsilon neighborhood for the L2-Squared distance metric
*
* @tparam value_t IO and math type
* @tparam idx_t Index type
*
* @param[out] adj adjacency matrix [row-major] [on device] [dim = m x n]
* @param[out] vd vertex degree array [on device] [len = m + 1]
* `vd + m` stores the total number of edges in the adjacency
* matrix. Pass a nullptr if you don't need this info.
* @param[in] x first matrix [row-major] [on device] [dim = m x k]
* @param[in] y second matrix [row-major] [on device] [dim = n x k]
* @param[in] m number of rows in x
* @param[in] n number of rows in y
* @param[in] k number of columns in x and k
* @param[in] eps defines epsilon neighborhood radius (should be passed as
* squared as we compute L2-squared distance in this method)
* @param[in] stream cuda stream
*/
template <typename value_t, typename idx_t>
void epsUnexpL2SqNeighborhood(bool* adj,
                              idx_t* vd,
                              const value_t* x,
                              const value_t* y,
                              idx_t m,
                              idx_t n,
                              idx_t k,
                              value_t eps,
                              cudaStream_t stream)
{
  // Forwards directly to the kernel launcher in spatial::knn::detail. No extent
  // validation happens here, so the caller must guarantee the shapes documented above.
  spatial::knn::detail::epsUnexpL2SqNeighborhood<value_t, idx_t>(
    adj, vd, x, y, m, n, k, eps, stream);
}
/**
* @defgroup epsilon_neighbors Epislon Neighborhood Operations
* @{
*/
/**
* @brief Computes epsilon neighborhood for the L2-Squared distance metric and given ball size.
* The epsilon neighbors is represented by a dense boolean adjacency matrix of size m * n and
* an array of degrees for each vertex, which can be used as a compressed sparse row (CSR)
* indptr array.
*
* @code{.cpp}
* #include <raft/neighbors/epsilon_neighborhood.cuh>
* #include <raft/core/resources.hpp>
* #include <raft/core/device_mdarray.hpp>
* using namespace raft::neighbors;
* raft::raft::resources handle;
* ...
* auto adj = raft::make_device_matrix<bool>(handle, m * n);
* auto vd = raft::make_device_vector<int>(handle, m+1);
* epsilon_neighborhood::eps_neighbors_l2sq(handle, x, y, adj.view(), vd.view(), eps);
* @endcode
*
* @tparam value_t IO and math type
* @tparam idx_t Index type
* @tparam matrix_idx_t matrix indexing type
*
* @param[in] handle raft handle to manage library resources
* @param[in] x first matrix [row-major] [on device] [dim = m x k]
* @param[in] y second matrix [row-major] [on device] [dim = n x k]
* @param[out] adj adjacency matrix [row-major] [on device] [dim = m x n]
* @param[out] vd vertex degree array [on device] [len = m + 1]
* `vd + m` stores the total number of edges in the adjacency
* matrix. Pass a nullptr if you don't need this info.
* @param[in] eps defines epsilon neighborhood radius (should be passed as
* squared as we compute L2-squared distance in this method)
*/
template <typename value_t, typename idx_t, typename matrix_idx_t>
void eps_neighbors_l2sq(raft::resources const& handle,
                        raft::device_matrix_view<const value_t, matrix_idx_t, row_major> x,
                        raft::device_matrix_view<const value_t, matrix_idx_t, row_major> y,
                        raft::device_matrix_view<bool, matrix_idx_t, row_major> adj,
                        raft::device_vector_view<idx_t, matrix_idx_t> vd,
                        value_t eps)
{
  // Validate extents up front: mismatched shapes would lead to out-of-bounds
  // accesses inside the kernel launched by epsUnexpL2SqNeighborhood.
  // (vd is intentionally not checked: per the API docs a null vd is allowed.)
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "x and y must have the same number of columns");
  RAFT_EXPECTS(adj.extent(0) == x.extent(0) && adj.extent(1) == y.extent(0),
               "adj must have shape [x.extent(0), y.extent(0)]");
  // Delegate to the raw-pointer API, which performs the kernel launch on the
  // handle's CUDA stream.
  epsUnexpL2SqNeighborhood<value_t, idx_t>(adj.data_handle(),
                                           vd.data_handle(),
                                           x.data_handle(),
                                           y.data_handle(),
                                           x.extent(0),
                                           y.extent(0),
                                           x.extent(1),
                                           eps,
                                           resource::get_cuda_stream(handle));
}
/** @} */ // end group epsilon_neighbors
} // namespace raft::neighbors::epsilon_neighborhood
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/resources.hpp> // raft::resources
#include <raft/neighbors/ivf_pq_types.hpp> // raft::neighbors::ivf_pq::index
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/mr/device/per_device_resource.hpp> // rmm::mr::device_memory_resource
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY

namespace raft::neighbors::ivf_pq {

// NOTE: these are declaration-only stubs (RAFT_EXPLICIT). The definitions live in
// precompiled translation units and are surfaced through the `extern template`
// instantiations further below in this header.

// ---- mdspan-based API ----

template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& handle,
                  const index_params& params,
                  raft::device_matrix_view<const T, IdxT, row_major> dataset) RAFT_EXPLICIT;

template <typename T, typename IdxT>
index<IdxT> extend(raft::resources const& handle,
                   raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
                   std::optional<raft::device_vector_view<const IdxT, IdxT, row_major>> new_indices,
                   const index<IdxT>& idx) RAFT_EXPLICIT;

template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
            std::optional<raft::device_vector_view<const IdxT, IdxT, row_major>> new_indices,
            index<IdxT>* idx) RAFT_EXPLICIT;

template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<IdxT>& idx,
                           raft::device_matrix_view<const T, uint32_t, row_major> queries,
                           raft::device_matrix_view<IdxT, uint32_t, row_major> neighbors,
                           raft::device_matrix_view<float, uint32_t, row_major> distances,
                           IvfSampleFilterT sample_filter) RAFT_EXPLICIT;

template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<IdxT>& idx,
            raft::device_matrix_view<const T, uint32_t, row_major> queries,
            raft::device_matrix_view<IdxT, uint32_t, row_major> neighbors,
            raft::device_matrix_view<float, uint32_t, row_major> distances) RAFT_EXPLICIT;

// ---- raw-pointer API ----

template <typename T, typename IdxT = uint32_t>
auto build(raft::resources const& handle,
           const index_params& params,
           const T* dataset,
           IdxT n_rows,
           uint32_t dim) -> index<IdxT> RAFT_EXPLICIT;

template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            const index<IdxT>& idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) -> index<IdxT> RAFT_EXPLICIT;

template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            index<IdxT>* idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) RAFT_EXPLICIT;

template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const raft::neighbors::ivf_pq::search_params& params,
                           const index<IdxT>& idx,
                           const T* queries,
                           uint32_t n_queries,
                           uint32_t k,
                           IdxT* neighbors,
                           float* distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT{}) RAFT_EXPLICIT;

template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const raft::neighbors::ivf_pq::search_params& params,
            const index<IdxT>& idx,
            const T* queries,
            uint32_t n_queries,
            uint32_t k,
            IdxT* neighbors,
            float* distances) RAFT_EXPLICIT;

// ---- deprecated overloads taking an explicit workspace memory resource ----

template <typename T, typename IdxT, typename IvfSampleFilterT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search_with_filtering(raft::resources const& handle,
                      const raft::neighbors::ivf_pq::search_params& params,
                      const index<IdxT>& idx,
                      const T* queries,
                      uint32_t n_queries,
                      uint32_t k,
                      IdxT* neighbors,
                      float* distances,
                      rmm::mr::device_memory_resource* mr,
                      IvfSampleFilterT sample_filter = IvfSampleFilterT{}) RAFT_EXPLICIT;

template <typename T, typename IdxT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search(raft::resources const& handle,
       const raft::neighbors::ivf_pq::search_params& params,
       const index<IdxT>& idx,
       const T* queries,
       uint32_t n_queries,
       uint32_t k,
       IdxT* neighbors,
       float* distances,
       rmm::mr::device_memory_resource* mr) RAFT_EXPLICIT;

}  // namespace raft::neighbors::ivf_pq

#endif  // RAFT_EXPLICIT_INSTANTIATE_ONLY
// Declares (extern template) the `build` instantiations that are compiled into the
// library, covering both the mdspan and the raw-pointer overloads.
#define instantiate_raft_neighbors_ivf_pq_build(T, IdxT)                                         \
  extern template raft::neighbors::ivf_pq::index<IdxT> raft::neighbors::ivf_pq::build<T, IdxT>(  \
    raft::resources const& handle,                                                               \
    const raft::neighbors::ivf_pq::index_params& params,                                         \
    raft::device_matrix_view<const T, IdxT, row_major> dataset);                                 \
                                                                                                 \
  extern template auto raft::neighbors::ivf_pq::build(                                           \
    raft::resources const& handle,                                                               \
    const raft::neighbors::ivf_pq::index_params& params,                                         \
    const T* dataset,                                                                            \
    IdxT n_rows,                                                                                 \
    uint32_t dim)                                                                                \
    ->raft::neighbors::ivf_pq::index<IdxT>;

instantiate_raft_neighbors_ivf_pq_build(float, int64_t);
instantiate_raft_neighbors_ivf_pq_build(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_build(uint8_t, int64_t);

#undef instantiate_raft_neighbors_ivf_pq_build

// Declares (extern template) the `extend` instantiations: mdspan value/pointer-return
// overloads plus the raw-pointer value/pointer-return overloads.
#define instantiate_raft_neighbors_ivf_pq_extend(T, IdxT)                                        \
  extern template raft::neighbors::ivf_pq::index<IdxT> raft::neighbors::ivf_pq::extend<T, IdxT>( \
    raft::resources const& handle,                                                               \
    raft::device_matrix_view<const T, IdxT, row_major> new_vectors,                              \
    std::optional<raft::device_vector_view<const IdxT, IdxT, row_major>> new_indices,            \
    const raft::neighbors::ivf_pq::index<IdxT>& idx);                                            \
                                                                                                 \
  extern template void raft::neighbors::ivf_pq::extend<T, IdxT>(                                 \
    raft::resources const& handle,                                                               \
    raft::device_matrix_view<const T, IdxT, row_major> new_vectors,                              \
    std::optional<raft::device_vector_view<const IdxT, IdxT, row_major>> new_indices,            \
    raft::neighbors::ivf_pq::index<IdxT>* idx);                                                  \
                                                                                                 \
  extern template auto raft::neighbors::ivf_pq::extend<T, IdxT>(                                 \
    raft::resources const& handle,                                                               \
    const raft::neighbors::ivf_pq::index<IdxT>& idx,                                             \
    const T* new_vectors,                                                                        \
    const IdxT* new_indices,                                                                     \
    IdxT n_rows)                                                                                 \
    ->raft::neighbors::ivf_pq::index<IdxT>;                                                      \
                                                                                                 \
  extern template void raft::neighbors::ivf_pq::extend<T, IdxT>(                                 \
    raft::resources const& handle,                                                               \
    raft::neighbors::ivf_pq::index<IdxT>* idx,                                                   \
    const T* new_vectors,                                                                        \
    const IdxT* new_indices,                                                                     \
    IdxT n_rows);

instantiate_raft_neighbors_ivf_pq_extend(float, int64_t);
instantiate_raft_neighbors_ivf_pq_extend(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_extend(uint8_t, int64_t);

#undef instantiate_raft_neighbors_ivf_pq_extend

// Declares (extern template) the `search` instantiations: mdspan overload, the
// deprecated raw-pointer overload with an explicit memory resource, and the
// raw-pointer overload without one.
#define instantiate_raft_neighbors_ivf_pq_search(T, IdxT)        \
  extern template void raft::neighbors::ivf_pq::search<T, IdxT>( \
    raft::resources const& handle,                               \
    const raft::neighbors::ivf_pq::search_params& params,        \
    const raft::neighbors::ivf_pq::index<IdxT>& idx,             \
    raft::device_matrix_view<const T, uint32_t, row_major> queries,    \
    raft::device_matrix_view<IdxT, uint32_t, row_major> neighbors,     \
    raft::device_matrix_view<float, uint32_t, row_major> distances);   \
                                                                 \
  extern template void raft::neighbors::ivf_pq::search<T, IdxT>( \
    raft::resources const& handle,                               \
    const raft::neighbors::ivf_pq::search_params& params,        \
    const raft::neighbors::ivf_pq::index<IdxT>& idx,             \
    const T* queries,                                            \
    uint32_t n_queries,                                          \
    uint32_t k,                                                  \
    IdxT* neighbors,                                             \
    float* distances,                                            \
    rmm::mr::device_memory_resource* mr);                        \
                                                                 \
  extern template void raft::neighbors::ivf_pq::search<T, IdxT>( \
    raft::resources const& handle,                               \
    const raft::neighbors::ivf_pq::search_params& params,        \
    const raft::neighbors::ivf_pq::index<IdxT>& idx,             \
    const T* queries,                                            \
    uint32_t n_queries,                                          \
    uint32_t k,                                                  \
    IdxT* neighbors,                                             \
    float* distances)

instantiate_raft_neighbors_ivf_pq_search(float, int64_t);
instantiate_raft_neighbors_ivf_pq_search(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_search(uint8_t, int64_t);

#undef instantiate_raft_neighbors_ivf_pq_search
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ann_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/distance_types.hpp>
namespace raft::neighbors::ann {

/**
 * @defgroup ann_types Approximate Nearest Neighbors Types
 * @{
 */

/** The base for approximate KNN index structures. */
struct index {};

/** The base for KNN index parameters. */
struct index_params {
  /** Distance type. */
  raft::distance::DistanceType metric = distance::DistanceType::L2Expanded;
  /** The argument used by some distance metrics. */
  float metric_arg = 2.0f;
  /**
   * Whether to add the dataset content to the index, i.e.:
   *
   * - `true` means the index is filled with the dataset vectors and ready to search after calling
   * `build`.
   * - `false` means `build` only trains the underlying model (e.g. quantizer or clustering), but
   * the index is left empty; you'd need to call `extend` on the index afterwards to populate it.
   */
  bool add_data_on_build = true;
};

/** The base for KNN search parameters. */
struct search_params {};

/** @} */  // end group ann_types

}  // namespace raft::neighbors::ann
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "ivf_pq-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "ivf_pq-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/cagra_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/detail/cagra/utils.hpp>
#include <raft/util/integer_utils.hpp>
#include <memory>
#include <optional>
#include <string>
#include <thrust/fill.h>
#include <type_traits>
#include <raft/core/logger.hpp>
namespace raft::neighbors::cagra {
/**
* @addtogroup cagra
* @{
*/
/**
 * @brief ANN algorithm used by CAGRA to build the initial knn graph.
 */
enum class graph_build_algo {
  /** Use IVF-PQ to build an all-neighbors knn graph. */
  IVF_PQ,
  /** Experimental: use NN-Descent to build an all-neighbors knn graph. */
  NN_DESCENT
};
struct index_params : ann::index_params {
  /** Degree of the input (intermediate) graph before pruning. */
  size_t intermediate_graph_degree = 128;
  /** Degree of the output (pruned) graph. */
  size_t graph_degree = 64;
  /** ANN algorithm used to build the knn graph. */
  graph_build_algo build_algo = graph_build_algo::IVF_PQ;
  /** Number of iterations to run when building with NN_DESCENT. */
  size_t nn_descent_niter = 20;
};
enum class search_algo {
  /** For large batch sizes. */
  SINGLE_CTA,
  /** For small batch sizes. */
  MULTI_CTA,
  /** Multi-kernel implementation. */
  MULTI_KERNEL,
  /** Choose the implementation automatically. */
  AUTO
};

/** Hashmap type used during search; AUTO selects automatically (see search_params). */
enum class hash_mode { HASH, SMALL, AUTO };
struct search_params : ann::search_params {
  /** Maximum number of queries to search at the same time (batch size). Auto select when 0. */
  size_t max_queries = 0;

  /** Number of intermediate search results retained during the search.
   *
   * This is the main knob to adjust the trade off between accuracy and search speed.
   * Higher values improve the search accuracy.
   */
  size_t itopk_size = 64;

  /** Upper limit of search iterations. Auto select when 0. */
  size_t max_iterations = 0;

  // In the following we list additional search parameters for fine tuning.
  // Reasonable default values are automatically chosen.

  /** Which search implementation to use. */
  search_algo algo = search_algo::AUTO;

  /** Number of threads used to calculate a single distance. 4, 8, 16, or 32. */
  size_t team_size = 0;

  /** Number of graph nodes to select as the starting point for the search in each iteration
   * (a.k.a. the search width). */
  size_t search_width = 1;

  /** Lower limit of search iterations. */
  size_t min_iterations = 0;

  /** Thread block size. 0, 64, 128, 256, 512, 1024. Auto selection when 0. */
  size_t thread_block_size = 0;

  /** Hashmap type. Auto selection when AUTO. */
  hash_mode hashmap_mode = hash_mode::AUTO;

  /** Lower limit of hashmap bit length. More than 8. */
  size_t hashmap_min_bitlen = 0;

  /** Upper limit of hashmap fill rate. More than 0.1, less than 0.9. */
  float hashmap_max_fill_rate = 0.5;

  /** Number of iterations of initial random seed node selection. 1 or more. */
  uint32_t num_random_samplings = 1;

  /** Bit mask used for initial random seed node selection. */
  uint64_t rand_xor_mask = 0x128394;
};

// Both parameter structs are aggregates, so they can be brace-initialized field by field.
static_assert(std::is_aggregate_v<index_params>);
static_assert(std::is_aggregate_v<search_params>);
/**
* @brief CAGRA index.
*
* The index stores the dataset and a kNN graph in device memory.
*
* @tparam T data element type
* @tparam IdxT type of the vector indices (represent dataset.extent(0))
*
*/
template <typename T, typename IdxT>
struct index : ann::index {
  // The search kernels address graph nodes with uint32_t, so IdxT must cover that range.
  static_assert(!raft::is_narrowing_v<uint32_t, IdxT>,
                "IdxT must be able to represent all values of uint32_t");

 public:
  /** Distance metric used for clustering. */
  [[nodiscard]] constexpr inline auto metric() const noexcept -> raft::distance::DistanceType
  {
    return metric_;
  }

  /** Total length of the index (number of vectors). */
  [[nodiscard]] constexpr inline auto size() const noexcept -> IdxT
  {
    return dataset_view_.extent(0);
  }

  /** Dimensionality of the data. */
  [[nodiscard]] constexpr inline auto dim() const noexcept -> uint32_t
  {
    return dataset_view_.extent(1);
  }

  /** Graph degree (number of neighbors stored per node). */
  [[nodiscard]] constexpr inline auto graph_degree() const noexcept -> uint32_t
  {
    return graph_view_.extent(1);
  }

  /** Dataset [size, dim]; a strided view (the stride may exceed dim due to padding). */
  [[nodiscard]] inline auto dataset() const noexcept
    -> device_matrix_view<const T, int64_t, layout_stride>
  {
    return dataset_view_;
  }

  /** Neighborhood graph [size, graph-degree]. */
  [[nodiscard]] inline auto graph() const noexcept
    -> device_matrix_view<const IdxT, int64_t, row_major>
  {
    return graph_view_;
  }

  // Don't allow copying the index for performance reasons (try avoiding copying data)
  index(const index&)                    = delete;
  index(index&&)                         = default;
  auto operator=(const index&) -> index& = delete;
  auto operator=(index&&) -> index&      = default;
  ~index()                               = default;

  /** Construct an empty index. */
  index(raft::resources const& res,
        raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded)
    : ann::index(),
      metric_(metric),
      dataset_(make_device_matrix<T, int64_t>(res, 0, 0)),
      graph_(make_device_matrix<IdxT, int64_t>(res, 0, 0))
  {
  }

  /** Construct an index from dataset and knn_graph arrays
   *
   * If the dataset and graph is already in GPU memory, then the index is just a thin wrapper around
   * these that stores a non-owning a reference to the arrays.
   *
   * The constructor also accepts host arrays. In that case they are copied to the device, and the
   * device arrays will be owned by the index.
   *
   * In case the dataset's rows are not 16 bytes aligned, then we create a padded copy in device
   * memory to ensure alignment for vectorized load.
   *
   * Usage examples:
   *
   * - Cagra index is normally created by the cagra::build
   * @code{.cpp}
   *   using namespace raft::neighbors::experimental;
   *   auto dataset = raft::make_host_matrix<float, int64_t>(n_rows, n_cols);
   *   load_dataset(dataset.view());
   *   // use default index parameters
   *   cagra::index_params index_params;
   *   // create and fill the index from a [N, D] dataset
   *   auto index = cagra::build(res, index_params, dataset);
   *   // use default search parameters
   *   cagra::search_params search_params;
   *   // search K nearest neighbours
   *   auto neighbors = raft::make_device_matrix<uint32_t, int64_t>(res, n_queries, k);
   *   auto distances = raft::make_device_matrix<float, int64_t>(res, n_queries, k);
   *   cagra::search(res, search_params, index, queries, neighbors, distances);
   * @endcode
   *   In the above example, we have passed a host dataset to build. The returned index will own a
   *   device copy of the dataset and the knn_graph. In contrast, if we pass the dataset as a
   *   device_mdspan to build, then it will only store a reference to it.
   *
   * - Constructing index using existing knn-graph
   * @code{.cpp}
   *   using namespace raft::neighbors::experimental;
   *
   *   auto dataset = raft::make_device_matrix<float, int64_t>(res, n_rows, n_cols);
   *   auto knn_graph = raft::make_device_matrix<uint32_t, int64_t>(res, n_rows, graph_degree);
   *
   *   // custom loading and graph creation
   *   // load_dataset(dataset.view());
   *   // create_knn_graph(knn_graph.view());
   *
   *   // Wrap the existing device arrays into an index structure
   *   cagra::index<T, IdxT> index(res, metric, raft::make_const_mdspan(dataset.view()),
   *                               raft::make_const_mdspan(knn_graph.view()));
   *
   *   // Both knn_graph and dataset objects have to be in scope while the index is used because
   *   // the index only stores a reference to these.
   *   cagra::search(res, search_params, index, queries, neighbors, distances);
   * @endcode
   */
  template <typename data_accessor, typename graph_accessor>
  index(raft::resources const& res,
        raft::distance::DistanceType metric,
        mdspan<const T, matrix_extent<int64_t>, row_major, data_accessor> dataset,
        mdspan<const IdxT, matrix_extent<int64_t>, row_major, graph_accessor> knn_graph)
    : ann::index(),
      metric_(metric),
      dataset_(make_device_matrix<T, int64_t>(res, 0, 0)),
      graph_(make_device_matrix<IdxT, int64_t>(res, 0, 0))
  {
    RAFT_EXPECTS(dataset.extent(0) == knn_graph.extent(0),
                 "Dataset and knn_graph must have equal number of rows");
    update_dataset(res, dataset);
    update_graph(res, knn_graph);
    // Wait for any host-to-device copies issued above before returning.
    resource::sync_stream(res);
  }

  /**
   * Replace the dataset with a new dataset.
   *
   * If the new dataset rows are aligned on 16 bytes, then only a reference is stored to the
   * dataset. It is the caller's responsibility to ensure that dataset stays alive as long as the
   * index.
   */
  void update_dataset(raft::resources const& res,
                      raft::device_matrix_view<const T, int64_t, row_major> dataset)
  {
    if (dataset.extent(1) * sizeof(T) % 16 != 0) {
      // Rows not 16-byte aligned: make an owned, padded device copy instead of referencing.
      RAFT_LOG_DEBUG("Creating a padded copy of CAGRA dataset in device memory");
      copy_padded(res, dataset);
    } else {
      dataset_view_ = make_device_strided_matrix_view<const T, int64_t>(
        dataset.data_handle(), dataset.extent(0), dataset.extent(1), dataset.extent(1));
    }
  }

  /**
   * Replace the dataset with a new dataset.
   *
   * We create a copy of the dataset on the device. The index manages the lifetime of this copy.
   */
  void update_dataset(raft::resources const& res,
                      raft::host_matrix_view<const T, int64_t, row_major> dataset)
  {
    RAFT_LOG_DEBUG("Copying CAGRA dataset from host to device");
    copy_padded(res, dataset);
  }

  /**
   * Replace the graph with a new graph.
   *
   * Since the new graph is a device array, we store a reference to that, and it is
   * the caller's responsibility to ensure that knn_graph stays alive as long as the index.
   */
  void update_graph(raft::resources const& res,
                    raft::device_matrix_view<const IdxT, int64_t, row_major> knn_graph)
  {
    graph_view_ = knn_graph;
  }

  /**
   * Replace the graph with a new graph.
   *
   * We create a copy of the graph on the device. The index manages the lifetime of this copy.
   */
  void update_graph(raft::resources const& res,
                    raft::host_matrix_view<const IdxT, int64_t, row_major> knn_graph)
  {
    RAFT_LOG_DEBUG("Copying CAGRA knn graph from host to device");
    if ((graph_.extent(0) != knn_graph.extent(0)) || (graph_.extent(1) != knn_graph.extent(1))) {
      // clear existing memory before allocating to prevent OOM errors on large graphs
      if (graph_.size()) { graph_ = make_device_matrix<IdxT, int64_t>(res, 0, 0); }
      graph_ = make_device_matrix<IdxT, int64_t>(res, knn_graph.extent(0), knn_graph.extent(1));
    }
    // NOTE: asynchronous copy; callers that need completion must sync the stream
    // (the constructor above does so).
    raft::copy(graph_.data_handle(),
               knn_graph.data_handle(),
               knn_graph.size(),
               resource::get_cuda_stream(res));
    graph_view_ = graph_.view();
  }

 private:
  /** Create a device copy of the dataset, and pad it if necessary. */
  template <typename data_accessor>
  void copy_padded(raft::resources const& res,
                   mdspan<const T, matrix_extent<int64_t>, row_major, data_accessor> dataset)
  {
    detail::copy_with_padding(res, dataset_, dataset);
    // The view exposes the logical column count but strides over the padded rows.
    dataset_view_ = make_device_strided_matrix_view<const T, int64_t>(
      dataset_.data_handle(), dataset_.extent(0), dataset.extent(1), dataset_.extent(1));
    RAFT_LOG_DEBUG("CAGRA dataset strided matrix view %zux%zu, stride %zu",
                   static_cast<size_t>(dataset_view_.extent(0)),
                   static_cast<size_t>(dataset_view_.extent(1)),
                   static_cast<size_t>(dataset_view_.stride(0)));
  }

  raft::distance::DistanceType metric_;
  // Owned storage; left empty (0x0) when the index merely references caller-owned arrays.
  raft::device_matrix<T, int64_t, row_major> dataset_;
  raft::device_matrix<IdxT, int64_t, row_major> graph_;
  // Views actually used by the search kernels (may alias owned storage or external arrays).
  raft::device_matrix_view<const T, int64_t, layout_stride> dataset_view_;
  raft::device_matrix_view<const IdxT, int64_t, row_major> graph_view_;
};
/** @} */
} // namespace raft::neighbors::cagra
// TODO: Remove deprecated experimental namespace in 23.12 release
// Aliases kept so code written against the old `experimental::cagra` namespace keeps compiling.
namespace raft::neighbors::experimental::cagra {
using raft::neighbors::cagra::graph_build_algo;
using raft::neighbors::cagra::hash_mode;
using raft::neighbors::cagra::index;
using raft::neighbors::cagra::index_params;
using raft::neighbors::cagra::search_algo;
using raft::neighbors::cagra::search_params;
}  // namespace raft::neighbors::experimental::cagra
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/neighbors_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::neighbors {
/** A single batch of nearest neighbors in device memory: an [rows, cols] pair of
 * index and distance matrices allocated together. */
template <typename T, typename IdxT>
class batch {
 public:
  /** Create a new empty (uninitialized) batch of data with the given shape. */
  batch(raft::resources const& res, int64_t rows, int64_t cols)
    : indices_(make_device_matrix<IdxT, int64_t>(res, rows, cols)),
      distances_(make_device_matrix<T, int64_t>(res, rows, cols))
  {
  }

  // Reallocate both matrices to a new shape; previous contents are discarded.
  void resize(raft::resources const& res, int64_t rows, int64_t cols)
  {
    indices_   = make_device_matrix<IdxT, int64_t>(res, rows, cols);
    distances_ = make_device_matrix<T, int64_t>(res, rows, cols);
  }

  /** Returns the indices for the batch */
  device_matrix_view<const IdxT, int64_t> indices() const
  {
    return raft::make_const_mdspan(indices_.view());
  }
  device_matrix_view<IdxT, int64_t> indices() { return indices_.view(); }

  /** Returns the distances for the batch */
  device_matrix_view<const T, int64_t> distances() const
  {
    return raft::make_const_mdspan(distances_.view());
  }
  device_matrix_view<T, int64_t> distances() { return distances_.view(); }

  /** Returns the size of the batch.
   * NOTE(review): this is the column count (extent(1)) of the indices matrix,
   * i.e. the number of neighbors per row — confirm against callers. */
  int64_t batch_size() const { return indices().extent(1); }

 protected:
  raft::device_matrix<IdxT, int64_t> indices_;
  raft::device_matrix<T, int64_t> distances_;
};
} // namespace raft::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/brute_force_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/neighbors_types.hpp>
#include <raft/core/logger.hpp>
namespace raft::neighbors::brute_force {
/**
* @addtogroup brute_force_knn
* @{
*/
/**
* @brief Brute Force index.
*
* The index stores the dataset and norms for the dataset in device memory.
*
* @tparam T data element type
*/
template <typename T>
struct index : ann::index {
public:
/** Distance metric used for retrieval */
[[nodiscard]] constexpr inline raft::distance::DistanceType metric() const noexcept
{
  return metric_;
}

/** Total length of the index (number of vectors). */
[[nodiscard]] constexpr inline int64_t size() const noexcept { return dataset_view_.extent(0); }

/** Dimensionality of the data. */
[[nodiscard]] constexpr inline uint32_t dim() const noexcept { return dataset_view_.extent(1); }

/** Dataset [size, dim] */
[[nodiscard]] inline auto dataset() const noexcept
  -> device_matrix_view<const T, int64_t, row_major>
{
  return dataset_view_;
}

/** Dataset norms. Only valid when has_norms() is true; otherwise
 * std::optional::value() throws std::bad_optional_access. */
[[nodiscard]] inline auto norms() const -> device_vector_view<const T, int64_t, row_major>
{
  return norms_view_.value();
}

/** Whether or not this index has dataset norms */
[[nodiscard]] inline bool has_norms() const noexcept { return norms_view_.has_value(); }

/** The argument used by some distance metrics (stored at construction). */
[[nodiscard]] inline T metric_arg() const noexcept { return metric_arg_; }

// Don't allow copying the index for performance reasons (try avoiding copying data)
index(const index&)                    = delete;
index(index&&)                         = default;
auto operator=(const index&) -> index& = delete;
auto operator=(index&&) -> index&      = default;
~index()                               = default;
/** Construct a brute force index from dataset
*
* Constructs a brute force index from a dataset. This lets us precompute norms for
* the dataset, providing a speed benefit over doing this at query time.
* If the dataset is already in GPU memory, then this class stores a non-owning reference to
* the dataset. If the dataset is in host memory, it will be copied to the device and the
* index will own the device memory.
*/
template <typename data_accessor>
index(raft::resources const& res,
mdspan<const T, matrix_extent<int64_t>, row_major, data_accessor> dataset,
std::optional<raft::device_vector<T, int64_t>>&& norms,
raft::distance::DistanceType metric,
T metric_arg = 0.0)
: ann::index(),
metric_(metric),
dataset_(make_device_matrix<T, int64_t>(res, 0, 0)),
norms_(std::move(norms)),
metric_arg_(metric_arg)
{
if (norms_) { norms_view_ = make_const_mdspan(norms_.value().view()); }
update_dataset(res, dataset);
resource::sync_stream(res);
}
/** Construct a brute force index from dataset
*
* This class stores a non-owning reference to the dataset and norms here.
* Having precomputed norms gives us a performance advantage at query time.
*/
index(raft::resources const& res,
raft::device_matrix_view<const T, int64_t, row_major> dataset_view,
std::optional<raft::device_vector_view<const T, int64_t>> norms_view,
raft::distance::DistanceType metric,
T metric_arg = 0.0)
: ann::index(),
metric_(metric),
dataset_(make_device_matrix<T, int64_t>(res, 0, 0)),
dataset_view_(dataset_view),
norms_view_(norms_view),
metric_arg_(metric_arg)
{
}
private:
/**
* Replace the dataset with a new dataset.
*/
void update_dataset(raft::resources const& res,
raft::device_matrix_view<const T, int64_t, row_major> dataset)
{
dataset_view_ = dataset;
}
/**
* Replace the dataset with a new dataset.
*
* We create a copy of the dataset on the device. The index manages the lifetime of this copy.
*/
void update_dataset(raft::resources const& res,
raft::host_matrix_view<const T, int64_t, row_major> dataset)
{
dataset_ = make_device_matrix<T, int64_t>(dataset.extents(0), dataset.extents(1));
raft::copy(dataset_.data_handle(),
dataset.data_handle(),
dataset.size(),
resource::get_cuda_stream(res));
dataset_view_ = make_const_mdspan(dataset_.view());
}
raft::distance::DistanceType metric_;
raft::device_matrix<T, int64_t, row_major> dataset_;
std::optional<raft::device_vector<T, int64_t>> norms_;
std::optional<raft::device_vector_view<const T, int64_t>> norms_view_;
raft::device_matrix_view<const T, int64_t, row_major> dataset_view_;
T metric_arg_;
};
/**
* @brief Interface for performing queries over values of k
*
* This interface lets you iterate over batches of k from a brute_force::index.
* This lets you do things like retrieve the first 100 neighbors for a query,
* apply post processing to remove any unwanted items and then if needed get the
* next 100 closest neighbors for the query.
*
* This query interface exposes C++ iterators through the ::begin and ::end, and
* is compatible with range based for loops.
*
* Note that this class is an abstract class without any cuda dependencies, meaning
* that it doesn't require a cuda compiler to use - but also means it can't be directly
* instantiated. See the raft::neighbors::brute_force::make_batch_k_query
* function for usage examples.
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*/
template <typename T, typename IdxT = int64_t>
class batch_k_query {
 public:
  batch_k_query(const raft::resources& res,
                int64_t index_size,
                int64_t query_size,
                int64_t batch_size)
    : res(res), index_size(index_size), query_size(query_size), batch_size(batch_size)
  {
  }
  virtual ~batch_k_query() {}

  using value_type = raft::neighbors::batch<T, IdxT>;

  class iterator {
   public:
    using value_type = raft::neighbors::batch<T, IdxT>;
    using reference  = const value_type&;
    using pointer    = const value_type*;

    iterator(const batch_k_query<T, IdxT>* query, int64_t offset = 0)
      : current(query->res, 0, 0), batches(query->res, 0, 0), query(query), offset(offset)
    {
      // BUGFIX: the original text contained mojibake `¤t` (a corrupted HTML entity)
      // where `&current` was intended; restored throughout this class.
      query->load_batch(offset, query->batch_size, &batches);
      query->slice_batch(batches, offset, query->batch_size, &current);
    }

    reference operator*() const { return current; }
    pointer operator->() const { return &current; }

    iterator& operator++()
    {
      advance(query->batch_size);
      return *this;
    }

    iterator operator++(int)
    {
      iterator previous(*this);
      operator++();
      return previous;
    }

    /**
     * @brief Advance the iterator, using a custom size for the next batch
     *
     * Using operator++ means that we will load up the same batch_size for each
     * batch. This method allows us to get around this restriction, and load up
     * arbitrary batch sizes on each iteration.
     * See raft::neighbors::brute_force::make_batch_k_query for a usage example.
     *
     * @param[in] next_batch_size: size of the next batch to load up
     */
    void advance(int64_t next_batch_size)
    {
      // Clamp to index_size so the past-the-end iterator compares equal to end().
      offset = std::min(offset + current.batch_size(), query->index_size);
      // Only reload if the currently cached group doesn't cover the requested range.
      if (offset + next_batch_size > batches.batch_size()) {
        query->load_batch(offset, next_batch_size, &batches);
      }
      query->slice_batch(batches, offset, next_batch_size, &current);
    }

    // Iterators over the same query compare by offset only.
    friend bool operator==(const iterator& lhs, const iterator& rhs)
    {
      return (lhs.query == rhs.query) && (lhs.offset == rhs.offset);
    };
    friend bool operator!=(const iterator& lhs, const iterator& rhs) { return !(lhs == rhs); };

   protected:
    // the current batch of data
    value_type current;
    // the currently loaded group of data (containing multiple batches of data that we can iterate
    // through)
    value_type batches;
    const batch_k_query<T, IdxT>* query;
    int64_t offset, current_batch_size;
  };

  iterator begin() const { return iterator(this); }
  iterator end() const { return iterator(this, index_size); }

 protected:
  // these two methods need cuda code, and are implemented in the subclass
  virtual void load_batch(int64_t offset,
                          int64_t next_batch_size,
                          batch<T, IdxT>* output) const = 0;
  virtual void slice_batch(const value_type& input,
                           int64_t offset,
                           int64_t batch_size,
                           value_type* output) const    = 0;

  const raft::resources& res;
  int64_t index_size, query_size, batch_size;
};
/** @} */
} // namespace raft::neighbors::brute_force
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/refine.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "refine-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "refine-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_list_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resources.hpp>
#include <atomic>
#include <limits>
#include <type_traits>
namespace raft::neighbors::ivf {
/**
 * Default value filled in the `indices` array.
 * One may encounter it trying to access a record within a list that is outside of the
 * `size` bound or whenever the list is allocated but not filled-in yet.
 *
 * For signed `IdxT` this evaluates to `IdxT{-1}`; for unsigned types it evaluates to
 * `std::numeric_limits<IdxT>::max() - 1`.
 */
template <typename IdxT>
constexpr static IdxT kInvalidRecord =
  (std::is_signed_v<IdxT> ? IdxT{0} : std::numeric_limits<IdxT>::max()) - 1;
/**
 * The data for a single IVF list (one inverted-file cluster).
 *
 * @tparam SpecT template describing the data layout/encoding of the list
 * @tparam SizeT type used to count records in the list
 * @tparam SpecExtraArgs extra arguments forwarded to `SpecT`
 */
template <template <typename, typename...> typename SpecT,
          typename SizeT,
          typename... SpecExtraArgs>
struct list {
  using size_type = SizeT;
  using spec_type = SpecT<size_type, SpecExtraArgs...>;
  using value_type = typename spec_type::value_type;
  using index_type = typename spec_type::index_type;
  using list_extents = typename spec_type::list_extents;

  /** Possibly encoded data; its layout is defined by `SpecT`. */
  device_mdarray<value_type, list_extents, row_major> data;
  /** Source indices. */
  device_mdarray<index_type, extent_1d<size_type>, row_major> indices;
  /** The actual size of the content (may be smaller than the allocated capacity). */
  std::atomic<size_type> size;
  /** Allocate a new list capable of holding at least `n_rows` data records and indices.
   *  (Declaration only here; defined elsewhere.) */
  list(raft::resources const& res, const spec_type& spec, size_type n_rows);
};
// Primary template: intentionally empty, so `enable_if_valid_list_t<ListT, T>` is
// ill-formed (SFINAE failure) unless `ListT` is an instantiation of `ivf::list`.
template <typename ListT, class T = void>
struct enable_if_valid_list {};

// Specialization matching any `list<SpecT, SizeT, SpecExtraArgs...>`: exposes `type = T`.
template <class T,
          template <typename, typename...>
          typename SpecT,
          typename SizeT,
          typename... SpecExtraArgs>
struct enable_if_valid_list<list<SpecT, SizeT, SpecExtraArgs...>, T> {
  using type = T;
};

/**
 * Designed after `std::enable_if_t`, this trait is helpful in the instance resolution;
 * plug this in the return type of a function that has an instance of `ivf::list` as
 * a template parameter.
 */
template <typename ListT, class T = void>
using enable_if_valid_list_t = typename enable_if_valid_list<ListT, T>::type;
} // namespace raft::neighbors::ivf
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/nn_descent_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
namespace raft::neighbors::experimental::nn_descent {
/**
* @ingroup nn_descent
* @{
*/
/**
* @brief Parameters used to build an nn-descent index
*
* `graph_degree`: For an input dataset of dimensions (N, D),
* determines the final dimensions of the all-neighbors knn graph
* which turns out to be of dimensions (N, graph_degree)
* `intermediate_graph_degree`: Internally, nn-descent builds an
* all-neighbors knn graph of dimensions (N, intermediate_graph_degree)
* before selecting the final `graph_degree` neighbors. It's recommended
* that `intermediate_graph_degree` >= 1.5 * graph_degree
* `max_iterations`: The number of iterations that nn-descent will refine
* the graph for. More iterations produce a better quality graph at cost of performance
* `termination_threshold`: The delta at which nn-descent will terminate its iterations
*
*/
struct index_params : ann::index_params {
  size_t graph_degree = 64;                // Degree (number of columns) of the output knn graph.
  size_t intermediate_graph_degree = 128;  // Degree of the internal graph before pruning;
                                           // recommended to be >= 1.5 * graph_degree.
  size_t max_iterations = 20;              // Maximum number of nn-descent refinement iterations.
  float termination_threshold = 0.0001;    // Stop early once the update delta falls below this.
};
/**
* @brief nn-descent Build an nn-descent index
* The index contains an all-neighbors graph of the input dataset
* stored in host memory of dimensions (n_rows, n_cols)
*
* @tparam IdxT dtype to be used for constructing knn-graph
*/
template <typename IdxT>
struct index : ann::index {
 public:
  /**
   * @brief Construct a new index object
   *
   * This constructor creates an nn-descent index which is a knn-graph in host memory.
   * The type of the knn-graph is a dense raft::host_matrix and dimensions are
   * (n_rows, n_cols).
   *
   * @param res raft::resources is an object managing resources
   * @param n_rows number of rows in knn-graph
   * @param n_cols number of cols in knn-graph
   */
  index(raft::resources const& res, int64_t n_rows, int64_t n_cols)
    : ann::index(),
      res_{res},
      metric_{raft::distance::DistanceType::L2Expanded},
      graph_{raft::make_host_matrix<IdxT, int64_t, row_major>(n_rows, n_cols)},
      graph_view_{graph_.view()}
  {
  }

  /**
   * @brief Construct a new index object
   *
   * This constructor creates an nn-descent index using a user allocated host memory knn-graph.
   * The type of the knn-graph is a dense raft::host_matrix and dimensions are
   * (n_rows, n_cols).
   *
   * @param res raft::resources is an object managing resources
   * @param graph_view raft::host_matrix_view<IdxT, int64_t, raft::row_major> for storing knn-graph
   */
  index(raft::resources const& res,
        raft::host_matrix_view<IdxT, int64_t, raft::row_major> graph_view)
    : ann::index(),
      res_{res},
      metric_{raft::distance::DistanceType::L2Expanded},
      // graph_ stays empty (0, 0): the user-provided view owns the storage in this path.
      graph_{raft::make_host_matrix<IdxT, int64_t, row_major>(0, 0)},
      graph_view_{graph_view}
  {
  }

  /** Distance metric used for clustering. */
  [[nodiscard]] constexpr inline auto metric() const noexcept -> raft::distance::DistanceType
  {
    return metric_;
  }

  /** Total length of the index (number of vectors). */
  [[nodiscard]] constexpr inline auto size() const noexcept -> IdxT
  {
    return graph_view_.extent(0);
  }

  /** Graph degree (number of neighbors stored per vector). */
  [[nodiscard]] constexpr inline auto graph_degree() const noexcept -> uint32_t
  {
    return graph_view_.extent(1);
  }

  /** neighborhood graph [size, graph-degree] */
  [[nodiscard]] inline auto graph() noexcept -> host_matrix_view<IdxT, int64_t, row_major>
  {
    return graph_view_;
  }

  // Don't allow copying the index for performance reasons (try avoiding copying data)
  index(const index&) = delete;
  index(index&&) = default;
  auto operator=(const index&) -> index& = delete;
  // NOTE(review): `res_` is a reference member, so this defaulted move assignment is
  // implicitly deleted by the compiler; move construction still works. Confirm intended.
  auto operator=(index&&) -> index& = default;
  ~index() = default;

 private:
  raft::resources const& res_;
  raft::distance::DistanceType metric_;
  raft::host_matrix<IdxT, int64_t, row_major> graph_; // graph to return for non-int IdxT
  raft::host_matrix_view<IdxT, int64_t, row_major>
    graph_view_; // view of graph for user provided matrix
};
/** @} */
} // namespace raft::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ball_cover.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "ball_cover-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "ball_cover-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/detail/ivf_flat_build.cuh>
#include <raft/neighbors/detail/ivf_flat_search.cuh>
#include <raft/neighbors/ivf_flat_serialize.cuh>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/device_mdspan.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace raft::neighbors::ivf_flat {
/**
* @brief Build the index from the dataset for efficient search.
*
* NB: Currently, the following distance metrics are supported:
* - L2Expanded
* - L2Unexpanded
* - InnerProduct
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* ivf_flat::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = ivf_flat::build(handle, index_params, dataset, N, D);
* // use default search parameters
* ivf_flat::search_params search_params;
* // search K nearest neighbours for each of the N queries
* ivf_flat::search(handle, search_params, index, queries, N, K, out_inds, out_dists);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] params configure the index building
* @param[in] dataset a device pointer to a row-major matrix [n_rows, dim]
* @param[in] n_rows the number of samples
* @param[in] dim the dimensionality of the data
*
* @return the constructed ivf-flat index
*/
template <typename T, typename IdxT>
auto build(raft::resources const& handle,
           const index_params& params,
           const T* dataset,
           IdxT n_rows,
           uint32_t dim) -> index<T, IdxT>
{
  // Thin public wrapper: all the work happens in the detail implementation.
  // (We are inside namespace raft::neighbors::ivf_flat, so `detail::` resolves there.)
  return detail::build(handle, params, dataset, n_rows, dim);
}
/**
* @defgroup ivf_flat IVF Flat Algorithm
* @{
*/
/**
* @brief Build the index from the dataset for efficient search.
*
* NB: Currently, the following distance metrics are supported:
* - L2Expanded
* - L2Unexpanded
* - InnerProduct
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* ivf_flat::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = ivf_flat::build(handle, dataset, index_params);
* // use default search parameters
* ivf_flat::search_params search_params;
* // search K nearest neighbours for each of the N queries
* ivf_flat::search(handle, search_params, index, queries, out_inds, out_dists);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] params configure the index building
* @param[in] dataset a device pointer to a row-major matrix [n_rows, dim]
*
* @return the constructed ivf-flat index
*/
template <typename T, typename IdxT>
auto build(raft::resources const& handle,
           const index_params& params,
           raft::device_matrix_view<const T, IdxT, row_major> dataset) -> index<T, IdxT>
{
  // Unpack the matrix view into the raw-pointer form expected by the detail layer.
  const auto n_rows = static_cast<IdxT>(dataset.extent(0));
  const auto dim    = static_cast<IdxT>(dataset.extent(1));
  return detail::build(handle, params, dataset.data_handle(), n_rows, dim);
}
/**
* @brief Build the index from the dataset for efficient search.
*
* NB: Currently, the following distance metrics are supported:
* - L2Expanded
* - L2Unexpanded
* - InnerProduct
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* // use default index parameters
* ivf_flat::index_params index_params;
* // create and fill the index from a [N, D] dataset
* ivf_flat::index<decltype(dataset::value_type), decltype(dataset::index_type)> index;
* ivf_flat::build(handle, dataset, index_params, index);
* // use default search parameters
* ivf_flat::search_params search_params;
* // search K nearest neighbours for each of the N queries
* ivf_flat::search(handle, search_params, index, queries, out_inds, out_dists);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] params configure the index building
* @param[in] dataset raft::device_matrix_view to a row-major matrix [n_rows, dim]
* @param[out] idx reference to ivf_flat::index
*
*/
template <typename T, typename IdxT>
void build(raft::resources const& handle,
           const index_params& params,
           raft::device_matrix_view<const T, IdxT, row_major> dataset,
           raft::neighbors::ivf_flat::index<T, IdxT>& idx)
{
  // Out-parameter variant: build a fresh index and move-assign it into `idx`.
  const auto n_rows = static_cast<IdxT>(dataset.extent(0));
  const auto dim    = static_cast<IdxT>(dataset.extent(1));
  idx = detail::build(handle, params, dataset.data_handle(), n_rows, dim);
}
/** @} */
/**
* @brief Build a new index containing the data of the original plus new extra vectors.
*
* Implementation note:
* The new data is clustered according to existing kmeans clusters, then the cluster
* centers are adjusted to match the newly labeled data.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* ivf_flat::index_params index_params;
* index_params.add_data_on_build = false; // don't populate index on build
* index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
* // train the index from a [N, D] dataset
* auto index_empty = ivf_flat::build(handle, index_params, dataset, N, D);
* // fill the index with the data
* auto index = ivf_flat::extend(handle, index_empty, dataset, nullptr, N);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] orig_index original index
* @param[in] new_vectors a device pointer to a row-major matrix [n_rows, index.dim()]
* @param[in] new_indices a device pointer to a vector of indices [n_rows].
* If the original index is empty (`orig_index.size() == 0`), you can pass `nullptr`
* here to imply a continuous range `[0...n_rows)`.
* @param[in] n_rows number of rows in `new_vectors`
*
* @return the constructed extended ivf-flat index
*/
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            const index<T, IdxT>& orig_index,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) -> index<T, IdxT>
{
  // Non-mutating variant: returns a new index containing `orig_index` plus the new vectors.
  return detail::extend(handle, orig_index, new_vectors, new_indices, n_rows);
}
/**
* @ingroup ivf_flat
* @{
*/
/**
* @brief Build a new index containing the data of the original plus new extra vectors.
*
* Implementation note:
* The new data is clustered according to existing kmeans clusters, then the cluster
* centers are adjusted to match the newly labeled data.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* ivf_flat::index_params index_params;
* index_params.add_data_on_build = false; // don't populate index on build
* index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
* // train the index from a [N, D] dataset
* auto index_empty = ivf_flat::build(handle, dataset, index_params, dataset);
* // fill the index with the data
* std::optional<raft::device_vector_view<const IdxT, IdxT>> no_op = std::nullopt;
* auto index = ivf_flat::extend(handle, index_empty, no_op, dataset);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] new_vectors raft::device_matrix_view to a row-major matrix [n_rows, index.dim()]
* @param[in] new_indices optional raft::device_vector_view to a vector of indices [n_rows].
* If the original index is empty (`orig_index.size() == 0`), you can pass `std::nullopt`
* here to imply a continuous range `[0...n_rows)`.
* @param[in] orig_index original index
*
* @return the constructed extended ivf-flat index
*/
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
            std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
            const index<T, IdxT>& orig_index) -> index<T, IdxT>
{
  // Forward to the raw-pointer overload; a null indices pointer means
  // "assign the continuous range [0..n_rows)" when the original index is empty.
  return extend<T, IdxT>(handle,
                         orig_index,
                         new_vectors.data_handle(),
                         new_indices.has_value() ? new_indices.value().data_handle() : nullptr,
                         // Explicit cast for consistency with the in-place overload:
                         // extent(0) is int64_t, which may be narrower/wider than IdxT.
                         static_cast<IdxT>(new_vectors.extent(0)));
}
/** @} */
/**
* @brief Extend the index in-place with the new data.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* ivf_flat::index_params index_params;
* index_params.add_data_on_build = false; // don't populate index on build
* index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
* // train the index from a [N, D] dataset
* auto index_empty = ivf_flat::build(handle, index_params, dataset, N, D);
* // fill the index with the data
* ivf_flat::extend(handle, index_empty, dataset, nullptr, N);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param handle
* @param[inout] index
* @param[in] new_vectors a device pointer to a row-major matrix [n_rows, index.dim()]
* @param[in] new_indices a device pointer to a vector of indices [n_rows].
* If the original index is empty (`orig_index.size() == 0`), you can pass `nullptr`
* here to imply a continuous range `[0...n_rows)`.
* @param[in] n_rows the number of samples
*/
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            index<T, IdxT>* index,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows)
{
  // In-place variant: mutates `index` rather than returning a new one.
  detail::extend(handle, index, new_vectors, new_indices, n_rows);
}
/**
* @ingroup ivf_flat
* @{
*/
/**
* @brief Extend the index in-place with the new data.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* ivf_flat::index_params index_params;
* index_params.add_data_on_build = false; // don't populate index on build
* index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
* // train the index from a [N, D] dataset
* auto index_empty = ivf_flat::build(handle, index_params, dataset);
* // fill the index with the data
* std::optional<raft::device_vector_view<const IdxT, IdxT>> no_op = std::nullopt;
* ivf_flat::extend(handle, dataset, no_opt, &index_empty);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] new_vectors raft::device_matrix_view to a row-major matrix [n_rows, index.dim()]
* @param[in] new_indices optional raft::device_vector_view to a vector of indices [n_rows].
* If the original index is empty (`orig_index.size() == 0`), you can pass `std::nullopt`
* here to imply a continuous range `[0...n_rows)`.
* @param[inout] index pointer to index, to be overwritten in-place
*/
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
            std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
            index<T, IdxT>* index)
{
  // A missing optional maps to nullptr, which the pointer overload interprets as
  // "use the continuous range [0..n_rows)" when the index is empty.
  const IdxT* indices_ptr = new_indices ? new_indices->data_handle() : nullptr;
  extend(handle,
         index,
         new_vectors.data_handle(),
         indices_ptr,
         static_cast<IdxT>(new_vectors.extent(0)));
}
/** @} */
/**
* @brief Search ANN using the constructed index with the given filter.
*
* See the [ivf_flat::build](#ivf_flat::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // Create a pooling memory resource with a pre-defined initial size.
* rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> mr(
* rmm::mr::get_current_device_resource(), 1024 * 1024);
* // use default search parameters
* ivf_flat::search_params search_params;
* filtering::none_ivf_sample_filter filter;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, &mr, filter);
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, &mr, filter);
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, &mr, filter);
* ...
* @endcode
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam IvfSampleFilterT Device filter function, with the signature
* `(uint32_t query_ix, uint32 cluster_ix, uint32_t sample_ix) -> bool` or
* `(uint32_t query_ix, uint32 sample_ix) -> bool`
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] index ivf-flat constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[in] n_queries the batch size
* @param[in] k the number of neighbors to find for each query.
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
* @param[in] mr an optional memory resource to use across the searches (you can provide a large
* enough memory pool here to avoid memory allocations within search).
* @param[in] sample_filter a device filter function that greenlights samples for a given query
*/
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<T, IdxT>& index,
                           const T* queries,
                           uint32_t n_queries,
                           uint32_t k,
                           IdxT* neighbors,
                           float* distances,
                           rmm::mr::device_memory_resource* mr = nullptr,
                           IvfSampleFilterT sample_filter      = IvfSampleFilterT())
{
  // Forward everything, filter included, straight to the detail implementation.
  detail::search(
    handle, params, index, queries, n_queries, k, neighbors, distances, mr, sample_filter);
}
/**
* @brief Search ANN using the constructed index using the given filter.
*
* See the [ivf_flat::build](#ivf_flat::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // Create a pooling memory resource with a pre-defined initial size.
* rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> mr(
* rmm::mr::get_current_device_resource(), 1024 * 1024);
* // use default search parameters
* ivf_flat::search_params search_params;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_flat::search(handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, &mr);
* ivf_flat::search(handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, &mr);
* ivf_flat::search(handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, &mr);
* ...
* @endcode
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] index ivf-flat constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[in] n_queries the batch size
* @param[in] k the number of neighbors to find for each query.
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
* @param[in] mr an optional memory resource to use across the searches (you can provide a large
* enough memory pool here to avoid memory allocations within search).
*/
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<T, IdxT>& index,
            const T* queries,
            uint32_t n_queries,
            uint32_t k,
            IdxT* neighbors,
            float* distances,
            rmm::mr::device_memory_resource* mr = nullptr)
{
  // Unfiltered search is filtered search with the no-op filter, which
  // greenlights every sample; forward everything else unchanged.
  raft::neighbors::filtering::none_ivf_sample_filter accept_all;
  raft::neighbors::ivf_flat::detail::search(
    handle, params, index, queries, n_queries, k, neighbors, distances, mr, accept_all);
}
/**
* @ingroup ivf_flat
* @{
*/
/**
* @brief Search ANN using the constructed index with the given filter.
*
* See the [ivf_flat::build](#ivf_flat::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // use default search parameters
* ivf_flat::search_params search_params;
* filtering::none_ivf_sample_filter filter;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries1, out_inds1, out_dists1, filter);
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries2, out_inds2, out_dists2, filter);
* ivf_flat::search_with_filtering(
* handle, search_params, index, queries3, out_inds3, out_dists3, filter);
* ...
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam IvfSampleFilterT Device filter function, with the signature
 * `(uint32_t query_ix, uint32_t cluster_ix, uint32_t sample_ix) -> bool` or
 * `(uint32_t query_ix, uint32_t sample_ix) -> bool`
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] index ivf-flat constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
* @param[in] sample_filter a device filter function that greenlights samples for a given query
*/
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<T, IdxT>& index,
                           raft::device_matrix_view<const T, IdxT, row_major> queries,
                           raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,
                           raft::device_matrix_view<float, IdxT, row_major> distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT())
{
  // Validate that the query batch and the two output matrices agree in shape.
  RAFT_EXPECTS(
    queries.extent(0) == neighbors.extent(0) && queries.extent(0) == distances.extent(0),
    "Number of rows in output neighbors and distances matrices must equal the number of queries.");
  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1),
               "Number of columns in output neighbors and distances matrices must be equal");
  RAFT_EXPECTS(queries.extent(1) == index.dim(),
               "Number of query dimensions should equal number of dimensions in the index.");

  // The batch size and k are implied by the extents of the output views.
  auto n_queries = static_cast<std::uint32_t>(queries.extent(0));
  auto k         = static_cast<std::uint32_t>(neighbors.extent(1));

  // Delegate to the raw-pointer overload, using the handle's workspace
  // resource for temporary allocations inside the search.
  search_with_filtering(handle,
                        params,
                        index,
                        queries.data_handle(),
                        n_queries,
                        k,
                        neighbors.data_handle(),
                        distances.data_handle(),
                        resource::get_workspace_resource(handle),
                        sample_filter);
}
/**
* @brief Search ANN using the constructed index.
*
* See the [ivf_flat::build](#ivf_flat::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // use default search parameters
* ivf_flat::search_params search_params;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_flat::search(handle, search_params, index, queries1, out_inds1, out_dists1);
* ivf_flat::search(handle, search_params, index, queries2, out_inds2, out_dists2);
* ivf_flat::search(handle, search_params, index, queries3, out_inds3, out_dists3);
* ...
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] index ivf-flat constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
*/
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<T, IdxT>& index,
            raft::device_matrix_view<const T, IdxT, row_major> queries,
            raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,
            raft::device_matrix_view<float, IdxT, row_major> distances)
{
  // Plain search is just filtered search with the no-op filter that
  // accepts every sample.
  raft::neighbors::filtering::none_ivf_sample_filter accept_all;
  search_with_filtering(handle, params, index, queries, neighbors, distances, accept_all);
}
/** @} */
} // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ball_cover_types.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::neighbors::ball_cover {
/**
* @ingroup random_ball_cover
* @{
*/
/**
* Stores raw index data points, sampled landmarks, the 1-nns of index points
* to their closest landmarks, and the ball radii of each landmark. This
* class is intended to be constructed once and reused across subsequent
* queries.
* @tparam value_idx
* @tparam value_t
* @tparam value_int
*/
template <typename value_idx,
          typename value_t,
          typename value_int  = std::uint32_t,
          typename matrix_idx = std::uint32_t>
class BallCoverIndex {
 public:
  /**
   * Construct an index over a raw row-major device array of shape [m_, n_].
   *
   * NB: the index stores a reference to `handle_` and a non-owning view over
   * `X_`; both must outlive this object.
   *
   * The member-initializer list below is written in member declaration order
   * (handle, m, n, n_landmarks, X, metric, then the device containers), which
   * is the order C++ actually initializes them in.
   */
  explicit BallCoverIndex(raft::resources const& handle_,
                          const value_t* X_,
                          value_int m_,
                          value_int n_,
                          raft::distance::DistanceType metric_)
    : handle(handle_),
      m(m_),
      n(n_),
      /**
       * the sqrt() here makes the sqrt(m)^2 a linear-time lower bound
       *
       * Total memory footprint of index: (2 * sqrt(m)) + (n * sqrt(m)) + (2 * m)
       *
       * NB: sqrt() returns double; the result truncates toward zero when
       * converted to value_int.
       */
      n_landmarks(sqrt(m_)),
      X(raft::make_device_matrix_view<const value_t, matrix_idx>(X_, m_, n_)),
      metric(metric_),
      R_indptr(raft::make_device_vector<value_idx, matrix_idx>(handle, sqrt(m_) + 1)),
      R_1nn_cols(raft::make_device_vector<value_idx, matrix_idx>(handle, m_)),
      R_1nn_dists(raft::make_device_vector<value_t, matrix_idx>(handle, m_)),
      R_closest_landmark_dists(raft::make_device_vector<value_t, matrix_idx>(handle, m_)),
      R_radius(raft::make_device_vector<value_t, matrix_idx>(handle, sqrt(m_))),
      R(raft::make_device_matrix<value_t, matrix_idx>(handle, sqrt(m_), n_)),
      index_trained(false)
  {
  }

  /**
   * Construct an index over an existing device matrix view [m, n].
   * Same ownership caveats as above: `handle_` and the memory behind `X_`
   * must outlive this object.
   */
  explicit BallCoverIndex(raft::resources const& handle_,
                          raft::device_matrix_view<const value_t, matrix_idx, row_major> X_,
                          raft::distance::DistanceType metric_)
    : handle(handle_),
      m(X_.extent(0)),
      n(X_.extent(1)),
      /**
       * the sqrt() here makes the sqrt(m)^2 a linear-time lower bound
       *
       * Total memory footprint of index: (2 * sqrt(m)) + (n * sqrt(m)) + (2 * m)
       */
      n_landmarks(sqrt(X_.extent(0))),
      X(X_),
      metric(metric_),
      R_indptr(raft::make_device_vector<value_idx, matrix_idx>(handle, sqrt(X_.extent(0)) + 1)),
      R_1nn_cols(raft::make_device_vector<value_idx, matrix_idx>(handle, X_.extent(0))),
      R_1nn_dists(raft::make_device_vector<value_t, matrix_idx>(handle, X_.extent(0))),
      R_closest_landmark_dists(raft::make_device_vector<value_t, matrix_idx>(handle, X_.extent(0))),
      R_radius(raft::make_device_vector<value_t, matrix_idx>(handle, sqrt(X_.extent(0)))),
      R(raft::make_device_matrix<value_t, matrix_idx>(handle, sqrt(X_.extent(0)), X_.extent(1))),
      index_trained(false)
  {
  }

  // Read-only views over the trained index state.
  auto get_R_indptr() const -> raft::device_vector_view<const value_idx, matrix_idx>
  {
    return R_indptr.view();
  }
  auto get_R_1nn_cols() const -> raft::device_vector_view<const value_idx, matrix_idx>
  {
    return R_1nn_cols.view();
  }
  auto get_R_1nn_dists() const -> raft::device_vector_view<const value_t, matrix_idx>
  {
    return R_1nn_dists.view();
  }
  auto get_R_radius() const -> raft::device_vector_view<const value_t, matrix_idx>
  {
    return R_radius.view();
  }
  auto get_R() const -> raft::device_matrix_view<const value_t, matrix_idx, row_major>
  {
    return R.view();
  }
  auto get_R_closest_landmark_dists() const -> raft::device_vector_view<const value_t, matrix_idx>
  {
    return R_closest_landmark_dists.view();
  }

  // Mutable views, intended for the internal build routines that fill the index.
  raft::device_vector_view<value_idx, matrix_idx> get_R_indptr() { return R_indptr.view(); }
  raft::device_vector_view<value_idx, matrix_idx> get_R_1nn_cols() { return R_1nn_cols.view(); }
  raft::device_vector_view<value_t, matrix_idx> get_R_1nn_dists() { return R_1nn_dists.view(); }
  raft::device_vector_view<value_t, matrix_idx> get_R_radius() { return R_radius.view(); }
  raft::device_matrix_view<value_t, matrix_idx, row_major> get_R() { return R.view(); }
  raft::device_vector_view<value_t, matrix_idx> get_R_closest_landmark_dists()
  {
    return R_closest_landmark_dists.view();
  }

  raft::device_matrix_view<const value_t, matrix_idx, row_major> get_X() const { return X; }

  raft::distance::DistanceType get_metric() const { return metric; }

  value_int get_n_landmarks() const { return n_landmarks; }
  bool is_index_trained() const { return index_trained; };

  // This should only be set by internal functions
  void set_index_trained() { index_trained = true; }

  raft::resources const& handle;  // not owned; must outlive the index
  value_int m;                    // number of index points (rows of X)
  value_int n;                    // dimensionality (columns of X)
  value_int n_landmarks;          // number of sampled landmarks (~sqrt(m))
  raft::device_matrix_view<const value_t, matrix_idx, row_major> X;  // non-owning view
  raft::distance::DistanceType metric;

 private:
  // CSR storing the neighborhoods for each data point
  raft::device_vector<value_idx, matrix_idx> R_indptr;
  raft::device_vector<value_idx, matrix_idx> R_1nn_cols;
  raft::device_vector<value_t, matrix_idx> R_1nn_dists;
  raft::device_vector<value_t, matrix_idx> R_closest_landmark_dists;
  raft::device_vector<value_t, matrix_idx> R_radius;
  raft::device_matrix<value_t, matrix_idx, row_major> R;

 protected:
  bool index_trained;
};
/** @} */
} // namespace raft::neighbors::ball_cover
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_pq_helpers.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/neighbors/detail/ivf_pq_build.cuh>
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/spatial/knn/detail/ann_utils.cuh>
namespace raft::neighbors::ivf_pq::helpers {
using namespace raft::spatial::knn::detail; // NOLINT
/**
 * @defgroup ivf_pq_helpers Helper functions for manipulating IVF PQ Index
* @{
*/
namespace codepacker {
/**
* @brief Unpack `n_take` consecutive records of a single list (cluster) in the compressed index
* starting at given `offset`.
*
* Bit compression is removed, which means output will have pq_dim dimensional vectors (one code per
* byte, instead of ceildiv(pq_dim * pq_bits, 8) bytes of pq codes).
*
* Usage example:
* @code{.cpp}
* auto list_data = index.lists()[label]->data.view();
* // allocate the buffer for the output
* uint32_t n_take = 4;
* auto codes = raft::make_device_matrix<uint8_t>(res, n_take, index.pq_dim());
* uint32_t offset = 0;
* // unpack n_take elements from the list
* ivf_pq::helpers::codepacker::unpack(res, list_data, index.pq_bits(), offset, codes.view());
* @endcode
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resource
* @param[in] list_data block to read from
* @param[in] pq_bits bit length of encoded vector elements
* @param[in] offset
* How many records in the list to skip.
* @param[out] codes
* the destination buffer [n_take, index.pq_dim()].
* The length `n_take` defines how many records to unpack,
* it must be smaller than the list size.
*/
inline void unpack(
  raft::resources const& res,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  uint32_t pq_bits,
  uint32_t offset,
  device_matrix_view<uint8_t, uint32_t, row_major> codes)
{
  // The unpacking kernel is enqueued on the resource's main CUDA stream.
  auto stream = resource::get_cuda_stream(res);
  ivf_pq::detail::unpack_list_data(codes, list_data, offset, pq_bits, stream);
}
/**
* @brief Unpack `n_rows` consecutive records of a single list (cluster) in the compressed index
* starting at given `offset`. The output codes of a single vector are contiguous, not expanded to
* one code per byte, which means the output has ceildiv(pq_dim * pq_bits, 8) bytes per PQ encoded
* vector.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* auto list_data = index.lists()[label]->data.view();
* // allocate the buffer for the output
* uint32_t n_rows = 4;
* auto codes = raft::make_device_matrix<uint8_t>(
* res, n_rows, raft::ceildiv(index.pq_dim() * index.pq_bits(), 8));
* uint32_t offset = 0;
* // unpack n_rows elements from the list
* ivf_pq::helpers::codepacker::unpack_contiguous(
* res, list_data, index.pq_bits(), offset, n_rows, index.pq_dim(), codes.data_handle());
* @endcode
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resource
* @param[in] list_data block to read from
* @param[in] pq_bits bit length of encoded vector elements
* @param[in] offset
* How many records in the list to skip.
* @param[in] n_rows How many records to unpack
* @param[in] pq_dim The dimensionality of the PQ compressed records
* @param[out] codes
* the destination buffer [n_rows, ceildiv(pq_dim * pq_bits, 8)].
* The length `n_rows` defines how many records to unpack,
* it must be smaller than the list size.
*/
inline void unpack_contiguous(
  raft::resources const& res,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  uint32_t pq_bits,
  uint32_t offset,
  uint32_t n_rows,
  uint32_t pq_dim,
  uint8_t* codes)
{
  // The unpacking kernel is enqueued on the resource's main CUDA stream.
  auto stream = resource::get_cuda_stream(res);
  ivf_pq::detail::unpack_contiguous_list_data(
    codes, list_data, n_rows, pq_dim, offset, pq_bits, stream);
}
/**
* Write flat PQ codes into an existing list by the given offset.
*
* NB: no memory allocation happens here; the list must fit the data (offset + n_vec).
*
* Usage example:
* @code{.cpp}
* auto list_data = index.lists()[label]->data.view();
* // allocate the buffer for the input codes
* auto codes = raft::make_device_matrix<uint8_t>(res, n_vec, index.pq_dim());
* ... prepare n_vecs to pack into the list in codes ...
* // write codes into the list starting from the 42nd position
* ivf_pq::helpers::codepacker::pack(
* res, make_const_mdspan(codes.view()), index.pq_bits(), 42, list_data);
* @endcode
*
* @param[in] res raft resource
* @param[in] codes flat PQ codes, one code per byte [n_vec, pq_dim]
* @param[in] pq_bits bit length of encoded vector elements
* @param[in] offset how many records to skip before writing the data into the list
* @param[in] list_data block to write into
*/
inline void pack(
  raft::resources const& res,
  device_matrix_view<const uint8_t, uint32_t, row_major> codes,
  uint32_t pq_bits,
  uint32_t offset,
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data)
{
  // The packing kernel is enqueued on the resource's main CUDA stream.
  auto stream = resource::get_cuda_stream(res);
  ivf_pq::detail::pack_list_data(list_data, codes, offset, pq_bits, stream);
}
/**
* Write flat PQ codes into an existing list by the given offset. The input codes of a single vector
* are contiguous (not expanded to one code per byte).
*
* NB: no memory allocation happens here; the list must fit the data (offset + n_rows records).
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* auto list_data = index.lists()[label]->data.view();
* // allocate the buffer for the input codes
* auto codes = raft::make_device_matrix<uint8_t>(
* res, n_rows, raft::ceildiv(index.pq_dim() * index.pq_bits(), 8));
* ... prepare compressed vectors to pack into the list in codes ...
* // write codes into the list starting from the 42nd position. If the current size of the list
* // is greater than 42, this will overwrite the codes starting at this offset.
* ivf_pq::helpers::codepacker::pack_contiguous(
* res, codes.data_handle(), n_rows, index.pq_dim(), index.pq_bits(), 42, list_data);
* @endcode
*
* @param[in] res raft resource
* @param[in] codes flat PQ codes, [n_vec, ceildiv(pq_dim * pq_bits, 8)]
* @param[in] n_rows number of records
* @param[in] pq_dim
* @param[in] pq_bits bit length of encoded vector elements
* @param[in] offset how many records to skip before writing the data into the list
* @param[in] list_data block to write into
*/
inline void pack_contiguous(
  raft::resources const& res,
  const uint8_t* codes,
  uint32_t n_rows,
  uint32_t pq_dim,
  uint32_t pq_bits,
  uint32_t offset,
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data)
{
  // The packing kernel is enqueued on the resource's main CUDA stream.
  auto stream = resource::get_cuda_stream(res);
  ivf_pq::detail::pack_contiguous_list_data(
    list_data, codes, n_rows, pq_dim, offset, pq_bits, stream);
}
} // namespace codepacker
/**
* Write flat PQ codes into an existing list by the given offset.
*
* The list is identified by its label.
*
* NB: no memory allocation happens here; the list must fit the data (offset + n_vec).
*
* Usage example:
* @code{.cpp}
* // We will write into the 137th cluster
* uint32_t label = 137;
* // allocate the buffer for the input codes
* auto codes = raft::make_device_matrix<const uint8_t>(res, n_vec, index.pq_dim());
* ... prepare n_vecs to pack into the list in codes ...
* // write codes into the list starting from the 42nd position
* ivf_pq::helpers::pack_list_data(res, &index, codes_to_pack, label, 42);
* @endcode
*
* @param[in] res raft resource
* @param[inout] index IVF-PQ index.
* @param[in] codes flat PQ codes, one code per byte [n_rows, pq_dim]
* @param[in] label The id of the list (cluster) into which we write.
* @param[in] offset how many records to skip before writing the data into the list
*/
template <typename IdxT>
void pack_list_data(raft::resources const& res,
                    index<IdxT>* index,
                    device_matrix_view<const uint8_t, uint32_t, row_major> codes,
                    uint32_t label,
                    uint32_t offset)
{
  // Thin public forwarder: the detail layer locates list `label` in the
  // index and writes the byte-per-code input starting at `offset`.
  ivf_pq::detail::pack_list_data(res, index, codes, label, offset);
}
/**
* Write flat PQ codes into an existing list by the given offset. Use this when the input
* vectors are PQ encoded and not expanded to one code per byte.
*
* The list is identified by its label.
*
* NB: no memory allocation happens here; the list into which the vectors are packed must fit offset
* + n_rows rows.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* raft::resources res;
* // use default index parameters
* ivf_pq::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = ivf_pq::build(res, index_params, dataset, N, D);
* // allocate the buffer for n_rows input codes. Each vector occupies
* // raft::ceildiv(index.pq_dim() * index.pq_bits(), 8) bytes because
* // codes are compressed and without gaps.
* auto codes = raft::make_device_matrix<const uint8_t>(
* res, n_rows, raft::ceildiv(index.pq_dim() * index.pq_bits(), 8));
* ... prepare the compressed vectors to pack into the list in codes ...
* // the first n_rows codes in the fourth IVF list are to be overwritten.
* uint32_t label = 3;
* // write codes into the list starting from the 0th position
* ivf_pq::helpers::pack_contiguous_list_data(
* res, &index, codes.data_handle(), n_rows, label, 0);
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[inout] index pointer to IVF-PQ index
* @param[in] codes flat contiguous PQ codes [n_rows, ceildiv(pq_dim * pq_bits, 8)]
* @param[in] n_rows how many records to pack
* @param[in] label The id of the list (cluster) into which we write.
* @param[in] offset how many records to skip before writing the data into the list
*/
template <typename IdxT>
void pack_contiguous_list_data(raft::resources const& res,
                               index<IdxT>* index,
                               uint8_t* codes,
                               uint32_t n_rows,
                               uint32_t label,
                               uint32_t offset)
{
  // NOTE(review): `codes` is documented as an input ([in]) yet passed as a
  // non-const pointer; presumably it is only read — confirm against the
  // detail API before tightening to `const uint8_t*`.
  // Thin public forwarder: writes n_rows bit-packed records into list
  // `label` starting at `offset`.
  ivf_pq::detail::pack_contiguous_list_data(res, index, codes, n_rows, label, offset);
}
/**
* @brief Unpack `n_take` consecutive records of a single list (cluster) in the compressed index
* starting at given `offset`, one code per byte (independently of pq_bits).
*
* Usage example:
* @code{.cpp}
* // We will unpack the fourth cluster
* uint32_t label = 3;
* // Get the list size
* uint32_t list_size = 0;
* raft::copy(&list_size, index.list_sizes().data_handle() + label, 1,
* resource::get_cuda_stream(res)); resource::sync_stream(res);
* // allocate the buffer for the output
 * auto codes = raft::make_device_matrix<uint8_t>(res, list_size, index.pq_dim());
* // unpack the whole list
* ivf_pq::helpers::unpack_list_data(res, index, codes.view(), label, 0);
* @endcode
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res
* @param[in] index
* @param[out] out_codes
* the destination buffer [n_take, index.pq_dim()].
* The length `n_take` defines how many records to unpack,
* it must be smaller than the list size.
* @param[in] label
* The id of the list (cluster) to decode.
* @param[in] offset
* How many records in the list to skip.
*/
template <typename IdxT>
void unpack_list_data(raft::resources const& res,
                      const index<IdxT>& index,
                      device_matrix_view<uint8_t, uint32_t, row_major> out_codes,
                      uint32_t label,
                      uint32_t offset)
{
  // Offset-based overload: unpack out_codes.extent(0) consecutive records of
  // list `label`, starting at `offset`, one code per output byte.
  ivf_pq::detail::unpack_list_data<IdxT>(res, index, out_codes, label, offset);
}
/**
* @brief Unpack a series of records of a single list (cluster) in the compressed index
* by their in-list offsets, one code per byte (independently of pq_bits).
*
* Usage example:
* @code{.cpp}
* // We will unpack the fourth cluster
* uint32_t label = 3;
* // Create the selection vector
* auto selected_indices = raft::make_device_vector<uint32_t>(res, 4);
* ... fill the indices ...
* resource::sync_stream(res);
* // allocate the buffer for the output
 * auto codes = raft::make_device_matrix<uint8_t>(res, selected_indices.size(), index.pq_dim());
* // decode the whole list
* ivf_pq::helpers::unpack_list_data(
* res, index, selected_indices.view(), codes.view(), label);
* @endcode
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resource
* @param[in] index IVF-PQ index (passed by reference)
* @param[in] in_cluster_indices
* The offsets of the selected indices within the cluster.
* @param[out] out_codes
* the destination buffer [n_take, index.pq_dim()].
* The length `n_take` defines how many records to unpack,
* it must be smaller than the list size.
* @param[in] label
* The id of the list (cluster) to decode.
*/
template <typename IdxT>
void unpack_list_data(raft::resources const& res,
                      const index<IdxT>& index,
                      device_vector_view<const uint32_t> in_cluster_indices,
                      device_matrix_view<uint8_t, uint32_t, row_major> out_codes,
                      uint32_t label)
{
  // Gather-based overload: unpack the records of list `label` selected by
  // their in-list offsets in `in_cluster_indices`.
  ivf_pq::detail::unpack_list_data<IdxT>(res, index, out_codes, label, in_cluster_indices);
}
/**
* @brief Unpack `n_rows` consecutive PQ encoded vectors of a single list (cluster) in the
* compressed index starting at given `offset`, not expanded to one code per byte. Each code in the
* output buffer occupies ceildiv(index.pq_dim() * index.pq_bits(), 8) bytes.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* // We will unpack the whole fourth cluster
* uint32_t label = 3;
* // Get the list size
* uint32_t list_size = 0;
* raft::update_host(&list_size, index.list_sizes().data_handle() + label, 1,
* raft::resource::get_cuda_stream(res)); raft::resource::sync_stream(res);
* // allocate the buffer for the output
 * auto codes = raft::make_device_matrix<uint8_t>(res, list_size, raft::ceildiv(index.pq_dim() *
 * index.pq_bits(), 8));
* // unpack the whole list
* ivf_pq::helpers::unpack_list_data(res, index, codes.data_handle(), list_size, label, 0);
* @endcode
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resource
* @param[in] index IVF-PQ index (passed by reference)
* @param[out] out_codes
* the destination buffer [n_rows, ceildiv(index.pq_dim() * index.pq_bits(), 8)].
* The length `n_rows` defines how many records to unpack,
* offset + n_rows must be smaller than or equal to the list size.
* @param[in] n_rows how many codes to unpack
* @param[in] label
* The id of the list (cluster) to decode.
* @param[in] offset
* How many records in the list to skip.
*/
template <typename IdxT>
void unpack_contiguous_list_data(raft::resources const& res,
                                 const index<IdxT>& index,
                                 uint8_t* out_codes,
                                 uint32_t n_rows,
                                 uint32_t label,
                                 uint32_t offset)
{
  // Contiguous (bit-packed) variant: each of the n_rows records occupies
  // ceildiv(pq_dim * pq_bits, 8) bytes in out_codes.
  ivf_pq::detail::unpack_contiguous_list_data<IdxT>(res, index, out_codes, n_rows, label, offset);
}
/**
* @brief Decode `n_take` consecutive records of a single list (cluster) in the compressed index
* starting at given `offset`.
*
* Usage example:
* @code{.cpp}
* // We will reconstruct the fourth cluster
* uint32_t label = 3;
* // Get the list size
* uint32_t list_size = 0;
* raft::copy(&list_size, index.list_sizes().data_handle() + label, 1,
* resource::get_cuda_stream(res)); resource::sync_stream(res);
* // allocate the buffer for the output
* auto decoded_vectors = raft::make_device_matrix<float>(res, list_size, index.dim());
* // decode the whole list
* ivf_pq::helpers::reconstruct_list_data(res, index, decoded_vectors.view(), label, 0);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res
* @param[in] index
* @param[out] out_vectors
* the destination buffer [n_take, index.dim()].
* The length `n_take` defines how many records to reconstruct,
* it must be smaller than the list size.
* @param[in] label
* The id of the list (cluster) to decode.
* @param[in] offset
* How many records in the list to skip.
*/
template <typename T, typename IdxT>
void reconstruct_list_data(raft::resources const& res,
                           const index<IdxT>& index,
                           device_matrix_view<T, uint32_t, row_major> out_vectors,
                           uint32_t label,
                           uint32_t offset)
{
  // Decode out_vectors.extent(0) consecutive records of list `label`,
  // starting at `offset`, back into index.dim()-dimensional vectors.
  ivf_pq::detail::reconstruct_list_data(res, index, out_vectors, label, offset);
}
/**
* @brief Decode a series of records of a single list (cluster) in the compressed index
* by their in-list offsets.
*
* Usage example:
* @code{.cpp}
* // We will reconstruct the fourth cluster
* uint32_t label = 3;
* // Create the selection vector
* auto selected_indices = raft::make_device_vector<uint32_t>(res, 4);
* ... fill the indices ...
* resource::sync_stream(res);
* // allocate the buffer for the output
* auto decoded_vectors = raft::make_device_matrix<float>(
* res, selected_indices.size(), index.dim());
* // decode the whole list
* ivf_pq::helpers::reconstruct_list_data(
* res, index, selected_indices.view(), decoded_vectors.view(), label);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res
* @param[in] index
* @param[in] in_cluster_indices
* The offsets of the selected indices within the cluster.
* @param[out] out_vectors
* the destination buffer [n_take, index.dim()].
* The length `n_take` defines how many records to reconstruct,
* it must be smaller than the list size.
* @param[in] label
* The id of the list (cluster) to decode.
*/
template <typename T, typename IdxT>
void reconstruct_list_data(raft::resources const& res,
                           const index<IdxT>& index,
                           device_vector_view<const uint32_t> in_cluster_indices,
                           device_matrix_view<T, uint32_t, row_major> out_vectors,
                           uint32_t label)
{
  // Gather-based overload: decode only the records of list `label` selected
  // by their in-list offsets in `in_cluster_indices`.
  ivf_pq::detail::reconstruct_list_data(res, index, out_vectors, label, in_cluster_indices);
}
/**
* @brief Extend one list of the index in-place, by the list label, skipping the classification and
* encoding steps.
*
* Usage example:
* @code{.cpp}
* // We will extend the fourth cluster
* uint32_t label = 3;
* // We will fill 4 new vectors
* uint32_t n_vec = 4;
* // Indices of the new vectors
* auto indices = raft::make_device_vector<uint32_t>(res, n_vec);
* ... fill the indices ...
 * auto new_codes = raft::make_device_matrix<uint8_t, uint32_t, row_major>(
 *   res, n_vec, index.pq_dim());
 * ... fill codes ...
 * // extend list with new codes
 * ivf_pq::helpers::extend_list_with_codes(
 *   res, &index, new_codes.view(), indices.view(), label);
* @endcode
*
* @tparam IdxT
*
* @param[in] res
* @param[inout] index
* @param[in] new_codes flat PQ codes, one code per byte [n_rows, index.pq_dim()]
* @param[in] new_indices source indices [n_rows]
* @param[in] label the id of the target list (cluster).
*/
template <typename IdxT>
void extend_list_with_codes(raft::resources const& res,
                            index<IdxT>* index,
                            device_matrix_view<const uint8_t, uint32_t, row_major> new_codes,
                            device_vector_view<const IdxT, uint32_t, row_major> new_indices,
                            uint32_t label)
{
  // Thin public forwarder: appends the already-encoded codes and their
  // source indices to list `label` in place (no classification/encoding).
  ivf_pq::detail::extend_list_with_codes(res, index, new_codes, new_indices, label);
}
/**
* @brief Extend one list of the index in-place, by the list label, skipping the classification
* step.
*
* Usage example:
* @code{.cpp}
* // We will extend the fourth cluster
* uint32_t label = 3;
* // We will extend with 4 new vectors
* uint32_t n_vec = 4;
* // Indices of the new vectors
* auto indices = raft::make_device_vector<uint32_t>(res, n_vec);
* ... fill the indices ...
 * auto new_vectors = raft::make_device_matrix<float, uint32_t, row_major>(
 *   res, n_vec, index.dim());
* ... fill vectors ...
* // extend list with new vectors
* ivf_pq::helpers::extend_list(
* res, &index, new_vectors.view(), indices.view(), label);
* @endcode
*
* @tparam T
* @tparam IdxT
*
* @param[in] res
* @param[inout] index
* @param[in] new_vectors data to encode [n_rows, index.dim()]
* @param[in] new_indices source indices [n_rows]
* @param[in] label the id of the target list (cluster).
*
*/
template <typename T, typename IdxT>
void extend_list(raft::resources const& res,
                 index<IdxT>* index,
                 device_matrix_view<const T, uint32_t, row_major> new_vectors,
                 device_vector_view<const IdxT, uint32_t, row_major> new_indices,
                 uint32_t label)
{
  // Thin public forwarder: encodes `new_vectors` and appends them (with
  // their source indices) to list `label`, skipping the classification step.
  ivf_pq::detail::extend_list(res, index, new_vectors, new_indices, label);
}
/**
* @brief Remove all data from a single list (cluster) in the index.
*
* Usage example:
* @code{.cpp}
* // We will erase the fourth cluster (label = 3)
* ivf_pq::helpers::erase_list(res, &index, 3);
* @endcode
*
* @tparam IdxT
*
* @param[in] res
* @param[inout] index
* @param[in] label the id of the target list (cluster).
*/
template <typename IdxT>
void erase_list(raft::resources const& res, index<IdxT>* index, uint32_t label)
{
  // Thin public forwarder: removes all data from list (cluster) `label`.
  ivf_pq::detail::erase_list(res, index, label);
}
/**
* @brief Public helper API to reset the data and indices ptrs, and the list sizes. Useful for
* externally modifying the index without going through the build stage. The data and indices of the
* IVF lists will be lost.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* using namespace raft::neighbors;
* // use default index parameters
* ivf_pq::index_params index_params;
* // initialize an empty index
* ivf_pq::index<int64_t> index(res, index_params, D);
* // reset the index's state and list sizes
* ivf_pq::helpers::reset_index(res, &index);
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[inout] index pointer to IVF-PQ index
*/
template <typename IdxT>
void reset_index(const raft::resources& res, index<IdxT>* index)
{
  // Zero out every bookkeeping array of the index on its stream. The list
  // allocations themselves are not touched here; they are merely forgotten
  // (sizes and pointers become zero), matching the documented contract above.
  auto s    = resource::get_cuda_stream(res);
  auto zero = [s](auto&& view) { utils::memzero(view.data_handle(), view.size(), s); };
  zero(index->accum_sorted_sizes());
  zero(index->list_sizes());
  zero(index->data_ptrs());
  zero(index->inds_ptrs());
}
/**
* @brief Public helper API exposing the computation of the index's rotation matrix.
* NB: This is to be used only when the rotation matrix is not already computed through
* raft::neighbors::ivf_pq::build.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* // use default index parameters
* ivf_pq::index_params index_params;
* // force random rotation
* index_params.force_random_rotation = true;
* // initialize an empty index
* raft::neighbors::ivf_pq::index<int64_t> index(res, index_params, D);
* // reset the index
* reset_index(res, &index);
* // compute the rotation matrix with random_rotation
* raft::neighbors::ivf_pq::helpers::make_rotation_matrix(
* res, &index, index_params.force_random_rotation);
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[inout] index pointer to IVF-PQ index
* @param[in] force_random_rotation whether to apply a random rotation matrix on the input data. See
* raft::neighbors::ivf_pq::index_params for more details.
*/
template <typename IdxT>
void make_rotation_matrix(raft::resources const& res,
                          index<IdxT>* index,
                          bool force_random_rotation)
{
  // Delegates to the detail implementation, which writes a [rot_dim x dim]
  // matrix in-place into index->rotation_matrix(); whether the rotation is
  // random is controlled by `force_random_rotation` (see index_params docs).
  raft::neighbors::ivf_pq::detail::make_rotation_matrix(res,
                                                        force_random_rotation,
                                                        index->rot_dim(),
                                                        index->dim(),
                                                        index->rotation_matrix().data_handle());
}
/**
* @brief Public helper API for externally modifying the index's IVF centroids.
* NB: The index must be reset before this. Use raft::neighbors::ivf_pq::extend to construct IVF
lists according to new centroids.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* // allocate the buffer for the input centers
* auto cluster_centers = raft::make_device_matrix<float, uint32_t>(res, index.n_lists(),
index.dim());
* ... prepare ivf centroids in cluster_centers ...
* // reset the index
* reset_index(res, &index);
* // recompute the state of the index
* raft::neighbors::ivf_pq::helpers::recompute_internal_state(res, index);
* // Write the IVF centroids
* raft::neighbors::ivf_pq::helpers::set_centers(
res,
&index,
cluster_centers);
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[inout] index pointer to IVF-PQ index
* @param[in] cluster_centers new cluster centers [index.n_lists(), index.dim()]
*/
template <typename IdxT>
void set_centers(raft::resources const& res,
                 index<IdxT>* index,
                 device_matrix_view<const float, uint32_t> cluster_centers)
{
  // Validate the shape of the input before overwriting the coarse centroids.
  const uint32_t n_rows = cluster_centers.extent(0);
  const uint32_t n_cols = cluster_centers.extent(1);
  RAFT_EXPECTS(n_rows == index->n_lists(),
               "Number of rows in the new centers must be equal to the number of IVF lists");
  RAFT_EXPECTS(n_cols == index->dim(),
               "Number of columns in the new cluster centers and index dim are different");
  // Writing centers into a populated index would desynchronize the existing
  // lists from their centroids, hence the emptiness requirement.
  RAFT_EXPECTS(index->size() == 0, "Index must be empty");
  ivf_pq::detail::set_centers(res, index, cluster_centers.data_handle());
}
/**
* @brief Helper exposing the re-computation of list sizes and related arrays if IVF lists have been
* modified.
*
* Usage example:
* @code{.cpp}
* using namespace raft::neighbors;
* raft::resources res;
* // use default index parameters
* ivf_pq::index_params index_params;
* // initialize an empty index
* ivf_pq::index<int64_t> index(res, index_params, D);
* ivf_pq::helpers::reset_index(res, &index);
* // resize the first IVF list to hold 5 records
* auto spec = list_spec<uint32_t, int64_t>{
* index->pq_bits(), index->pq_dim(), index->conservative_memory_allocation()};
* uint32_t new_size = 5;
* ivf::resize_list(res, list, spec, new_size, 0);
* raft::update_device(index.list_sizes(), &new_size, 1, stream);
* // recompute the internal state of the index
* ivf_pq::recompute_internal_state(res, &index);
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[inout] index pointer to IVF-PQ index
*/
template <typename IdxT>
void recompute_internal_state(const raft::resources& res, index<IdxT>* index)
{
  // Forward to the detail implementation, which re-derives the index's
  // dependent state from the current lists.
  // (Fixed: removed a dead local `auto& list = index->lists()[0];` which was
  // never used and unconditionally indexed the first list.)
  ivf_pq::detail::recompute_internal_state(res, *index);
}
/**
* @brief Public helper API for fetching a trained index's IVF centroids into a buffer that may be
* allocated on either host or device.
*
* Usage example:
* @code{.cpp}
* raft::resources res;
* // allocate the buffer for the output centers
* auto cluster_centers = raft::make_device_matrix<float, uint32_t>(
* res, index.n_lists(), index.dim());
* // Extract the IVF centroids into the buffer
* raft::neighbors::ivf_pq::helpers::extract_centers(res, index, cluster_centers.data_handle());
* @endcode
*
* @tparam IdxT
*
* @param[in] res raft resource
* @param[in] index IVF-PQ index (passed by reference)
* @param[out] cluster_centers IVF cluster centers [index.n_lists(), index.dim]
*/
template <typename IdxT>
void extract_centers(raft::resources const& res,
                     const index<IdxT>& index,
                     raft::device_matrix_view<float> cluster_centers)
{
  RAFT_EXPECTS(cluster_centers.extent(0) == index.n_lists(),
               "Number of rows in the output buffer for cluster centers must be equal to the "
               "number of IVF lists");
  RAFT_EXPECTS(
    cluster_centers.extent(1) == index.dim(),
    "Number of columns in the output buffer for cluster centers and index dim are different");
  auto stream = resource::get_cuda_stream(res);
  // The index stores each center row with a pitch of `dim_ext()` floats, while the
  // output buffer is tightly packed with a pitch of `dim()` floats; this strided 2D
  // copy takes only the first `dim()` floats of each of the `n_lists()` rows.
  // NB: cudaMemcpy2DAsync pitches and width are in bytes, height is in rows.
  // cudaMemcpyDefault lets the driver infer the direction, so `cluster_centers`
  // may reside on either host or device (see the function brief above).
  RAFT_CUDA_TRY(cudaMemcpy2DAsync(cluster_centers.data_handle(),
                                  sizeof(float) * index.dim(),
                                  index.centers().data_handle(),
                                  sizeof(float) * index.dim_ext(),
                                  sizeof(float) * index.dim(),
                                  index.n_lists(),
                                  cudaMemcpyDefault,
                                  stream));
}
/** @} */
} // namespace raft::neighbors::ivf_pq::helpers
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/resources.hpp> // raft::resources
#include <raft/neighbors/ivf_flat_serialize.cuh>
#include <raft/neighbors/ivf_flat_types.hpp> // raft::neighbors::ivf_flat::index
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/mr/device/per_device_resource.hpp> // rmm::mr::device_memory_resource
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::neighbors::ivf_flat {
template <typename T, typename IdxT>
auto build(raft::resources const& handle,
const index_params& params,
const T* dataset,
IdxT n_rows,
uint32_t dim) -> index<T, IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
auto build(raft::resources const& handle,
const index_params& params,
raft::device_matrix_view<const T, IdxT, row_major> dataset)
-> index<T, IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
void build(raft::resources const& handle,
const index_params& params,
raft::device_matrix_view<const T, IdxT, row_major> dataset,
raft::neighbors::ivf_flat::index<T, IdxT>& idx) RAFT_EXPLICIT;
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
const index<T, IdxT>& orig_index,
const T* new_vectors,
const IdxT* new_indices,
IdxT n_rows) -> index<T, IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
const index<T, IdxT>& orig_index) -> index<T, IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
index<T, IdxT>* index,
const T* new_vectors,
const IdxT* new_indices,
IdxT n_rows) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
raft::device_matrix_view<const T, IdxT, row_major> new_vectors,
std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
index<T, IdxT>* index) RAFT_EXPLICIT;
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
const search_params& params,
const index<T, IdxT>& index,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances,
rmm::mr::device_memory_resource* mr = nullptr,
IvfSampleFilterT sample_filter = IvfSampleFilterT()) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void search(raft::resources const& handle,
const search_params& params,
const index<T, IdxT>& index,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances,
rmm::mr::device_memory_resource* mr = nullptr) RAFT_EXPLICIT;
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
const search_params& params,
const index<T, IdxT>& index,
raft::device_matrix_view<const T, IdxT, row_major> queries,
raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,
raft::device_matrix_view<float, IdxT, row_major> distances,
IvfSampleFilterT sample_filter = IvfSampleFilterT()) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void search(raft::resources const& handle,
const search_params& params,
const index<T, IdxT>& index,
raft::device_matrix_view<const T, IdxT, row_major> queries,
raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,
raft::device_matrix_view<float, IdxT, row_major> distances) RAFT_EXPLICIT;
} // namespace raft::neighbors::ivf_flat
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_neighbors_ivf_flat_build(T, IdxT) \
extern template auto raft::neighbors::ivf_flat::build<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::index_params& params, \
const T* dataset, \
IdxT n_rows, \
uint32_t dim) \
->raft::neighbors::ivf_flat::index<T, IdxT>; \
\
extern template auto raft::neighbors::ivf_flat::build<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::index_params& params, \
raft::device_matrix_view<const T, IdxT, row_major> dataset) \
->raft::neighbors::ivf_flat::index<T, IdxT>; \
\
extern template void raft::neighbors::ivf_flat::build<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::index_params& params, \
raft::device_matrix_view<const T, IdxT, row_major> dataset, \
raft::neighbors::ivf_flat::index<T, IdxT>& idx);
instantiate_raft_neighbors_ivf_flat_build(float, int64_t);
instantiate_raft_neighbors_ivf_flat_build(int8_t, int64_t);
instantiate_raft_neighbors_ivf_flat_build(uint8_t, int64_t);
#undef instantiate_raft_neighbors_ivf_flat_build
#define instantiate_raft_neighbors_ivf_flat_extend(T, IdxT) \
extern template auto raft::neighbors::ivf_flat::extend<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::index<T, IdxT>& orig_index, \
const T* new_vectors, \
const IdxT* new_indices, \
IdxT n_rows) \
->raft::neighbors::ivf_flat::index<T, IdxT>; \
\
extern template auto raft::neighbors::ivf_flat::extend<T, IdxT>( \
raft::resources const& handle, \
raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
const raft::neighbors::ivf_flat::index<T, IdxT>& orig_index) \
->raft::neighbors::ivf_flat::index<T, IdxT>; \
\
extern template void raft::neighbors::ivf_flat::extend<T, IdxT>( \
raft::resources const& handle, \
raft::neighbors::ivf_flat::index<T, IdxT>* index, \
const T* new_vectors, \
const IdxT* new_indices, \
IdxT n_rows); \
\
extern template void raft::neighbors::ivf_flat::extend<T, IdxT>( \
raft::resources const& handle, \
raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
raft::neighbors::ivf_flat::index<T, IdxT>* index);
instantiate_raft_neighbors_ivf_flat_extend(float, int64_t);
instantiate_raft_neighbors_ivf_flat_extend(int8_t, int64_t);
instantiate_raft_neighbors_ivf_flat_extend(uint8_t, int64_t);
#undef instantiate_raft_neighbors_ivf_flat_extend
#define instantiate_raft_neighbors_ivf_flat_search(T, IdxT) \
extern template void raft::neighbors::ivf_flat::search<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::search_params& params, \
const raft::neighbors::ivf_flat::index<T, IdxT>& index, \
const T* queries, \
uint32_t n_queries, \
uint32_t k, \
IdxT* neighbors, \
float* distances, \
rmm::mr::device_memory_resource* mr); \
\
extern template void raft::neighbors::ivf_flat::search<T, IdxT>( \
raft::resources const& handle, \
const raft::neighbors::ivf_flat::search_params& params, \
const raft::neighbors::ivf_flat::index<T, IdxT>& index, \
raft::device_matrix_view<const T, IdxT, row_major> queries, \
raft::device_matrix_view<IdxT, IdxT, row_major> neighbors, \
raft::device_matrix_view<float, IdxT, row_major> distances);
instantiate_raft_neighbors_ivf_flat_search(float, int64_t);
instantiate_raft_neighbors_ivf_flat_search(int8_t, int64_t);
instantiate_raft_neighbors_ivf_flat_search(uint8_t, int64_t);
#undef instantiate_raft_neighbors_ivf_flat_search
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat_helpers.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/detail/ivf_flat_build.cuh>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/spatial/knn/detail/ann_utils.cuh>
namespace raft::neighbors::ivf_flat::helpers {
using namespace raft::spatial::knn::detail; // NOLINT
/**
* @defgroup ivf_flat_helpers Helper functions for manipulationg IVF Flat Index
* @{
*/
namespace codepacker {
/**
 * Write flat codes into an existing list by the given offset.
 *
 * NB: no memory allocation happens here; the list must fit the data (offset + n_vec).
 *
 * Usage example:
 * @code{.cpp}
 * auto list_data = index.lists()[label]->data.view();
 * // allocate the buffer for the input codes
 * auto codes = raft::make_device_matrix<T>(res, n_vec, index.dim());
 * ... prepare n_vecs to pack into the list in codes ...
 * // write codes into the list starting from the 42nd position
 * ivf_flat::helpers::codepacker::pack(
 * res, make_const_mdspan(codes.view()), index.veclen(), 42, list_data);
 * @endcode
 *
 * @tparam T
 * @tparam IdxT
 *
 * @param[in] res
 * @param[in] codes flat codes [n_vec, dim]
 * @param[in] veclen size of interleaved data chunks
 * @param[in] offset how many records to skip before writing the data into the list
 * @param[inout] list_data block to write into
 */
template <typename T, typename IdxT>
void pack(
  raft::resources const& res,
  device_matrix_view<const T, uint32_t, row_major> codes,
  uint32_t veclen,
  uint32_t offset,
  device_mdspan<T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data)
{
  // Thin wrapper: the interleaved-layout packing is implemented in ivf_flat::detail.
  raft::neighbors::ivf_flat::detail::pack_list_data<T, IdxT>(res, codes, veclen, offset, list_data);
}
/**
 * @brief Unpack `n_take` consecutive records of a single list (cluster) in the index
 * starting at given `offset`.
 *
 * Usage example:
 * @code{.cpp}
 * auto list_data = index.lists()[label]->data.view();
 * // allocate the buffer for the output
 * uint32_t n_take = 4;
 * auto codes = raft::make_device_matrix<T>(res, n_take, index.dim());
 * uint32_t offset = 0;
 * // unpack n_take elements from the list
 * ivf_flat::helpers::codepacker::unpack(res, list_data, index.veclen(), offset, codes.view());
 * @endcode
 *
 * @tparam T
 * @tparam IdxT
 *
 * @param[in] res raft resource
 * @param[in] list_data block to read from
 * @param[in] veclen size of interleaved data chunks
 * @param[in] offset
 *   How many records in the list to skip.
 * @param[inout] codes
 *   the destination buffer [n_take, index.dim()].
 *   The length `n_take` defines how many records to unpack,
 *   it must be <= the list size.
 */
template <typename T, typename IdxT>
void unpack(
  raft::resources const& res,
  device_mdspan<const T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data,
  uint32_t veclen,
  uint32_t offset,
  device_matrix_view<T, uint32_t, row_major> codes)
{
  // Thin wrapper: the interleaved-layout unpacking is implemented in ivf_flat::detail.
  raft::neighbors::ivf_flat::detail::unpack_list_data<T, IdxT>(
    res, list_data, veclen, offset, codes);
}
} // namespace codepacker
/**
 * @brief Public helper API to reset the data and indices ptrs, and the list sizes. Useful for
 * externally modifying the index without going through the build stage. The data and indices of the
 * IVF lists will be lost.
 *
 * Usage example:
 * @code{.cpp}
 * raft::resources res;
 * using namespace raft::neighbors;
 * // use default index parameters
 * ivf_flat::index_params index_params;
 * // initialize an empty index
 * ivf_flat::index<float, int64_t> index(res, index_params, D);
 * // reset the index's state and list sizes
 * ivf_flat::helpers::reset_index(res, &index);
 * @endcode
 *
 * @tparam T
 * @tparam IdxT
 *
 * @param[in] res raft resource
 * @param[inout] index pointer to IVF-Flat index
 */
template <typename T, typename IdxT>
void reset_index(const raft::resources& res, index<T, IdxT>* index)
{
  auto stream = resource::get_cuda_stream(res);
  utils::memzero(index->list_sizes().data_handle(), index->list_sizes().size(), stream);
  utils::memzero(index->data_ptrs().data_handle(), index->data_ptrs().size(), stream);
  utils::memzero(index->inds_ptrs().data_handle(), index->inds_ptrs().size(), stream);
}
/** @} */
} // namespace raft::neighbors::ivf_flat::helpers
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/util/integer_utils.hpp>
#include <thrust/reduce.h>
#include <algorithm> // std::max
#include <memory>
#include <optional>
#include <type_traits>
namespace raft::neighbors::ivf_flat {
/**
* @addtogroup ivf_flat
* @{
*/
/** Size of the interleaved group (see `index::data` description). */
constexpr static uint32_t kIndexGroupSize = 32;
/** Parameters used to build an IVF-Flat index (extends the common ann::index_params). */
struct index_params : ann::index_params {
  /** The number of inverted lists (clusters) */
  uint32_t n_lists = 1024;
  /** The number of iterations searching for kmeans centers (index building). */
  uint32_t kmeans_n_iters = 20;
  /** The fraction of data to use during iterative kmeans building. */
  double kmeans_trainset_fraction = 0.5;
  /**
   * By default (adaptive_centers = false), the cluster centers are trained in `ivf_flat::build`,
   * and never modified in `ivf_flat::extend`. As a result, you may need to retrain the index
   * from scratch after invoking (`ivf_flat::extend`) a few times with new data, the distribution of
   * which is no longer representative of the original training set.
   *
   * The alternative behavior (adaptive_centers = true) is to update the cluster centers for new
   * data when it is added. In this case, `index.centers()` are always exactly the centroids of the
   * data in the corresponding clusters. The drawback of this behavior is that the centroids depend
   * on the order of adding new data (through the classification of the added data); that is,
   * `index.centers()` "drift" together with the changing distribution of the newly added data.
   */
  bool adaptive_centers = false;
  /**
   * By default, the algorithm allocates more space than necessary for individual clusters
   * (`list_data`). This allows to amortize the cost of memory allocation and reduce the number of
   * data copies during repeated calls to `extend` (extending the database).
   *
   * The alternative is the conservative allocation behavior; when enabled, the algorithm always
   * allocates the minimum amount of memory required to store the given number of records. Set this
   * flag to `true` if you prefer to use as little GPU memory for the database as possible.
   */
  bool conservative_memory_allocation = false;
};
/** Parameters used to search an IVF-Flat index (extends the common ann::search_params). */
struct search_params : ann::search_params {
  /** The number of clusters to search. */
  uint32_t n_probes = 20;
};
static_assert(std::is_aggregate_v<index_params>);
static_assert(std::is_aggregate_v<search_params>);
/**
 * Describes the value type and sizing policy of an IVF-Flat list (cluster),
 * and how to compute the extents of its data array.
 *
 * @tparam SizeT type used for sizes and offsets
 * @tparam ValueT data element type stored in the list
 * @tparam IdxT type of the source-dataset indices
 */
template <typename SizeT, typename ValueT, typename IdxT>
struct list_spec {
  using value_type = ValueT;
  using list_extents = matrix_extent<SizeT>;
  using index_type = IdxT;
  /** Upper alignment bound used when sizing list allocations; conservative mode
   * keeps it at the interleaving group size to avoid over-allocation. */
  SizeT align_max;
  /** Lower alignment bound: the interleaving group size. */
  SizeT align_min;
  uint32_t dim;
  // NB: initializers below are listed in member-declaration order (the order in
  // which they actually run), so the compiler does not warn about reordering.
  constexpr list_spec(uint32_t dim, bool conservative_memory_allocation)
    : align_max(conservative_memory_allocation ? kIndexGroupSize : 1024),
      align_min(kIndexGroupSize),
      dim(dim)
  {
  }
  // Allow casting between different size-types (for safer size and offset calculations)
  template <typename OtherSizeT>
  constexpr explicit list_spec(const list_spec<OtherSizeT, ValueT, IdxT>& other_spec)
    : align_max{other_spec.align_max}, align_min{other_spec.align_min}, dim{other_spec.dim}
  {
  }
  /** Determine the extents of an array enough to hold a given amount of data. */
  constexpr auto make_list_extents(SizeT n_rows) const -> list_extents
  {
    return make_extents<SizeT>(n_rows, dim);
  }
};
template <typename ValueT, typename IdxT, typename SizeT = uint32_t>
using list_data = ivf::list<list_spec, SizeT, ValueT, IdxT>;
/**
* @brief IVF-flat index.
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
*/
template <typename T, typename IdxT>
struct index : ann::index {
static_assert(!raft::is_narrowing_v<uint32_t, IdxT>,
"IdxT must be able to represent all values of uint32_t");
public:
/**
* Vectorized load/store size in elements, determines the size of interleaved data chunks.
*
* TODO: in theory, we can lift this to the template parameter and keep it at hardware maximum
* possible value by padding the `dim` of the data https://github.com/rapidsai/raft/issues/711
*/
[[nodiscard]] constexpr inline auto veclen() const noexcept -> uint32_t { return veclen_; }
/** Distance metric used for clustering. */
[[nodiscard]] constexpr inline auto metric() const noexcept -> raft::distance::DistanceType
{
return metric_;
}
/** Whether `centers()` change upon extending the index (ivf_pq::extend). */
[[nodiscard]] constexpr inline auto adaptive_centers() const noexcept -> bool
{
return adaptive_centers_;
}
/**
* Inverted list data [size, dim].
*
* The data consists of the dataset rows, grouped by their labels (into clusters/lists).
* Within each list (cluster), the data is grouped into blocks of `kIndexGroupSize` interleaved
* vectors. Note, the total index length is slightly larger than the source dataset length,
* because each cluster is padded by `kIndexGroupSize` elements.
*
* Interleaving pattern:
* within groups of `kIndexGroupSize` rows, the data is interleaved with the block size equal to
* `veclen * sizeof(T)`. That is, a chunk of `veclen` consecutive components of one row is
* followed by a chunk of the same size of the next row, and so on.
*
* __Example__: veclen = 2, dim = 6, kIndexGroupSize = 32, list_size = 31
*
* x[ 0, 0], x[ 0, 1], x[ 1, 0], x[ 1, 1], ... x[14, 0], x[14, 1], x[15, 0], x[15, 1],
* x[16, 0], x[16, 1], x[17, 0], x[17, 1], ... x[30, 0], x[30, 1], - , - ,
* x[ 0, 2], x[ 0, 3], x[ 1, 2], x[ 1, 3], ... x[14, 2], x[14, 3], x[15, 2], x[15, 3],
* x[16, 2], x[16, 3], x[17, 2], x[17, 3], ... x[30, 2], x[30, 3], - , - ,
* x[ 0, 4], x[ 0, 5], x[ 1, 4], x[ 1, 5], ... x[14, 4], x[14, 5], x[15, 4], x[15, 5],
* x[16, 4], x[16, 5], x[17, 4], x[17, 5], ... x[30, 4], x[30, 5], - , - ,
*
*/
/** Sizes of the lists (clusters) [n_lists]
* NB: This may differ from the actual list size if the shared lists have been extended by another
* index
*/
inline auto list_sizes() noexcept -> device_vector_view<uint32_t, uint32_t>
{
return list_sizes_.view();
}
[[nodiscard]] inline auto list_sizes() const noexcept
-> device_vector_view<const uint32_t, uint32_t>
{
return list_sizes_.view();
}
/** k-means cluster centers corresponding to the lists [n_lists, dim] */
inline auto centers() noexcept -> device_matrix_view<float, uint32_t, row_major>
{
return centers_.view();
}
[[nodiscard]] inline auto centers() const noexcept
-> device_matrix_view<const float, uint32_t, row_major>
{
return centers_.view();
}
/**
* (Optional) Precomputed norms of the `centers` w.r.t. the chosen distance metric [n_lists].
*
* NB: this may be empty if the index is empty or if the metric does not require the center norms
* calculation.
*/
inline auto center_norms() noexcept -> std::optional<device_vector_view<float, uint32_t>>
{
if (center_norms_.has_value()) {
return std::make_optional<device_vector_view<float, uint32_t>>(center_norms_->view());
} else {
return std::nullopt;
}
}
[[nodiscard]] inline auto center_norms() const noexcept
-> std::optional<device_vector_view<const float, uint32_t>>
{
if (center_norms_.has_value()) {
return std::make_optional<device_vector_view<const float, uint32_t>>(center_norms_->view());
} else {
return std::nullopt;
}
}
/** Total length of the index. */
[[nodiscard]] constexpr inline auto size() const noexcept -> IdxT { return total_size_; }
/** Dimensionality of the data. */
[[nodiscard]] constexpr inline auto dim() const noexcept -> uint32_t
{
return centers_.extent(1);
}
/** Number of clusters/inverted lists. */
[[nodiscard]] constexpr inline auto n_lists() const noexcept -> uint32_t { return lists_.size(); }
// Don't allow copying the index for performance reasons (try avoiding copying data)
index(const index&) = delete;
index(index&&) = default;
auto operator=(const index&) -> index& = delete;
auto operator=(index&&) -> index& = default;
~index() = default;
/** Construct an empty index. It needs to be trained and then populated. */
index(raft::resources const& res,
raft::distance::DistanceType metric,
uint32_t n_lists,
bool adaptive_centers,
bool conservative_memory_allocation,
uint32_t dim)
: ann::index(),
veclen_(calculate_veclen(dim)),
metric_(metric),
adaptive_centers_(adaptive_centers),
conservative_memory_allocation_{conservative_memory_allocation},
centers_(make_device_matrix<float, uint32_t>(res, n_lists, dim)),
center_norms_(std::nullopt),
lists_{n_lists},
list_sizes_{make_device_vector<uint32_t, uint32_t>(res, n_lists)},
data_ptrs_{make_device_vector<T*, uint32_t>(res, n_lists)},
inds_ptrs_{make_device_vector<IdxT*, uint32_t>(res, n_lists)},
total_size_{0}
{
check_consistency();
}
/** Construct an empty index. It needs to be trained and then populated. */
index(raft::resources const& res, const index_params& params, uint32_t dim)
: index(res,
params.metric,
params.n_lists,
params.adaptive_centers,
params.conservative_memory_allocation,
dim)
{
}
/** Pointers to the inverted lists (clusters) data [n_lists]. */
inline auto data_ptrs() noexcept -> device_vector_view<T*, uint32_t> { return data_ptrs_.view(); }
[[nodiscard]] inline auto data_ptrs() const noexcept -> device_vector_view<T* const, uint32_t>
{
return data_ptrs_.view();
}
/** Pointers to the inverted lists (clusters) indices [n_lists]. */
inline auto inds_ptrs() noexcept -> device_vector_view<IdxT*, uint32_t>
{
return inds_ptrs_.view();
}
[[nodiscard]] inline auto inds_ptrs() const noexcept -> device_vector_view<IdxT* const, uint32_t>
{
return inds_ptrs_.view();
}
/**
* Whether to use convervative memory allocation when extending the list (cluster) data
* (see index_params.conservative_memory_allocation).
*/
[[nodiscard]] constexpr inline auto conservative_memory_allocation() const noexcept -> bool
{
return conservative_memory_allocation_;
}
/**
* Update the state of the dependent index members.
*/
void recompute_internal_state(raft::resources const& res)
{
auto stream = resource::get_cuda_stream(res);
// Actualize the list pointers
auto this_lists = lists();
auto this_data_ptrs = data_ptrs();
auto this_inds_ptrs = inds_ptrs();
for (uint32_t label = 0; label < this_lists.size(); label++) {
auto& list = this_lists[label];
const auto data_ptr = list ? list->data.data_handle() : nullptr;
const auto inds_ptr = list ? list->indices.data_handle() : nullptr;
copy(&this_data_ptrs(label), &data_ptr, 1, stream);
copy(&this_inds_ptrs(label), &inds_ptr, 1, stream);
}
auto this_list_sizes = list_sizes().data_handle();
total_size_ = thrust::reduce(resource::get_thrust_policy(res),
this_list_sizes,
this_list_sizes + this_lists.size(),
0,
raft::add_op{});
check_consistency();
}
void allocate_center_norms(raft::resources const& res)
{
switch (metric_) {
case raft::distance::DistanceType::L2Expanded:
case raft::distance::DistanceType::L2SqrtExpanded:
case raft::distance::DistanceType::L2Unexpanded:
case raft::distance::DistanceType::L2SqrtUnexpanded:
center_norms_ = make_device_vector<float, uint32_t>(res, n_lists());
break;
default: center_norms_ = std::nullopt;
}
}
/** Lists' data and indices. */
inline auto lists() noexcept -> std::vector<std::shared_ptr<list_data<T, IdxT>>>&
{
return lists_;
}
[[nodiscard]] inline auto lists() const noexcept
-> const std::vector<std::shared_ptr<list_data<T, IdxT>>>&
{
return lists_;
}
private:
/**
 * TODO: in theory, we can lift this to the template parameter and keep it at hardware maximum
 * possible value by padding the `dim` of the data https://github.com/rapidsai/raft/issues/711
 */
// Size of the interleaved data chunks; chosen by calculate_veclen (dim % veclen_ == 0).
uint32_t veclen_;
// Distance metric; determines whether center_norms_ is allocated (see allocate_center_norms).
raft::distance::DistanceType metric_;
// NOTE(review): presumably whether cluster centers adapt to newly added vectors — confirm upstream.
bool adaptive_centers_;
// NOTE(review): presumably controls whether list allocations reserve extra headroom — confirm upstream.
bool conservative_memory_allocation_;
// Per-cluster (per-label) storage: data and source indices of each inverted list.
std::vector<std::shared_ptr<list_data<T, IdxT>>> lists_;
// Number of records in each list; one entry per list (see check_consistency).
device_vector<uint32_t, uint32_t> list_sizes_;
// Cluster centers; extent(0) equals the number of lists (see check_consistency).
device_matrix<float, uint32_t, row_major> centers_;
// L2 norms of the centers; only present for L2-family metrics (see allocate_center_norms).
std::optional<device_vector<float, uint32_t>> center_norms_;
// Computed members
// Flat array of each list's data pointer (filled by recompute_internal_state).
device_vector<T*, uint32_t> data_ptrs_;
// Flat array of each list's indices pointer (filled by recompute_internal_state).
device_vector<IdxT*, uint32_t> inds_ptrs_;
// Sum of all list sizes (recomputed by recompute_internal_state).
IdxT total_size_;
/** Throw an error if the index content is inconsistent. */
void check_consistency()
{
  const auto n_lists = lists_.size();
  RAFT_EXPECTS(dim() % veclen_ == 0, "dimensionality is not a multiple of the veclen");
  // All flat per-list arrays must have exactly one entry per list.
  RAFT_EXPECTS(list_sizes_.extent(0) == n_lists, "inconsistent list size");
  RAFT_EXPECTS(data_ptrs_.extent(0) == n_lists, "inconsistent list size");
  RAFT_EXPECTS(inds_ptrs_.extent(0) == n_lists, "inconsistent list size");
  // The number of cluster centers (and their norms, if present) must match as well.
  const bool centers_match = centers_.extent(0) == list_sizes_.extent(0);
  const bool norms_match =
    !center_norms_.has_value() || centers_.extent(0) == center_norms_->extent(0);
  RAFT_EXPECTS(centers_match && norms_match, "inconsistent number of lists (clusters)");
}
/** Choose the interleaving chunk size for element type T given the dimensionality. */
static auto calculate_veclen(uint32_t dim) -> uint32_t
{
  // TODO: consider padding the dimensions and fixing veclen to its maximum possible value as a
  // template parameter (https://github.com/rapidsai/raft/issues/711)
  // NOTE: keep this consistent with the select_interleaved_scan_kernel logic
  // in detail/ivf_flat_interleaved_scan-inl.cuh.
  // Prefer 16-byte wide chunks; fall back to scalar access when dim is not divisible.
  const uint32_t max_veclen = std::max<uint32_t>(1, 16 / sizeof(T));
  return (dim % max_veclen == 0) ? max_veclen : 1;
}
};
/** @} */
} // namespace raft::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/ivf_flat_codepacker.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/detail/div_utils.hpp>
#include <raft/neighbors/ivf_flat_types.hpp>
namespace raft::neighbors::ivf_flat::codepacker {
/**
 * Write one flat code into a block by the given offset. The offset indicates the id of the record
 * in the list. This function interleaves the code and is intended to later copy the interleaved
 * codes over to the IVF list on device. NB: no memory allocation happens here; the block must fit
 * the record (offset + 1).
 *
 * @tparam T
 *
 * @param[in] flat_code input flat code
 * @param[out] block block of memory to write interleaved codes to
 * @param[in] dim dimension of the flat code
 * @param[in] veclen size of interleaved data chunks
 * @param[in] offset how many records to skip before writing the data into the list
 */
template <typename T>
_RAFT_HOST_DEVICE void pack_1(
  const T* flat_code, T* block, uint32_t dim, uint32_t veclen, uint32_t offset)
{
  // Records are stored in interleaved groups of `kIndexGroupSize` vectors.
  using interleaved_group = neighbors::detail::div_utils<kIndexGroupSize>;
  const auto group_offset = interleaved_group::roundDown(offset);
  const auto ingroup_id   = interleaved_group::mod(offset) * veclen;
  // Base of the group this record belongs to.
  T* group_base = block + group_offset * dim;
  // Scatter the source vector in chunks of `veclen` elements.
  // NB: veclen is selected such that `dim % veclen == 0`.
  for (uint32_t chunk = 0; chunk < dim; chunk += veclen) {
    T* dst = group_base + chunk * kIndexGroupSize + ingroup_id;
    for (uint32_t k = 0; k < veclen; k++) {
      dst[k] = flat_code[chunk + k];
    }
  }
}
/**
 * Unpack 1 record of a single list (cluster) in the index to fetch the flat code. The offset
 * indicates the id of the record. This function fetches one flat code from an interleaved code.
 *
 * @tparam T
 *
 * @param[in] block interleaved block. The block can be thought of as the whole inverted list in
 * interleaved format.
 * @param[out] flat_code output flat code
 * @param[in] dim dimension of the flat code
 * @param[in] veclen size of interleaved data chunks
 * @param[in] offset fetch the flat code by the given offset
 */
template <typename T>
_RAFT_HOST_DEVICE void unpack_1(
  const T* block, T* flat_code, uint32_t dim, uint32_t veclen, uint32_t offset)
{
  // Records are stored in interleaved groups of `kIndexGroupSize` vectors.
  using interleaved_group = neighbors::detail::div_utils<kIndexGroupSize>;
  const auto group_offset = interleaved_group::roundDown(offset);
  const auto ingroup_id   = interleaved_group::mod(offset) * veclen;
  // Base of the group this record belongs to.
  const T* group_base = block + group_offset * dim;
  // Gather the vector back in chunks of `veclen` elements.
  // NB: veclen is selected such that `dim % veclen == 0`.
  for (uint32_t chunk = 0; chunk < dim; chunk += veclen) {
    const T* src = group_base + chunk * kIndexGroupSize + ingroup_id;
    for (uint32_t k = 0; k < veclen; k++) {
      flat_code[chunk + k] = src[k];
    }
  }
}
} // namespace raft::neighbors::ivf_flat::codepacker | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/neighbors/brute_force.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "brute_force-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "brute_force-ext.cuh"
#endif
#include <raft/neighbors/detail/knn_brute_force_batch_k_query.cuh>
namespace raft::neighbors::brute_force {
/**
 * @brief Make a brute force query over batches of k
 *
 * This lets you query for batches of k. For example, you can get
 * the first 100 neighbors, then the next 100 neighbors etc.
 *
 * Example usage:
 * @code{.cpp}
 * #include <raft/neighbors/brute_force.cuh>
 * #include <raft/core/device_mdarray.hpp>
 * #include <raft/random/make_blobs.cuh>
 * // create a random dataset
 * int n_rows = 10000;
 * int n_cols = 10000;
 * raft::device_resources res;
 * auto dataset = raft::make_device_matrix<float, int>(res, n_rows, n_cols);
 * auto labels = raft::make_device_vector<float, int>(res, n_rows);
 * raft::make_blobs(res, dataset.view(), labels.view());
 *
 * // create a brute_force knn index from the dataset
 * auto index = raft::neighbors::brute_force::build(res,
 * raft::make_const_mdspan(dataset.view()));
 *
 * // search the index in batches of 128 nearest neighbors
 * auto search = raft::make_const_mdspan(dataset.view());
 * auto query = make_batch_k_query<float, int>(res, index, search, 128);
 * for (auto & batch: *query) {
 * // batch.indices() and batch.distances() contain the information on the current batch
 * }
 *
 * // we can also support variable sized batches - loaded up a different number
 * // of neighbors at each iteration through the ::advance method
 * int64_t batch_size = 128;
 * query = make_batch_k_query<float, int>(res, index, search, batch_size);
 * for (auto it = query->begin(); it != query->end(); it.advance(batch_size)) {
 * // batch.indices() and batch.distances() contain the information on the current batch
 *
 * batch_size += 16; // load up an extra 16 items in the next batch
 * }
 * @endcode
 *
 * @tparam T data element type
 * @tparam IdxT type of the indices in the source dataset
 * @param[in] res
 * @param[in] index The index to query
 * @param[in] query A device matrix view to query for [n_queries, index->dim()]
 * @param[in] batch_size The size of each batch
 */
template <typename T, typename IdxT>
std::shared_ptr<batch_k_query<T, IdxT>> make_batch_k_query(
  const raft::resources& res,
  const raft::neighbors::brute_force::index<T>& index,
  raft::device_matrix_view<const T, int64_t, row_major> query,
  int64_t batch_size)
{
  // make_shared performs a single allocation (object + control block) and is
  // exception-safe, unlike shared_ptr(new ...); the resulting pointer converts
  // implicitly to the base-class shared_ptr.
  return std::make_shared<detail::gpu_batch_k_query<T, IdxT>>(res, index, query, batch_size);
}
} // namespace raft::neighbors::brute_force
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.