repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/contractions.cuh>
#include <raft/util/device_utils.cuh>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * @brief Tiled epsilon-neighborhood functor under the (unexpanded) L2-squared
 * distance, built on raft's Contractions_NT tiling machinery.
 *
 * Each thread block computes one Mblk x Nblk tile of the m x n adjacency
 * matrix: Kblk-wide slices of `x` and `y` are streamed through double-buffered
 * shared memory while squared differences accumulate in registers.
 *
 * @tparam DataT     IO and math type
 * @tparam IdxT      index type
 * @tparam Policy    tiling policy (e.g. a raft::linalg::Policy4x4 instantiation)
 * @tparam BaseClass contraction base providing the ldg/sts/lds primitives
 */
template <typename DataT,
          typename IdxT,
          typename Policy,
          typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy>>
struct EpsUnexpL2SqNeighborhood : public BaseClass {
 private:
  typedef Policy P;
  bool* adj;   // output adjacency matrix [m x n], row-major
  DataT eps;   // squared radius: points are neighbors iff L2^2 <= eps
  IdxT* vd;    // optional vertex-degree output; nullptr disables the reduction
  char* smem;  // for final reductions
  // per-thread register tile of accumulated squared distances
  DataT acc[P::AccRowsPerTh][P::AccColsPerTh];

 public:
  DI EpsUnexpL2SqNeighborhood(bool* _adj,
                              IdxT* _vd,
                              const DataT* _x,
                              const DataT* _y,
                              IdxT _m,
                              IdxT _n,
                              IdxT _k,
                              DataT _eps,
                              char* _smem)
    : BaseClass(_x, _y, _m, _n, _k, _smem), adj(_adj), eps(_eps), vd(_vd), smem(_smem)
  {
  }
  DI void run()
  {
    prolog();
    loop();
    epilog();
  }

 private:
  // Prefetch the first k-slice into shared memory and zero the accumulators.
  DI void prolog()
  {
    this->ldgXY(IdxT(blockIdx.x) * P::Mblk, IdxT(blockIdx.y) * P::Nblk, 0);
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = BaseClass::Zero;
      }
    }
    this->stsXY();
    __syncthreads();
    this->switch_write_buffer();
  }
  // Main k-loop: overlap the global load of slice `kidx` with the accumulation
  // of the previously staged slice (classic double buffering).
  DI void loop()
  {
    for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
      this->ldgXY(IdxT(blockIdx.x) * P::Mblk, IdxT(blockIdx.y) * P::Nblk, kidx);
      accumulate();  // on the previous k-block
      this->stsXY();
      __syncthreads();
      this->switch_write_buffer();
      this->switch_read_buffer();
    }
    accumulate();  // last iteration
  }
  // Threshold the accumulated distances, emit adjacency bits, and optionally
  // reduce per-column neighbor counts into the vertex-degree array.
  DI void epilog()
  {
    IdxT startx = blockIdx.x * P::Mblk + this->accrowid;
    IdxT starty = blockIdx.y * P::Nblk + this->acccolid;
    IdxT sums[P::AccColsPerTh];
#pragma unroll
    for (int j = 0; j < P::AccColsPerTh; ++j) {
      sums[j] = 0;
    }
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      auto xid = startx + i * P::AccThRows;
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto yid      = starty + j * P::AccThCols;
        auto is_neigh = acc[i][j] <= eps;
        ///@todo: fix uncoalesced writes using shared mem
        if (xid < this->m && yid < this->n) {
          adj[xid * this->n + yid] = is_neigh;
          sums[j] += is_neigh;
        }
      }
    }
    // perform reduction of adjacency values to compute vertex degrees
    if (vd != nullptr) { updateVertexDegree(sums); }
  }
  // Accumulate squared differences for the k-slice currently in shared memory.
  DI void accumulate()
  {
#pragma unroll
    for (int ki = 0; ki < P::Kblk; ki += P::Veclen) {
      this->ldsXY(ki);
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
#pragma unroll
          for (int v = 0; v < P::Veclen; ++v) {
            auto diff = this->regx[i][v] - this->regy[j][v];
            acc[i][j] += diff * diff;
          }
        }
      }
    }
  }
  // Block-reduce the per-thread neighbor counts and atomically add them to
  // the per-vertex degrees. NOTE(review): degrees are indexed by the y-side
  // (column) id here and the grand total lands at vd[n] — so vd presumably
  // needs n + 1 entries; confirm against the caller (the public doc says m+1,
  // which matches only when m == n).
  DI void updateVertexDegree(IdxT (&sums)[P::AccColsPerTh])
  {
    __syncthreads();  // so that we can safely reuse smem
    int gid        = threadIdx.x / P::AccThCols;
    int lid        = threadIdx.x % P::AccThCols;
    auto cidx      = IdxT(blockIdx.y) * P::Nblk + lid;
    IdxT totalSum  = 0;
    // update the individual vertex degrees
#pragma unroll
    for (int i = 0; i < P::AccColsPerTh; ++i) {
      sums[i]  = batchedBlockReduce<IdxT, P::AccThCols>(sums[i], smem);
      auto cid = cidx + i * P::AccThCols;
      if (gid == 0 && cid < this->n) {
        atomicUpdate(cid, sums[i]);
        totalSum += sums[i];
      }
      __syncthreads();  // for safe smem reuse
    }
    // update the total edge count
    totalSum = raft::blockReduce<IdxT>(totalSum, smem);
    if (threadIdx.x == 0) { atomicUpdate(this->n, totalSum); }
  }
  // Width-dispatched atomic add on the vertex-degree array.
  DI void atomicUpdate(IdxT addrId, IdxT val)
  {
    if (sizeof(IdxT) == 4) {
      raft::myAtomicAdd<unsigned>((unsigned*)(vd + addrId), val);
    } else if (sizeof(IdxT) == 8) {
      raft::myAtomicAdd<unsigned long long>((unsigned long long*)(vd + addrId), val);
    }
  }
};  // struct EpsUnexpL2SqNeighborhood
/**
 * Kernel entry point: each thread block computes one Mblk x Nblk tile of the
 * adjacency matrix. Expects a 1D block of Policy::Nthreads threads and
 * Policy::SmemSize bytes of dynamic shared memory (see epsUnexpL2SqNeighImpl).
 */
template <typename DataT, typename IdxT, typename Policy>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL epsUnexpL2SqNeighKernel(
  bool* adj, IdxT* vd, const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k, DataT eps)
{
  extern __shared__ char smem[];
  EpsUnexpL2SqNeighborhood<DataT, IdxT, Policy> obj(adj, vd, x, y, m, n, k, eps, smem);
  obj.run();
}
/**
 * Host-side launcher: instantiates the tiling policy for the chosen vector
 * load width and launches one block per (Mblk x Nblk) output tile.
 */
template <typename DataT, typename IdxT, int VecLen>
void epsUnexpL2SqNeighImpl(bool* adj,
                           IdxT* vd,
                           const DataT* x,
                           const DataT* y,
                           IdxT m,
                           IdxT n,
                           IdxT k,
                           DataT eps,
                           cudaStream_t stream)
{
  using Policy = typename raft::linalg::Policy4x4<DataT, VecLen>::Policy;
  // 2D grid covering the m x n adjacency matrix tile-by-tile
  const dim3 grid_dim(raft::ceildiv<int>(m, Policy::Mblk), raft::ceildiv<int>(n, Policy::Nblk));
  const dim3 block_dim(Policy::Nthreads);
  epsUnexpL2SqNeighKernel<DataT, IdxT, Policy>
    <<<grid_dim, block_dim, Policy::SmemSize, stream>>>(adj, vd, x, y, m, n, k, eps);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Computes epsilon neighborhood for the L2-Squared distance metric
 *
 * @tparam DataT IO and math type
 * @tparam IdxT  Index type
 *
 * @param[out] adj    adjacency matrix [row-major] [on device] [dim = m x n]
 * @param[out] vd     vertex degree array [on device] [len = m + 1]
 *                    `vd + m` stores the total number of edges in the adjacency
 *                    matrix. Pass a nullptr if you don't need this info.
 * @param[in]  x      first matrix [row-major] [on device] [dim = m x k]
 * @param[in]  y      second matrix [row-major] [on device] [dim = n x k]
 * @param[in]  m      number of rows in `x`
 * @param[in]  n      number of rows in `y`
 * @param[in]  k      number of columns in `x` and `y`
 * @param[in]  eps    defines epsilon neighborhood radius (should be passed as
 *                    squared as we compute L2-squared distance in this method)
 * @param[in]  stream cuda stream
 */
template <typename DataT, typename IdxT>
void epsUnexpL2SqNeighborhood(bool* adj,
                              IdxT* vd,
                              const DataT* x,
                              const DataT* y,
                              IdxT m,
                              IdxT n,
                              IdxT k,
                              DataT eps,
                              cudaStream_t stream)
{
  // Pick the widest vector load (16B, 8B, or scalar) the row byte-size allows.
  size_t bytes = sizeof(DataT) * k;
  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0) {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 16 / sizeof(DataT)>(adj, vd, x, y, m, n, k, eps, stream);
  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0) {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 8 / sizeof(DataT)>(adj, vd, x, y, m, n, k, eps, stream);
  } else {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 1>(adj, vd, x, y, m, n, k, eps, stream);
  }
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace cuvs | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ann_utils.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/logger.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/integer_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <memory>
#include <optional>
#include <cuda_fp16.hpp>
namespace cuvs::spatial::knn::detail::utils {
/** Whether pointers are accessible on the device or on the host. */
enum class pointer_residency {
  /** Some of the pointers are on the device, some on the host. */
  mixed,
  /** All pointers accessible from both the device and the host. */
  host_and_device,
  /** All pointers are host accessible. */
  host_only,
  /** All pointers are device accessible. */
  device_only
};
/**
 * Recursive helper: counts, over a pack of pointers, how many are
 * device-accessible and how many are host-accessible (returned as a
 * {device_count, host_count} tuple).
 */
template <typename... Types>
struct pointer_residency_count {};

/** Base case: an empty pack has zero device- and zero host-accessible pointers. */
template <>
struct pointer_residency_count<> {
  static inline auto run() -> std::tuple<int, int> { return std::make_tuple(0, 0); }
};

template <typename Type, typename... Types>
struct pointer_residency_count<Type, Types...> {
  /** Classify `ptr` via cudaPointerGetAttributes, then recurse over the rest. */
  static inline auto run(const Type* ptr, const Types*... ptrs) -> std::tuple<int, int>
  {
    auto [on_device, on_host] = pointer_residency_count<Types...>::run(ptrs...);
    cudaPointerAttributes attr;
    RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, ptr));
    switch (attr.type) {
      // plain (unregistered) host memory: host-only
      case cudaMemoryTypeUnregistered: return std::make_tuple(on_device, on_host + 1);
      case cudaMemoryTypeHost:
        // pinned host memory: device-accessible only if mapped at the same address
        return std::make_tuple(on_device + int(attr.devicePointer == ptr), on_host + 1);
      case cudaMemoryTypeDevice: return std::make_tuple(on_device + 1, on_host);
      case cudaMemoryTypeManaged: return std::make_tuple(on_device + 1, on_host + 1);
      default: return std::make_tuple(on_device, on_host);
    }
  }
};
/** Check if all argument pointers reside on the host or on the device. */
template <typename... Types>
auto check_pointer_residency(const Types*... ptrs) -> pointer_residency
{
  constexpr int kNumArgs   = static_cast<int>(sizeof...(Types));
  auto [dev_cnt, host_cnt] = pointer_residency_count<Types...>::run(ptrs...);
  // Classify by whether the whole pack is device- and/or host-accessible.
  if (dev_cnt == kNumArgs) {
    return host_cnt == kNumArgs ? pointer_residency::host_and_device
                                : pointer_residency::device_only;
  }
  if (host_cnt == kNumArgs) { return pointer_residency::host_only; }
  return pointer_residency::mixed;
}
/** RAII helper to access the host data from gpu when necessary. */
template <typename PtrT, typename Action>
struct with_mapped_memory_t {
  /**
   * If `ptr` is already device-accessible, use it directly; otherwise register
   * the host range as mapped (pinned) memory for this object's lifetime and
   * obtain the corresponding device pointer.
   */
  with_mapped_memory_t(PtrT ptr, size_t size, Action action) : action_(action)
  {
    if (ptr == nullptr) { return; }
    switch (utils::check_pointer_residency(ptr)) {
      case utils::pointer_residency::device_only:
      case utils::pointer_residency::host_and_device: {
        dev_ptr_ = (void*)ptr;  // NOLINT
      } break;
      default: {
        host_ptr_ = (void*)ptr;  // NOLINT
        RAFT_CUDA_TRY(cudaHostRegister(host_ptr_, size, choose_flags(ptr)));
        RAFT_CUDA_TRY(cudaHostGetDevicePointer(&dev_ptr_, host_ptr_, 0));
      } break;
    }
  }
  // Unregister the host range if this object pinned it (best-effort:
  // the return code is deliberately ignored in a destructor).
  ~with_mapped_memory_t()
  {
    if (host_ptr_ != nullptr) { cudaHostUnregister(host_ptr_); }
  }
  /** Invoke the stored action with the device-accessible pointer. */
  auto operator()() { return action_((PtrT)dev_ptr_); }  // NOLINT

 private:
  Action action_;
  void* host_ptr_ = nullptr;  // non-null only when we registered the memory
  void* dev_ptr_  = nullptr;

  // Overload for const data: request read-only registration when supported.
  template <typename T>
  static auto choose_flags(const T*) -> unsigned int
  {
    int dev_id, readonly_supported;
    RAFT_CUDA_TRY(cudaGetDevice(&dev_id));
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(
      &readonly_supported, cudaDevAttrHostRegisterReadOnlySupported, dev_id));
    if (readonly_supported) {
      return cudaHostRegisterMapped | cudaHostRegisterReadOnly;
    } else {
      return cudaHostRegisterMapped;
    }
  }
  // Overload for mutable data: plain mapped registration.
  template <typename T>
  static auto choose_flags(T*) -> unsigned int
  {
    return cudaHostRegisterMapped;
  }
};
/**
 * Per-element-type configuration: the value type used in math and the divisor
 * applied when converting between types (see `mapping` below). Floating-point
 * types use a divisor of 1 (no scaling); integral types are scaled by their
 * range.
 */
template <typename T>
struct config {};

template <>
struct config<double> {
  using value_t = double;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<float> {
  using value_t = float;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<half> {
  using value_t = half;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<uint8_t> {
  using value_t = uint32_t;
  static constexpr double kDivisor = 256.0;
};
template <>
struct config<int8_t> {
  using value_t = int32_t;
  static constexpr double kDivisor = 128.0;
};
/**
 * @brief Converting values between the types taking into account scaling factors
 * for the integral types.
 *
 * @tparam T target type of the mapping.
 */
template <typename T>
struct mapping {
  /**
   * @defgroup
   * @brief Cast and possibly scale a value of the source type `S` to the target type `T`.
   *
   * @tparam S source type
   * @param x source value
   * @{
   */
  // Identity overload: same source and target type, no scaling needed.
  template <typename S>
  HDI constexpr auto operator()(const S& x) const -> std::enable_if_t<std::is_same_v<S, T>, T>
  {
    return x;
  };
  // Converting overload: scale by the ratio of the two types' divisors,
  // performing the multiply in whichever floating-point type is available
  // (source, then target, falling back to float for int-to-int mappings).
  template <typename S>
  HDI constexpr auto operator()(const S& x) const -> std::enable_if_t<!std::is_same_v<S, T>, T>
  {
    constexpr double kMult = config<T>::kDivisor / config<S>::kDivisor;
    if constexpr (std::is_floating_point_v<S>) { return static_cast<T>(x * static_cast<S>(kMult)); }
    if constexpr (std::is_floating_point_v<T>) { return static_cast<T>(x) * static_cast<T>(kMult); }
    return static_cast<T>(static_cast<float>(x) * static_cast<float>(kMult));
  };
  /** @} */
};
/**
 * @brief Sets the first `n_elems` elements (of type T) pointed to by `ptr` to
 * zero; works for host- or device-accessible pointers.
 *
 * @param[out] ptr host or device pointer
 * @param[in] n_elems number of elements to zero out
 * @param[in] stream used for the async device memset; synchronized before a
 *            host-side memset so pending device work on the range completes
 */
template <typename T, typename IdxT>
inline void memzero(T* ptr, IdxT n_elems, rmm::cuda_stream_view stream)
{
  switch (check_pointer_residency(ptr)) {
    case pointer_residency::host_and_device:
    case pointer_residency::device_only: {
      RAFT_CUDA_TRY(cudaMemsetAsync(ptr, 0, n_elems * sizeof(T), stream));
    } break;
    case pointer_residency::host_only: {
      stream.synchronize();
      ::memset(ptr, 0, n_elems * sizeof(T));
    } break;
    default: RAFT_FAIL("memset: unreachable code");
  }
}
/**
 * Device kernel for outer_add: thread `tid` computes c[tid] = a[tid / len_b] +
 * b[tid % len_b], treating a null `a` or `b` as a zero vector.
 */
template <typename T, typename IdxT>
RAFT_KERNEL outer_add_kernel(const T* a, IdxT len_a, const T* b, IdxT len_b, T* c)
{
  const IdxT tid = threadIdx.x + blockDim.x * static_cast<IdxT>(blockIdx.x);
  const IdxT row = tid / len_b;
  const IdxT col = tid % len_b;
  if (row >= len_a) { return; }  // tail guard
  const T va = (a != nullptr) ? a[row] : T(0);
  const T vb = (b != nullptr) ? b[col] : T(0);
  c[tid]     = va + vb;
}
/**
 * Device kernel for block_copy: thread `i` copies input element `i`, locating
 * its source chunk via binary search over `in_offsets` (n_blocks + 1 entries,
 * scaled by n_mult) and shifting it to the corresponding output offset.
 */
template <typename T, typename IdxT>
RAFT_KERNEL block_copy_kernel(const IdxT* in_offsets,
                              const IdxT* out_offsets,
                              IdxT n_blocks,
                              const T* in_data,
                              T* out_data,
                              IdxT n_mult)
{
  IdxT i = static_cast<IdxT>(blockDim.x) * static_cast<IdxT>(blockIdx.x) + threadIdx.x;
  // find the source offset using the binary search.
  uint32_t l = 0;
  // NOTE(review): narrows IdxT -> uint32_t; assumes n_blocks < 2^32 — confirm
  uint32_t r = n_blocks;
  IdxT in_offset = 0;
  // tail guard: `i` beyond the total (scaled) input size
  if (in_offsets[r] * n_mult <= i) return;
  // invariant: in_offsets[l] * n_mult <= i < in_offsets[r] * n_mult
  while (l + 1 < r) {
    uint32_t c = (l + r) >> 1;
    IdxT o = in_offsets[c] * n_mult;
    if (o <= i) {
      l = c;
      in_offset = o;
    } else {
      r = c;
    }
  }
  // copy the data: rebase element i from chunk l's input offset to its output offset
  out_data[out_offsets[l] * n_mult - in_offset + i] = in_data[i];
}
/**
 * Copy chunks of data from one array to another at given offsets.
 *
 * @tparam T element type
 * @tparam IdxT index type
 *
 * @param[in] in_offsets chunk start offsets in the input [n_blocks + 1]
 * @param[in] out_offsets chunk start offsets in the output [n_blocks + 1]
 * @param n_blocks size of the offset arrays minus one.
 * @param[in] in_data
 * @param[out] out_data
 * @param n_mult constant multiplier for offset values (such as e.g. `dim`)
 * @param stream
 */
template <typename T, typename IdxT>
void block_copy(const IdxT* in_offsets,
                const IdxT* out_offsets,
                IdxT n_blocks,
                const T* in_data,
                T* out_data,
                IdxT n_mult,
                rmm::cuda_stream_view stream)
{
  // Fetch the total input size (the last offset) to size the grid.
  // NB: qualified raft::update_host — ADL cannot find it from this namespace.
  IdxT in_size;
  raft::update_host(&in_size, in_offsets + n_blocks, 1, stream);
  stream.synchronize();
  dim3 threads(128, 1, 1);
  dim3 blocks(raft::ceildiv<IdxT>(in_size * n_mult, threads.x), 1, 1);
  block_copy_kernel<<<blocks, threads, 0, stream>>>(
    in_offsets, out_offsets, n_blocks, in_data, out_data, n_mult);
}
/**
 * @brief Fill matrix `c` with all combinations of sums of vectors `a` and `b`.
 *
 * NB: device-only function
 *
 * @tparam T element type
 * @tparam IdxT index type
 *
 * @param[in] a device pointer to a vector [len_a]
 * @param len_a number of elements in `a`
 * @param[in] b device pointer to a vector [len_b]
 * @param len_b number of elements in `b`
 * @param[out] c row-major matrix [len_a, len_b]
 * @param stream
 */
template <typename T, typename IdxT>
void outer_add(const T* a, IdxT len_a, const T* b, IdxT len_b, T* c, rmm::cuda_stream_view stream)
{
  const dim3 block_dim(128, 1, 1);
  // one thread per output element of the len_a x len_b matrix
  const IdxT n_elems = len_a * len_b;
  const dim3 grid_dim(raft::ceildiv<IdxT>(n_elems, block_dim.x), 1, 1);
  outer_add_kernel<<<grid_dim, block_dim, 0, stream>>>(a, len_a, b, len_b, c);
}
/**
 * Device kernel for copy_selected: one thread per destination element; gathers
 * the source row through `row_ids` and converts values via `mapping<T>`.
 */
template <typename T, typename S, typename IdxT, typename LabelT>
RAFT_KERNEL copy_selected_kernel(
  IdxT n_rows, IdxT n_cols, const S* src, const LabelT* row_ids, IdxT ld_src, T* dst, IdxT ld_dst)
{
  const IdxT tid     = threadIdx.x + blockDim.x * static_cast<IdxT>(blockIdx.x);
  const IdxT col     = tid % n_cols;
  const IdxT row_dst = tid / n_cols;
  if (row_dst >= n_rows) { return; }  // tail guard
  const auto row_src          = static_cast<IdxT>(row_ids[row_dst]);
  dst[ld_dst * row_dst + col] = mapping<T>{}(src[ld_src * row_src + col]);
}
/**
 * @brief Copy selected rows of a matrix while mapping the data from the source to the target
 * type.
 *
 * @tparam T target type
 * @tparam S source type
 * @tparam IdxT index type
 * @tparam LabelT label type
 *
 * @param n_rows
 * @param n_cols
 * @param[in] src input matrix [..., ld_src]
 * @param[in] row_ids selection of rows to be copied [n_rows]
 * @param ld_src number of cols in the input (ld_src >= n_cols)
 * @param[out] dst output matrix [n_rows, ld_dst]
 * @param ld_dst number of cols in the output (ld_dst >= n_cols)
 * @param stream
 */
template <typename T, typename S, typename IdxT, typename LabelT>
void copy_selected(IdxT n_rows,
                   IdxT n_cols,
                   const S* src,
                   const LabelT* row_ids,
                   IdxT ld_src,
                   T* dst,
                   IdxT ld_dst,
                   rmm::cuda_stream_view stream)
{
  // All three pointers must live on the same side; dispatch accordingly.
  switch (check_pointer_residency(src, dst, row_ids)) {
    case pointer_residency::host_and_device:
    case pointer_residency::device_only: {
      // one thread per destination element
      IdxT block_dim = 128;
      IdxT grid_dim  = raft::ceildiv(n_rows * n_cols, block_dim);
      copy_selected_kernel<T, S>
        <<<grid_dim, block_dim, 0, stream>>>(n_rows, n_cols, src, row_ids, ld_src, dst, ld_dst);
    } break;
    case pointer_residency::host_only: {
      // sync first so prior device work on these buffers has completed
      stream.synchronize();
      for (IdxT i_dst = 0; i_dst < n_rows; i_dst++) {
        auto i_src = static_cast<IdxT>(row_ids[i_dst]);
        for (IdxT j = 0; j < n_cols; j++) {
          dst[ld_dst * i_dst + j] = mapping<T>{}(src[ld_src * i_src + j]);
        }
      }
      // NOTE(review): this second synchronize looks redundant (the loop above
      // is pure host work) — confirm before removing
      stream.synchronize();
    } break;
    default: RAFT_FAIL("All pointers must reside on the same side, host or device.");
  }
}
/**
 * A batch input iterator over the data source.
 * Given an input pointer, it decides whether the current device has the access to the data and
 * gives it back to the user in batches. Three scenarios are possible:
 *
 *   1. if `source == nullptr`: then `batch.data() == nullptr`
 *   2. if `source` is accessible from the device, `batch.data()` points directly at the source at
 *      the proper offsets on each iteration.
 *   3. if `source` is not accessible from the device, `batch.data()` points to an intermediate
 *      buffer; the corresponding data is copied in the given `stream` on every iterator
 *      dereference (i.e. batches can be skipped). Dereferencing the same batch two times in a row
 *      does not force the copy.
 *
 * In all three scenarios, the number of iterations, batch offsets and sizes are the same.
 *
 * The iterator can be reused. If the number of iterations is one, at most one copy will ever be
 * invoked (i.e. small datasets are not reloaded multiple times).
 */
template <typename T>
struct batch_load_iterator {
  using size_type = size_t;
  /** A single batch of data residing in device memory. */
  struct batch {
    /** Logical width of a single row in a batch, in elements of type `T`. */
    [[nodiscard]] auto row_width() const -> size_type { return row_width_; }
    /** Logical offset of the batch, in rows (`row_width()`) */
    [[nodiscard]] auto offset() const -> size_type { return pos_.value_or(0) * batch_size_; }
    /** Logical size of the batch, in rows (`row_width()`) */
    [[nodiscard]] auto size() const -> size_type { return batch_len_; }
    /** Device-accessible pointer to the batch data. */
    [[nodiscard]] auto data() const -> const T* { return const_cast<const T*>(dev_ptr_); }
    /** Whether this batch copies the data (i.e. the source is inaccessible from the device). */
    [[nodiscard]] auto does_copy() const -> bool { return needs_copy_; }

   private:
    batch(const T* source,
          size_type n_rows,
          size_type row_width,
          size_type batch_size,
          rmm::cuda_stream_view stream,
          rmm::mr::device_memory_resource* mr)
      // NB: initializers listed in member declaration order (the order in
      // which they actually run); batch_len_ is zeroed so size() is valid
      // even before the first load().
      : stream_(stream),
        buf_(0, stream, mr),
        source_(source),
        n_rows_(n_rows),
        row_width_(row_width),
        batch_size_(std::min(batch_size, n_rows)),
        n_iters_(raft::div_rounding_up_safe(n_rows, batch_size)),
        needs_copy_(false),
        pos_(std::nullopt),
        batch_len_(0),
        dev_ptr_(nullptr)
    {
      if (source_ == nullptr) { return; }
      cudaPointerAttributes attr;
      RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, source_));
      // Device-accessible source: point straight at it. Otherwise stage
      // batches through an intermediate device buffer.
      dev_ptr_ = reinterpret_cast<T*>(attr.devicePointer);
      if (dev_ptr_ == nullptr) {
        buf_.resize(row_width_ * batch_size_, stream);
        dev_ptr_    = buf_.data();
        needs_copy_ = true;
      }
    }
    rmm::cuda_stream_view stream_;  // stream used for the staging copies
    rmm::device_uvector<T> buf_;    // staging buffer (allocated only if needed)
    const T* source_;               // original input (host, device, or nullptr)
    size_type n_rows_;              // total input size in logical rows
    size_type row_width_;           // elements of T per logical row
    size_type batch_size_;          // rows per batch (clamped to n_rows_)
    size_type n_iters_;             // total number of batches
    bool needs_copy_;               // whether dereferencing stages a copy
    std::optional<size_type> pos_;  // currently loaded batch index, if any
    size_type batch_len_;           // rows in the currently loaded batch
    T* dev_ptr_;                    // device-accessible pointer to the batch
    friend class batch_load_iterator<T>;
    /**
     * Changes the state of the batch to point at the `pos` index.
     * If necessary, copies the data from the source in the registered stream.
     */
    void load(const size_type& pos)
    {
      // No-op if the data is already loaded, or it's the end of the input.
      if (pos == pos_ || pos >= n_iters_) { return; }
      pos_.emplace(pos);
      batch_len_ = std::min(batch_size_, n_rows_ - std::min(offset(), n_rows_));
      if (source_ == nullptr) { return; }
      if (needs_copy_) {
        if (size() > 0) {
          RAFT_LOG_TRACE("batch_load_iterator::copy(offset = %zu, size = %zu, row_width = %zu)",
                         size_t(offset()),
                         size_t(size()),
                         size_t(row_width()));
          // NB: qualified raft::copy — ADL cannot find it from this namespace.
          raft::copy(dev_ptr_, source_ + offset() * row_width(), size() * row_width(), stream_);
        }
      } else {
        // device-accessible source: just advance the pointer, no copy
        dev_ptr_ = const_cast<T*>(source_) + offset() * row_width();
      }
    }
  };
  using value_type = batch;
  using reference  = const value_type&;
  using pointer    = const value_type*;
  /**
   * Create a batch iterator over the data `source`.
   *
   * For convenience, the data `source` is read in logical units of size `row_width`; batch sizes
   * and offsets are calculated in logical rows. Hence, can interpret the data as a contiguous
   * row-major matrix of size [n_rows, row_width], and the batches are the sub-matrices of size
   * [x<=batch_size, n_rows].
   *
   * @param source the input data -- host, device, or nullptr.
   * @param n_rows the size of the input in logical rows.
   * @param row_width the size of the logical row in the elements of type `T`.
   * @param batch_size the desired size of the batch.
   * @param stream the ordering for the host->device copies, if applicable.
   * @param mr a custom memory resource for the intermediate buffer, if applicable.
   */
  batch_load_iterator(const T* source,
                      size_type n_rows,
                      size_type row_width,
                      size_type batch_size,
                      rmm::cuda_stream_view stream,
                      rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
    : cur_batch_(new batch(source, n_rows, row_width, batch_size, stream, mr)), cur_pos_(0)
  {
  }
  /**
   * Whether this iterator copies the data on every iteration
   * (i.e. the source is inaccessible from the device).
   */
  [[nodiscard]] auto does_copy() const -> bool { return cur_batch_->does_copy(); }
  /** Reset the iterator position to `begin()` */
  void reset() { cur_pos_ = 0; }
  /** Reset the iterator position to `end()` */
  void reset_to_end() { cur_pos_ = cur_batch_->n_iters_; }
  [[nodiscard]] auto begin() const -> const batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    x.reset();
    return x;
  }
  [[nodiscard]] auto end() const -> const batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    x.reset_to_end();
    return x;
  }
  // Dereferencing lazily loads (and, if needed, copies) the current batch.
  [[nodiscard]] auto operator*() const -> reference
  {
    cur_batch_->load(cur_pos_);
    return *cur_batch_;
  }
  [[nodiscard]] auto operator->() const -> pointer
  {
    cur_batch_->load(cur_pos_);
    return cur_batch_.get();
  }
  // NB: iterators compare equal only when they share the same batch object
  // (copies made via begin()/end() share it) and the same position.
  friend auto operator==(const batch_load_iterator<T>& x, const batch_load_iterator<T>& y) -> bool
  {
    return x.cur_batch_ == y.cur_batch_ && x.cur_pos_ == y.cur_pos_;
  };
  friend auto operator!=(const batch_load_iterator<T>& x, const batch_load_iterator<T>& y) -> bool
  {
    return x.cur_batch_ != y.cur_batch_ || x.cur_pos_ != y.cur_pos_;
  };
  auto operator++() -> batch_load_iterator<T>&
  {
    ++cur_pos_;
    return *this;
  }
  auto operator++(int) -> batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    ++cur_pos_;
    return x;
  }
  auto operator--() -> batch_load_iterator<T>&
  {
    --cur_pos_;
    return *this;
  }
  auto operator--(int) -> batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    --cur_pos_;
    return x;
  }

 private:
  std::shared_ptr<value_type> cur_batch_;
  size_type cur_pos_;
};
} // namespace cuvs::spatial::knn::detail::utils
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include "../ball_cover_types.hpp"
#include "ball_cover/common.cuh"
#include "ball_cover/registers.cuh"
#include "haversine_distance.cuh"
#include <cstdint>
#include <limits.h>
#include <raft/util/cuda_utils.cuh>
#include <cuvs/neighbors/detail/faiss_select/key_value_block_select.cuh>
#include <cuvs/neighbors/brute_force.cuh>
#include <raft/matrix/copy.cuh>
#include <raft/random/rng.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Given a set of points in row-major order which are to be
 * used as a set of index points, uniformly samples a subset
 * of points to be used as landmarks.
 *
 * @tparam value_idx
 * @tparam value_t
 * @param handle raft resources
 * @param[inout] index ball cover index; its landmark matrix R is filled with
 *               the sampled points
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void sample_landmarks(raft::resources const& handle,
                      BallCoverIndex<value_idx, value_t, value_int>& index)
{
  rmm::device_uvector<value_idx> R_1nn_cols2(index.n_landmarks, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> R_1nn_ones(index.m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_idx> R_indices(index.n_landmarks, resource::get_cuda_stream(handle));
  // candidate ids 0..m-1 with uniform (all-ones) sampling weights
  thrust::sequence(raft::resource::get_thrust_policy(handle),
                   index.get_R_1nn_cols().data_handle(),
                   index.get_R_1nn_cols().data_handle() + index.m,
                   (value_idx)0);
  // typed fill values: R_1nn_ones is value_t, R_indices is an integer vector
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_1nn_ones.data(),
               R_1nn_ones.data() + R_1nn_ones.size(),
               (value_t)1);
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_indices.data(),
               R_indices.data() + R_indices.size(),
               (value_idx)0);
  /**
   * 1. Randomly sample sqrt(n) points from X
   */
  raft::random::RngState rng_state(12345);
  raft::random::sampleWithoutReplacement(handle,
                                         rng_state,
                                         R_indices.data(),
                                         R_1nn_cols2.data(),
                                         index.get_R_1nn_cols().data_handle(),
                                         R_1nn_ones.data(),
                                         (value_idx)index.n_landmarks,
                                         (value_idx)index.m);
  // index.get_X() returns the wrong indextype (uint32_t where we need value_idx), so need to
  // create new device_matrix_view here
  auto x = index.get_X();
  auto r = index.get_R();
  // gather the sampled rows of X into the landmark matrix R
  raft::matrix::copy_rows<value_t, value_idx>(
    handle,
    make_device_matrix_view<const value_t, value_idx>(x.data_handle(), x.extent(0), x.extent(1)),
    make_device_matrix_view<value_t, value_idx>(r.data_handle(), r.extent(0), r.extent(1)),
    make_device_vector_view(R_1nn_cols2.data(), index.n_landmarks));
}
/**
 * Constructs a 1-nn index mapping each landmark to their closest points.
 * @tparam value_idx
 * @tparam value_t
 * @param handle
 * @param R_knn_inds_ptr knn landmark indices per point [m * k]; column 0 holds
 *        the closest landmark of each point
 * @param R_knn_dists_ptr knn landmark distances per point [m * k], same layout
 * @param k stride between consecutive points' knn rows
 * @param[inout] index 1-nn columns/distances and the CSR indptr are written
 *        into the index
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void construct_landmark_1nn(raft::resources const& handle,
                            const value_idx* R_knn_inds_ptr,
                            const value_t* R_knn_dists_ptr,
                            value_int k,
                            BallCoverIndex<value_idx, value_t, value_int>& index)
{
  rmm::device_uvector<value_idx> R_1nn_inds(index.m, resource::get_cuda_stream(handle));
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_1nn_inds.data(),
               R_1nn_inds.data() + index.m,
               std::numeric_limits<value_idx>::max());
  value_idx* R_1nn_inds_ptr = R_1nn_inds.data();
  value_t* R_1nn_dists_ptr = index.get_R_1nn_dists().data_handle();
  // extract each point's single closest landmark (column 0 of the knn results)
  auto idxs = thrust::make_counting_iterator<value_idx>(0);
  thrust::for_each(
    resource::get_thrust_policy(handle), idxs, idxs + index.m, [=] __device__(value_idx i) {
      R_1nn_inds_ptr[i] = R_knn_inds_ptr[i * k];
      R_1nn_dists_ptr[i] = R_knn_dists_ptr[i * k];
    });
  auto keys = thrust::make_zip_iterator(
    thrust::make_tuple(R_1nn_inds.data(), index.get_R_1nn_dists().data_handle()));
  // group neighborhoods for each reference landmark and sort each group by distance
  thrust::sort_by_key(raft::resource::get_thrust_policy(handle),
                      keys,
                      keys + index.m,
                      index.get_R_1nn_cols().data_handle(),
                      NNComp());
  // convert to CSR for fast lookup
  raft::sparse::convert::sorted_coo_to_csr(R_1nn_inds.data(),
                                           index.m,
                                           index.get_R_indptr().data_handle(),
                                           index.n_landmarks + 1,
                                           resource::get_cuda_stream(handle));
}
/**
 * Computes the k closest landmarks to a set of query points.
 * @tparam value_idx
 * @tparam value_t
 * @tparam value_int
 * @param handle
 * @param index trained ball cover index holding the landmark matrix R
 * @param query_pts query points [n_query_pts x index dim]
 * @param n_query_pts
 * @param k number of closest landmarks to retrieve per query
 * @param[out] R_knn_inds landmark indices [n_query_pts x k]
 * @param[out] R_knn_dists landmark distances [n_query_pts x k]
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void k_closest_landmarks(raft::resources const& handle,
                         const BallCoverIndex<value_idx, value_t, value_int>& index,
                         const value_t* query_pts,
                         value_int n_query_pts,
                         value_int k,
                         value_idx* R_knn_inds,
                         value_t* R_knn_dists)
{
  // brute-force knn of the queries against the (small) landmark set
  std::vector<raft::device_matrix_view<const value_t, value_int>> inputs = {index.get_R()};
  cuvs::neighbors::brute_force::knn<value_idx, value_t, value_int>(
    handle,
    inputs,
    make_device_matrix_view(query_pts, n_query_pts, inputs[0].extent(1)),
    make_device_matrix_view(R_knn_inds, n_query_pts, k),
    make_device_matrix_view(R_knn_dists, n_query_pts, k),
    index.get_metric());
}
/**
 * Uses the sorted data points in the 1-nn landmark index to compute
 * an array of radii for each landmark.
 * @tparam value_idx
 * @tparam value_t
 * @param handle
 * @param index ball cover index with a populated, distance-sorted 1-nn CSR
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void compute_landmark_radii(raft::resources const& handle,
                            BallCoverIndex<value_idx, value_t, value_int>& index)
{
  const value_idx* indptr       = index.get_R_indptr().data_handle();
  const value_t* sorted_dists   = index.get_R_1nn_dists().data_handle();
  value_t* radii                = index.get_R_radius().data_handle();
  auto landmark_ids             = thrust::make_counting_iterator<value_idx>(0);
  // Each landmark's CSR row is sorted by distance, so its last entry is the
  // farthest member point; that distance is the landmark's radius.
  thrust::for_each(raft::resource::get_thrust_policy(handle),
                   landmark_ids,
                   landmark_ids + index.n_landmarks,
                   [=] __device__(value_idx landmark) {
                     value_idx last_row_idx = indptr[landmark + 1] - 1;
                     radii[landmark]        = sorted_dists[last_row_idx];
                   });
}
/**
 * 4. Perform k-select over original KNN, using L_r to filter distances
 *
 * a. Map 1 row to each warp/block
 * b. Add closest k R points to heap
 * c. Iterate through batches of R, having each thread in the warp load a set
 * of distances y from R (only if d(q, r) < 3 * distance to closest r) and
 * marking the distance to be computed between x, y only
 * if knn[k].distance >= d(x_i, R_k) + d(R_k, y)
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          typename dist_func>
void perform_rbc_query(raft::resources const& handle,
                       const BallCoverIndex<value_idx, value_t, value_int>& index,
                       const value_t* query,
                       value_int n_query_pts,
                       std::uint32_t k,
                       const value_idx* R_knn_inds,
                       const value_t* R_knn_dists,
                       dist_func dfunc,
                       value_idx* inds,
                       value_t* dists,
                       value_int* dists_counter,
                       value_int* post_dists_counter,
                       float weight = 1.0,
                       bool perform_post_filtering = true)
{
  // initialize output inds and dists
  thrust::fill(raft::resource::get_thrust_policy(handle),
               inds,
               inds + (k * n_query_pts),
               std::numeric_limits<value_idx>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               dists,
               dists + (k * n_query_pts),
               std::numeric_limits<value_t>::max());
  // dimensionality is a compile-time parameter of the passes, hence the
  // duplicated dispatch for 2D and 3D inputs
  if (index.n == 2) {
    // Compute nearest k for each neighborhood in each closest R
    rbc_low_dim_pass_one<value_idx, value_t, value_int, 2>(handle,
                                                           index,
                                                           query,
                                                           n_query_pts,
                                                           k,
                                                           R_knn_inds,
                                                           R_knn_dists,
                                                           dfunc,
                                                           inds,
                                                           dists,
                                                           weight,
                                                           dists_counter);
    if (perform_post_filtering) {
      // second pass tightens the result using the triangle inequality
      rbc_low_dim_pass_two<value_idx, value_t, value_int, 2>(handle,
                                                             index,
                                                             query,
                                                             n_query_pts,
                                                             k,
                                                             R_knn_inds,
                                                             R_knn_dists,
                                                             dfunc,
                                                             inds,
                                                             dists,
                                                             weight,
                                                             post_dists_counter);
    }
  } else if (index.n == 3) {
    // Compute nearest k for each neighborhood in each closest R
    rbc_low_dim_pass_one<value_idx, value_t, value_int, 3>(handle,
                                                           index,
                                                           query,
                                                           n_query_pts,
                                                           k,
                                                           R_knn_inds,
                                                           R_knn_dists,
                                                           dfunc,
                                                           inds,
                                                           dists,
                                                           weight,
                                                           dists_counter);
    if (perform_post_filtering) {
      // second pass tightens the result using the triangle inequality
      rbc_low_dim_pass_two<value_idx, value_t, value_int, 3>(handle,
                                                             index,
                                                             query,
                                                             n_query_pts,
                                                             k,
                                                             R_knn_inds,
                                                             R_knn_dists,
                                                             dfunc,
                                                             inds,
                                                             dists,
                                                             weight,
                                                             post_dists_counter);
    }
  }
}
/**
 * Similar to a ball tree, the random ball cover algorithm
 * uses the triangle inequality to prune distance computations
 * in any metric space with a guarantee of sqrt(n) * c^{3/2}
 * where `c` is an expansion constant based on the distance
 * metric.
 *
 * Builds the index: samples landmarks, assigns each point to its
 * closest landmark (1-nn), constructs the landmark CSR structure,
 * and computes per-landmark radii used for pruning at query time.
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_build_index(raft::resources const& handle,
                     BallCoverIndex<value_idx, value_t, value_int>& index,
                     distance_func dfunc)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(!index.is_index_trained(), "index cannot be previously trained");
  // NOTE: fully qualified raft::resource:: for consistency with the rest of this file
  // (was an unqualified `resource::` call).
  rmm::device_uvector<value_idx> R_knn_inds(index.m, raft::resource::get_cuda_stream(handle));
  // Initialize the uvectors with "unset" sentinels
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_knn_inds.begin(),
               R_knn_inds.end(),
               std::numeric_limits<value_idx>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               index.get_R_closest_landmark_dists().data_handle(),
               index.get_R_closest_landmark_dists().data_handle() + index.m,
               std::numeric_limits<value_t>::max());
  /**
   * 1. Randomly sample sqrt(n) points from X
   */
  sample_landmarks<value_idx, value_t>(handle, index);
  /**
   * 2. Perform knn = bfknn(X, R, k) with k = 1 (closest landmark per point)
   */
  value_int k = 1;
  k_closest_landmarks(handle,
                      index,
                      index.get_X().data_handle(),
                      index.m,
                      k,
                      R_knn_inds.data(),
                      index.get_R_closest_landmark_dists().data_handle());
  /**
   * 3. Create L_r = knn[:,0].T (CSR)
   *
   * Slice closest neighboring R
   * Secondary sort by (R_knn_inds, R_knn_dists)
   */
  construct_landmark_1nn(
    handle, R_knn_inds.data(), index.get_R_closest_landmark_dists().data_handle(), k, index);
  /**
   * Compute radius of each R for filtering: p(q, r) <= p(q, q_r) + radius(r)
   */
  compute_landmark_radii(handle, index);
}
/**
 * Performs an all neighbors knn query (e.g. index == query).
 * Trains the index (landmark sampling + CSR construction) and then
 * runs the ball-cover query for every point of X against itself.
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_all_knn_query(raft::resources const& handle,
                       BallCoverIndex<value_idx, value_t, value_int>& index,
                       value_int k,
                       value_idx* inds,
                       value_t* dists,
                       distance_func dfunc,
                       // approximate nn options
                       bool perform_post_filtering = true,
                       float weight = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(index.n_landmarks >= k, "number of landmark samples must be >= k");
  ASSERT(!index.is_index_trained(), "index cannot be previously trained");
  rmm::device_uvector<value_idx> R_knn_inds(k * index.m, raft::resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> R_knn_dists(k * index.m, raft::resource::get_cuda_stream(handle));
  // Initialize the uvectors with "unset" sentinels
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_knn_inds.begin(),
               R_knn_inds.end(),
               std::numeric_limits<value_idx>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               R_knn_dists.begin(),
               R_knn_dists.end(),
               std::numeric_limits<value_t>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               inds,
               inds + (k * index.m),
               std::numeric_limits<value_idx>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               dists,
               dists + (k * index.m),
               std::numeric_limits<value_t>::max());
  // For debugging / verification. Remove before releasing
  rmm::device_uvector<value_int> dists_counter(index.m, raft::resource::get_cuda_stream(handle));
  rmm::device_uvector<value_int> post_dists_counter(index.m,
                                                    raft::resource::get_cuda_stream(handle));
  // Zero the counters before kernels accumulate into them — keeps behavior
  // consistent with rbc_knn_query, which zero-fills both counters.
  thrust::fill(raft::resource::get_thrust_policy(handle),
               dists_counter.data(),
               dists_counter.data() + dists_counter.size(),
               0);
  thrust::fill(raft::resource::get_thrust_policy(handle),
               post_dists_counter.data(),
               post_dists_counter.data() + post_dists_counter.size(),
               0);
  sample_landmarks<value_idx, value_t>(handle, index);
  k_closest_landmarks(
    handle, index, index.get_X().data_handle(), index.m, k, R_knn_inds.data(), R_knn_dists.data());
  construct_landmark_1nn(handle, R_knn_inds.data(), R_knn_dists.data(), k, index);
  compute_landmark_radii(handle, index);
  perform_rbc_query(handle,
                    index,
                    index.get_X().data_handle(),
                    index.m,
                    k,
                    R_knn_inds.data(),
                    R_knn_dists.data(),
                    dfunc,
                    inds,
                    dists,
                    dists_counter.data(),
                    post_dists_counter.data(),
                    weight,
                    perform_post_filtering);
}
/**
 * Performs a knn query against an index. This assumes the index has
 * already been built.
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_knn_query(raft::resources const& handle,
                   const BallCoverIndex<value_idx, value_t, value_int>& index,
                   value_int k,
                   const value_t* query,
                   value_int n_query_pts,
                   value_idx* inds,
                   value_t* dists,
                   distance_func dfunc,
                   // approximate nn options
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(index.n_landmarks >= k, "number of landmark samples must be >= k");
  ASSERT(index.is_index_trained(), "index must be previously trained");

  auto stream      = raft::resource::get_cuda_stream(handle);
  auto policy      = raft::resource::get_thrust_policy(handle);
  const auto n_out = k * n_query_pts;

  // Scratch buffers holding each query point's k closest landmarks.
  rmm::device_uvector<value_idx> R_knn_inds(n_out, stream);
  rmm::device_uvector<value_t> R_knn_dists(n_out, stream);
  thrust::fill(
    policy, R_knn_inds.begin(), R_knn_inds.end(), std::numeric_limits<value_idx>::max());
  thrust::fill(
    policy, R_knn_dists.begin(), R_knn_dists.end(), std::numeric_limits<value_t>::max());

  // Final outputs start out as "no neighbor found" sentinels.
  thrust::fill(policy, inds, inds + n_out, std::numeric_limits<value_idx>::max());
  thrust::fill(policy, dists, dists + n_out, std::numeric_limits<value_t>::max());

  k_closest_landmarks(handle, index, query, n_query_pts, k, R_knn_inds.data(), R_knn_dists.data());

  // For debugging / verification. Remove before releasing
  rmm::device_uvector<value_int> dists_counter(index.m, stream);
  rmm::device_uvector<value_int> post_dists_counter(index.m, stream);
  thrust::fill(policy, dists_counter.data(), dists_counter.data() + dists_counter.size(), 0);
  thrust::fill(
    policy, post_dists_counter.data(), post_dists_counter.data() + post_dists_counter.size(), 0);

  perform_rbc_query(handle,
                    index,
                    query,
                    n_query_pts,
                    k,
                    R_knn_inds.data(),
                    R_knn_dists.data(),
                    dfunc,
                    inds,
                    dists,
                    dists_counter.data(),
                    post_dists_counter.data(),
                    weight,
                    perform_post_filtering);
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ann_quantized.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../ann_common.h"
#include "../ivf_flat.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include "processing.cuh"
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ivf_pq.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/core/device_mdspan.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/transform_iterator.h>
namespace cuvs::spatial::knn::detail {
/**
 * Builds an approximate-nn index (IVF-Flat or IVF-PQ) from legacy
 * knnIndexParam-style parameters.
 *
 * For float inputs with cosine/correlation metrics, the data is
 * preprocessed in-place so the metric reduces to inner product, then
 * reverted after the index is built.
 *
 * @param handle raft resources
 * @param index output legacy index wrapper; metric/metricArg/nprobe are set here
 * @param params legacy parameter struct; concrete type selects IVF-Flat vs IVF-PQ
 * @param metric distance metric
 * @param metricArg metric argument (e.g. p for Minkowski)
 * @param index_array device pointer to the n x D training data (modified in-place
 *                    during preprocessing for float cosine/correlation, then reverted)
 * @param n number of rows
 * @param D number of columns
 */
template <typename T = float, typename IntType = int>
void approx_knn_build_index(raft::resources const& handle,
                            knnIndex* index,
                            knnIndexParam* params,
                            cuvs::distance::DistanceType metric,
                            float metricArg,
                            T* index_array,
                            IntType n,
                            IntType D)
{
  auto stream      = resource::get_cuda_stream(handle);
  index->metric    = metric;
  index->metricArg = metricArg;
  // Cache the dynamic_cast result instead of performing the cast twice.
  if (auto* ivf_pams = dynamic_cast<const IVFParam*>(params); ivf_pams != nullptr) {
    index->nprobe = ivf_pams->nprobe;
  }
  auto ivf_ft_pams = dynamic_cast<IVFFlatParam*>(params);
  auto ivf_pq_pams = dynamic_cast<IVFPQParam*>(params);
  if constexpr (std::is_same_v<T, float>) {
    index->metric_processor = create_processor<float>(metric, n, D, 0, false, stream);
    // For cosine/correlation distance, the metric processor translates distance
    // to inner product via pre/post processing - pass the translated metric to
    // ANN index
    if (metric == cuvs::distance::DistanceType::CosineExpanded ||
        metric == cuvs::distance::DistanceType::CorrelationExpanded) {
      metric = index->metric = cuvs::distance::DistanceType::InnerProduct;
    }
  }
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->preprocess(index_array); }
  if (ivf_ft_pams) {
    auto new_params = from_legacy_index_params(*ivf_ft_pams, metric, metricArg);
    index->ivf_flat<T, int64_t>() = std::make_unique<const ivf_flat::index<T, int64_t>>(
      ivf_flat::build(handle, new_params, index_array, int64_t(n), D));
  } else if (ivf_pq_pams) {
    neighbors::ivf_pq::index_params params;
    params.metric     = metric;
    params.metric_arg = metricArg;
    params.n_lists    = ivf_pq_pams->nlist;
    params.pq_bits    = ivf_pq_pams->n_bits;
    params.pq_dim     = ivf_pq_pams->M;
    // TODO: handle ivf_pq_pams.usePrecomputedTables ?
    auto index_view = raft::make_device_matrix_view<const T, int64_t>(index_array, n, D);
    index->ivf_pq   = std::make_unique<const neighbors::ivf_pq::index<int64_t>>(
      neighbors::ivf_pq::build(handle, params, index_view));
  } else {
    RAFT_FAIL("Unrecognized index type.");
  }
  // Undo the in-place metric preprocessing so the caller's data is unchanged.
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->revert(index_array); }
}
/**
 * Searches a previously built approximate-nn index (IVF-Flat or IVF-PQ).
 *
 * For float queries, the metric processor may modify the query array
 * in-place before the search and revert it afterwards, and distances are
 * post-processed to report values in the original metric.
 *
 * @param handle raft resources
 * @param distances [out] device array of size n * k
 * @param indices [out] device array of size n * k
 * @param index trained legacy index wrapper
 * @param k number of neighbors to return
 * @param query_array device pointer to query points (temporarily modified
 *                    in-place for float cosine/correlation metrics)
 * @param n number of query points
 */
template <typename T = float, typename IntType = int>
void approx_knn_search(raft::resources const& handle,
                       float* distances,
                       int64_t* indices,
                       knnIndex* index,
                       IntType k,
                       T* query_array,
                       IntType n)
{
  if constexpr (std::is_same_v<T, float>) {
    index->metric_processor->preprocess(query_array);
    index->metric_processor->set_num_queries(k);
  }
  // search: dispatch on whichever sub-index was built
  if (index->ivf_flat<T, int64_t>()) {
    ivf_flat::search_params params;
    params.n_probes = index->nprobe;
    ivf_flat::search(
      handle, params, *(index->ivf_flat<T, int64_t>()), query_array, n, k, indices, distances);
  } else if (index->ivf_pq) {
    neighbors::ivf_pq::search_params params;
    params.n_probes = index->nprobe;
    auto query_view =
      raft::make_device_matrix_view<const T, uint32_t>(query_array, n, index->ivf_pq->dim());
    auto indices_view = raft::make_device_matrix_view<int64_t, uint32_t>(indices, n, k);
    auto distances_view = raft::make_device_matrix_view<float, uint32_t>(distances, n, k);
    neighbors::ivf_pq::search(
      handle, params, *index->ivf_pq, query_view, indices_view, distances_view);
  } else {
    RAFT_FAIL("The model is not trained");
  }
  // revert changes to the query
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->revert(query_array); }
  // perform post-processing to show the real distances:
  // sqrt-style metrics return squared/powered values from the search,
  // so raise them to the appropriate power element-wise.
  if (index->metric == cuvs::distance::DistanceType::L2SqrtExpanded ||
      index->metric == cuvs::distance::DistanceType::L2SqrtUnexpanded ||
      index->metric == cuvs::distance::DistanceType::LpUnexpanded) {
    /**
     * post-processing
     */
    float p = 0.5;  // standard l2 (square root of the squared distances)
    if (index->metric == cuvs::distance::DistanceType::LpUnexpanded) p = 1.0 / index->metricArg;
    raft::linalg::unaryOp<float>(
      distances, distances, n * k, raft::pow_const_op<float>(p), resource::get_cuda_stream(handle));
  }
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->postprocess(distances); }
}
} // namespace cuvs::spatial::knn::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/processing.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "processing.hpp"
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/mean_center.cuh>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace spatial {
namespace knn {
/**
 * Metric processor that expresses cosine distance through inner product:
 * preprocess() L2-normalizes each row in-place, revert() restores the
 * original scale, and postprocess() converts similarities to distances
 * as (1 - sim).
 */
template <typename math_t>
class CosineMetricProcessor : public MetricProcessor<math_t> {
 protected:
  int k_;                                // number of neighbors; sizes postprocess()
  bool row_major_;                       // layout of the processed matrix
  size_t n_rows_;
  size_t n_cols_;
  cudaStream_t stream_;
  rmm::device_uvector<math_t> colsums_;  // per-row L2 norms, kept for revert()
 public:
  CosineMetricProcessor(size_t n_rows, size_t n_cols, int k, bool row_major, cudaStream_t stream)
    : stream_(stream),
      colsums_(n_rows, stream),
      n_cols_(n_cols),
      n_rows_(n_rows),
      row_major_(row_major),
      k_(k)
  {
  }
  // Divide each row by its L2 norm so inner product == cosine similarity.
  // `override` added: these are the virtual hooks invoked through the
  // MetricProcessor base pointer (set_num_queries was already marked).
  void preprocess(math_t* data) override
  {
    raft::linalg::rowNorm(colsums_.data(),
                          data,
                          n_cols_,
                          n_rows_,
                          raft::linalg::NormType::L2Norm,
                          row_major_,
                          stream_,
                          raft::sqrt_op{});
    raft::linalg::matrixVectorOp(
      data, data, colsums_.data(), n_cols_, n_rows_, row_major_, false, raft::div_op{}, stream_);
  }
  // Undo preprocess() by multiplying the saved norms back in.
  void revert(math_t* data) override
  {
    raft::linalg::matrixVectorOp(
      data, data, colsums_.data(), n_cols_, n_rows_, row_major_, false, raft::mul_op{}, stream_);
  }
  // Convert inner-product similarities into distances: d = 1 - sim.
  void postprocess(math_t* data) override
  {
    raft::linalg::unaryOp(
      data, data, k_ * n_rows_, [] __device__(math_t in) { return 1 - in; }, stream_);
  }
  void set_num_queries(int k) override { k_ = k; }
  ~CosineMetricProcessor() = default;
};
/**
 * Metric processor for correlation distance. Correlation is cosine on
 * mean-centered rows: preprocess() subtracts each row's mean and then
 * applies the cosine normalization; revert() undoes both steps in
 * reverse order.
 */
template <typename math_t>
class CorrelationMetricProcessor : public CosineMetricProcessor<math_t> {
  using cosine = CosineMetricProcessor<math_t>;
 public:
  CorrelationMetricProcessor(
    size_t n_rows, size_t n_cols, int k, bool row_major, cudaStream_t stream)
    : CosineMetricProcessor<math_t>(n_rows, n_cols, k, row_major, stream), means_(n_rows, stream)
  {
  }
  // Mean-center each row, then L2-normalize (cosine preprocessing).
  // `override` added for consistency with the virtual base interface.
  void preprocess(math_t* data) override
  {
    math_t normalizer_const = 1.0 / (math_t)cosine::n_cols_;
    // Row sums ...
    raft::linalg::reduce(means_.data(),
                         data,
                         cosine::n_cols_,
                         cosine::n_rows_,
                         (math_t)0.0,
                         cosine::row_major_,
                         true,
                         cosine::stream_);
    // ... scaled by 1/n_cols to obtain row means.
    raft::linalg::unaryOp(means_.data(),
                          means_.data(),
                          cosine::n_rows_,
                          raft::mul_const_op<math_t>(normalizer_const),
                          cosine::stream_);
    raft::stats::meanCenter(data,
                            data,
                            means_.data(),
                            cosine::n_cols_,
                            cosine::n_rows_,
                            cosine::row_major_,
                            false,
                            cosine::stream_);
    CosineMetricProcessor<math_t>::preprocess(data);
  }
  // Undo cosine normalization, then add the row means back.
  void revert(math_t* data) override
  {
    CosineMetricProcessor<math_t>::revert(data);
    raft::stats::meanAdd(data,
                         data,
                         means_.data(),
                         cosine::n_cols_,
                         cosine::n_rows_,
                         cosine::row_major_,
                         false,
                         cosine::stream_);
  }
  // Same distance conversion as cosine: d = 1 - sim.
  void postprocess(math_t* data) override { CosineMetricProcessor<math_t>::postprocess(data); }
  ~CorrelationMetricProcessor() = default;
  rmm::device_uvector<math_t> means_;  // per-row means, kept for revert()
};
/**
 * No-op metric processor for metrics that need no data translation.
 * `override` added so signature drift against the base is caught at compile time.
 */
template <typename math_t>
class DefaultMetricProcessor : public MetricProcessor<math_t> {
 public:
  void preprocess(math_t* data) override {}
  void revert(math_t* data) override {}
  void postprocess(math_t* data) override {}
  ~DefaultMetricProcessor() = default;
};
/**
 * Factory for the metric processor matching a distance metric.
 * Cosine and correlation get dedicated processors; every other metric
 * gets the no-op default.
 *
 * @param metric distance metric to process for
 * @param n number of rows
 * @param D number of columns
 * @param k number of neighbors (used by postprocess sizing)
 * @param rowMajorQuery layout of the data the processor will touch
 * @param userStream stream all processor work is enqueued on
 * @return owning pointer to the processor (never null)
 */
template <typename math_t>
inline std::unique_ptr<MetricProcessor<math_t>> create_processor(
  distance::DistanceType metric, int n, int D, int k, bool rowMajorQuery, cudaStream_t userStream)
{
  // std::make_unique instead of raw `new` — exception-safe and idiomatic.
  switch (metric) {
    case distance::DistanceType::CosineExpanded:
      return std::make_unique<CosineMetricProcessor<math_t>>(n, D, k, rowMajorQuery, userStream);
    case distance::DistanceType::CorrelationExpanded:
      return std::make_unique<CorrelationMetricProcessor<math_t>>(
        n, D, k, rowMajorQuery, userStream);
    default: return std::make_unique<DefaultMetricProcessor<math_t>>();
  }
}
// Explicit instantiations of the processor templates for float —
// Currently only being used by floats
template class MetricProcessor<float>;
template class CosineMetricProcessor<float>;
template class CorrelationMetricProcessor<float>;
template class DefaultMetricProcessor<float>;
} // namespace knn
} // namespace spatial
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover/registers-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../../ball_cover_types.hpp" // BallCoverIndex
#include "registers_types.cuh" // DistFunc
#include <cstdint> // uint32_t
#include <raft/util/raft_explicit.hpp> //RAFT_EXPLICIT
#if defined(RAFT_EXPLICIT_INSTANTIATE_ONLY)
namespace cuvs::spatial::knn::detail {
template <typename value_idx,
typename value_t,
typename value_int = std::uint32_t,
int dims = 2,
typename dist_func>
void rbc_low_dim_pass_one(raft::resources const& handle,
const BallCoverIndex<value_idx, value_t, value_int>& index,
const value_t* query,
const value_int n_query_rows,
value_int k,
const value_idx* R_knn_inds,
const value_t* R_knn_dists,
dist_func& dfunc,
value_idx* inds,
value_t* dists,
float weight,
value_int* dists_counter) RAFT_EXPLICIT;
template <typename value_idx,
typename value_t,
typename value_int = std::uint32_t,
int dims = 2,
typename dist_func>
void rbc_low_dim_pass_two(raft::resources const& handle,
const BallCoverIndex<value_idx, value_t, value_int>& index,
const value_t* query,
const value_int n_query_rows,
value_int k,
const value_idx* R_knn_inds,
const value_t* R_knn_dists,
dist_func& dfunc,
value_idx* inds,
value_t* dists,
float weight,
value_int* post_dists_counter) RAFT_EXPLICIT;
}; // namespace cuvs::spatial::knn::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
extern template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two( \
Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func) \
extern template void \
cuvs::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
raft::resources const& handle, \
const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index, \
const Mvalue_t* query, \
const Mvalue_int n_query_rows, \
Mvalue_int k, \
const Mvalue_idx* R_knn_inds, \
const Mvalue_t* R_knn_dists, \
Mdist_func<Mvalue_t, Mvalue_int>& dfunc, \
Mvalue_idx* inds, \
Mvalue_t* dists, \
float weight, \
Mvalue_int* dists_counter)
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::DistFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::DistFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 2, cuvs::spatial::knn::detail::DistFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
std::int64_t, float, std::uint32_t, 3, cuvs::spatial::knn::detail::DistFunc);
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover/registers.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "registers-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "registers-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover/registers-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "common.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include "../../ball_cover_types.hpp"
#include "../haversine_distance.cuh"
#include "registers_types.cuh" // DistFunc
#include <cstdint>
#include <limits.h>
#include <cuvs/neighbors/detail/faiss_select/key_value_block_select.cuh>
#include <raft/util/cuda_utils.cuh>
#include <thrust/fill.h>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * To find exact neighbors, we perform a post-processing stage
 * that filters out those points which might have neighbors outside
 * of their k closest landmarks. This is usually a very small portion
 * of the total points.
 *
 * One thread block per query row (blockIdx.x indexes rows of X); each
 * block writes a per-query bitset over landmarks, where a set bit means
 * the landmark's neighborhood must still be searched. Requires
 * `bitset_size * sizeof(uint32_t)` bytes of dynamic shared memory.
 *
 * @tparam value_idx index type
 * @tparam value_t distance type
 * @tparam value_int integer type for sizes/counts
 * @tparam col_q max number of columns held in registers (>= n_cols)
 * @tparam tpb threads per block
 * @param X query points, row-major, n_cols columns
 * @param n_cols number of columns (<= col_q)
 * @param R_knn_inds per-query k closest landmark indices (k per row)
 * @param R_knn_dists per-query k closest landmark distances (k per row)
 * @param R_radius radius of each landmark's neighborhood
 * @param landmarks landmark coordinates, row-major
 * @param n_landmarks number of landmarks
 * @param bitset_size number of 32-bit words per query bitset
 * @param k number of closest landmarks per query
 * @param dfunc distance functor
 * @param output [out] per-query landmark bitsets (n_queries x bitset_size words)
 * @param weight pruning slack multiplier (1.0 = exact triangle-inequality bound)
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int col_q = 2,
          int tpb = 32,
          typename distance_func>
RAFT_KERNEL perform_post_filter_registers(const value_t* X,
                                          value_int n_cols,
                                          const value_idx* R_knn_inds,
                                          const value_t* R_knn_dists,
                                          const value_t* R_radius,
                                          const value_t* landmarks,
                                          int n_landmarks,
                                          value_int bitset_size,
                                          value_int k,
                                          distance_func dfunc,
                                          std::uint32_t* output,
                                          float weight = 1.0)
{
  // allocate array of size n_landmarks / 32 ints (dynamic shared memory)
  extern __shared__ std::uint32_t shared_mem[];
  // Start with all bits on (all landmarks are candidates)
  for (value_int i = threadIdx.x; i < bitset_size; i += tpb) {
    shared_mem[i] = 0xffffffff;
  }
  __syncthreads();
  // TODO: Would it be faster to use L1 for this?
  // Copy this block's query point into registers.
  value_t local_x_ptr[col_q];
  for (value_int j = 0; j < n_cols; ++j) {
    local_x_ptr[j] = X[n_cols * blockIdx.x + j];
  }
  // Distance to this query's k-th (farthest of the k closest) landmark.
  value_t closest_R_dist = R_knn_dists[blockIdx.x * k + (k - 1)];
  // zero out bits for closest k landmarks — those neighborhoods were
  // already searched in the first pass
  for (value_int j = threadIdx.x; j < k; j += tpb) {
    _zero_bit(shared_mem, (std::uint32_t)R_knn_inds[blockIdx.x * k + j]);
  }
  __syncthreads();
  // Discard any landmarks where p(q, r) > p(q, r_q) + radius(r)
  // That is, the distance between the current point and the current
  // landmark is > the distance between the current point and
  // its closest landmark + the radius of the current landmark.
  for (value_int l = threadIdx.x; l < n_landmarks; l += tpb) {
    // compute p(q, r)
    value_t dist = dfunc(local_x_ptr, landmarks + (n_cols * l), n_cols);
    if (dist > weight * (closest_R_dist + R_radius[l]) || dist > 3 * closest_R_dist) {
      _zero_bit(shared_mem, l);
    }
  }
  __syncthreads();
  /**
   * Output bitset: flush the shared-memory bitset to global memory.
   */
  for (value_int l = threadIdx.x; l < bitset_size; l += tpb) {
    output[blockIdx.x * bitset_size + l] = shared_mem[l];
  }
}
/**
 * Second (post-filtering) pass: refines each query's current knn results
 * by scanning the neighborhoods of all landmarks whose bit is still set
 * in the per-query bitset produced by perform_post_filter_registers.
 * One thread block per query row (blockIdx.x); the block cooperatively
 * maintains a k-select heap and rewrites knn_inds/knn_dists in place.
 *
 * @tparam value_idx index type
 * @tparam value_t distance type
 * @tparam value_int integer type for sizes/counts
 * @tparam bitset_type word type of the landmark bitset
 * @tparam warp_q number of registers to use per warp
 * @tparam thread_q number of registers to use within each thread
 * @tparam tpb number of threads per block
 * @tparam col_q max number of columns held in registers (>= n_cols)
 * @param X_index index (database) points, row-major
 * @param X query points, row-major
 * @param n_cols number of columns (<= col_q)
 * @param bitset per-query landmark bitsets (bitset_size words per query)
 * @param bitset_size number of words per query bitset
 * @param R_closest_landmark_dists distance from each index point to its closest landmark
 * @param R_indptr CSR row offsets of the landmark -> points mapping
 * @param R_1nn_inds CSR column indices (point ids per landmark)
 * @param R_1nn_dists CSR values (distance of each point to its landmark), sorted ascending
 * @param knn_inds [in/out] current knn indices (k per query), refined in place
 * @param knn_dists [in/out] current knn distances (k per query), refined in place
 * @param n_landmarks number of landmarks
 * @param k number of neighbors
 * @param dfunc distance functor
 * @param dist_counter debug counter (unused here)
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          typename bitset_type = std::uint32_t,
          typename dist_func,
          int warp_q = 32,
          int thread_q = 2,
          int tpb = 128,
          int col_q = 2>
RAFT_KERNEL compute_final_dists_registers(const value_t* X_index,
                                          const value_t* X,
                                          const value_int n_cols,
                                          bitset_type* bitset,
                                          value_int bitset_size,
                                          const value_t* R_closest_landmark_dists,
                                          const value_idx* R_indptr,
                                          const value_idx* R_1nn_inds,
                                          const value_t* R_1nn_dists,
                                          value_idx* knn_inds,
                                          value_t* knn_dists,
                                          value_int n_landmarks,
                                          value_int k,
                                          dist_func dfunc,
                                          value_int* dist_counter)
{
  static constexpr int kNumWarps = tpb / raft::WarpSize;
  // Shared scratch for the block-wide k-select reduction.
  __shared__ value_t shared_memK[kNumWarps * warp_q];
  __shared__ KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q];
  // Copy this block's query point into registers.
  const value_t* x_ptr = X + (n_cols * blockIdx.x);
  value_t local_x_ptr[col_q];
  for (value_int j = 0; j < n_cols; ++j) {
    local_x_ptr[j] = x_ptr[j];
  }
  using namespace cuvs::neighbors::detail::faiss_select;
  KeyValueBlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(),
    std::numeric_limits<value_t>::max(),
    -1,
    shared_memK,
    shared_memV,
    k);
  // Seed the heap with the current knn results; full warps first ...
  const value_int n_k = raft::Pow2<WarpSize>::roundDown(k);
  value_int i         = threadIdx.x;
  for (; i < n_k; i += tpb) {
    value_idx ind = knn_inds[blockIdx.x * k + i];
    heap.add(knn_dists[blockIdx.x * k + i], R_closest_landmark_dists[ind], ind);
  }
  // ... then the ragged tail (fewer than WarpSize entries).
  if (i < k) {
    value_idx ind = knn_inds[blockIdx.x * k + i];
    heap.addThreadQ(knn_dists[blockIdx.x * k + i], R_closest_landmark_dists[ind], ind);
  }
  heap.checkThreadQ();
  for (value_int cur_R_ind = 0; cur_R_ind < n_landmarks; ++cur_R_ind) {
    // if cur R overlaps cur point's closest R, it could be a
    // candidate
    if (_get_val(bitset + (blockIdx.x * bitset_size), cur_R_ind)) {
      value_idx R_start_offset = R_indptr[cur_R_ind];
      value_idx R_stop_offset  = R_indptr[cur_R_ind + 1];
      value_idx R_size         = R_stop_offset - R_start_offset;
      // Loop through R's neighborhood in parallel
      // Round R_size to the nearest warp threads so they can
      // all be computing in parallel.
      const value_int limit = raft::Pow2<WarpSize>::roundDown(R_size);
      i                     = threadIdx.x;
      for (; i < limit; i += tpb) {
        value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i];
        value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];
        // Triangle-inequality-style lower bound on d(x, candidate);
        // z == 0 when the bound degenerates (division by 0, nan/inf).
        value_t z = heap.warpKTopRDist == 0.00 ? 0.0
                                               : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                    abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                  heap.warpKTop * cur_candidate_dist) /
                                                   heap.warpKTopRDist;
        z = isnan(z) || isinf(z) ? 0.0 : z;
        // If lower bound on distance could possibly be in
        // the closest k neighbors, compute it and add to k-select
        value_t dist = std::numeric_limits<value_t>::max();
        if (z <= heap.warpKTop) {
          const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
          value_t local_y_ptr[col_q];
          for (value_int j = 0; j < n_cols; ++j) {
            local_y_ptr[j] = y_ptr[j];
          }
          dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        }
        heap.add(dist, cur_candidate_dist, cur_candidate_ind);
      }
      // second round guarantees to be only a single warp.
      if (i < R_size) {
        value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i];
        value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];
        value_t z                   = heap.warpKTopRDist == 0.00 ? 0.0
                                                                 : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                          abs(heap.warpKTopRDist - cur_candidate_dist) -
                                        heap.warpKTop * cur_candidate_dist) /
                                         heap.warpKTopRDist;
        z = isnan(z) || isinf(z) ? 0.0 : z;
        // If lower bound on distance could possibly be in
        // the closest k neighbors, compute it and add to k-select
        value_t dist = std::numeric_limits<value_t>::max();
        if (z <= heap.warpKTop) {
          const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
          value_t local_y_ptr[col_q];
          for (value_int j = 0; j < n_cols; ++j) {
            local_y_ptr[j] = y_ptr[j];
          }
          dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        }
        heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind);
      }
      heap.checkThreadQ();
    }
  }
  // Final block-wide merge, then write the refined results back out.
  heap.reduce();
  for (value_int i = threadIdx.x; i < k; i += tpb) {
    knn_dists[blockIdx.x * k + i] = shared_memK[i];
    knn_inds[blockIdx.x * k + i]  = shared_memV[i].value;
  }
}
/**
* Random ball cover kernel for n_dims == 2
* @tparam value_idx
* @tparam value_t
* @tparam warp_q
* @tparam thread_q
* @tparam tpb
* @tparam value_idx
* @tparam value_t
* @param R_knn_inds
* @param R_knn_dists
* @param m
* @param k
* @param R_indptr
* @param R_1nn_cols
* @param R_1nn_dists
*/
template <typename value_idx = std::int64_t,
          typename value_t,
          int warp_q = 32,
          int thread_q = 2,
          int tpb = 128,
          int col_q = 2,
          typename value_int = std::uint32_t,
          typename distance_func>
RAFT_KERNEL block_rbc_kernel_registers(const value_t* X_index,
                                       const value_t* X,
                                       value_int n_cols,  // n_cols should be 2 or 3 dims
                                       const value_idx* R_knn_inds,
                                       const value_t* R_knn_dists,
                                       value_int m,
                                       value_int k,
                                       const value_idx* R_indptr,
                                       const value_idx* R_1nn_cols,
                                       const value_t* R_1nn_dists,
                                       value_idx* out_inds,
                                       value_t* out_dists,
                                       value_int* dist_counter,
                                       const value_t* R_radius,
                                       distance_func dfunc,
                                       float weight = 1.0)
{
  static constexpr value_int kNumWarps = tpb / raft::WarpSize;

  // Scratch for the block-wide k-selection: keys are candidate distances,
  // values are (distance-to-landmark, point-index) pairs.
  __shared__ value_t shared_memK[kNumWarps * warp_q];
  __shared__ KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q];

  // TODO: Separate kernels for different widths:
  // 1. Very small (between 3 and 32) just use registers for columns of "blockIdx.x"
  // 2. Can fit comfortably in shared memory (32 to a few thousand?)
  // 3. Load each time individually.

  // One block per query row: cache the query's coordinates in registers.
  const value_t* x_ptr = X + (n_cols * blockIdx.x);

  // Use registers only for 2d or 3d
  // NOTE(review): assumes col_q >= n_cols, otherwise local_x_ptr overflows —
  // confirm the host-side dispatch (rbc_low_dim_pass_one) guarantees this.
  value_t local_x_ptr[col_q];
  for (value_int i = 0; i < n_cols; ++i) {
    local_x_ptr[i] = x_ptr[i];
  }

  // Each warp works on 1 R
  using namespace cuvs::neighbors::detail::faiss_select;
  KeyValueBlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(),
    std::numeric_limits<value_t>::max(),
    -1,
    shared_memK,
    shared_memV,
    k);

  // Distance from this query to its furthest (k-th) landmark.
  value_t min_R_dist = R_knn_dists[blockIdx.x * k + (k - 1)];

  value_int n_dists_computed = 0;

  /**
   * First add distances for k closest neighbors of R
   * to the heap
   */
  // Start iterating through elements of each set from closest R elements,
  // determining if the distance could even potentially be in the heap.
  for (value_int cur_k = 0; cur_k < k; ++cur_k) {
    // index and distance to current blockIdx.x's closest landmark
    value_t cur_R_dist  = R_knn_dists[blockIdx.x * k + cur_k];
    value_idx cur_R_ind = R_knn_inds[blockIdx.x * k + cur_k];

    // Equation (2) in Cayton's paper- prune out R's which are > 3 * p(q, r_q)
    if (cur_R_dist > weight * (min_R_dist + R_radius[cur_R_ind])) continue;
    // NOTE(review): this early return skips heap.reduce() and the output
    // writes below; presumably the landmark distances are sorted ascending so
    // no later landmark can pass either — confirm the outputs are otherwise
    // initialized or this branch cannot fire after useful work was done.
    if (cur_R_dist > 3 * min_R_dist) return;

    // The whole warp should iterate through the elements in the current R
    value_idx R_start_offset = R_indptr[cur_R_ind];
    value_idx R_stop_offset  = R_indptr[cur_R_ind + 1];

    value_idx R_size = R_stop_offset - R_start_offset;

    // Full block-stride passes first; the ragged tail is handled below so
    // that the warp-synchronous heap operations see uniform participation.
    value_int limit = raft::Pow2<WarpSize>::roundDown(R_size);
    value_int i     = threadIdx.x;
    for (; i < limit; i += tpb) {
      // Index and distance of current candidate's nearest landmark
      value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i];
      value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];

      // Take 2 landmarks l_1 and l_2 where l_1 is the furthest point in the heap
      // and l_2 is the current landmark R. s is the current data point and
      // t is the new candidate data point. We know that:
      // d(s, t) cannot possibly be any smaller than | d(s, l_1) - d(l_1, l_2) | * | d(l_1, l_2) -
      // d(l_2, t) | - d(s, l_1) * d(l_2, t)

      // Therefore, if d(s, t) >= d(s, l_1) from the computation above, we know that the distance to
      // the candidate point cannot possibly be in the nearest neighbors. However, if d(s, t) < d(s,
      // l_1) then we should compute the distance because it's possible it could be smaller.
      //
      value_t z = heap.warpKTopRDist == 0.00 ? 0.0
                                             : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                  abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                heap.warpKTop * cur_candidate_dist) /
                                                 heap.warpKTopRDist;
      // Collapse NaN/Inf (e.g. from the 0-denominator edge) to 0 so the
      // candidate is conservatively kept rather than dropped.
      z            = isnan(z) || isinf(z) ? 0.0 : z;
      value_t dist = std::numeric_limits<value_t>::max();
      if (z <= heap.warpKTop) {
        // Lower bound passed: pay for the exact distance computation.
        const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
        value_t local_y_ptr[col_q];
        for (value_int j = 0; j < n_cols; ++j) {
          local_y_ptr[j] = y_ptr[j];
        }
        dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        ++n_dists_computed;
      }

      heap.add(dist, cur_candidate_dist, cur_candidate_ind);
    }
    // Ragged tail: only some threads have a candidate, so use the per-thread
    // queue insert instead of the block-uniform add().
    if (i < R_size) {
      value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i];
      value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];
      value_t z                   = heap.warpKTopRDist == 0.0 ? 0.0
                                                              : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                                   abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                                 heap.warpKTop * cur_candidate_dist) /
                                                                  heap.warpKTopRDist;
      z            = isnan(z) || isinf(z) ? 0.0 : z;
      value_t dist = std::numeric_limits<value_t>::max();
      if (z <= heap.warpKTop) {
        const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
        value_t local_y_ptr[col_q];
        for (value_int j = 0; j < n_cols; ++j) {
          local_y_ptr[j] = y_ptr[j];
        }
        dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        ++n_dists_computed;
      }

      heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind);
    }

    heap.checkThreadQ();
  }

  // Merge per-warp queues and write the final k results for this query row.
  heap.reduce();

  for (int i = threadIdx.x; i < k; i += tpb) {
    out_dists[blockIdx.x * k + i] = shared_memK[i];
    out_inds[blockIdx.x * k + i]  = shared_memV[i].value;
  }
}
/**
 * Pass one of the low-dimensional (2-D / 3-D) random ball cover search:
 * dispatches block_rbc_kernel_registers with k-selection template parameters
 * (warp_q / thread_q / tpb) chosen by the requested k. One CUDA block is
 * launched per query row.
 *
 * @param handle raft handle providing the stream to launch on
 * @param index built ball cover index (landmarks, CSR groupings, radii)
 * @param query (device) query matrix, n_query_rows x index.n
 * @param n_query_rows number of query rows (grid size)
 * @param k number of neighbors to select (must be <= 1024; larger values are
 *          silently ignored by this dispatch)
 * @param R_knn_inds / R_knn_dists each query's k closest landmarks
 * @param dfunc distance functor passed through to the kernel
 * @param inds / dists (device, out) k neighbor indices / distances per query
 * @param weight pruning weight from Cayton's bound (1.0 = exact)
 * @param dists_counter (device) per-query count of exact distances computed
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims = 2,
          typename dist_func>
void rbc_low_dim_pass_one(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* dists_counter)
{
  if (k <= 32)
    block_rbc_kernel_registers<value_idx, value_t, 32, 2, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 64)
    // Fixed: this branch previously hard-coded col_q = 2 while every other
    // branch passes `dims`, which under-sizes the per-thread coordinate
    // registers for 3-D data.
    block_rbc_kernel_registers<value_idx, value_t, 64, 3, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 128)
    block_rbc_kernel_registers<value_idx, value_t, 128, 3, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 256)
    block_rbc_kernel_registers<value_idx, value_t, 256, 4, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 512)
    // Larger warp queues need more registers, so tpb drops to 64 here.
    block_rbc_kernel_registers<value_idx, value_t, 512, 8, 64, dims, value_int>
      <<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 1024)
    block_rbc_kernel_registers<value_idx, value_t, 1024, 8, 64, dims, value_int>
      <<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
}
/**
 * Pass two of the low-dimensional random ball cover search: builds a bitset
 * of landmarks that could still contain closer neighbors, then recomputes
 * final distances only for candidates in those landmarks.
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims = 2,
          typename dist_func>
void rbc_low_dim_pass_two(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* post_dists_counter)
{
  // One bit per landmark, rounded up to whole 32-bit words, per query row.
  const value_int bitset_size = ceil(index.n_landmarks / 32.0);

  rmm::device_uvector<std::uint32_t> bitset(bitset_size * n_query_rows,
                                            resource::get_cuda_stream(handle));
  // Clear the bitset before the filter kernel sets landmark bits.
  thrust::fill(
    resource::get_thrust_policy(handle), bitset.data(), bitset.data() + bitset.size(), 0);

  // Mark, per query row, which landmarks survive the radius/weight pruning.
  // Dynamic shared memory holds one bitset row per block.
  perform_post_filter_registers<value_idx, value_t, value_int, dims, 128>
    <<<n_query_rows, 128, bitset_size * sizeof(std::uint32_t), resource::get_cuda_stream(handle)>>>(
      query,
      index.n,
      R_knn_inds,
      R_knn_dists,
      index.get_R_radius().data_handle(),
      index.get_R().data_handle(),
      index.n_landmarks,
      bitset_size,
      k,
      dfunc,
      bitset.data(),
      weight);

  // Dispatch the final-distance kernel with k-selection template parameters
  // (warp_q / thread_q / tpb) chosen by the requested k, mirroring pass one.
  if (k <= 32)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  32,
                                  2,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 64)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  64,
                                  3,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 128)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  128,
                                  3,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 256)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  256,
                                  4,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 512)
    // Larger warp queues need more registers, so tpb drops to 64 from here on.
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  512,
                                  8,
                                  64,
                                  dims><<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
      index.get_X().data_handle(),
      query,
      index.n,
      bitset.data(),
      bitset_size,
      index.get_R_closest_landmark_dists().data_handle(),
      index.get_R_indptr().data_handle(),
      index.get_R_1nn_cols().data_handle(),
      index.get_R_1nn_dists().data_handle(),
      inds,
      dists,
      index.n_landmarks,
      k,
      dfunc,
      post_dists_counter);
  else if (k <= 1024)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  1024,
                                  8,
                                  64,
                                  dims><<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
      index.get_X().data_handle(),
      query,
      index.n,
      bitset.data(),
      bitset_size,
      index.get_R_closest_landmark_dists().data_handle(),
      index.get_R_indptr().data_handle(),
      index.get_R_1nn_cols().data_handle(),
      index.get_R_1nn_dists().data_handle(),
      inds,
      dists,
      index.n_landmarks,
      k,
      dfunc,
      post_dists_counter);
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover/common.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../haversine_distance.cuh"
#include "registers_types.cuh"
#include <cstdint>
#include <thrust/functional.h>
#include <thrust/tuple.h>
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Tuple comparator ordering samples primarily by their reference landmark id
 * and secondarily by the distance to their closest neighbor.
 */
struct NNComp {
  template <typename one, typename two>
  __host__ __device__ bool operator()(const one& lhs, const two& rhs)
  {
    const auto& landmark_l = thrust::get<0>(lhs);
    const auto& landmark_r = thrust::get<0>(rhs);

    // Primary key: each sample's reference landmark.
    if (landmark_l < landmark_r) { return true; }
    if (landmark_r < landmark_l) { return false; }

    // Tie-break: distance to the closest neighbor.
    return thrust::get<1>(lhs) < thrust::get<1>(rhs);
  }
};
/**
 * Zeros the bit at location h in a one-hot encoded 32-bit int array.
 *
 * Uses an atomicCAS retry loop so concurrent writers to the same 32-bit word
 * cannot lose each other's updates.
 *
 * @param arr device array of 32-bit words forming the bitset
 * @param h   global bit index to clear
 */
__device__ inline void _zero_bit(std::uint32_t* arr, std::uint32_t h)
{
  int bit = h % 32;
  int idx = h / 32;

  std::uint32_t assumed;
  std::uint32_t old = arr[idx];
  do {
    assumed = old;
    // Shift an unsigned 1: shifting a signed int literal by 31 is undefined
    // behavior, and the mask type should match the word type anyway.
    old = atomicCAS(arr + idx, assumed, assumed & ~(std::uint32_t{1} << bit));
  } while (assumed != old);
}
/**
 * Returns whether or not bit at location h is nonzero in a one-hot
 * encoded 32-bit int array.
 *
 * @param arr device array of 32-bit words forming the bitset
 * @param h   global bit index to test
 * @return true when the bit is set
 */
__device__ inline bool _get_val(std::uint32_t* arr, std::uint32_t h)
{
  int bit = h % 32;
  int idx = h / 32;
  // Unsigned literal avoids signed-shift undefined behavior when bit == 31.
  return (arr[idx] & (std::uint32_t{1} << bit)) > 0;
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/detail/ball_cover/registers_types.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../haversine_distance.cuh" // compute_haversine
#include <cstdint> // uint32_t
namespace cuvs {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Base distance functor for the ball cover kernels. The default
 * implementation is a sentinel that returns -1; concrete metrics override
 * operator().
 *
 * NOTE(review): virtual dispatch on the device only works if the object's
 * vtable pointer was set up in device code — a host-constructed instance
 * passed by pointer/reference would misbehave on the GPU. Confirm call sites
 * pass these functors by value (slicing to the concrete type) or construct
 * them device-side.
 */
template <typename value_t, typename value_int = std::uint32_t>
struct DistFunc {
  virtual __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                                 const value_t* b,
                                                                 const value_int n_dims)
  {
    return -1;
  };
};
/**
 * Haversine (great-circle) distance functor. Reads exactly two coordinates
 * from each point (a[0], a[1] and b[0], b[1]); the n_dims argument is
 * ignored, so this is only meaningful for 2-D lat/lon style data.
 */
template <typename value_t, typename value_int = std::uint32_t>
struct HaversineFunc : public DistFunc<value_t, value_int> {
  __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                         const value_t* b,
                                                         const value_int n_dims) override
  {
    return cuvs::spatial::knn::detail::compute_haversine(a[0], b[0], a[1], b[1]);
  }
};
/**
 * Euclidean (L2) distance functor over n_dims packed coordinates.
 */
template <typename value_t, typename value_int = std::uint32_t>
struct EuclideanFunc : public DistFunc<value_t, value_int> {
  __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                         const value_t* b,
                                                         const value_int n_dims) override
  {
    // Accumulate squared component differences, then take the square root.
    value_t acc = 0;
    for (value_int d = 0; d < n_dims; ++d) {
      const value_t delta = a[d] - b[d];
      acc += delta * delta;
    }
    return raft::sqrt(acc);
  }
};
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn | rapidsai_public_repos/cuvs/cpp/include/cuvs/spatial/knn/specializations/knn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/cluster_solvers.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CLUSTER_SOLVERS_H
#define __CLUSTER_SOLVERS_H
#pragma once
#include <cuvs/cluster/kmeans.cuh>
#include <raft/core/resource/thrust_policy.hpp>
#include <utility> // for std::pair
namespace cuvs {
namespace spectral {
using namespace matrix;
// aggregate of control params for Eigen Solver:
//
/** Control parameters for the k-means cluster solver. */
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct cluster_solver_config_t {
  size_type_t n_clusters;        // number of clusters to fit
  size_type_t maxIter;           // maximum k-means iterations
  value_type_t tol;              // convergence tolerance on the residual
  unsigned long long seed{123456};  // RNG seed for centroid initialization
};
/**
 * k-means cluster solver used by the spectral clustering pipeline: assigns
 * each embedded observation vector to one of config.n_clusters clusters.
 */
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct kmeans_solver_t {
  explicit kmeans_solver_t(
    cluster_solver_config_t<index_type_t, value_type_t, size_type_t> const& config)
    : config_(config)
  {
  }

  /**
   * Run k-means on the observation matrix.
   *
   * @param handle raft handle for managing expensive resources
   * @param n_obs_vecs number of observation vectors (rows)
   * @param dim embedding dimensionality (columns)
   * @param obs (device) row-major observation matrix, n_obs_vecs x dim
   * @param codes (device, out) cluster assignment for each observation
   * @return pair of (final residual, number of iterations run)
   */
  std::pair<value_type_t, index_type_t> solve(raft::resources const& handle,
                                              size_type_t n_obs_vecs,
                                              size_type_t dim,
                                              value_type_t const* __restrict__ obs,
                                              index_type_t* __restrict__ codes) const
  {
    RAFT_EXPECTS(obs != nullptr, "Null obs buffer.");
    RAFT_EXPECTS(codes != nullptr, "Null codes buffer.");
    value_type_t residual{};
    index_type_t iters{};

    // Translate this solver's config into cuvs k-means parameters.
    cuvs::cluster::KMeansParams km_params;
    km_params.n_clusters     = config_.n_clusters;
    km_params.tol            = config_.tol;
    km_params.max_iter       = config_.maxIter;
    km_params.rng_state.seed = config_.seed;

    auto X      = raft::make_device_matrix_view<const value_type_t>(obs, n_obs_vecs, dim);
    auto labels = raft::make_device_vector_view<index_type_t>(codes, n_obs_vecs);
    auto centroids =
      raft::make_device_matrix<value_type_t, index_type_t>(handle, config_.n_clusters, dim);

    // Uniform sample weights (all 1) — kmeans_fit_predict expects an
    // optional weight vector.
    auto weight = raft::make_device_vector<value_type_t, index_type_t>(handle, n_obs_vecs);
    thrust::fill(raft::resource::get_thrust_policy(handle),
                 weight.data_handle(),
                 weight.data_handle() + n_obs_vecs,
                 1);
    auto sw = std::make_optional((raft::device_vector_view<const value_type_t>)weight.view());
    cuvs::cluster::kmeans_fit_predict<value_type_t, index_type_t>(
      handle,
      km_params,
      X,
      sw,
      centroids.view(),
      labels,
      raft::make_host_scalar_view(&residual),
      raft::make_host_scalar_view(&iters));
    return std::make_pair(residual, iters);
  }

  // Read-only access to the solver configuration.
  auto const& get_config(void) const { return config_; }

 private:
  cluster_solver_config_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/eigen_solvers.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EIGEN_SOLVERS_H
#define __EIGEN_SOLVERS_H
#pragma once
#include <raft/sparse/solver/lanczos.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace cuvs {
namespace spectral {
// aggregate of control params for Eigen Solver:
//
/** Control parameters for the Lanczos eigensolver. */
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct eigen_solver_config_t {
  size_type_t n_eigVecs;   // number of eigenpairs to compute
  size_type_t maxIter;     // maximum Lanczos iterations
  size_type_t restartIter; // iterations between implicit restarts
  value_type_t tol;        // convergence tolerance
  bool reorthogonalize{false};  // whether to fully reorthogonalize the basis
  unsigned long long seed{
    1234567};  // CAVEAT: this default value is now common to all instances of using seed in
               // Lanczos; was not the case before: there were places where a default seed = 123456
               // was used; this may trigger slightly different # solver iterations
};
/**
 * Lanczos eigensolver wrapper used by the spectral clustering pipeline;
 * forwards to raft's sparse Lanczos implementation.
 */
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct lanczos_solver_t {
  explicit lanczos_solver_t(
    eigen_solver_config_t<index_type_t, value_type_t, size_type_t> const& config)
    : config_(config)
  {
  }

  /**
   * Compute the config.n_eigVecs smallest eigenpairs of sparse matrix A.
   *
   * @param handle raft handle for managing expensive resources
   * @param A sparse input matrix
   * @param eigVals (device, out) eigenvalue buffer, n_eigVecs entries
   * @param eigVecs (device, out) eigenvector buffer, n_eigVecs columns
   * @return number of solver iterations performed
   */
  index_type_t solve_smallest_eigenvectors(
    raft::resources const& handle,
    raft::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
    value_type_t* __restrict__ eigVals,
    value_type_t* __restrict__ eigVecs) const
  {
    RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
    RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");
    index_type_t iters{};
    // Fully qualified: inside cuvs::spectral an unqualified `sparse::solver`
    // cannot resolve to raft::sparse::solver (the namespace the included
    // lanczos header declares these functions in).
    raft::sparse::solver::computeSmallestEigenvectors(handle,
                                                      A,
                                                      config_.n_eigVecs,
                                                      config_.maxIter,
                                                      config_.restartIter,
                                                      config_.tol,
                                                      config_.reorthogonalize,
                                                      iters,
                                                      eigVals,
                                                      eigVecs,
                                                      config_.seed);
    return iters;
  }

  /**
   * Compute the config.n_eigVecs largest eigenpairs of sparse matrix A.
   * Same contract as solve_smallest_eigenvectors.
   */
  index_type_t solve_largest_eigenvectors(
    raft::resources const& handle,
    raft::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
    value_type_t* __restrict__ eigVals,
    value_type_t* __restrict__ eigVecs) const
  {
    RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
    RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");
    index_type_t iters{};
    raft::sparse::solver::computeLargestEigenvectors(handle,
                                                     A,
                                                     config_.n_eigVecs,
                                                     config_.maxIter,
                                                     config_.restartIter,
                                                     config_.tol,
                                                     config_.reorthogonalize,
                                                     iters,
                                                     eigVals,
                                                     eigVecs,
                                                     config_.seed);
    return iters;
  }

  // Read-only access to the solver configuration.
  auto const& get_config(void) const { return config_; }

 private:
  eigen_solver_config_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/matrix_wrappers.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/spectral/detail/matrix_wrappers.hpp>
// =========================================================
// Useful macros
// =========================================================
namespace cuvs {
namespace spectral {
namespace matrix {

using size_type = int;  // for now; TODO: move it in appropriate header

// specifies type of algorithm used
// for SpMv:
//
// Fully qualified: inside cuvs::spectral::matrix an unqualified `detail::`
// cannot resolve to raft::spectral::matrix::detail, which is the namespace
// the included raft header declares these types in.
using raft::spectral::matrix::sparse_mv_alg_t;

// Vector "view"-like aggregate for linear algebra purposes
//
using raft::spectral::matrix::vector_view_t;

using raft::spectral::matrix::vector_t;

using raft::spectral::matrix::sparse_matrix_t;

using raft::spectral::matrix::laplacian_matrix_t;

using raft::spectral::matrix::modularity_matrix_t;

}  // namespace matrix
}  // namespace spectral
}  // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/specializations.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/modularity_maximization.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MODULARITY_MAXIMIZATION_H
#define __MODULARITY_MAXIMIZATION_H
#pragma once
#include <tuple>
#include <raft/spectral/detail/modularity_maximization.hpp>
namespace cuvs {
namespace spectral {
// =========================================================
// Spectral modularity_maximization
// =========================================================
/** Compute partition for a weighted undirected graph. This
* partition attempts to minimize the cost function:
* Cost = \f$sum_i\f$ (Edges cut by ith partition)/(Vertices in ith partition)
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param eigen_solver Eigensolver implementation
* @param cluster_solver Cluster solver implementation
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param eigVals Output eigenvalue array pointer on device
* @param eigVecs Output eigenvector array pointer on device
* @return statistics: number of eigensolver iterations, .
*/
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> modularity_maximization(
  raft::resources const& handle,
  raft::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  // Thin forwarding wrapper: all work happens in the raft detail
  // implementation; see the doc comment above for the parameter contract.
  // NOTE(review): sparse_matrix_t is declared in raft::spectral::matrix —
  // confirm `raft::matrix::sparse_matrix_t` actually resolves in this
  // namespace port.
  return raft::spectral::detail::
    modularity_maximization<vertex_t, weight_t, EigenSolver, ClusterSolver>(
      handle, csr_m, eigen_solver, cluster_solver, clusters, eigVals, eigVecs);
}
//===================================================
// Analysis of graph partition
// =========================================================
/// Compute modularity
/** This function determines the modularity based on a graph and cluster assignments
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param nClusters Number of clusters.
* @param clusters (Input, device memory, n entries) Cluster assignments.
* @param modularity On exit, modularity
*/
template <typename vertex_t, typename weight_t>
void analyzeModularity(raft::resources const& handle,
                       raft::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                       vertex_t nClusters,
                       vertex_t const* __restrict__ clusters,
                       weight_t& modularity)
{
  // Thin forwarding wrapper around the raft detail implementation; the
  // parameter contract is documented in the comment above.
  raft::spectral::detail::analyzeModularity<vertex_t, weight_t>(
    handle, csr_m, nClusters, clusters, modularity);
}
} // namespace spectral
} // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/cluster_solvers_deprecated.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Note: This file is deprecated and will be removed in a future release
* Please use include/cuvs/cluster/kmeans.cuh instead
*/
#ifndef __CLUSTER_SOLVERS_deprecated_H
#define __CLUSTER_SOLVERS_deprecated_H
#pragma once
#include <cuvs/cluster/kmeans_deprecated.cuh>
#include <utility> // for std::pair
namespace cuvs {
namespace spectral {
using namespace matrix;
// aggregate of control params for Eigen Solver:
//
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct cluster_solver_config_deprecated_t {
size_type_t n_clusters;
size_type_t maxIter;
value_type_t tol;
unsigned long long seed{123456};
};
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct kmeans_solver_deprecated_t {
explicit kmeans_solver_deprecated_t(
cluster_solver_config_deprecated_t<index_type_t, value_type_t, size_type_t> const& config)
: config_(config)
{
}
std::pair<value_type_t, index_type_t> solve(raft::resources const& handle,
size_type_t n_obs_vecs,
size_type_t dim,
value_type_t const* __restrict__ obs,
index_type_t* __restrict__ codes) const
{
RAFT_EXPECTS(obs != nullptr, "Null obs buffer.");
RAFT_EXPECTS(codes != nullptr, "Null codes buffer.");
value_type_t residual{};
index_type_t iters{};
cuvs::cluster::kmeans(handle,
n_obs_vecs,
dim,
config_.n_clusters,
config_.tol,
config_.maxIter,
obs,
codes,
residual,
iters,
config_.seed);
return std::make_pair(residual, iters);
}
auto const& get_config(void) const { return config_; }
private:
cluster_solver_config_deprecated_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/partition.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __PARTITION_H
#define __PARTITION_H
#pragma once
#include <tuple>
#include <raft/spectral/detail/partition.hpp>
namespace cuvs {
namespace spectral {
// =========================================================
// Spectral partitioner
// =========================================================
/// Compute spectral graph partition
/** Compute partition for a weighted undirected graph. This
* partition attempts to minimize the cost function:
* Cost = \f$sum_i\f$ (Edges cut by ith partition)/(Vertices in ith partition)
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param eigen_solver Eigensolver implementation
* @param cluster_solver Cluster solver implementation
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param eigVals Output eigenvalue array pointer on device
* @param eigVecs Output eigenvector array pointer on device
* @return statistics: number of eigensolver iterations, .
*/
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> partition(
  raft::resources const& handle,
  raft::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  // Thin forwarding wrapper: all work happens in raft::spectral::detail::partition.
  auto stats = raft::spectral::detail::partition<vertex_t, weight_t, EigenSolver, ClusterSolver>(
    handle, csr_m, eigen_solver, cluster_solver, clusters, eigVals, eigVecs);
  return stats;
}
// =========================================================
// Analysis of graph partition
// =========================================================
/// Compute cost function for partition
/** This function determines the edges cut by a partition and a cost
* function:
* Cost = \f$sum_i\f$ (Edges cut by ith partition)/(Vertices in ith partition)
* Graph is assumed to be weighted and undirected.
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param nClusters Number of partitions.
* @param clusters (Input, device memory, n entries) Partition
* assignments.
* @param edgeCut On exit, weight of edges cut by partition.
* @param cost On exit, partition cost function.
*/
template <typename vertex_t, typename weight_t>
void analyzePartition(raft::resources const& handle,
                      raft::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                      vertex_t nClusters,
                      const vertex_t* __restrict__ clusters,
                      weight_t& edgeCut,
                      weight_t& cost)
{
  // Thin forwarding wrapper; the implementation lives in raft::spectral::detail.
  raft::spectral::detail::analyzePartition<vertex_t, weight_t>(
    handle, csr_m, nClusters, clusters, edgeCut, cost);
}
} // namespace spectral
} // namespace cuvs
#endif | 0 |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdexcept>
#include <string>
#include <raft/core/detail/macros.hpp>
// Debug-only logging helpers.  WARNING(msg) prints file:line-tagged text to
// std::cerr when DEBUG is defined and expands to nothing otherwise.
// NOTE(review): the DEBUG branch uses std::stringstream / std::cerr but this
// header includes neither <sstream> nor <iostream>; it relies on transitive
// includes — confirm and add them if this path is ever compiled.
#ifdef DEBUG
#define COUT() (std::cout)
#define CERR() (std::cerr)
// nope:
//
#define WARNING(message)                                                  \
  do {                                                                    \
    std::stringstream ss;                                                 \
    ss << "Warning (" << __FILE__ << ":" << __LINE__ << "): " << message; \
    CERR() << ss.str() << std::endl;                                      \
  } while (0)
#else  // DEBUG
#define WARNING(message)
#endif
| 0 |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/matrix_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
namespace cuvs {
namespace spectral {
// Normalize each column of the m x n column-major matrix `obs` (indexed as
// obs[i + j * m]) by its L2 norm.  Launch contract (see scale_obs):
// blockDim.x is a power of two in [2, 32] (sub-warp wide) and the y grid
// covers the n columns.
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL scale_obs_kernel(index_type_t m, index_type_t n, value_type_t* obs)
{
  index_type_t i, j, k, index, mm;
  value_type_t alpha, v, last;
  bool valid;

  // ASSUMPTION: kernel is launched with either 2, 4, 8, 16 or 32 threads in x-dimension

  // compute alpha: running sum of squares of the column entries, accumulated
  // sub-warp-wide via an inclusive shuffle prefix sum over tiles of blockDim.x rows.
  // NOTE(review): `alpha` carries over between iterations of the outer j-loop;
  // with the launch geometry chosen in scale_obs each (threadIdx.y, blockIdx.y)
  // pair visits exactly one column, so no cross-column mixing occurs — confirm
  // this if the launch configuration ever changes.
  mm    = (((m + blockDim.x - 1) / blockDim.x) * blockDim.x);  // m in multiple of blockDim.x
  alpha = 0.0;
  for (j = threadIdx.y + blockIdx.y * blockDim.y; j < n; j += blockDim.y * gridDim.y) {
    for (i = threadIdx.x; i < mm; i += blockDim.x) {
      // check if the thread is valid (rows are padded up to mm)
      valid = i < m;
      // get the running total held by the last lane from the previous tile
      last = __shfl_sync(warp_full_mask(), alpha, blockDim.x - 1, blockDim.x);
      // if you are valid read the value from memory, otherwise set your value to 0
      alpha = (valid) ? obs[i + j * m] : 0.0;
      alpha = alpha * alpha;
      // do prefix sum (of size warpSize=blockDim.x =< 32)
      for (k = 1; k < blockDim.x; k *= 2) {
        v = __shfl_up_sync(warp_full_mask(), alpha, k, blockDim.x);
        if (threadIdx.x >= k) alpha += v;
      }
      // shift by last so alpha stays a running total across tiles
      alpha += last;
    }
  }
  // scale by alpha: the last lane holds the full column sum of squares
  alpha = __shfl_sync(warp_full_mask(), alpha, blockDim.x - 1, blockDim.x);
  alpha = raft::sqrt(alpha);
  for (j = threadIdx.y + blockIdx.y * blockDim.y; j < n; j += blockDim.y * gridDim.y) {
    for (i = threadIdx.x; i < m; i += blockDim.x) {  // blockDim.x=32
      index      = i + j * m;
      obs[index] = obs[index] / alpha;
    }
  }
}
// Round `n` up to the next power of two using the classic bit-smearing trick:
// smear the highest set bit of (n - 1) into every lower position, then add one.
// Reference:
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2Float
template <typename index_type_t>
index_type_t next_pow2(index_type_t n)
{
  index_type_t x = n - 1;
  for (int shift = 1; shift <= 16; shift <<= 1) {
    x |= x >> shift;
  }
  return x + 1;
}
// Scale each column of the m x n column-major matrix `obs` by its L2 norm.
// Picks a sub-warp-wide x block dimension (power of two in [2, 32], matching
// the assumption documented in scale_obs_kernel) and one column per y thread.
// Returns the CUDA launch status instead of unconditionally reporting success.
template <typename index_type_t, typename value_type_t>
cudaError_t scale_obs(index_type_t m, index_type_t n, value_type_t* obs)
{
  index_type_t p2m;

  // find next power of 2 (kernel requires blockDim.x to be a power of two <= 32)
  p2m = next_pow2<index_type_t>(m);
  // setup launch configuration
  unsigned int xsize = std::max(2, std::min(p2m, 32));
  dim3 nthreads{xsize, 256 / xsize, 1};
  dim3 nblocks{1, (n + nthreads.y - 1) / nthreads.y, 1};

  // launch scaling kernel (scale each column of obs by its norm)
  scale_obs_kernel<index_type_t, value_type_t><<<nblocks, nthreads>>>(m, n, obs);

  // Kernel launches do not return errors directly; surface launch-configuration
  // failures here rather than always returning cudaSuccess.
  return cudaGetLastError();
}
// Post-process an n x nEigVecs eigenvector matrix for clustering:
// (1) "whiten" each eigenvector column in place — subtract its mean and divide
//     by its norm scaled by sqrt(n) — then
// (2) transpose the matrix out-of-place with cublasgeam so that each of the n
//     rows becomes an nEigVecs-dimensional observation vector.
template <typename vertex_t, typename edge_t, typename weight_t>
void transform_eigen_matrix(raft::resources const& handle,
                            edge_t n,
                            vertex_t nEigVecs,
                            weight_t* eigVecs)
{
  auto stream             = resource::get_cuda_stream(handle);
  auto cublas_h           = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);

  const weight_t zero{0.0};
  const weight_t one{1.0};

  // Whiten eigenvector matrix
  for (auto i = 0; i < nEigVecs; ++i) {
    weight_t mean, std;

    // column mean of eigenvector i
    mean = thrust::reduce(thrust_exec_policy,
                          thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                          thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)));
    RAFT_CHECK_CUDA(stream);
    mean /= n;
    // subtract the mean in place
    thrust::transform(thrust_exec_policy,
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)),
                      thrust::make_constant_iterator(mean),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::minus<weight_t>());
    RAFT_CHECK_CUDA(stream);

    // divide by ||column|| / sqrt(n)
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasnrm2(cublas_h, n, eigVecs + IDX(0, i, n), 1, &std, stream));
    std /= std::sqrt(static_cast<weight_t>(n));

    thrust::transform(thrust_exec_policy,
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)),
                      thrust::make_constant_iterator(std),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::divides<weight_t>());
    RAFT_CHECK_CUDA(stream);
  }

  // Transpose eigenvector matrix
  // TODO: in-place transpose
  {
    raft::spectral::matrix::vector_t<weight_t> work(handle, nEigVecs * n);
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

    // work = 1 * eigVecs^T + 0 * (nothing): an out-of-place transpose via GEAM
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgeam(cublas_h,
                                                     CUBLAS_OP_T,
                                                     CUBLAS_OP_N,
                                                     nEigVecs,
                                                     n,
                                                     &one,
                                                     eigVecs,
                                                     n,
                                                     &zero,
                                                     (weight_t*)NULL,
                                                     nEigVecs,
                                                     work.raw(),
                                                     nEigVecs,
                                                     stream));

    // copy the transposed result back over the input buffer
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      eigVecs, work.raw(), nEigVecs * n * sizeof(weight_t), cudaMemcpyDeviceToDevice, stream));
  }
}
namespace {
/// Functor to generate indicator vectors
/** For use in Thrust transform
*/
// Writes an indicator value into the second tuple slot: 1 when the label in
// the first slot equals `i`, 0 otherwise.  Used via thrust::for_each over a
// zipped (labels, output) range.
template <typename index_type_t, typename value_type_t>
struct equal_to_i_op {
  const index_type_t i;

 public:
  equal_to_i_op(index_type_t _i) : i(_i) {}

  template <typename Tuple_>
  __host__ __device__ void operator()(Tuple_ t)
  {
    if (thrust::get<0>(t) == i) {
      thrust::get<1>(t) = (value_type_t)1.0;
    } else {
      thrust::get<1>(t) = (value_type_t)0.0;
    }
  }
};
} // namespace
// Construct indicator vector for ith partition
//
// Build the 0/1 indicator vector `part_i` for partition `index` from the
// cluster assignment array, then compute:
//   clustersize = |{v : clusters[v] == index}|  (via part_i . part_i)
//   partStats   = part_i^T * B * part_i         (B applied through B.mv)
// Returns false (and leaves partStats untouched) when the partition is empty.
template <typename vertex_t, typename edge_t, typename weight_t>
bool construct_indicator(raft::resources const& handle,
                         edge_t index,
                         edge_t n,
                         weight_t& clustersize,
                         weight_t& partStats,
                         vertex_t const* __restrict__ clusters,
                         raft::spectral::matrix::vector_t<weight_t>& part_i,
                         raft::spectral::matrix::vector_t<weight_t>& Bx,
                         raft::spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> const& B)
{
  auto stream             = resource::get_cuda_stream(handle);
  auto cublas_h           = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);

  // part_i[v] = (clusters[v] == index) ? 1 : 0
  thrust::for_each(
    thrust_exec_policy,
    thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(clusters),
                                                 thrust::device_pointer_cast(part_i.raw()))),
    thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(clusters + n),
                                                 thrust::device_pointer_cast(part_i.raw() + n))),
    equal_to_i_op<vertex_t, weight_t>(index));
  RAFT_CHECK_CUDA(stream);

  // Compute size of ith partition (dot of the 0/1 vector with itself)
  // TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasdot(
    cublas_h, n, part_i.raw(), 1, part_i.raw(), 1, &clustersize, stream));
  clustersize = round(clustersize);  // guard against floating-point round-off
  if (clustersize < 0.5) { return false; }  // empty partition

  // Compute part stats: partStats = part_i^T * (B * part_i)
  B.mv(1, part_i.raw(), 0, Bx.raw());
  // TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasdot(cublas_h, n, Bx.raw(), 1, part_i.raw(), 1, &partStats, stream));

  return true;
}
} // namespace spectral
} // namespace cuvs
| 0 |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusolverDn.h>
#include <raft/core/error.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/detail/cusolver_wrappers.hpp>
// for now; TODO: check if/where this `define` should be;
//
#define USE_LAPACK
namespace cuvs {
// Validate a LAPACK `info` status code:
//   info < 0 : argument number -info had an illegal value -> throw
//   info > 0 : internal LAPACK failure                    -> RAFT_FAIL
// Wrapped in do { } while (0) so the macro expands to a single statement and
// is safe inside unbraced if/else bodies (the original bare `{ }` form is not).
#define lapackCheckError(status)                                                     \
  do {                                                                               \
    if (status < 0) {                                                                \
      std::stringstream ss;                                                          \
      ss << "Lapack error: argument number " << -status << " had an illegal value."; \
      throw exception(ss.str());                                                     \
    } else if (status > 0) {                                                         \
      RAFT_FAIL("Lapack error: internal error.");                                    \
    }                                                                                \
  } while (0)
extern "C" void sgeqrf_(
int* m, int* n, float* a, int* lda, float* tau, float* work, int* lwork, int* info);
extern "C" void dgeqrf_(
int* m, int* n, double* a, int* lda, double* tau, double* work, int* lwork, int* info);
extern "C" void sormqr_(char* side,
char* trans,
int* m,
int* n,
int* k,
float* a,
int* lda,
const float* tau,
float* c,
int* ldc,
float* work,
int* lwork,
int* info);
extern "C" void dormqr_(char* side,
char* trans,
int* m,
int* n,
int* k,
double* a,
int* lda,
const double* tau,
double* c,
int* ldc,
double* work,
int* lwork,
int* info);
extern "C" int dgeev_(char* jobvl,
char* jobvr,
int* n,
double* a,
int* lda,
double* wr,
double* wi,
double* vl,
int* ldvl,
double* vr,
int* ldvr,
double* work,
int* lwork,
int* info);
extern "C" int sgeev_(char* jobvl,
char* jobvr,
int* n,
float* a,
int* lda,
float* wr,
float* wi,
float* vl,
int* ldvl,
float* vr,
int* ldvr,
float* work,
int* lwork,
int* info);
extern "C" cusolverStatus_t cusolverDnSgemmHost(cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* A,
int lda,
const float* B,
int ldb,
const float* beta,
float* C,
int ldc);
extern "C" cusolverStatus_t cusolverDnDgemmHost(cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const double* alpha,
const double* A,
int lda,
const double* B,
int ldb,
const double* beta,
double* C,
int ldc);
extern "C" cusolverStatus_t cusolverDnSsterfHost(int n, float* d, float* e, int* info);
extern "C" cusolverStatus_t cusolverDnDsterfHost(int n, double* d, double* e, int* info);
extern "C" cusolverStatus_t cusolverDnSsteqrHost(
const signed char* compz, int n, float* d, float* e, float* z, int ldz, float* work, int* info);
extern "C" cusolverStatus_t cusolverDnDsteqrHost(const signed char* compz,
int n,
double* d,
double* e,
double* z,
int ldz,
double* work,
int* info);
// Host-side wrapper around LAPACK-style dense routines, dispatched to the
// cusolverDn*Host and Fortran LAPACK symbols declared above.  All members are
// static; the class is not instantiable (private ctor/dtor).
template <typename T>
class Lapack {
 private:
  Lapack();
  ~Lapack();

 public:
  // Fails (RAFT_FAIL) at runtime when the build does not define USE_LAPACK.
  static void check_lapack_enabled();

  // C = alpha * op(A) * op(B) + beta * C; transa/transb select transpose ('T') vs not ('N').
  static void gemm(bool transa,
                   bool transb,
                   int m,
                   int n,
                   int k,
                   T alpha,
                   const T* A,
                   int lda,
                   const T* B,
                   int ldb,
                   T beta,
                   T* C,
                   int ldc);

  // special QR for lanczos
  // Eigenvalues of a symmetric tridiagonal matrix (diagonal d, off-diagonal e).
  static void sterf(int n, T* d, T* e);
  // Eigenpairs of a symmetric tridiagonal matrix; compz selects eigenvector handling.
  static void steqr(char compz, int n, T* d, T* e, T* z, int ldz, T* work);

  // QR
  // computes the QR factorization of a general matrix
  static void geqrf(int m, int n, T* a, int lda, T* tau, T* work, int* lwork);
  // Generates the real orthogonal matrix Q of the QR factorization formed by geqrf.
  // multiply C by implicit Q
  static void ormqr(bool right_side,
                    bool transq,
                    int m,
                    int n,
                    int k,
                    T* a,
                    int lda,
                    T* tau,
                    T* c,
                    int ldc,
                    T* work,
                    int* lwork);

  // Eigenvalues (real parts) of a general matrix.
  static void geev(T* A, T* eigenvalues, int dim, int lda);
  // Real eigenvalues plus right eigenvectors of a general matrix.
  static void geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr);
  // Complex eigenvalues (split real/imaginary) plus right eigenvectors.
  static void geev(T* A,
                   T* eigenvalues_r,
                   T* eigenvalues_i,
                   T* eigenvectors_r,
                   T* eigenvectors_i,
                   int dim,
                   int lda,
                   int ldvr);

 private:
  // float overload: forwards to cusolverDnSgemmHost.
  static void lapack_gemm(const char transa,
                          const char transb,
                          int m,
                          int n,
                          int k,
                          float alpha,
                          const float* a,
                          int lda,
                          const float* b,
                          int ldb,
                          float beta,
                          float* c,
                          int ldc)
  {
    cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cublas_transb = (transb == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cusolverDnSgemmHost(
      cublas_transa, cublas_transb, m, n, k, &alpha, (float*)a, lda, (float*)b, ldb, &beta, c, ldc);
  }

  // double overload: forwards to cusolverDnDgemmHost.
  // NOTE(review): takes `signed char` where the float overload takes `char` —
  // harmless since callers pass char literals, but confirm this asymmetry is intended.
  static void lapack_gemm(const signed char transa,
                          const signed char transb,
                          int m,
                          int n,
                          int k,
                          double alpha,
                          const double* a,
                          int lda,
                          const double* b,
                          int ldb,
                          double beta,
                          double* c,
                          int ldc)
  {
    cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cublas_transb = (transb == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cusolverDnDgemmHost(cublas_transa,
                        cublas_transb,
                        m,
                        n,
                        k,
                        &alpha,
                        (double*)a,
                        lda,
                        (double*)b,
                        ldb,
                        &beta,
                        c,
                        ldc);
  }

  // sterf overloads: symmetric tridiagonal eigenvalues.
  static void lapack_sterf(int n, float* d, float* e, int* info)
  {
    cusolverDnSsterfHost(n, d, e, info);
  }

  static void lapack_sterf(int n, double* d, double* e, int* info)
  {
    cusolverDnDsterfHost(n, d, e, info);
  }

  // steqr overloads: symmetric tridiagonal eigenpairs.
  static void lapack_steqr(
    const signed char compz, int n, float* d, float* e, float* z, int ldz, float* work, int* info)
  {
    cusolverDnSsteqrHost(&compz, n, d, e, z, ldz, work, info);
  }

  static void lapack_steqr(const signed char compz,
                           int n,
                           double* d,
                           double* e,
                           double* z,
                           int ldz,
                           double* work,
                           int* info)
  {
    cusolverDnDsteqrHost(&compz, n, d, e, z, ldz, work, info);
  }

  // geqrf overloads: QR factorization (Fortran LAPACK).
  static void lapack_geqrf(
    int m, int n, float* a, int lda, float* tau, float* work, int* lwork, int* info)
  {
    sgeqrf_(&m, &n, a, &lda, tau, work, lwork, info);
  }

  static void lapack_geqrf(
    int m, int n, double* a, int lda, double* tau, double* work, int* lwork, int* info)
  {
    dgeqrf_(&m, &n, a, &lda, tau, work, lwork, info);
  }

  // ormqr overloads: apply implicit Q from geqrf (Fortran LAPACK).
  static void lapack_ormqr(char side,
                           char trans,
                           int m,
                           int n,
                           int k,
                           float* a,
                           int lda,
                           float* tau,
                           float* c,
                           int ldc,
                           float* work,
                           int* lwork,
                           int* info)
  {
    sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info);
  }

  static void lapack_ormqr(char side,
                           char trans,
                           int m,
                           int n,
                           int k,
                           double* a,
                           int lda,
                           double* tau,
                           double* c,
                           int ldc,
                           double* work,
                           int* lwork,
                           int* info)
  {
    dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info);
  }

  // geev dispatch: overload resolution picks dgeev_/sgeev_ by element type.
  static int lapack_geev_dispatch(char* jobvl,
                                  char* jobvr,
                                  int* n,
                                  double* a,
                                  int* lda,
                                  double* wr,
                                  double* wi,
                                  double* vl,
                                  int* ldvl,
                                  double* vr,
                                  int* ldvr,
                                  double* work,
                                  int* lwork,
                                  int* info)
  {
    return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
  }

  static int lapack_geev_dispatch(char* jobvl,
                                  char* jobvr,
                                  int* n,
                                  float* a,
                                  int* lda,
                                  float* wr,
                                  float* wi,
                                  float* vl,
                                  int* ldvl,
                                  float* vr,
                                  int* ldvr,
                                  float* work,
                                  int* lwork,
                                  int* info)
  {
    return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
  }

  // real eigenvalues
  static void lapack_geev(T* A, T* eigenvalues, int dim, int lda)
  {
    char job = 'N';          // no eigenvectors requested
    std::vector<T> WI(dim);  // imaginary parts (discarded by this overload)
    int ldv       = 1;
    T* vl         = 0;
    int work_size = 6 * dim;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&job,
                         &job,
                         &dim,
                         A,
                         &lda,
                         eigenvalues,
                         WI.data(),
                         vl,
                         &ldv,
                         vl,
                         &ldv,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }

  // real eigenpairs
  static void lapack_geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr)
  {
    char jobvl = 'N';        // no left eigenvectors
    char jobvr = 'V';        // compute right eigenvectors
    std::vector<T> WI(dim);  // imaginary parts (discarded by this overload)
    int work_size = 6 * dim;
    T* vl         = 0;
    int ldvl      = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl,
                         &jobvr,
                         &dim,
                         A,
                         &lda,
                         eigenvalues,
                         WI.data(),
                         vl,
                         &ldvl,
                         eigenvectors,
                         &ldvr,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }

  // complex eigenpairs
  // NOTE(review): eigenvectors_i is accepted but not passed to the LAPACK call;
  // xGEEV packs complex eigenvectors into the single vr array — confirm callers
  // expect that packed layout rather than a separate imaginary buffer.
  static void lapack_geev(T* A,
                          T* eigenvalues_r,
                          T* eigenvalues_i,
                          T* eigenvectors_r,
                          T* eigenvectors_i,
                          int dim,
                          int lda,
                          int ldvr)
  {
    char jobvl    = 'N';
    char jobvr    = 'V';
    int work_size = 8 * dim;
    int ldvl      = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl,
                         &jobvr,
                         &dim,
                         A,
                         &lda,
                         eigenvalues_r,
                         eigenvalues_i,
                         0,
                         &ldvl,
                         eigenvectors_r,
                         &ldvr,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }
};
// Guard: fail fast at runtime when the library was built without LAPACK support.
template <typename T>
void Lapack<T>::check_lapack_enabled()
{
#ifndef USE_LAPACK
  RAFT_FAIL("Error: LAPACK not enabled.");
#endif
}
// Dense GEMM on host data; bool flags map to 'T'/'N' transpose selectors for
// the type-dispatched lapack_gemm overloads.
template <typename T>
void Lapack<T>::gemm(bool transa,
                     bool transb,
                     int m,
                     int n,
                     int k,
                     T alpha,
                     const T* A,
                     int lda,
                     const T* B,
                     int ldb,
                     T beta,
                     T* C,
                     int ldc)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  const char transA_char = transa ? 'T' : 'N';
  const char transB_char = transb ? 'T' : 'N';
  lapack_gemm(transA_char, transB_char, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
  // #endif
}
// Eigenvalues of a symmetric tridiagonal matrix (LAPACK xSTERF);
// d holds the diagonal, e the off-diagonal.
template <typename T>
void Lapack<T>::sterf(int n, T* d, T* e)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  int info;
  lapack_sterf(n, d, e, &info);
  lapackCheckError(info);
  // #endif
}
// Eigenpairs of a symmetric tridiagonal matrix (LAPACK xSTEQR);
// compz selects the eigenvector computation mode.
template <typename T>
void Lapack<T>::steqr(char compz, int n, T* d, T* e, T* z, int ldz, T* work)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  int info;
  lapack_steqr(compz, n, d, e, z, ldz, work, &info);
  lapackCheckError(info);
  // #endif
}
// QR factorization of an m x n matrix (LAPACK xGEQRF).
template <typename T>
void Lapack<T>::geqrf(int m, int n, T* a, int lda, T* tau, T* work, int* lwork)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  int info;
  lapack_geqrf(m, n, a, lda, tau, work, lwork, &info);
  lapackCheckError(info);
#endif
}
// Multiply C by the implicit Q produced by geqrf (LAPACK xORMQR).
// right_side: apply Q from the right ('R') instead of the left ('L');
// transq: apply Q^T ('T') instead of Q ('N').
template <typename T>
void Lapack<T>::ormqr(bool right_side,
                      bool transq,
                      int m,
                      int n,
                      int k,
                      T* a,
                      int lda,
                      T* tau,
                      T* c,
                      int ldc,
                      T* work,
                      int* lwork)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  char side  = right_side ? 'R' : 'L';
  char trans = transq ? 'T' : 'N';
  int info;
  lapack_ormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info);
  lapackCheckError(info);
#endif
}
// real eigenvalues
// Real parts of the eigenvalues of a general dim x dim matrix (LAPACK xGEEV).
template <typename T>
void Lapack<T>::geev(T* A, T* eigenvalues, int dim, int lda)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues, dim, lda);
#endif
}
// real eigenpairs
// Real eigenvalues and right eigenvectors of a general matrix (LAPACK xGEEV).
template <typename T>
void Lapack<T>::geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues, eigenvectors, dim, lda, ldvr);
#endif
}
// complex eigenpairs
// Complex eigenvalues (real/imaginary split) and right eigenvectors of a
// general matrix (LAPACK xGEEV).
template <typename T>
void Lapack<T>::geev(T* A,
                     T* eigenvalues_r,
                     T* eigenvalues_i,
                     T* eigenvectors_r,
                     T* eigenvectors_i,
                     int dim,
                     int lda,
                     int ldvr)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues_r, eigenvalues_i, eigenvectors_r, eigenvectors_i, dim, lda, ldvr);
#endif
}
} // namespace cuvs
| 0 |
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math.h>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <stdio.h>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <tuple>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/detail/spectral_util.cuh>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace cuvs {
namespace spectral {
namespace detail {
// =========================================================
// Spectral partitioner
// =========================================================
/// Compute spectral graph partition
/** Compute partition for a weighted undirected graph. This
* partition attempts to minimize the cost function:
* Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
*
* @param G Weighted graph in CSR format
* @param nClusters Number of partitions.
* @param nEigVecs Number of eigenvectors to compute.
* @param maxIter_lanczos Maximum number of Lanczos iterations.
* @param restartIter_lanczos Maximum size of Lanczos system before
* implicit restart.
* @param tol_lanczos Convergence tolerance for Lanczos method.
* @param maxIter_kmeans Maximum number of k-means iterations.
* @param tol_kmeans Convergence tolerance for k-means algorithm.
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param iters_lanczos On exit, number of Lanczos iterations
* performed.
* @param iters_kmeans On exit, number of k-means iterations
* performed.
* @return statistics: number of eigensolver iterations, .
*/
// Spectral partitioning pipeline: build the graph Laplacian, compute its
// smallest eigenpairs, whiten the eigenvector matrix, then cluster the
// embedded vertices.  Returns {eigensolver iterations, cluster residual,
// cluster-solver iterations}.
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> partition(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
  RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");

  auto stream   = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);

  std::tuple<vertex_t, weight_t, vertex_t>
    stats;  //{iters_eig_solver,residual_cluster,iters_cluster_solver} // # iters eigen solver,
            // cluster solver residual, # iters cluster solver

  vertex_t n = csr_m.nrows_;

  // -------------------------------------------------------
  // Spectral partitioner
  // -------------------------------------------------------

  // Compute eigenvectors of Laplacian

  // Initialize Laplacian
  /// sparse_matrix_t<vertex_t, weight_t> A{handle, graph};
  spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> L{handle, csr_m};

  auto eigen_config = eigen_solver.get_config();
  auto nEigVecs     = eigen_config.n_eigVecs;

  // Compute smallest eigenvalues and eigenvectors
  std::get<0>(stats) = eigen_solver.solve_smallest_eigenvectors(handle, L, eigVals, eigVecs);

  // Whiten eigenvector matrix (mean-center + normalize each column, transpose)
  transform_eigen_matrix(handle, n, nEigVecs, eigVecs);

  // Find partition clustering in the nEigVecs-dimensional embedding
  auto pair_cluster = cluster_solver.solve(handle, n, nEigVecs, eigVecs, clusters);

  std::get<1>(stats) = pair_cluster.first;
  std::get<2>(stats) = pair_cluster.second;

  return stats;
}
// =========================================================
// Analysis of graph partition
// =========================================================
/// Compute cost function for partition
/** This function determines the edges cut by a partition and a cost
* function:
* Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
* Graph is assumed to be weighted and undirected.
*
* @param G Weighted graph in CSR format
* @param nClusters Number of partitions.
* @param clusters (Input, device memory, n entries) Partition
* assignments.
* @param edgeCut On exit, weight of edges cut by partition.
* @param cost On exit, partition cost function.
* @return error flag.
*/
// Evaluate a partition: for each cluster, form its indicator vector, measure
// the edges it cuts via the Laplacian quadratic form, and accumulate
//   cost    += (edges cut by cluster i) / (size of cluster i)
//   edgeCut += (edges cut by cluster i) / 2   (each cut edge counted twice)
template <typename vertex_t, typename weight_t>
void analyzePartition(raft::resources const& handle,
                      spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                      vertex_t nClusters,
                      const vertex_t* __restrict__ clusters,
                      weight_t& edgeCut,
                      weight_t& cost)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");

  vertex_t i;
  vertex_t n = csr_m.nrows_;

  auto stream   = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);

  weight_t partEdgesCut, clustersize;

  // Device memory
  spectral::matrix::vector_t<weight_t> part_i(handle, n);
  spectral::matrix::vector_t<weight_t> Lx(handle, n);

  // Initialize cuBLAS
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // Initialize Laplacian
  /// sparse_matrix_t<vertex_t, weight_t> A{handle, graph};
  spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> L{handle, csr_m};

  // Initialize output
  cost    = 0;
  edgeCut = 0;

  // Iterate through partitions
  for (i = 0; i < nClusters; ++i) {
    // Construct indicator vector for ith partition;
    // partEdgesCut = part_i^T * L * part_i (weight of edges leaving cluster i)
    if (!construct_indicator(handle, i, n, clustersize, partEdgesCut, clusters, part_i, Lx, L)) {
      WARNING("empty partition");
      continue;
    }

    // Record results
    cost += partEdgesCut / clustersize;
    edgeCut += partEdgesCut / 2;
  }
}
} // namespace detail
} // namespace spectral
} // namespace cuvs
| 0 |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include <algorithm>
// =========================================================
// Useful macros
// =========================================================
// Get index of matrix entry
#define IDX(i, j, lda) ((i) + (j) * (lda))
namespace cuvs {
namespace spectral {
namespace matrix {
namespace detail {
using size_type = int; // for now; TODO: move it in appropriate header
// Apply diagonal matrix to vector:
//
// Apply a diagonal matrix (stored as vector D) to x, accumulating into y:
//   y[i] += alpha * D[i] * x[i]
// Grid-stride loop: correct for any 1-D launch configuration.
template <typename IndexType_, typename ValueType_>
RAFT_KERNEL diagmv(IndexType_ n,
                   ValueType_ alpha,
                   const ValueType_* __restrict__ D,
                   const ValueType_* __restrict__ x,
                   ValueType_* __restrict__ y)
{
  const IndexType_ stride = blockDim.x * gridDim.x;
  for (IndexType_ idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += stride) {
    y[idx] += alpha * D[idx] * x[idx];
  }
}
// specifies type of algorithm used
// for SpMv:
//
// SpMV algorithm selector passed to sparse_matrix_t::mv.
enum struct sparse_mv_alg_t : int {
  SPARSE_MV_UNDEFINED = -1,  // sentinel: no algorithm selected
  SPARSE_MV_ALG_DEFAULT,     // generic, for any sparse matrix
  SPARSE_MV_ALG1,            // typical for CSR
  SPARSE_MV_ALG2             // may provide better performance for irregular sparse matrices
};
// Vector "view"-like aggregate for linear algebra purposes
//
// Non-owning "view" over a device buffer, for linear algebra plumbing.
// Fixes vs. original:
//  - move ctor / move assignment accessed nonexistent members `raw()`/`size()`
//    (this struct only has `buffer_`/`size_`);
//  - move assignment fell off the end of a value-returning function, which is
//    undefined behavior; it now returns *this.
template <typename value_type>
struct vector_view_t {
  value_type* buffer_;
  size_type size_;

  vector_view_t(value_type* buffer, size_type sz) : buffer_(buffer), size_(sz) {}

  vector_view_t(vector_view_t&& other) : buffer_(other.buffer_), size_(other.size_) {}

  vector_view_t& operator=(vector_view_t&& other)
  {
    buffer_ = other.buffer_;
    size_   = other.size_;
    return *this;
  }
};
// Owning device vector backed by rmm::device_uvector, with a couple of
// Thrust-based helpers used by the spectral solvers.  Work is issued on the
// handle's CUDA stream via the captured Thrust execution policy.
template <typename value_type>
class vector_t {
 public:
  // Allocates `sz` elements on the handle's stream; contents are uninitialized.
  vector_t(raft::resources const& raft_handle, size_type sz)
    : buffer_(sz, resource::get_cuda_stream(raft_handle)),
      thrust_policy(raft::resource::get_thrust_policy(raft_handle))
  {
  }

  size_type size(void) const { return buffer_.size(); }

  value_type* raw(void) { return buffer_.data(); }

  value_type const* raw(void) const { return buffer_.data(); }

  // L1 norm (sum of absolute values), reduced on the device.
  value_type nrm1() const
  {
    return thrust::reduce(thrust_policy,
                          buffer_.data(),
                          buffer_.data() + buffer_.size(),
                          value_type{0},
                          [] __device__(auto left, auto right) {
                            auto abs_left  = left > 0 ? left : -left;
                            auto abs_right = right > 0 ? right : -right;
                            return abs_left + abs_right;
                          });
  }

  // Set every element to `value`.
  void fill(value_type value)
  {
    thrust::fill_n(thrust_policy, buffer_.data(), buffer_.size(), value);
  }

 private:
  using thrust_exec_policy_t =
    thrust::detail::execute_with_allocator<rmm::mr::thrust_allocator<char>,
                                           thrust::cuda_cub::execute_on_stream_base>;
  rmm::device_uvector<value_type> buffer_;
  const thrust_exec_policy_t thrust_policy;
};
// Non-owning wrapper around a device-resident CSR matrix, providing SpMV
// (y = alpha*A*x + beta*y) through cuSPARSE. Serves as the base class for
// laplacian_matrix_t and modularity_matrix_t below.
template <typename index_type, typename value_type>
struct sparse_matrix_t {
  // General (possibly rectangular) CSR matrix from raw device pointers.
  sparse_matrix_t(raft::resources const& raft_handle,
                  index_type const* row_offsets,
                  index_type const* col_indices,
                  value_type const* values,
                  index_type const nrows,
                  index_type const ncols,
                  index_type const nnz)
    : handle_(raft_handle),
      row_offsets_(row_offsets),
      col_indices_(col_indices),
      values_(values),
      nrows_(nrows),
      ncols_(ncols),
      nnz_(nnz)
  {
  }

  // Square CSR matrix (ncols == nrows).
  sparse_matrix_t(raft::resources const& raft_handle,
                  index_type const* row_offsets,
                  index_type const* col_indices,
                  value_type const* values,
                  index_type const nrows,
                  index_type const nnz)
    : handle_(raft_handle),
      row_offsets_(row_offsets),
      col_indices_(col_indices),
      values_(values),
      nrows_(nrows),
      ncols_(nrows),
      nnz_(nnz)
  {
  }

  // Adapts a graph CSR view exposing offsets/indices/edge_data and vertex /
  // edge counts; treated as a square matrix (one row per vertex).
  template <typename CSRView>
  sparse_matrix_t(raft::resources const& raft_handle, CSRView const& csr_view)
    : handle_(raft_handle),
      row_offsets_(csr_view.offsets),
      col_indices_(csr_view.indices),
      values_(csr_view.edge_data),
      nrows_(csr_view.number_of_vertices),
      ncols_(csr_view.number_of_vertices),
      nnz_(csr_view.number_of_edges)
  {
  }

  virtual ~sparse_matrix_t(void) =
    default;  // virtual because used as base for following matrix types

  // y = alpha*A*x + beta*y
  //(Note: removed const-ness of x, because CUDA 11 SpMV
  // descriptor creation works with non-const, and const-casting
  // down is dangerous)
  //
  virtual void mv(value_type alpha,
                  value_type* __restrict__ x,
                  value_type beta,
                  value_type* __restrict__ y,
                  sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
                  bool transpose = false,
                  bool symmetric = false) const
  {
    using namespace sparse;

    RAFT_EXPECTS(x != nullptr, "Null x buffer.");
    RAFT_EXPECTS(y != nullptr, "Null y buffer.");

    auto cusparse_h = resource::get_cusparse_handle(handle_);
    auto stream     = resource::get_cuda_stream(handle_);

    cusparseOperation_t trans = transpose ? CUSPARSE_OPERATION_TRANSPOSE :  // transpose
                                  CUSPARSE_OPERATION_NON_TRANSPOSE;         // non-transpose

#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
    // Generic cuSPARSE SpMV API path (CUDA >= 10.1): build SpMat/DnVec
    // descriptors, query scratch size, then run cusparseSpMV.
    auto size_x = transpose ? nrows_ : ncols_;
    auto size_y = transpose ? ncols_ : nrows_;

    cusparseSpMVAlg_t spmv_alg = translate_algorithm(alg);

    // create descriptors:
    //(below casts are necessary, because
    // cusparseCreateCsr(...) takes non-const
    // void*; the casts should be harmless)
    //
    cusparseSpMatDescr_t matA;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatecsr(&matA,
                                                              nrows_,
                                                              ncols_,
                                                              nnz_,
                                                              const_cast<index_type*>(row_offsets_),
                                                              const_cast<index_type*>(col_indices_),
                                                              const_cast<value_type*>(values_)));

    cusparseDnVecDescr_t vecX;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatednvec(&vecX, size_x, x));

    // SpMV is performed on a copy of y and the result copied back afterwards;
    // see the FIXME below.
    rmm::device_uvector<value_type> y_tmp(size_y, stream);
    raft::copy(y_tmp.data(), y, size_y, stream);

    cusparseDnVecDescr_t vecY;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatednvec(&vecY, size_y, y_tmp.data()));

    // get (scratch) external device buffer size:
    //
    size_t bufferSize;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsespmv_buffersize(
      cusparse_h, trans, &alpha, matA, vecX, &beta, vecY, spmv_alg, &bufferSize, stream));

    // allocate external buffer:
    //
    vector_t<value_type> external_buffer(handle_, bufferSize);

    // finally perform SpMV:
    //
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsespmv(
      cusparse_h, trans, &alpha, matA, vecX, &beta, vecY, spmv_alg, external_buffer.raw(), stream));

    // FIXME: This is a workaround for a cusparse issue being encountered in CUDA 12
    raft::copy(y, y_tmp.data(), size_y, stream);

    // free descriptors:
    //(TODO: maybe wrap them in a RAII struct?)
    //
    RAFT_CUSPARSE_TRY(cusparseDestroyDnVec(vecY));
    RAFT_CUSPARSE_TRY(cusparseDestroyDnVec(vecX));
    RAFT_CUSPARSE_TRY(cusparseDestroySpMat(matA));
#else
    // Legacy csrmv path for older CUDA toolkits; `symmetric` only has an
    // effect here (it selects the matrix type on the legacy descriptor).
    RAFT_CUSPARSE_TRY(
      raft::sparse::detail::cusparsesetpointermode(cusparse_h, CUSPARSE_POINTER_MODE_HOST, stream));
    cusparseMatDescr_t descr = 0;
    RAFT_CUSPARSE_TRY(cusparseCreateMatDescr(&descr));
    if (symmetric) {
      RAFT_CUSPARSE_TRY(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC));
    } else {
      RAFT_CUSPARSE_TRY(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
    }
    RAFT_CUSPARSE_TRY(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsrmv(cusparse_h,
                                                          trans,
                                                          nrows_,
                                                          ncols_,
                                                          nnz_,
                                                          &alpha,
                                                          descr,
                                                          values_,
                                                          row_offsets_,
                                                          col_indices_,
                                                          x,
                                                          &beta,
                                                          y,
                                                          stream));
    RAFT_CUSPARSE_TRY(cusparseDestroyMatDescr(descr));
#endif
  }

  resources const& get_handle(void) const { return handle_; }

#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
  // Maps the library-agnostic algorithm enum onto cuSPARSE's SpMV algorithm
  // selector; unknown values fall back to the default algorithm.
  cusparseSpMVAlg_t translate_algorithm(sparse_mv_alg_t alg) const
  {
    switch (alg) {
      case sparse_mv_alg_t::SPARSE_MV_ALG1: return CUSPARSE_SPMV_CSR_ALG1;
      case sparse_mv_alg_t::SPARSE_MV_ALG2: return CUSPARSE_SPMV_CSR_ALG2;
      default: return CUSPARSE_SPMV_ALG_DEFAULT;
    }
  }
#endif

  // private: // maybe not, keep this ASAPBNS ("as simple as possible, but not simpler"); hence,
  // aggregate
  raft::resources const& handle_;
  index_type const* row_offsets_;
  index_type const* col_indices_;
  value_type const* values_;
  index_type const nrows_;
  index_type const ncols_;
  index_type const nnz_;
};
// Graph Laplacian L = D - A, stored implicitly as the adjacency matrix A
// (base class) plus its diagonal degree vector D (computed once as A*1).
template <typename index_type, typename value_type>
struct laplacian_matrix_t : sparse_matrix_t<index_type, value_type> {
  laplacian_matrix_t(raft::resources const& raft_handle,
                     index_type const* row_offsets,
                     index_type const* col_indices,
                     value_type const* values,
                     index_type const nrows,
                     index_type const nnz)
    : sparse_matrix_t<index_type, value_type>(
        raft_handle, row_offsets, col_indices, values, nrows, nnz),
      diagonal_(raft_handle, nrows)
  {
    // diagonal_ = A * ones, i.e. the (weighted) row sums / vertex degrees.
    vector_t<value_type> ones{raft_handle, nrows};
    ones.fill(1.0);
    sparse_matrix_t<index_type, value_type>::mv(1, ones.raw(), 0, diagonal_.raw());
  }

  laplacian_matrix_t(raft::resources const& raft_handle,
                     sparse_matrix_t<index_type, value_type> const& csr_m)
    : sparse_matrix_t<index_type, value_type>(raft_handle,
                                              csr_m.row_offsets_,
                                              csr_m.col_indices_,
                                              csr_m.values_,
                                              csr_m.nrows_,
                                              csr_m.nnz_),
      diagonal_(raft_handle, csr_m.nrows_)
  {
    // diagonal_ = A * ones, i.e. the (weighted) row sums / vertex degrees.
    vector_t<value_type> ones{raft_handle, csr_m.nrows_};
    ones.fill(1.0);
    sparse_matrix_t<index_type, value_type>::mv(1, ones.raw(), 0, diagonal_.raw());
  }

  // y = alpha*A*x + beta*y
  // Here A is the Laplacian: the result is alpha*(D - A_adj)*x + beta*y,
  // assembled as (scale y by beta) + (diagmv adds alpha*D*x) +
  // (base SpMV adds -alpha*A_adj*x).
  void mv(value_type alpha,
          value_type* __restrict__ x,
          value_type beta,
          value_type* __restrict__ y,
          sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
          bool transpose = false,
          bool symmetric = false) const override
  {
    constexpr int BLOCK_SIZE = 1024;
    auto n = sparse_matrix_t<index_type, value_type>::nrows_;

    auto handle   = sparse_matrix_t<index_type, value_type>::get_handle();
    auto cublas_h = resource::get_cublas_handle(handle);
    auto stream   = resource::get_cuda_stream(handle);

    // scales y by beta:
    //
    if (beta == 0) {
      RAFT_CUDA_TRY(cudaMemsetAsync(y, 0, n * sizeof(value_type), stream));
    } else if (beta != 1) {
      // TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasscal(cublas_h, n, &beta, y, 1, stream));
    }

    // Apply diagonal matrix
    // (grid capped at 65535 blocks; the diagmv kernel strides over the grid,
    // so all n elements are still covered)
    //
    dim3 gridDim{std::min<unsigned int>((n + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535), 1, 1};
    dim3 blockDim{BLOCK_SIZE, 1, 1};
    diagmv<<<gridDim, blockDim, 0, stream>>>(n, alpha, diagonal_.raw(), x, y);
    RAFT_CHECK_CUDA(stream);

    // Apply adjacency matrix
    //
    sparse_matrix_t<index_type, value_type>::mv(-alpha, x, 1, y, alg, transpose, symmetric);
  }

  vector_t<value_type> diagonal_;  // degree vector D (row sums of A)
};
// Modularity matrix B = A - d*d'/(sum of degrees), stored implicitly via the
// adjacency matrix (grand-base class) and the degree vector d (inherited
// diagonal_); edge_sum_ caches ||d||_1.
template <typename index_type, typename value_type>
struct modularity_matrix_t : laplacian_matrix_t<index_type, value_type> {
  modularity_matrix_t(raft::resources const& raft_handle,
                      index_type const* row_offsets,
                      index_type const* col_indices,
                      value_type const* values,
                      index_type const nrows,
                      index_type const nnz)
    : laplacian_matrix_t<index_type, value_type>(
        raft_handle, row_offsets, col_indices, values, nrows, nnz)
  {
    // Total (weighted) degree; used to normalize the rank-one correction.
    edge_sum_ = laplacian_matrix_t<index_type, value_type>::diagonal_.nrm1();
  }

  modularity_matrix_t(raft::resources const& raft_handle,
                      sparse_matrix_t<index_type, value_type> const& csr_m)
    : laplacian_matrix_t<index_type, value_type>(raft_handle, csr_m)
  {
    // Total (weighted) degree; used to normalize the rank-one correction.
    edge_sum_ = laplacian_matrix_t<index_type, value_type>::diagonal_.nrm1();
  }

  // y = alpha*A*x + beta*y
  // NOTE(review): `beta` is accepted but not used below — the SpMV is issued
  // with beta = 0, so y is always overwritten. Presumably the spectral solver
  // only ever calls this with beta == 0; confirm before relying on beta here.
  // Likewise the rank-one term (d'x/edge_sum)*d is not scaled by alpha.
  void mv(value_type alpha,
          value_type* __restrict__ x,
          value_type beta,
          value_type* __restrict__ y,
          sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
          bool transpose = false,
          bool symmetric = false) const override
  {
    auto n = sparse_matrix_t<index_type, value_type>::nrows_;

    auto handle   = sparse_matrix_t<index_type, value_type>::get_handle();
    auto cublas_h = resource::get_cublas_handle(handle);
    auto stream   = resource::get_cuda_stream(handle);

    // y = A*x
    //
    sparse_matrix_t<index_type, value_type>::mv(alpha, x, 0, y, alg, transpose, symmetric);
    value_type dot_res;

    // gamma = d'*x
    //
    // Cublas::dot(this->n, D.raw(), 1, x, 1, &dot_res);
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasdot(cublas_h,
                                      n,
                                      laplacian_matrix_t<index_type, value_type>::diagonal_.raw(),
                                      1,
                                      x,
                                      1,
                                      &dot_res,
                                      stream));

    // y = y -(gamma/edge_sum)*d
    //
    value_type gamma_ = -dot_res / edge_sum_;
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasaxpy(cublas_h,
                                      n,
                                      &gamma_,
                                      laplacian_matrix_t<index_type, value_type>::diagonal_.raw(),
                                      1,
                                      y,
                                      1,
                                      stream));
  }

  value_type edge_sum_;  // ||d||_1: total weighted degree of the graph
};
} // namespace detail
} // namespace matrix
} // namespace spectral
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral | rapidsai_public_repos/cuvs/cpp/include/cuvs/spectral/detail/modularity_maximization.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math.h>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <stdio.h>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <tuple>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/detail/spectral_util.cuh>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace cuvs {
namespace spectral {
namespace detail {
// =========================================================
// Spectral modularity_maximization
// =========================================================
/** Compute a spectral modularity-maximization partition for a weighted
 * undirected graph: embed vertices with the top eigenvectors of the
 * modularity matrix, then cluster the embedding.
 *
 * (The previous comment documented a different, older signature —
 * G/nClusters/maxIter_lanczos etc. — which no longer exists.)
 *
 * @param handle raft handle for managing expensive resources
 * @param csr_m Weighted graph in CSR format
 * @param eigen_solver Eigensolver applied to the modularity matrix
 * @param cluster_solver Clustering solver applied to the eigenvector embedding
 * @param clusters (Output, device memory, n entries) Cluster assignments
 * @param eigVals (Output, device memory) Computed eigenvalues
 * @param eigVecs (Output, device memory) Computed eigenvectors
 * @return tuple of (# eigensolver iterations, cluster-solver residual,
 *         # cluster-solver iterations)
 */
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> modularity_maximization(
  raft::resources const& handle,
  raft::spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
  RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");

  auto stream   = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);

  std::tuple<vertex_t, weight_t, vertex_t>
    stats;  // # iters eigen solver, cluster solver residual, # iters cluster solver

  vertex_t n = csr_m.nrows_;

  // Compute eigenvectors of Modularity Matrix

  // Initialize Modularity Matrix
  raft::spectral::matrix::modularity_matrix_t<vertex_t, weight_t> B{handle, csr_m};

  auto eigen_config = eigen_solver.get_config();
  auto nEigVecs     = eigen_config.n_eigVecs;

  // Compute eigenvectors corresponding to largest eigenvalues
  std::get<0>(stats) = eigen_solver.solve_largest_eigenvectors(handle, B, eigVals, eigVecs);

  // Whiten eigenvector matrix
  transform_eigen_matrix(handle, n, nEigVecs, eigVecs);

  // notice that at this point the matrix has already been transposed, so we are scaling
  // columns
  scale_obs(nEigVecs, n, eigVecs);
  RAFT_CHECK_CUDA(stream);

  // Find partition clustering
  auto pair_cluster = cluster_solver.solve(handle, n, nEigVecs, eigVecs, clusters);

  std::get<1>(stats) = pair_cluster.first;
  std::get<2>(stats) = pair_cluster.second;

  return stats;
}
//===================================================
// Analysis of graph partition
// =========================================================
/// Compute modularity
/** Determines the modularity of a given clustering with respect to a graph:
 * each cluster's contribution is accumulated and the total is normalized by
 * the graph's weighted degree sum.
 * @param handle raft handle for managing expensive resources
 * @param csr_m Weighted graph in CSR format
 * @param nClusters Number of clusters.
 * @param clusters (Input, device memory, n entries) Cluster assignments.
 * @param modularity On exit, modularity
 */
template <typename vertex_t, typename weight_t>
void analyzeModularity(raft::resources const& handle,
                       raft::spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                       vertex_t nClusters,
                       vertex_t const* __restrict__ clusters,
                       weight_t& modularity)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  vertex_t const n = csr_m.nrows_;

  // Device scratch: per-cluster indicator vector and B * indicator product.
  raft::spectral::matrix::vector_t<weight_t> part_i(handle, n);
  raft::spectral::matrix::vector_t<weight_t> Bx(handle, n);

  // Initialize cuBLAS (scalars are passed from host pointers below).
  RAFT_CUBLAS_TRY(linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // Modularity matrix of the graph.
  raft::spectral::matrix::modularity_matrix_t<vertex_t, weight_t> B{handle, csr_m};

  // Accumulate the contribution of each cluster.
  modularity = 0;
  for (vertex_t i = 0; i < nClusters; ++i) {
    weight_t clustersize;
    weight_t partModularity;
    if (!construct_indicator(handle, i, n, clustersize, partModularity, clusters, part_i, Bx, B)) {
      WARNING("empty partition");
      continue;
    }
    modularity += partModularity;
  }

  // Normalize by the total weighted degree.
  modularity = modularity / B.diagonal_.nrm1();
}
} // namespace detail
} // namespace spectral
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/distance.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "distance-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "distance-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/distance_types.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cuvs::distance {
/** enum to tell how to compute distance */
enum DistanceType : unsigned short {

  /** evaluate as dist_ij = sum(x_ik^2) + sum(y_jk^2) - 2*sum(x_ik * y_jk) */
  L2Expanded = 0,
  /** same as above, but inside the epilogue, perform square root operation */
  L2SqrtExpanded = 1,
  /** cosine distance */
  CosineExpanded = 2,
  /** L1 distance */
  L1 = 3,
  /** evaluate as dist_ij += (x_ik - y_jk)^2 */
  L2Unexpanded = 4,
  /** same as above, but inside the epilogue, perform square root operation */
  L2SqrtUnexpanded = 5,
  /** basic inner product **/
  InnerProduct = 6,
  /** Chebyshev (Linf) distance **/
  Linf = 7,
  /** Canberra distance **/
  Canberra = 8,
  /** Generalized Minkowski distance **/
  LpUnexpanded = 9,
  /** Correlation distance **/
  CorrelationExpanded = 10,
  /** Jaccard distance **/
  JaccardExpanded = 11,
  /** Hellinger distance **/
  HellingerExpanded = 12,
  /** Haversine distance **/
  Haversine = 13,
  /** Bray-Curtis distance **/
  BrayCurtis = 14,
  /** Jensen-Shannon distance**/
  JensenShannon = 15,
  /** Hamming distance **/
  HammingUnexpanded = 16,
  /** KLDivergence **/
  KLDivergence = 17,
  /** RusselRao **/
  RusselRaoExpanded = 18,
  /** Dice-Sorensen distance **/
  DiceExpanded = 19,
  /** Precomputed (special value) **/
  Precomputed = 100
};
/**
 * Whether minimal distance corresponds to similar elements (using the given metric).
 */
inline bool is_min_close(DistanceType metric)
{
  // Similarity metrics rank neighbors by *larger* values, so minimum is not
  // "close" for them (see the same logic at
  // cpp/include/raft/sparse/spatial/detail/knn.cuh:362 {perform_k_selection}).
  // InnerProduct is the only such metric here; every other supported metric
  // is a distance where smaller means more similar.
  return metric != DistanceType::InnerProduct;
}
namespace kernels {
// Kernel-function families usable for kernel matrices (see KernelParams).
enum KernelType { LINEAR, POLYNOMIAL, RBF, TANH };

/**
 * Parameters for kernel matrices.
 * The following kernels are implemented:
 * - LINEAR \f[ K(x_1,x_2) = <x_1,x_2>, \f] where \f$< , >\f$ is the dot product
 * - POLYNOMIAL \f[ K(x_1, x_2) = (\gamma <x_1,x_2> + \mathrm{coef0})^\mathrm{degree} \f]
 * - RBF \f[ K(x_1, x_2) = \exp(- \gamma |x_1-x_2|^2) \f]
 * - TANH \f[ K(x_1, x_2) = \tanh(\gamma <x_1,x_2> + \mathrm{coef0}) \f]
 */
struct KernelParams {
  // Kernel function parameters
  KernelType kernel;  //!< Type of the kernel function
  int degree;         //!< Degree of polynomial kernel (ignored by others)
  double gamma;       //!< gamma multiplier in poly, RBF and tanh kernels (see formulas above)
  double coef0;       //!< additive constant in poly and tanh kernels
};
}  // end namespace kernels
}; // namespace cuvs::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/distance-inl.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <rmm/device_uvector.hpp>
#include <type_traits>
#include <raft/core/device_mdspan.hpp>
namespace cuvs {
namespace distance {
/**
* @defgroup pairwise_distance pointer-based pairwise distance prims
* @{
*/
/**
* @brief Evaluate pairwise distances with the user epilogue lamba allowed
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam FinalLambda user-defined epilogue lamba
* @tparam IdxT Index type
* @param handle raft handle for managing expensive resources
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param fin_op the final gemm epilogue lambda
* @param isRowMajor whether the matrices are row-major or col-major
* @param metric_arg metric argument (used for Minkowski distance)
*
* @note fin_op: This is a device lambda which is supposed to operate upon the
* input which is AccT and returns the output in OutT. It's signature is
* as follows: <pre>OutT fin_op(AccT in, int g_idx);</pre>. If one needs
* any other parameters, feel free to pass them via closure.
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename FinalLambda,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              void* workspace,
              size_t worksize,
              FinalLambda fin_op,
              bool isRowMajor = true,
              DataT metric_arg = 2.0f)
{
  // Thin forwarding wrapper: all computation happens in detail::distance.
  detail::distance<DistT, DataT, AccT, OutT, FinalLambda, IdxT>(
    handle, x, y, dist, m, n, k, workspace, worksize, fin_op, isRowMajor, metric_arg);
}
/**
* @brief Evaluate pairwise distances for the simple use case
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam IdxT Index type
* @param handle raft handle for managing expensive resources
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param isRowMajor whether the matrices are row-major or col-major
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              void* workspace,
              size_t worksize,
              bool isRowMajor = true,
              DataT metric_arg = 2.0f)
{
  // Caller-provided workspace variant (no final-op lambda); forwards to the
  // detail implementation. `worksize` must be at least getWorkspaceSize(...).
  detail::distance<DistT, DataT, AccT, OutT, IdxT>(
    handle, x, y, dist, m, n, k, workspace, worksize, isRowMajor, metric_arg);
}
/**
* @brief Return the exact workspace size to compute the distance
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam IdxT Index type
* @param x first set of points
* @param y second set of points
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
*
* @note If the specified DistT doesn't need the workspace at all, it
* returns 0.
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
size_t getWorkspaceSize(const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)
{
  // Workspace size in bytes required by distance<DistT>(...) for these shapes
  // (0 when the metric needs no scratch space).
  return detail::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(x, y, m, n, k);
}
/**
* @brief Return the exact workspace size to compute the distance
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam IdxT Index type
* @param x first set of points (size m*k)
* @param y second set of points (size n*k)
* @return number of bytes needed in workspace
*
* @note If the specified DistT doesn't need the workspace at all, it
* returns 0.
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int,
          typename layout>
size_t getWorkspaceSize(raft::device_matrix_view<DataT, IdxT, layout> const& x,
                        raft::device_matrix_view<DataT, IdxT, layout> const& y)
{
  // mdspan convenience overload: extracts pointers/extents and forwards to
  // the raw-pointer getWorkspaceSize above.
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");

  return getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(
    x.data_handle(), y.data_handle(), x.extent(0), y.extent(0), x.extent(1));
}
/**
* @brief Evaluate pairwise distances for the simple use case
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam IdxT Index type
* @param handle raft handle for managing expensive resources
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param isRowMajor whether the matrices are row-major or col-major
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT = int>
void distance(raft::resources const& handle,
              const DataT* x,
              const DataT* y,
              OutT* dist,
              IdxT m,
              IdxT n,
              IdxT k,
              bool isRowMajor = true,
              DataT metric_arg = 2.0f)
{
  // Convenience overload: queries the needed workspace size, allocates it on
  // the handle's stream, then forwards to the detail implementation.
  auto stream = raft::resource::get_cuda_stream(handle);
  rmm::device_uvector<char> workspace(0, stream);
  auto worksize = getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>(x, y, m, n, k);
  workspace.resize(worksize, stream);
  detail::distance<DistT, DataT, AccT, OutT, IdxT>(
    handle, x, y, dist, m, n, k, workspace.data(), worksize, isRowMajor, metric_arg);
}
/**
* @brief Convenience wrapper around 'distance' prim to convert runtime metric
* into compile time for the purpose of dispatch
* @tparam Type input/accumulation/output data-type
* @tparam IdxT indexing type
* @param handle raft handle for managing expensive resources
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace buffer which can get resized as per the
* needed workspace size
* @param metric distance metric
* @param isRowMajor whether the matrices are row-major or col-major
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       const Type* x,
                       const Type* y,
                       Type* dist,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       rmm::device_uvector<char>& workspace,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor = true,
                       Type metric_arg = 2.0f)
{
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // Converts the runtime `metric` into a compile-time template argument:
  // each case passes an integral_constant whose value() selects the
  // instantiation. The workspace is (re)sized as needed inside the lambda.
  auto dispatch = [&](auto distance_type) {
    auto worksize = getWorkspaceSize<distance_type(), Type, Type, Type, IdxT>(x, y, m, n, k);
    workspace.resize(worksize, stream);
    detail::distance<distance_type(), Type, Type, Type, IdxT>(
      handle, x, y, dist, m, n, k, workspace.data(), worksize, isRowMajor, metric_arg);
  };

  // Metrics not listed here (e.g. Haversine, DiceExpanded) fall through to
  // the THROW in the default case.
  switch (metric) {
    case DistanceType::Canberra:
      dispatch(std::integral_constant<DistanceType, DistanceType::Canberra>{});
      break;
    case DistanceType::CorrelationExpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::CorrelationExpanded>{});
      break;
    case DistanceType::CosineExpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::CosineExpanded>{});
      break;
    case DistanceType::HammingUnexpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::HammingUnexpanded>{});
      break;
    case DistanceType::HellingerExpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::HellingerExpanded>{});
      break;
    case cuvs::distance::DistanceType::InnerProduct:
      dispatch(std::integral_constant<DistanceType, DistanceType::InnerProduct>{});
      break;
    case DistanceType::JensenShannon:
      dispatch(std::integral_constant<DistanceType, DistanceType::JensenShannon>{});
      break;
    case DistanceType::KLDivergence:
      dispatch(std::integral_constant<DistanceType, DistanceType::KLDivergence>{});
      break;
    case DistanceType::L1:
      dispatch(std::integral_constant<DistanceType, DistanceType::L1>{});
      break;
    case DistanceType::L2Expanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::L2Expanded>{});
      break;
    case DistanceType::L2SqrtExpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::L2SqrtExpanded>{});
      break;
    case DistanceType::L2SqrtUnexpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::L2SqrtUnexpanded>{});
      break;
    case DistanceType::L2Unexpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::L2Unexpanded>{});
      break;
    case DistanceType::Linf:
      dispatch(std::integral_constant<DistanceType, DistanceType::Linf>{});
      break;
    case DistanceType::LpUnexpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::LpUnexpanded>{});
      break;
    case DistanceType::RusselRaoExpanded:
      dispatch(std::integral_constant<DistanceType, DistanceType::RusselRaoExpanded>{});
      break;
    default: THROW("Unknown or unsupported distance metric '%d'!", (int)metric);
  };
}
/**
* @brief Convenience wrapper around 'distance' prim to convert runtime metric
* into compile time for the purpose of dispatch
* @tparam Type input/accumulation/output data-type
* @tparam IdxT indexing type
* @param handle raft handle for managing expensive resources
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param metric distance metric
* @param isRowMajor whether the matrices are row-major or col-major
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       const Type* x,
                       const Type* y,
                       Type* dist,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor = true,
                       Type metric_arg = 2.0f)
{
  // Delegate to the workspace-taking overload with an empty scratch buffer;
  // that overload grows the buffer to the required size for the metric.
  rmm::device_uvector<char> scratch(0, raft::resource::get_cuda_stream(handle));
  pairwise_distance<Type, IdxT>(
    handle, x, y, dist, m, n, k, scratch, metric, isRowMajor, metric_arg);
}
/** @} */
/**
* \defgroup distance_mdspan Pairwise distance functions
* @{
*/
/**
* @brief Evaluate pairwise distances for the simple use case.
*
* Note: Only contiguous row- or column-major layouts supported currently.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/device_mdarray.hpp>
* #include <raft/random/make_blobs.cuh>
* #include <cuvs/distance/distance.cuh>
*
* raft::raft::resources handle;
* int n_samples = 5000;
* int n_features = 50;
*
* auto input = raft::make_device_matrix<float>(handle, n_samples, n_features);
* auto labels = raft::make_device_vector<int>(handle, n_samples);
* auto output = raft::make_device_matrix<float>(handle, n_samples, n_samples);
*
* raft::random::make_blobs(handle, input.view(), labels.view());
* auto metric = cuvs::distance::DistanceType::L2SqrtExpanded;
* cuvs::distance::pairwise_distance(handle, input.view(), input.view(), output.view(), metric);
* @endcode
*
* @tparam DistanceType which distance to evaluate
* @tparam DataT input argument type
* @tparam AccT accumulation type
* @tparam OutT output type
* @tparam IdxT Index type
* @param handle raft handle for managing expensive resources
* @param x first set of points (size n*k)
* @param y second set of points (size m*k)
* @param dist output distance matrix (size n*m)
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <cuvs::distance::DistanceType DistT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename layout = raft::layout_c_contiguous,
          typename IdxT   = int>
void distance(raft::resources const& handle,
              raft::device_matrix_view<DataT, IdxT, layout> const x,
              raft::device_matrix_view<DataT, IdxT, layout> const y,
              raft::device_matrix_view<OutT, IdxT, layout> dist,
              DataT metric_arg = 2.0f)
{
  // mdspan front-end: validate shapes/contiguity, derive the majorness from
  // the layout template parameter, then forward to the pointer-based overload.
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");
  RAFT_EXPECTS(dist.extent(0) == x.extent(0),
               "Number of rows in output must be equal to "
               "number of rows in X");
  RAFT_EXPECTS(dist.extent(1) == y.extent(0),
               "Number of columns in output must be equal to "
               "number of rows in Y");

  RAFT_EXPECTS(x.is_exhaustive(), "Input x must be contiguous.");
  RAFT_EXPECTS(y.is_exhaustive(), "Input y must be contiguous.");

  constexpr auto is_rowmajor = std::is_same_v<layout, raft::layout_c_contiguous>;

  distance<DistT, DataT, AccT, OutT, IdxT>(handle,
                                           x.data_handle(),
                                           y.data_handle(),
                                           dist.data_handle(),
                                           x.extent(0),
                                           y.extent(0),
                                           x.extent(1),
                                           is_rowmajor,
                                           metric_arg);
}
/**
* @brief Convenience wrapper around 'distance' prim to convert runtime metric
* into compile time for the purpose of dispatch
* @tparam Type input/accumulation/output data-type
* @tparam IdxT indexing type
* @param handle raft handle for managing expensive resources
* @param x first matrix of points (size mxk)
* @param y second matrix of points (size nxk)
* @param dist output distance matrix (size mxn)
* @param metric distance metric
* @param metric_arg metric argument (used for Minkowski distance)
*/
template <typename Type, typename layout = raft::layout_c_contiguous, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       raft::device_matrix_view<Type, IdxT, layout> const x,
                       raft::device_matrix_view<Type, IdxT, layout> const y,
                       raft::device_matrix_view<Type, IdxT, layout> dist,
                       cuvs::distance::DistanceType metric,
                       Type metric_arg = 2.0f)
{
  // Shape validation: dist is (rows of x) x (rows of y); x and y share the
  // feature dimension k. All three buffers must be contiguous.
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Number of columns must be equal.");
  RAFT_EXPECTS(dist.extent(0) == x.extent(0),
               "Number of rows in output must be equal to "
               "number of rows in X");
  RAFT_EXPECTS(dist.extent(1) == y.extent(0),
               "Number of columns in output must be equal to "
               "number of rows in Y");
  RAFT_EXPECTS(x.is_exhaustive(), "Input x must be contiguous.");
  RAFT_EXPECTS(y.is_exhaustive(), "Input y must be contiguous.");
  RAFT_EXPECTS(dist.is_exhaustive(), "Output must be contiguous.");

  // Row-major iff the layout template parameter is C-contiguous.
  constexpr auto rowmajor = std::is_same_v<layout, raft::layout_c_contiguous>;

  // NOTE: the previous implementation allocated an unused zero-byte scratch
  // buffer here; the workspace-free pointer overload handles that internally.
  pairwise_distance(handle,
                    x.data_handle(),
                    y.data_handle(),
                    dist.data_handle(),
                    x.extent(0),
                    y.extent(0),
                    x.extent(1),
                    metric,
                    rowmajor,
                    metric_arg);
}
/** @} */
}; // namespace distance
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/fused_l2_nn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "fused_l2_nn-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "fused_l2_nn-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/fused_l2_nn_helpers.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/fused_l2_nn.cuh>
#include <raft/core/resource/cuda_stream.hpp>
namespace cuvs::distance {
/**
* \defgroup fused_l2_nn Fused 1-nearest neighbors
* @{
*/
template <typename LabelT, typename DataT>
using KVPMinReduce = detail::KVPMinReduceImpl<LabelT, DataT>;
template <typename LabelT, typename DataT>
using MinAndDistanceReduceOp = detail::MinAndDistanceReduceOpImpl<LabelT, DataT>;
template <typename LabelT, typename DataT>
using MinReduceOp = detail::MinReduceOpImpl<LabelT, DataT>;
/** @} */
/**
 * @brief Initialize the output array with the init value supplied by the
 * reduction op, so the subsequent fused reduction starts from its identity.
 *
 * @param handle raft handle providing the CUDA stream to run on
 * @param min    output buffer to initialize (length m, on device)
 * @param m      number of entries to initialize
 * @param maxVal init value handed to the reduction op
 * @param redOp  reduction operator that produces the per-entry init value
 */
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
void initialize(raft::resources const& handle, OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
{
  // `resource` lives in the raft namespace; inside cuvs::distance the call
  // must be fully qualified (matches every other call site in these headers).
  detail::initialize<DataT, OutT, IdxT, ReduceOpT>(
    min, m, maxVal, redOp, raft::resource::get_cuda_stream(handle));
}
} // namespace cuvs::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/fused_l2_nn-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <cuvs/distance/fused_l2_nn_helpers.cuh> // include initialize and reduce operations
#include <raft/core/kvp.hpp> // raft::KeyValuePair
#include <raft/core/resources.hpp> // raft::resources
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs {
namespace distance {
/**
 * @brief Fused L2-distance + 1-nearest-neighbor computation with a min
 * reduction (declaration only; instantiated via the extern templates below).
 *
 * @param min        output buffer, one reduced entry per row of x (length m,
 *                   per the instantiated OutT/KeyValuePair types — confirm
 *                   against the -inl definition)
 * @param x          first matrix of points (m x k — inferred from m/n/k naming)
 * @param y          second matrix of points (n x k — inferred from m/n/k naming)
 * @param xn         precomputed norms of x (presumably L2 squared, length m — TODO confirm)
 * @param yn         precomputed norms of y (presumably L2 squared, length n — TODO confirm)
 * @param workspace  caller-provided device scratch space
 * @param sqrt       presumably whether reduced distances get an L2 sqrt — confirm
 * @param initOutBuffer whether to initialize `min` before reducing
 * @param stream     CUDA stream the work is ordered on
 */
template <typename DataT, typename OutT, typename IdxT>
void fusedL2NNMinReduce(OutT* min,
                        const DataT* x,
                        const DataT* y,
                        const DataT* xn,
                        const DataT* yn,
                        IdxT m,
                        IdxT n,
                        IdxT k,
                        void* workspace,
                        bool sqrt,
                        bool initOutBuffer,
                        cudaStream_t stream) RAFT_EXPLICIT;
} // namespace distance
} // namespace cuvs
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_distance_fusedL2NNMinReduce(DataT, OutT, IdxT) \
extern template void cuvs::distance::fusedL2NNMinReduce<DataT, OutT, IdxT>(OutT * min, \
const DataT* x, \
const DataT* y, \
const DataT* xn, \
const DataT* yn, \
IdxT m, \
IdxT n, \
IdxT k, \
void* workspace, \
bool sqrt, \
bool initOutBuffer, \
cudaStream_t stream)
instantiate_raft_distance_fusedL2NNMinReduce(double, double, int);
instantiate_raft_distance_fusedL2NNMinReduce(double, double, int64_t);
instantiate_raft_distance_fusedL2NNMinReduce(float, float, int);
instantiate_raft_distance_fusedL2NNMinReduce(float, float, int64_t);
// We can't have comma's in the macro expansion, so we use the COMMA macro:
#define COMMA ,
instantiate_raft_distance_fusedL2NNMinReduce(double, raft::KeyValuePair<int COMMA double>, int);
instantiate_raft_distance_fusedL2NNMinReduce(double,
raft::KeyValuePair<int64_t COMMA double>,
int64_t);
instantiate_raft_distance_fusedL2NNMinReduce(float, raft::KeyValuePair<int COMMA float>, int);
instantiate_raft_distance_fusedL2NNMinReduce(float,
raft::KeyValuePair<int64_t COMMA float>,
int64_t);
#undef COMMA
#undef instantiate_raft_distance_fusedL2NNMinReduce
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/masked_nn.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MASKED_L2_NN_H
#define __MASKED_L2_NN_H
#pragma once
#include <cuvs/distance/detail/masked_nn.cuh>
#include <cuvs/distance/fused_l2_nn.cuh>
#include <limits>
#include <raft/core/handle.hpp>
#include <raft/util/cuda_utils.cuh>
#include <stdint.h>
namespace cuvs {
namespace distance {
/**
* \defgroup masked_nn Masked 1-nearest neighbors
* @{
*/
/**
* @brief Parameter struct for masked_l2_nn function
*
* @tparam ReduceOpT Type of reduction operator in the epilogue.
* @tparam KVPReduceOpT Type of Reduction operation on key value pairs.
*
* Usage example:
* @code{.cpp}
* #include <cuvs/distance/masked_nn.cuh>
*
* using IdxT = int;
* using DataT = float;
* using RedOpT = cuvs::distance::MinAndDistanceReduceOp<IdxT, DataT>;
* using PairRedOpT = cuvs::distance::KVPMinReduce<IdxT, DataT>;
* using ParamT = cuvs::distance::masked_l2_nn_params<RedOpT, PairRedOpT>;
*
* bool init_out = true;
* bool sqrt = false;
*
* ParamT masked_l2_params{RedOpT{}, PairRedOpT{}, sqrt, init_out};
* @endcode
*
* Prescribes how to reduce a distance to an intermediate type (`redOp`), and
* how to reduce two intermediate types (`pairRedOp`). Typically, a distance is
* mapped to an (index, value) pair and (index, value) pair with the lowest
* value (distance) is selected.
*
* In addition, prescribes whether to compute the square root of the distance
* (`sqrt`) and whether to initialize the output buffer (`initOutBuffer`).
*/
template <typename ReduceOpT, typename KVPReduceOpT>
struct masked_l2_nn_params {
  /** Reduction operator in the epilogue (distance -> intermediate type) */
  ReduceOpT redOp;
  /** Reduction operation on key value pairs (intermediate x intermediate) */
  KVPReduceOpT pairRedOp;
  /** Whether the output distances (`out` in masked_l2_nn) should contain L2-sqrt */
  bool sqrt;
  /** Whether to initialize the output buffer before the main kernel launch */
  bool initOutBuffer;
};
/**
* @brief Masked L2 distance and 1-nearest-neighbor computation in a single call.
*
* This function enables faster computation of nearest neighbors if the
* computation of distances between certain point pairs can be skipped.
*
* We use an adjacency matrix that describes which distances to calculate. The
* points in `y` are divided into groups, and the adjacency matrix indicates
* whether to compute distances between points in `x` and groups in `y`. In other
* words, if `adj[i,k]` is true then distance between point `x_i`, and points in
* `group_k` will be calculated.
*
* **Performance considerations**
*
* The points in `x` are processed in tiles of `M` points (`M` is currently 64,
* but may change in the future). As a result, the largest compute time
* reduction occurs if all `M` points can skip a group. If only part of the `M`
* points can skip a group, then at most a minor compute time reduction and a
* modest energy use reduction can be expected.
*
* The points in `y` are also grouped into tiles of `N` points (`N` is currently
* 64, but may change in the future). As a result, group sizes should be larger
* than `N` to avoid wasting computational resources. If the group sizes are
* evenly divisible by `N`, then the computation is most efficient, although for
* larger group sizes this effect is minor.
*
*
* **Comparison to SDDM**
*
* [SDDMM](https://ieeexplore.ieee.org/document/8638042) (sampled dense-dense
* matrix multiplication) is a matrix-matrix multiplication where only part of
* the output is computed. Compared to masked_l2_nn, there are a few differences:
*
* - The output of masked_l2_nn is a single vector (of nearest neighbors) and not
* a sparse matrix.
*
* - The sampling in masked_l2_nn is expressed through intermediate "groups"
 *   rather than a CSR format.
*
* @tparam DataT data type
* @tparam OutT output type to either store 1-NN indices and their minimum
* distances or store only the min distances. Accordingly, one
* has to pass an appropriate `ReduceOpT`
* @tparam IdxT indexing arithmetic type
* @tparam ReduceOpT A struct to perform the final needed reduction operation
* and also to initialize the output array elements with the
* appropriate initial value needed for reduction.
*
* @param handle RAFT handle for managing expensive resources
* @param params Parameter struct specifying the reduction operations.
* @param[in] x First matrix. Row major. Dim = `m x k`.
* (on device).
* @param[in] y Second matrix. Row major. Dim = `n x k`.
* (on device).
* @param[in] x_norm L2 squared norm of `x`. Length = `m`. (on device).
* @param[in] y_norm L2 squared norm of `y`. Length = `n`. (on device)
* @param[in] adj A boolean adjacency matrix indicating for each
* row of `x` and each group in `y` whether to compute the
* distance. Dim = `m x num_groups`.
* @param[in] group_idxs An array containing the *end* indices of each group
* in `y`. The value of group_idxs[j] indicates the
* start of group j + 1, i.e., it is the inclusive
* scan of the group lengths. The first group is
* always assumed to start at index 0 and the last
* group typically ends at index `n`. Length =
* `num_groups`.
* @param[out] out will contain the reduced output (Length = `m`)
* (on device)
*/
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void masked_l2_nn(raft::resources const& handle,
                  cuvs::distance::masked_l2_nn_params<ReduceOpT, KVPReduceOpT> params,
                  raft::device_matrix_view<const DataT, IdxT, raft::layout_c_contiguous> x,
                  raft::device_matrix_view<const DataT, IdxT, raft::layout_c_contiguous> y,
                  raft::device_vector_view<const DataT, IdxT, raft::layout_c_contiguous> x_norm,
                  raft::device_vector_view<const DataT, IdxT, raft::layout_c_contiguous> y_norm,
                  raft::device_matrix_view<const bool, IdxT, raft::layout_c_contiguous> adj,
                  raft::device_vector_view<const IdxT, IdxT, raft::layout_c_contiguous> group_idxs,
                  raft::device_vector_view<OutT, IdxT, raft::layout_c_contiguous> out)
{
  IdxT m          = x.extent(0);
  IdxT n          = y.extent(0);
  IdxT k          = x.extent(1);
  IdxT num_groups = group_idxs.extent(0);

  // Match k dimension of x, y
  RAFT_EXPECTS(x.extent(1) == y.extent(1), "Dimension of vectors in x and y must be equal.");
  // Match x, x_norm and y, y_norm
  RAFT_EXPECTS(m == x_norm.extent(0), "Length of `x_norm` must match input `x`.");
  RAFT_EXPECTS(n == y_norm.extent(0), "Length of `y_norm` must match input `y` ");
  // Match adj to x and group_idxs
  RAFT_EXPECTS(m == adj.extent(0), "#rows in `adj` must match input `x`.");
  RAFT_EXPECTS(num_groups == adj.extent(1), "#cols in `adj` must match length of `group_idxs`.");
  // The output holds one reduced entry per row of `x`.
  RAFT_EXPECTS(m == out.extent(0), "Length of `out` must match input `x`.");

  // NOTE: We do not check if all indices in group_idxs actually points *inside* y.

  // If there is no work to be done, return immediately.
  if (m == 0 || n == 0 || k == 0 || num_groups == 0) { return; }

  detail::masked_l2_nn_impl<DataT, OutT, IdxT, ReduceOpT>(handle,
                                                          out.data_handle(),
                                                          x.data_handle(),
                                                          y.data_handle(),
                                                          x_norm.data_handle(),
                                                          y_norm.data_handle(),
                                                          adj.data_handle(),
                                                          group_idxs.data_handle(),
                                                          num_groups,
                                                          m,
                                                          n,
                                                          k,
                                                          params.redOp,
                                                          params.pairRedOp,
                                                          params.sqrt,
                                                          params.initOutBuffer);
}
/** @} */
} // namespace distance
} // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/distance-ext.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/kernels/rbf_fin_op.cuh> // rbf_fin_op
#include <cuvs/distance/distance_types.hpp> // cuvs::distance::DistanceType
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/core/resources.hpp> // raft::resources
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/device_uvector.hpp> // rmm::device_uvector
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs {
namespace distance {
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename FinalLambda,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
void* workspace,
size_t worksize,
FinalLambda fin_op,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
void* workspace,
size_t worksize,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
size_t getWorkspaceSize(const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k) RAFT_EXPLICIT;
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int,
typename layout>
size_t getWorkspaceSize(raft::device_matrix_view<DataT, IdxT, layout> const& x,
raft::device_matrix_view<DataT, IdxT, layout> const& y) RAFT_EXPLICIT;
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename IdxT = int>
void distance(raft::resources const& handle,
const DataT* x,
const DataT* y,
OutT* dist,
IdxT m,
IdxT n,
IdxT k,
bool isRowMajor = true,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
const Type* x,
const Type* y,
Type* dist,
IdxT m,
IdxT n,
IdxT k,
rmm::device_uvector<char>& workspace,
cuvs::distance::DistanceType metric,
bool isRowMajor = true,
Type metric_arg = 2.0f) RAFT_EXPLICIT;
template <typename Type, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
const Type* x,
const Type* y,
Type* dist,
IdxT m,
IdxT n,
IdxT k,
cuvs::distance::DistanceType metric,
bool isRowMajor = true,
Type metric_arg = 2.0f) RAFT_EXPLICIT;
template <cuvs::distance::DistanceType DistT,
typename DataT,
typename AccT,
typename OutT,
typename layout = raft::layout_c_contiguous,
typename IdxT = int>
void distance(raft::resources const& handle,
raft::device_matrix_view<DataT, IdxT, layout> const x,
raft::device_matrix_view<DataT, IdxT, layout> const y,
raft::device_matrix_view<OutT, IdxT, layout> dist,
DataT metric_arg = 2.0f) RAFT_EXPLICIT;
// mdspan overload; views must be fully qualified as raft::device_matrix_view
// here since this declaration sits inside namespace cuvs (matching the
// distance() declaration above and the -inl definition).
template <typename Type, typename layout = raft::layout_c_contiguous, typename IdxT = int>
void pairwise_distance(raft::resources const& handle,
                       raft::device_matrix_view<Type, IdxT, layout> const x,
                       raft::device_matrix_view<Type, IdxT, layout> const y,
                       raft::device_matrix_view<Type, IdxT, layout> dist,
                       cuvs::distance::DistanceType metric,
                       Type metric_arg = 2.0f) RAFT_EXPLICIT;
}; // namespace distance
}; // namespace cuvs
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
/*
* Hierarchy of instantiations:
*
* This file defines the extern template instantiations for the public API of
* cuvs::distance. To improve compile times, the extern template instantiation
* of the distance kernels is handled in
* distance/detail/pairwise_matrix/dispatch-ext.cuh.
*
* After adding an instance here, make sure to also add the instance to
* dispatch-ext.cuh and the corresponding .cu files.
*/
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, FinalLambda, IdxT) \
extern template void cuvs::distance::distance<DT, DataT, AccT, OutT, FinalLambda, IdxT>( \
raft::resources const& handle, \
const DataT* x, \
const DataT* y, \
OutT* dist, \
IdxT m, \
IdxT n, \
IdxT k, \
void* workspace, \
size_t worksize, \
FinalLambda fin_op, \
bool isRowMajor, \
DataT metric_arg)
// The following two instances are used in test/distance/gram.cu. Note the use
// of int64_t for the index type.
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Unexpanded,
float,
float,
float,
cuvs::distance::kernels::detail::rbf_fin_op<float>,
int64_t);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Unexpanded,
double,
double,
double,
cuvs::distance::kernels::detail::rbf_fin_op<double>,
int64_t);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CorrelationExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::CorrelationExpanded,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::KLDivergence, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::KLDivergence, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L1, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L1, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Expanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Expanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtExpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Unexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Unexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Linf, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Linf, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::LpUnexpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::LpUnexpanded, double, double, double, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::RusselRaoExpanded, float, float, float, raft::identity_op, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::RusselRaoExpanded, double, double, double, raft::identity_op, int);
#undef instantiate_raft_distance_distance
// Same, but without raft::identity_op
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, IdxT) \
extern template void cuvs::distance::distance<DT, DataT, AccT, OutT, IdxT>( \
raft::resources const& handle, \
const DataT* x, \
const DataT* y, \
OutT* dist, \
IdxT m, \
IdxT n, \
IdxT k, \
void* workspace, \
size_t worksize, \
bool isRowMajor, \
DataT metric_arg)
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CorrelationExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CorrelationExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::KLDivergence, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::KLDivergence, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L1, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L1, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Expanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Expanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2SqrtUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Unexpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::L2Unexpanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::Linf, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::Linf, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::LpUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::LpUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::RusselRaoExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::RusselRaoExpanded, double, double, double, int);
#undef instantiate_raft_distance_distance
// Same, but without workspace
#define instantiate_raft_distance_distance(DT, DataT, AccT, OutT, IdxT) \
extern template void cuvs::distance::distance<DT, DataT, AccT, OutT, IdxT>( \
raft::resources const& handle, \
const DataT* x, \
const DataT* y, \
OutT* dist, \
IdxT m, \
IdxT n, \
IdxT k, \
bool isRowMajor, \
DataT metric_arg)
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::Canberra, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CorrelationExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CorrelationExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::CosineExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HammingUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::HellingerExpanded, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::InnerProduct, double, double, double, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, float, float, float, int);
instantiate_raft_distance_distance(
cuvs::distance::DistanceType::JensenShannon, double, double, double, int);
// Remaining raw-pointer `distance` instantiation declarations, one per line so
// the metric/type combination is visible at a glance.
instantiate_raft_distance_distance(cuvs::distance::DistanceType::KLDivergence, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::KLDivergence, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L1, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L1, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Expanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Expanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2SqrtExpanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2SqrtExpanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2SqrtUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2SqrtUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Unexpanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::L2Unexpanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::Linf, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::Linf, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::LpUnexpanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::LpUnexpanded, double, double, double, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::RusselRaoExpanded, float, float, float, int);
instantiate_raft_distance_distance(cuvs::distance::DistanceType::RusselRaoExpanded, double, double, double, int);
#undef instantiate_raft_distance_distance
#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT)         \
  extern template size_t cuvs::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT>( \
    const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k)

// Helper: declare both the float and the double instantiation of one metric.
#define instantiate_gws_float_and_double(DistT)                                \
  instantiate_raft_distance_getWorkspaceSize(DistT, float, float, float, int); \
  instantiate_raft_distance_getWorkspaceSize(DistT, double, double, double, int)

instantiate_gws_float_and_double(cuvs::distance::DistanceType::Canberra);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::CorrelationExpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::CosineExpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::HammingUnexpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::HellingerExpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::InnerProduct);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::JensenShannon);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::KLDivergence);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::L1);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::L2Expanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::L2SqrtExpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::L2SqrtUnexpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::L2Unexpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::Linf);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::LpUnexpanded);
instantiate_gws_float_and_double(cuvs::distance::DistanceType::RusselRaoExpanded);

#undef instantiate_gws_float_and_double
#undef instantiate_raft_distance_getWorkspaceSize
#define instantiate_raft_distance_getWorkspaceSize(DistT, DataT, AccT, OutT, IdxT, layout)         \
  extern template size_t cuvs::distance::getWorkspaceSize<DistT, DataT, AccT, OutT, IdxT, layout>( \
    raft::device_matrix_view<DataT, IdxT, layout> const& x,                                        \
    raft::device_matrix_view<DataT, IdxT, layout> const& y)
// We could consider not taking template parameters for this function. The
// number of instantiations seems a bit excessive.
// Helper: declare float and double instantiations for both matrix layouts.
#define instantiate_gws_all_layouts(DistT)                                                                   \
  instantiate_raft_distance_getWorkspaceSize(DistT, float, float, float, int, raft::layout_c_contiguous);    \
  instantiate_raft_distance_getWorkspaceSize(DistT, double, double, double, int, raft::layout_c_contiguous); \
  instantiate_raft_distance_getWorkspaceSize(DistT, float, float, float, int, raft::layout_f_contiguous);    \
  instantiate_raft_distance_getWorkspaceSize(DistT, double, double, double, int, raft::layout_f_contiguous)

instantiate_gws_all_layouts(cuvs::distance::DistanceType::Canberra);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::CorrelationExpanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::CosineExpanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::HammingUnexpanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::HellingerExpanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::InnerProduct);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::JensenShannon);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::KLDivergence);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::L1);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::L2Expanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::L2SqrtExpanded);
instantiate_gws_all_layouts(cuvs::distance::DistanceType::L2SqrtUnexpanded);

// NOTE(review): unlike every metric above, L2Unexpanded is missing its
// (double, layout_f_contiguous) combination, and Linf / LpUnexpanded /
// RusselRaoExpanded have no mdspan getWorkspaceSize declarations at all.
// Confirm against the corresponding explicit instantiations (.cu side)
// whether this asymmetry is intentional.
instantiate_raft_distance_getWorkspaceSize(
  cuvs::distance::DistanceType::L2Unexpanded, float, float, float, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
  cuvs::distance::DistanceType::L2Unexpanded, double, double, double, int, raft::layout_c_contiguous);
instantiate_raft_distance_getWorkspaceSize(
  cuvs::distance::DistanceType::L2Unexpanded, float, float, float, int, raft::layout_f_contiguous);

#undef instantiate_gws_all_layouts
#undef instantiate_raft_distance_getWorkspaceSize
// Raw-pointer pairwise_distance overload that takes a caller-provided rmm
// workspace buffer.
#define instantiate_raft_distance_pairwise_distance(DataT, IdxT)                        \
  extern template void cuvs::distance::pairwise_distance(raft::resources const& handle, \
                                                         const DataT* x,                \
                                                         const DataT* y,                \
                                                         DataT* dist,                   \
                                                         IdxT m,                        \
                                                         IdxT n,                        \
                                                         IdxT k,                        \
                                                         rmm::device_uvector<char>& workspace, \
                                                         cuvs::distance::DistanceType metric,  \
                                                         bool isRowMajor,               \
                                                         DataT metric_arg)
instantiate_raft_distance_pairwise_distance(float, int);
instantiate_raft_distance_pairwise_distance(double, int);
#undef instantiate_raft_distance_pairwise_distance
// Same, but without workspace
// (no rmm workspace parameter in this overload).
#define instantiate_raft_distance_pairwise_distance(DataT, IdxT)                        \
  extern template void cuvs::distance::pairwise_distance(raft::resources const& handle, \
                                                         const DataT* x,                \
                                                         const DataT* y,                \
                                                         DataT* dist,                   \
                                                         IdxT m,                        \
                                                         IdxT n,                        \
                                                         IdxT k,                        \
                                                         cuvs::distance::DistanceType metric, \
                                                         bool isRowMajor,               \
                                                         DataT metric_arg)
instantiate_raft_distance_pairwise_distance(float, int);
instantiate_raft_distance_pairwise_distance(double, int);
#undef instantiate_raft_distance_pairwise_distance
// Version with mdspan
#define instantiate_raft_distance_distance(DistT, DataT, AccT, OutT, layout, IdxT)       \
  extern template void cuvs::distance::distance<DistT, DataT, AccT, OutT, layout, IdxT>( \
    raft::resources const& handle,                                                       \
    raft::device_matrix_view<DataT, IdxT, layout> const x,                               \
    raft::device_matrix_view<DataT, IdxT, layout> const y,                               \
    raft::device_matrix_view<OutT, IdxT, layout> dist,                                   \
    DataT metric_arg)
// Again, we might want to consider reining in the number of instantiations...
// Helper: declare float and double instantiations for both matrix layouts.
#define instantiate_dist_all_combos(DistT)                                                           \
  instantiate_raft_distance_distance(DistT, float, float, float, raft::layout_c_contiguous, int);    \
  instantiate_raft_distance_distance(DistT, double, double, double, raft::layout_c_contiguous, int); \
  instantiate_raft_distance_distance(DistT, float, float, float, raft::layout_f_contiguous, int);    \
  instantiate_raft_distance_distance(DistT, double, double, double, raft::layout_f_contiguous, int)

instantiate_dist_all_combos(cuvs::distance::DistanceType::Canberra);
instantiate_dist_all_combos(cuvs::distance::DistanceType::CorrelationExpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::CosineExpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::HammingUnexpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::HellingerExpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::InnerProduct);
instantiate_dist_all_combos(cuvs::distance::DistanceType::JensenShannon);
instantiate_dist_all_combos(cuvs::distance::DistanceType::KLDivergence);
instantiate_dist_all_combos(cuvs::distance::DistanceType::L1);
instantiate_dist_all_combos(cuvs::distance::DistanceType::L2Expanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::L2SqrtExpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::L2SqrtUnexpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::L2Unexpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::Linf);
instantiate_dist_all_combos(cuvs::distance::DistanceType::LpUnexpanded);
instantiate_dist_all_combos(cuvs::distance::DistanceType::RusselRaoExpanded);

#undef instantiate_dist_all_combos
#undef instantiate_raft_distance_distance
// mdspan overload: the layout template parameter encodes row- vs column-major,
// so no isRowMajor flag is needed.
#define instantiate_raft_distance_pairwise_distance(DataT, layout, IdxT) \
  extern template void cuvs::distance::pairwise_distance(                \
    raft::resources const& handle,                                       \
    raft::device_matrix_view<DataT, IdxT, layout> const x,               \
    raft::device_matrix_view<DataT, IdxT, layout> const y,               \
    raft::device_matrix_view<DataT, IdxT, layout> dist,                  \
    cuvs::distance::DistanceType metric,                                 \
    DataT metric_arg)
instantiate_raft_distance_pairwise_distance(float, raft::layout_c_contiguous, int);
instantiate_raft_distance_pairwise_distance(float, raft::layout_f_contiguous, int);
instantiate_raft_distance_pairwise_distance(double, raft::layout_c_contiguous, int);
instantiate_raft_distance_pairwise_distance(double, raft::layout_f_contiguous, int);
#undef instantiate_raft_distance_pairwise_distance
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/fused_l2_nn-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __FUSED_L2_NN_H
#define __FUSED_L2_NN_H
#pragma once
#include <cub/cub.cuh>
#include <cuvs/distance/detail/fused_l2_nn.cuh>
#include <cuvs/distance/fused_l2_nn_helpers.cuh>
#include <limits>
#include <raft/core/resources.hpp>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <stdint.h>
#include <type_traits>
namespace cuvs {
namespace distance {
/**
* \ingroup fused_l2_nn
* @{
*/
/**
* @brief Fused L2 distance and 1-nearest-neighbor computation in a single call.
*
* The benefits of such a call are 2-fold: 1) eliminate the need for an
* intermediate buffer to store the output of gemm 2) reduce the memory read
* traffic on this intermediate buffer, otherwise needed during the reduction
* phase for 1-NN.
*
* @tparam DataT data type
* @tparam OutT output type to either store 1-NN indices and their minimum
* distances or store only the min distances. Accordingly, one
* has to pass an appropriate `ReduceOpT`
* @tparam IdxT indexing arithmetic type
 * @tparam ReduceOpT A struct to perform the final needed reduction operation
 *                   and also to initialize the output array elements with the
 *                   appropriate initial value needed for reduction.
 * @tparam KVPReduceOpT A struct providing the reduction operation on key-value
 *                      (index, distance) pairs, used when comparing candidate
 *                      nearest neighbors (see the `pairRedOp` parameter).
*
* @param[out] min will contain the reduced output (Length = `m`)
* (on device)
* @param[in] x first matrix. Row major. Dim = `m x k`.
* (on device).
* @param[in] y second matrix. Row major. Dim = `n x k`.
* (on device).
* @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
* @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
* @param[in] m gemm m
* @param[in] n gemm n
* @param[in] k gemm k
* @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
* @param[in] redOp reduction operator in the epilogue
* @param[in] pairRedOp reduction operation on key value pairs
* @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
* @param[in] initOutBuffer whether to initialize the output buffer before the
* main kernel launch
* @param[in] stream cuda stream
*/
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void fusedL2NN(OutT* min,
               const DataT* x,
               const DataT* y,
               const DataT* xn,
               const DataT* yn,
               IdxT m,
               IdxT n,
               IdxT k,
               void* workspace,
               ReduceOpT redOp,
               KVPReduceOpT pairRedOp,
               bool sqrt,
               bool initOutBuffer,
               cudaStream_t stream)
{
  // When k is smaller than 32, the Policy4x4 results in redundant calculations
  // as it uses tiles that have k=32. Therefore, use a "skinny" policy instead
  // that uses tiles with a smaller value of k.
  bool is_skinny = k < 32;
  // Select the widest legal vectorized access: 16-byte, 8-byte, or scalar.
  // Vector loads require the row length in bytes AND both base addresses to be
  // aligned to the chosen width, hence the three-way dispatch below.
  size_t bytes = sizeof(DataT) * k;
  auto px = reinterpret_cast<uintptr_t>(x);
  auto py = reinterpret_cast<uintptr_t>(y);
  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0 && px % 16 == 0 && py % 16 == 0) {
    // 16-byte aligned: use a policy with VecLen = 16 / sizeof(DataT).
    if (is_skinny) {
      detail::fusedL2NNImpl<
        DataT,
        OutT,
        IdxT,
        typename raft::linalg::Policy4x4Skinny<DataT, 16 / sizeof(DataT)>::Policy,
        ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    } else {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename raft::linalg::Policy4x4<DataT, 16 / sizeof(DataT)>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    }
  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0 && px % 8 == 0 && py % 8 == 0) {
    // 8-byte aligned: use a policy with VecLen = 8 / sizeof(DataT).
    if (is_skinny) {
      detail::fusedL2NNImpl<
        DataT,
        OutT,
        IdxT,
        typename raft::linalg::Policy4x4Skinny<DataT, 8 / sizeof(DataT)>::Policy,
        ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    } else {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename raft::linalg::Policy4x4<DataT, 8 / sizeof(DataT)>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    }
  } else {
    // Unaligned fallback: scalar (VecLen = 1) accesses.
    if (is_skinny) {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename raft::linalg::Policy4x4Skinny<DataT, 1>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    } else {
      detail::fusedL2NNImpl<DataT,
                            OutT,
                            IdxT,
                            typename raft::linalg::Policy4x4<DataT, 1>::Policy,
                            ReduceOpT>(
        min, x, y, xn, yn, m, n, k, (int*)workspace, redOp, pairRedOp, sqrt, initOutBuffer, stream);
    }
  }
}
/**
* @brief Wrapper around fusedL2NN with minimum reduction operators.
*
* fusedL2NN cannot be compiled in the distance library due to the lambda
* operators, so this wrapper covers the most common case (minimum).
* This should be preferred to the more generic API when possible, in order to
* reduce compilation times for users of the shared library.
*
* @tparam DataT data type
* @tparam OutT output type to either store 1-NN indices and their minimum
* distances (e.g. raft::KeyValuePair<int, float>) or store only the min
* distances.
* @tparam IdxT indexing arithmetic type
* @param[out] min will contain the reduced output (Length = `m`)
* (on device)
* @param[in] x first matrix. Row major. Dim = `m x k`.
* (on device).
* @param[in] y second matrix. Row major. Dim = `n x k`.
* (on device).
* @param[in] xn L2 squared norm of `x`. Length = `m`. (on device).
* @param[in] yn L2 squared norm of `y`. Length = `n`. (on device)
* @param[in] m gemm m
* @param[in] n gemm n
* @param[in] k gemm k
* @param[in] workspace temp workspace. Size = sizeof(int)*m. (on device)
* @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
* @param[in] initOutBuffer whether to initialize the output buffer before the
* main kernel launch
* @param[in] stream cuda stream
*/
template <typename DataT, typename OutT, typename IdxT>
void fusedL2NNMinReduce(OutT* min,
                        const DataT* x,
                        const DataT* y,
                        const DataT* xn,
                        const DataT* yn,
                        IdxT m,
                        IdxT n,
                        IdxT k,
                        void* workspace,
                        bool sqrt,
                        bool initOutBuffer,
                        cudaStream_t stream)
{
  // Forward to the generic fusedL2NN with the canonical minimum-reduction
  // operators, so shared-library users avoid instantiating the generic
  // (lambda-parameterized) API themselves.
  MinAndDistanceReduceOp<IdxT, DataT> reduce_op;
  KVPMinReduce<IdxT, DataT> kvp_reduce_op;
  fusedL2NN<DataT, OutT, IdxT>(min,
                               x,
                               y,
                               xn,
                               yn,
                               m,
                               n,
                               k,
                               workspace,
                               reduce_op,
                               kvp_reduce_op,
                               sqrt,
                               initOutBuffer,
                               stream);
}
/** @} */
} // namespace distance
} // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/kernels.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/kernels/gram_matrix.cuh>
#include <cuvs/distance/detail/kernels/kernel_factory.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cuvs/distance/distance.cuh>
#include <raft/linalg/gemm.cuh>
namespace cuvs::distance::kernels {
// TODO: Need to expose formal APIs for this that are more consistent w/ other APIs in RAFT
// Re-export the detail-level Gram-matrix types so users can name them without
// reaching into the `detail` namespace.
using cuvs::distance::kernels::detail::GramMatrixBase;
using cuvs::distance::kernels::detail::KernelFactory;
}; // end namespace cuvs::distance::kernels
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/distance_ops/all_ops.cuh>
#include <cuvs/distance/detail/pairwise_matrix/dispatch.cuh>
#include <cuvs/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#include <cuvs/distance/detail/pairwise_matrix/dispatch_sm80.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/unary_op.cuh>
#include <type_traits>
namespace cuvs {
namespace distance {
namespace detail {
/**
* @brief: A tag type for overload resolution based on DistanceType
*
* It is not possible to partially specialize function templates on a single
* parameter. Instead, it is often easier to use a combination of conventional
* method overloading and a parameter with a specific tag type. The following
* type is used to help method overloading based on the DistanceType enum.
*/
// Wraps a DistanceType enum value in a distinct type so that one distance_impl
// overload can be selected per metric via ordinary overload resolution.
template <DistanceType d>
using distance_tag = std::integral_constant<DistanceType, d>;
/**
* @brief Implement pairwise_matrix for specific distance
*
* There are multiple overloads for this function, one for each distance type.
* They are implemented below. The documentation of this function serves as
* documentation for all functions. The following overloads are defined:
*
* - DistanceType::Canberra:
* - DistanceType::CorrelationExpanded:
* - DistanceType::CosineExpanded:
* - DistanceType::HammingUnexpanded:
* - DistanceType::HellingerExpanded:
* - DistanceType::JensenShannon:
* - DistanceType::KLDivergence:
* - DistanceType::L1:
* - DistanceType::L2Expanded:
* - DistanceType::L2SqrtExpanded:
* - DistanceType::L2Unexpanded:
* - DistanceType::L2SqrtUnexpanded:
* - DistanceType::Linf:
* - DistanceType::LpUnexpanded:
* - DistanceType::RusselRaoExpanded:
*
* @tparam DataT Input data type
* @tparam AccT Accumulation data type
* @tparam OutT Output data type
* @tparam FinOpT Type of final operation
* @tparam IdxT Index type
*
* @param handle RAFT resources handle
* @param distance_type A tag type to indicate which distance is calculated.
* @param x First set of points
* @param y Second set of points
* @param out Output distance matrix
* @param m Number of points in x
* @param n Number of points in y
* @param k Dimensionality of points in x, y
* @param workspace Temporary workspace needed for computations
* @param worksize Number of bytes of the workspace
* @param is_row_major Whether the matrices are row-major or col-major
* @param metric_arg The `p` argument for Lp.
*/
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::Canberra> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,  // unused
                   size_t worksize,  // unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT metric_arg)  // unused
{
  // Canberra is an unexpanded distance: it needs neither precomputed row norms
  // nor scratch workspace, so those arguments are ignored.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using canberra_op_t = ops::canberra_distance_op<DataT, AccT, IdxT>;
  canberra_op_t canberra_op{};

  // No norm vectors are consumed by this metric; pass nulls to the dispatcher.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<canberra_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    canberra_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::CorrelationExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // unused
{
  // Correlation needs two auxiliary vectors per side: per-row sums (x_norm / y_norm)
  // and per-row squared L2 norms (sq_x_norm / sq_y_norm), all carved out of the
  // caller-provided workspace — hence the 2 * (m + n) element requirement.
  ASSERT(!(worksize < 2 * (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");

  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // All four pointers start at the workspace base; they are offset below depending
  // on whether the x == y fast path can share buffers.
  AccT* x_norm = workspace;
  AccT* y_norm = workspace;
  AccT* sq_x_norm = workspace;
  AccT* sq_y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if (x == y && is_row_major) {
    // Shared-buffer layout: [ row sums (max(m,n)) | squared norms (max(m,n)) ].
    raft::linalg::reduce(x_norm,
                         x,
                         k,
                         std::max(m, n),
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    sq_x_norm += std::max(m, n);
    sq_y_norm = sq_x_norm;
    raft::linalg::rowNorm(
      sq_x_norm, x, k, std::max(m, n), raft::linalg::L2Norm, is_row_major, stream);
  } else {
    // Distinct-buffer layout: [ x sums (m) | y sums (n) | x sq norms (m) | y sq norms (n) ].
    y_norm += m;
    raft::linalg::reduce(x_norm,
                         x,
                         k,
                         m,
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    raft::linalg::reduce(y_norm,
                         y,
                         k,
                         n,
                         (AccT)0,
                         is_row_major,
                         true,
                         stream,
                         false,
                         raft::identity_op(),
                         raft::add_op());
    sq_x_norm += (m + n);
    sq_y_norm = sq_x_norm + m;
    raft::linalg::rowNorm(sq_x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream);
    raft::linalg::rowNorm(sq_y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream);
  }

  // The op captures the squared norms; row sums are passed as the norm vectors.
  using OpT = ops::correlation_distance_op<DataT, AccT, IdxT>;
  OpT corr_op(is_row_major, sq_x_norm, sq_y_norm, m, n, k);
  pairwise_matrix_dispatch<decltype(corr_op), DataT, AccT, OutT, FinOpT, IdxT>(
    corr_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::CosineExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // unused
{
  // raft distance support inputs as float/double and output as uint8_t/float/double.
  static_assert(!((sizeof(OutT) > 1) && (sizeof(AccT) != sizeof(OutT))),
                "OutT can be uint8_t, float, double,"
                "if sizeof(OutT) > 1 then sizeof(AccT) == sizeof(OutT).");

  // Cosine needs one L2 row norm per row of x and of y, carved out of the workspace.
  ASSERT(!(worksize < (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");

  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // NOTE(review): workspace is AccT* but is bound to DataT* — this compiles only
  // when DataT == AccT, which holds for the current float/double instantiations.
  DataT* x_norm = workspace;
  DataT* y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if (x == y && is_row_major) {
    // x and y share a single norm buffer sized for the larger of the two.
    raft::linalg::rowNorm(
      x_norm, x, k, std::max(m, n), raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
  } else {
    y_norm += m;  // y's norms live immediately after x's m entries
    raft::linalg::rowNorm(
      x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
    raft::linalg::rowNorm(
      y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream, raft::sqrt_op{});
  }

  ops::cosine_distance_op<DataT, AccT, IdxT> distance_op{};
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::HammingUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Hamming only needs the row width k (passed into the op); it requires neither
  // precomputed norms nor scratch workspace.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using hamming_op_t = ops::hamming_distance_op<DataT, AccT, IdxT>;
  hamming_op_t hamming_op{k};

  // No norm vectors are consumed by this metric.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<hamming_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    hamming_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::InnerProduct> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Inner product reduces to a single GEMM with transposes chosen by the layout.
  // NOTE(review): fin_op is accepted but never applied here — the GEMM writes raw
  // dot products directly to `out`. Confirm no caller relies on a non-identity
  // epilogue for this metric.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // const_cast is required by the gemm signature; inputs are not modified.
  raft::linalg::gemm(handle,
                     out,
                     const_cast<DataT*>(x),
                     const_cast<DataT*>(y),
                     m,
                     n,
                     k,
                     !is_row_major,
                     !is_row_major,
                     is_row_major,
                     stream);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::HellingerExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Hellinger is computed on the element-wise square roots of the inputs. To avoid
  // allocating scratch buffers, the inputs are transformed in place (const is cast
  // away) and restored afterwards — so x and y must not be read concurrently
  // elsewhere while this runs.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // First sqrt x and y
  const auto raft_sqrt = raft::linalg::unaryOp<DataT, raft::sqrt_op, IdxT>;
  raft_sqrt((DataT*)x, x, m * k, raft::sqrt_op{}, stream);
  if (x != y) { raft_sqrt((DataT*)y, y, n * k, raft::sqrt_op{}, stream); }

  // Then calculate Hellinger distance
  ops::hellinger_distance_op<DataT, AccT, IdxT> distance_op{};

  // No precomputed norms are consumed by this metric.
  const DataT* x_norm = nullptr;
  const DataT* y_norm = nullptr;

  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);

  // Finally restore x and y by squaring (the inverse of sqrt for non-negative input).
  // BUG FIX: the previous code applied sqrt a second time for the "revert", which
  // leaves x^(1/4) in the buffers instead of x and silently corrupted the caller's
  // input data.
  const auto raft_sq = raft::linalg::unaryOp<DataT, raft::sq_op, IdxT>;
  raft_sq((DataT*)x, x, m * k, raft::sq_op{}, stream);
  if (x != y) { raft_sq((DataT*)y, y, n * k, raft::sq_op{}, stream); }

  RAFT_CUDA_TRY(cudaGetLastError());
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::JensenShannon> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Jensen-Shannon is unexpanded: no precomputed norms or scratch space needed.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using js_op_t = ops::jensen_shannon_distance_op<DataT, AccT, IdxT>;
  js_op_t js_op{};

  // No norm vectors are consumed by this metric.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<js_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    js_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::KLDivergence> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  // Forward transform: store log(y) in place (zeros stay zero) so the pairwise
  // kernel can use a cheaper per-element expression. This temporarily mutates the
  // caller's y buffer (const is cast away below); it is undone after the dispatch.
  auto unaryOp_lambda = [] __device__(DataT input) {
    const bool x_zero = (input == 0);
    return (!x_zero) * raft::log(input + x_zero);
  };

  // Inverse transform: e^log(y) for non-zero stored entries.
  // NOTE(review): entries whose original value was exactly 1 are stored as
  // log(1) == 0 and are therefore restored as 0, not 1 — confirm this edge case
  // is acceptable for callers.
  auto unaryOp_lambda_reverse = [] __device__(DataT input) {
    // reverse previous log (x) back to x using (e ^ log(x))
    const bool x_zero = (input == 0);
    return (!x_zero) * raft::exp(input);
  };

  if (x != y) {
    raft::linalg::unaryOp<DataT, decltype(unaryOp_lambda), IdxT>(
      (DataT*)y, y, n * k, unaryOp_lambda, stream);
  }

  // KL divergence consumes no precomputed norm vectors.
  const DataT* x_norm = nullptr;
  const DataT* y_norm = nullptr;

  // This op takes some shortcuts when x equals y. So its behavior changes based
  // on this.
  ops::kl_divergence_op<DataT, AccT, IdxT> distance_op{is_row_major, x == y};

  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);

  if (x != y) {
    // Now reverse previous log (x) back to x using (e ^ log(x))
    raft::linalg::unaryOp<DataT, decltype(unaryOp_lambda_reverse), IdxT>(
      (DataT*)y, y, n * k, unaryOp_lambda_reverse, stream);
  }
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L1> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // L1 (Manhattan) is unexpanded: no precomputed norms or scratch space needed.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using l1_op_t = ops::l1_distance_op<DataT, AccT, IdxT>;
  l1_op_t l1_op{};

  // No norm vectors are consumed by this metric.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<l1_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    l1_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT,
          typename AccT,
          typename OutT,
          typename FinOpT,
          typename IdxT = int>
void distance_impl_l2_expanded(  // NOTE: different name
  bool perform_sqrt,             // dispatch on sqrt
  const DataT* x,
  const DataT* y,
  OutT* out,
  IdxT m,
  IdxT n,
  IdxT k,
  AccT* workspace,
  size_t worksize,
  FinOpT fin_op,
  cudaStream_t stream,
  bool is_row_major)
{
  // Shared implementation for L2Expanded / L2SqrtExpanded. The "expanded" form
  // precomputes the squared row norms of x and y into the caller-provided
  // workspace, which the dispatched op then combines with the GEMM result.
  // raft distance support inputs as float/double and output as uint8_t/float/double.
  static_assert(!((sizeof(OutT) > 1) && (sizeof(AccT) != sizeof(OutT))),
                "OutT can be uint8_t, float, double,"
                "if sizeof(OutT) > 1 then sizeof(AccT) == sizeof(OutT).");

  // One norm entry per row of x and of y.
  ASSERT(!(worksize < (m + n) * sizeof(AccT)), "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");

  // NOTE(review): workspace is AccT* but is bound to DataT* — this compiles only
  // when DataT == AccT, which holds for the current float/double instantiations.
  DataT* x_norm = workspace;
  DataT* y_norm = workspace;
  // TODO: Column major case looks to have lower accuracy for X == Y,
  // perhaps the use of stridedSummationKernel could be causing this,
  // need to investigate and fix.
  if ((x == y) && is_row_major) {
    // x and y share a single norm buffer sized for the larger of the two.
    raft::linalg::rowNorm(x_norm,
                          x,
                          k,
                          std::max(m, n),
                          raft::linalg::L2Norm,
                          is_row_major,
                          stream,
                          raft::identity_op{});
  } else {
    y_norm += m;  // y's norms live immediately after x's m entries
    raft::linalg::rowNorm(
      x_norm, x, k, m, raft::linalg::L2Norm, is_row_major, stream, raft::identity_op{});
    raft::linalg::rowNorm(
      y_norm, y, k, n, raft::linalg::L2Norm, is_row_major, stream, raft::identity_op{});
  }

  ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{perform_sqrt};
  pairwise_matrix_dispatch<decltype(distance_op), DataT, AccT, OutT, FinOpT, IdxT>(
    distance_op, m, n, k, x, y, x_norm, y_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2Expanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Squared Euclidean distance: delegate to the shared expanded-L2 helper with
  // perform_sqrt == false.
  distance_impl_l2_expanded(false,
                            x,
                            y,
                            out,
                            m,
                            n,
                            k,
                            workspace,
                            worksize,
                            fin_op,
                            raft::resource::get_cuda_stream(handle),
                            is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2SqrtExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT* workspace,
                   size_t worksize,
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Euclidean distance: delegate to the shared expanded-L2 helper with
  // perform_sqrt == true.
  distance_impl_l2_expanded(true,
                            x,
                            y,
                            out,
                            m,
                            n,
                            k,
                            workspace,
                            worksize,
                            fin_op,
                            raft::resource::get_cuda_stream(handle),
                            is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2Unexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Unexpanded L2 accumulates squared differences directly, so it does not need
  // precomputed row norms or scratch workspace.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using l2_unexp_op_t = ops::l2_unexp_distance_op<DataT, AccT, IdxT>;
  l2_unexp_op_t l2_op(false);  // perform_sqrt = false: squared distances

  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<l2_unexp_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    l2_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::L2SqrtUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Unexpanded L2 accumulates squared differences directly, so it does not need
  // precomputed row norms or scratch workspace.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using l2_unexp_op_t = ops::l2_unexp_distance_op<DataT, AccT, IdxT>;
  l2_unexp_op_t l2_op(true);  // perform_sqrt = true: take the final square root

  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<l2_unexp_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    l2_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::Linf> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Chebyshev (L-infinity) is unexpanded: no norms or scratch space needed.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using linf_op_t = ops::l_inf_distance_op<DataT, AccT, IdxT>;
  linf_op_t linf_op{};

  // No norm vectors are consumed by this metric.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<linf_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    linf_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::LpUnexpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT metric_arg)
{
  // Minkowski distance; metric_arg is the exponent `p` carried by the op.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using lp_op_t = ops::lp_unexp_distance_op<DataT, AccT, IdxT>;
  lp_op_t minkowski_op{metric_arg};

  // No norm vectors are consumed by this metric.
  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<lp_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    minkowski_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
template <typename DataT, typename AccT, typename OutT, typename FinOpT, typename IdxT = int>
void distance_impl(raft::resources const& handle,
                   distance_tag<DistanceType::RusselRaoExpanded> distance_type,
                   const DataT* x,
                   const DataT* y,
                   OutT* out,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   AccT*,   // workspace unused
                   size_t,  // worksize unused
                   FinOpT fin_op,
                   bool is_row_major,
                   DataT)  // metric_arg unused
{
  // Russel-Rao needs the row width k (for normalization inside the op) but no
  // precomputed norms or scratch workspace.
  cudaStream_t stream = raft::resource::get_cuda_stream(handle);

  using russel_rao_op_t = ops::russel_rao_distance_op<DataT, AccT, IdxT>;
  russel_rao_op_t rr_op{k};

  const DataT* no_norm = nullptr;
  pairwise_matrix_dispatch<russel_rao_op_t, DataT, AccT, OutT, FinOpT, IdxT>(
    rr_op, m, n, k, x, y, no_norm, no_norm, out, fin_op, stream, is_row_major);
}
/**
* @brief Evaluate pairwise distances with the user epilogue lamba allowed
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam FinalLambda user-defined epilogue lamba
* @tparam Index_ Index type
*
* @param x first set of points
* @param y second set of points
* @param out output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param fin_op the final gemm epilogue lambda
* @param stream cuda stream
* @param isRowMajor whether the matrices are row-major or col-major
*
* @note fin_op: This is a device lambda which is supposed to operate upon the
* input which is AccType and returns the output in OutType. It's signature is
* as follows: <pre>OutType fin_op(AccType in, int g_idx);</pre>. If one needs
* any other parameters, feel free to pass them via closure.
*/
template <cuvs::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename FinalLambda,
          typename Index_ = int>
void distance(raft::resources const& handle,
              const InType* x,
              const InType* y,
              OutType* out,
              Index_ m,
              Index_ n,
              Index_ k,
              void* workspace,
              size_t worksize,
              FinalLambda fin_op,
              bool isRowMajor = true,
              InType metric_arg = 2.0f)
{
  // Output must either be a single byte (uint8_t) or match the accumulator width.
  static_assert(!((sizeof(OutType) > 1) && (sizeof(AccType) != sizeof(OutType))),
                "OutType can be uint8_t, float, double,"
                "if sizeof(OutType) > 1 then sizeof(AccType) == sizeof(OutType).");

  // Tag dispatch selects the distance_impl overload for this metric at compile time.
  distance_impl<InType, AccType, OutType, FinalLambda, Index_>(handle,
                                                               distance_tag<distanceType>{},
                                                               x,
                                                               y,
                                                               out,
                                                               m,
                                                               n,
                                                               k,
                                                               static_cast<AccType*>(workspace),
                                                               worksize,
                                                               fin_op,
                                                               isRowMajor,
                                                               metric_arg);

  // Surface any launch error without clearing the sticky error state.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Evaluate pairwise distances for the simple use case
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam Index_ Index type
* @param x first set of points
* @param y second set of points
* @param dist output distance matrix
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
* @param workspace temporary workspace needed for computations
* @param worksize number of bytes of the workspace
* @param stream cuda stream
* @param isRowMajor whether the matrices are row-major or col-major
*/
template <cuvs::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename Index_ = int>
void distance(raft::resources const& handle,
              const InType* x,
              const InType* y,
              OutType* out,
              Index_ m,
              Index_ n,
              Index_ k,
              void* workspace,
              size_t worksize,
              bool isRowMajor = true,
              InType metric_arg = 2.0f)
{
  // Forward to the epilogue-taking overload with an identity final op.
  distance<distanceType, InType, AccType, OutType, raft::identity_op, Index_>(
    handle, x, y, out, m, n, k, workspace, worksize, raft::identity_op(), isRowMajor, metric_arg);
}
/**
* @brief Return the exact workspace size to compute the distance
* @tparam DistanceType which distance to evaluate
* @tparam InType input argument type
* @tparam AccType accumulation type
* @tparam OutType output type
* @tparam Index_ Index type
* @param x first set of points
* @param y second set of points
* @param m number of points in x
* @param n number of points in y
* @param k dimensionality
*
* @note If the specified distanceType doesn't need the workspace at all, it
* returns 0.
*/
template <cuvs::distance::DistanceType distanceType,
          typename InType,
          typename AccType,
          typename OutType,
          typename Index_ = int>
size_t getWorkspaceSize(const InType* x, const InType* y, Index_ m, Index_ n, Index_ k)
{
  size_t worksize = 0;

  // Only the expanded metrics (enum values up to CosineExpanded) and
  // CorrelationExpanded need scratch space for precomputed per-row quantities.
  constexpr bool is_allocated = (distanceType <= cuvs::distance::DistanceType::CosineExpanded) ||
                                (distanceType == cuvs::distance::DistanceType::CorrelationExpanded);
  // Correlation keeps both row sums and squared norms, hence two buffers per side.
  constexpr int numOfBuffers =
    (distanceType == cuvs::distance::DistanceType::CorrelationExpanded) ? 2 : 1;

  if (is_allocated) {
    // TODO : when X == Y allocate std::max(m, n) instead of m + n when column major input
    // accuracy issue is resolved until then we allocate as m + n.
    // Promote to size_t before multiplying so very large m/n cannot overflow the
    // (possibly 32-bit) Index_ type during the intermediate product.
    worksize += static_cast<size_t>(numOfBuffers) * static_cast<size_t>(m) * sizeof(AccType);
    worksize += static_cast<size_t>(numOfBuffers) * static_cast<size_t>(n) * sizeof(AccType);
  }

  return worksize;
}
}; // namespace detail
}; // namespace distance
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_distance_cutlass_base.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wtautological-compare"
// We define CUTLASS_NAMESPACE in case
// RAFT cmake is not used
#ifndef CUTLASS_NAMESPACE
#define cutlass raft_cutlass
#endif
#include <rmm/device_uvector.hpp>
#include <type_traits>
#include <cutlass/cutlass.h>
#include <cutlass/gemm/device/gemm.h>
#include <cutlass/gemm/device/gemm_universal_adapter.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/tensor_view.h>
#include <cuvs/distance/detail/distance_ops/cutlass.cuh>
#include <raft/util/cutlass_utils.cuh>
#include "./pairwise_distance_epilogue_elementwise.h"
#include "./pairwise_distance_gemm.h"
namespace cuvs {
namespace distance {
namespace detail {
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          typename FinalLambda,
          typename OpT,
          bool isRowMajor>
std::enable_if_t<ops::has_cutlass_op<OpT>::value> cutlassDistanceKernel(const DataT* x,
                                                                        const DataT* y,
                                                                        const DataT* xn,
                                                                        const DataT* yn,
                                                                        IdxT m,
                                                                        IdxT n,
                                                                        IdxT k,
                                                                        IdxT lda,
                                                                        IdxT ldb,
                                                                        IdxT ldd,
                                                                        OutT* dOutput,
                                                                        FinalLambda fin_op,
                                                                        OpT distance_op,
                                                                        cudaStream_t stream)
{
  // Launches a CUTLASS GEMM whose epilogue fuses the distance op (obtained from
  // distance_op.get_cutlass_op()) and the user's final lambda, consuming the
  // precomputed row norms xn/yn. Only enabled for ops that expose a CUTLASS form
  // (see the enable_if on has_cutlass_op). Requires SM80-class hardware per the
  // kernel configurations in PairwiseDistanceGemm.
  static_assert(!(std::is_same<OutT, bool>::value),
                "OutType bool is not supported use uint8_t instead");

  auto dist_op = distance_op.get_cutlass_op();
  using DistanceFn = decltype(dist_op);
  using EpilogueOutputOp =
    cutlass::epilogue::thread::PairwiseDistanceEpilogueElementwise<DataT,  // ElementC_
                                                                   AccT,  // ElementAccumulator_
                                                                   DataT,  // ElementCompute_
                                                                   AccT,  // ElementZ_
                                                                   OutT,  // ElementT_
                                                                   1,  // Elements per access 1
                                                                   DistanceFn,
                                                                   FinalLambda>;
  constexpr int batch_count = 1;
  constexpr auto mode = cutlass::gemm::GemmUniversalMode::kGemm;
  typename EpilogueOutputOp::Params epilog_op_param(dist_op, fin_op);

  // Operand pointers/strides chosen below depending on layout.
  const DataT *a, *b;
  IdxT gemm_lda, gemm_ldb;

  // Number of pipelines you want to use
  constexpr int NumStages = 3;
  // Alignment
  constexpr int Alignment = VecLen;

  // default initialize problem size with row major inputs
  auto problem_size = cutlass::gemm::GemmCoord(n, m, k);

  using cutlassDistKernel =
    typename cutlass::gemm::kernel::PairwiseDistanceGemm<DataT,
                                                         Alignment,
                                                         DataT,
                                                         Alignment,
                                                         AccT,
                                                         AccT,
                                                         EpilogueOutputOp,
                                                         NumStages,  // Number of pipeline stages
                                                         isRowMajor>::GemmKernel;

  using cutlassDist = cutlass::gemm::device::GemmUniversalAdapter<cutlassDistKernel>;

  if constexpr (isRowMajor) {
    // Row-major path: operands are swapped (a = y, b = x), matching the (n, m, k)
    // problem size initialized above.
    a = y;
    b = x;
    gemm_lda = ldb;
    gemm_ldb = lda;
  } else {
    problem_size = cutlass::gemm::GemmCoord(m, n, k);
    a = x;
    b = y;
    gemm_lda = lda;
    gemm_ldb = ldb;
  }

  // Positional argument pack for GemmUniversal-style kernels; the order of the
  // batch strides and leading dimensions below must match the Arguments struct.
  typename cutlassDist::Arguments arguments{
    mode, problem_size, batch_count, epilog_op_param, a, b,
    xn,          // C matrix eq vector param, which here is A norm
    nullptr,     // tensor_Z,
    (DataT*)yn,  // this is broadcast vec, which is required to be non-const param
    dOutput,     // Output distance matrix
    (int64_t)0,  // batch stride A
    (int64_t)0,  // batch stride B
    (int64_t)0,  // batch stride Norm A
    (int64_t)0,
    (int64_t)0,  // batch stride Norm B
    (int64_t)0,  // batch stride Output
    gemm_lda,    // stride A
    gemm_ldb,    // stride B
    1,           // stride A norm
    0,           // this is no-op for Z
    0,           // This must be zero
    ldd          // stride Output matrix
  };

  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size = cutlassDist::get_workspace_size(arguments);
  // Allocate workspace memory
  rmm::device_uvector<uint8_t> workspace(workspace_size, stream);
  // Instantiate CUTLASS kernel depending on templates
  cutlassDist cutlassDist_op;
  // Check the problem size is supported or not
  RAFT_CUTLASS_TRY(cutlassDist_op.can_implement(arguments));
  // Initialize CUTLASS kernel with arguments and workspace pointer
  RAFT_CUTLASS_TRY(cutlassDist_op.initialize(arguments, workspace.data(), stream));
  // Launch initialized CUTLASS kernel
  RAFT_CUTLASS_TRY(cutlassDist_op(stream));
}
}; // namespace detail
}; // namespace distance
}; // namespace cuvs
#pragma GCC diagnostic pop
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_distance_gemm.h | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/kernel/default_gemm_universal.h>
#include <cutlass/gemm/kernel/gemm_with_fused_epilogue.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include "./pairwise_distance_epilogue.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Assembles a CUTLASS GEMM kernel whose threadblock epilogue is replaced
 * with the fused pairwise-distance epilogue (PairwiseDistanceEpilogue).
 *
 * This primary template targets fp32 inputs with the 3xTF32 fast-FP32 tensor-core
 * path on SM80 (see Operator / ArchTag below); a separate specialization handles
 * double precision.
 */
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Layout type for A matrix operand
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Layout type for B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Element type for final output
  // typename ElementOutT,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// data layout row/column major of inputs
  bool isRowMajor>
struct PairwiseDistanceGemm {
  // This struct is specialized for fp32/3xTF32
  /// Threadblock-level tile size (concept: GemmShape)
  using ThreadblockShape =
    cutlass::gemm::GemmShape<128, 128, 16>;  // <- threadblock tile M = 128, N = 128, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes tile size a warp will compute
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 16>;  // <- warp tile M = 64, N = 64, K = 16
  /// Warp-level tile size (concept: GemmShape)
  // This code section describes the size of MMA op
  using InstructionShape =
    cutlass::gemm::GemmShape<16, 8, 4>;  // <- MMA Op tile M = 16, N = 8, K = 4
  /// Operation performed by GEMM
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU
  // SM
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  // This code section describes CUDA SM architecture number
  using ArchTag = cutlass::arch::Sm80;
  // This code section describes how threadblocks are scheduled on GPU
  /// Threadblock-level swizzling operator
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// data layout for final output matrix.
  // we keep this same layout even for column major inputs
  using LayoutOutput = cutlass::layout::RowMajor;
  // Norm-vector layout follows the input layout selected by isRowMajor.
  typedef typename std::conditional<isRowMajor,
                                    cutlass::layout::RowMajor,
                                    cutlass::layout::ColumnMajor>::type NormXLayout;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::type LayoutA_;
  typedef typename std::
    conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::type LayoutB_;
  // Start from the stock universal GEMM kernel; its epilogue is swapped out below.
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentA,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentB,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  // Replace epilogue
  using Epilogue = typename cutlass::epilogue::threadblock::PairwiseDistanceEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  // Compose the GEMM kernel
  using GemmKernel = GemmWithFusedEpilogue<typename GemmBase::Mma, Epilogue, ThreadblockSwizzle>;
};
template <
  /// Alignment (in elements) of operand A accesses
  int kAlignmentA,
  /// Alignment (in elements) of operand B accesses
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Whether the input matrices are laid out row major
  bool isRowMajor>
struct PairwiseDistanceGemm<double,
                            kAlignmentA,
                            double,
                            kAlignmentB,
                            ElementC_,
                            ElementAccumulator,
                            EpilogueOutputOp,
                            Stages,
                            isRowMajor> {
  /// Each threadblock computes a 64 x 64 x 16 tile per mainloop iteration.
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;

  /// Each warp within the threadblock covers a 32 x 32 x 16 sub-tile.
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;

  /// Shape of a single fp64 tensor-core MMA instruction (m8n8k4).
  using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;

  /// fp64 path uses plain multiply-add (no fast-fp32 emulation).
  using Operator = cutlass::arch::OpMultiplyAdd;

  /// Run on tensor cores rather than SIMT cores.
  using OperatorClass = cutlass::arch::OpClassTensorOp;

  /// Minimum SM architecture targeted by this specialization.
  using ArchTag = cutlass::arch::Sm80;

  /// Threadblock scheduling (swizzling) policy.
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;

  /// The final output matrix is always written row major, even when the
  /// inputs are column major.
  using LayoutOutput = cutlass::layout::RowMajor;

  /// Layout of the norm vector consumed by the fused epilogue follows the
  /// input layout.
  using NormXLayout =
    typename std::conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::
      type;

  /// Operand A mirrors the requested input layout.
  using LayoutA_ =
    typename std::conditional<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>::
      type;

  /// Operand B uses the transposed layout relative to A.
  using LayoutB_ =
    typename std::conditional<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>::
      type;

  /// Baseline CUTLASS universal GEMM kernel; fp64 operands use alignment 1.
  using GemmBase = typename DefaultGemmUniversal<double,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 double,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;

  /// Swap the stock epilogue for the pairwise-distance epilogue that fuses
  /// the norm-vector broadcast into the output stage.
  using Epilogue = typename cutlass::epilogue::threadblock::PairwiseDistanceEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementC_,
    typename EpilogueOutputOp::ElementT,
    ElementC_,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;

  /// Final kernel: the baseline mainloop composed with the fused epilogue.
  using GemmKernel = GemmWithFusedEpilogue<typename GemmBase::Mma, Epilogue, ThreadblockSwizzle>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_l2_nn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef> // size_t
#include <cuvs/distance/detail/distance_ops/l2_exp.cuh> // ops::l2_exp_distance_op
#include <cuvs/distance/detail/fused_distance_nn/cutlass_base.cuh>
#include <cuvs/distance/detail/pairwise_distance_base.cuh> // PairwiseDistances
#include <limits> // std::numeric_limits
#include <raft/core/kvp.hpp> // raft::KeyValuePair
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/linalg/contractions.cuh> // Policy
#include <raft/util/arch.cuh> // raft::util::arch::SM_*
#include <raft/util/cuda_utils.cuh> // raft::ceildiv, raft::shfl
namespace cuvs {
namespace distance {
namespace detail {
template <typename LabelT, typename DataT>
struct KVPMinReduceImpl {
  typedef raft::KeyValuePair<LabelT, DataT> KVP;
  /// Binary min-reduction over key-value pairs; the row-id overload ignores
  /// the id. Ties keep `a` (comparison is `b.value < a.value`).
  DI KVP operator()(LabelT rit, const KVP& a, const KVP& b) { return operator()(a, b); }
  DI KVP operator()(const KVP& a, const KVP& b)
  {
    if (b.value < a.value) { return b; }
    return a;
  }
};  // KVPMinReduce
template <typename LabelT, typename DataT>
struct MinAndDistanceReduceOpImpl {
  typedef typename raft::KeyValuePair<LabelT, DataT> KVP;

  /// Merge a candidate pair into `out`, keeping the smaller distance value
  /// (and its key).
  DI void operator()(LabelT rid, KVP* out, const KVP& other) const
  {
    if (!(other.value < out->value)) { return; }
    out->key   = other.key;
    out->value = other.value;
  }

  /// Merge a candidate pair into a bare distance output; the key is dropped.
  DI void operator()(LabelT rid, DataT* out, const KVP& other) const
  {
    if (other.value < *out) { *out = other.value; }
  }

  /// Merge a bare candidate distance into a bare distance output.
  DI void operator()(LabelT rid, DataT* out, const DataT& other) const
  {
    if (other < *out) { *out = other; }
  }

  /// Initialize outputs with the identity of the min-reduction (maxVal).
  DI void init(DataT* out, DataT maxVal) const { *out = maxVal; }
  DI void init(KVP* out, DataT maxVal) const { out->value = maxVal; }

  /// Seed the key field; a no-op when the output is a bare distance.
  DI void init_key(DataT& out, LabelT idx) const { return; }
  DI void init_key(KVP& out, LabelT idx) const { out.key = idx; }

  /// Read back the distance value stored in the output.
  DI DataT get_value(KVP& out) const { return out.value; }
  DI DataT get_value(DataT& out) const { return out; }
};
template <typename LabelT, typename DataT>
struct MinReduceOpImpl {
  typedef typename raft::KeyValuePair<LabelT, DataT> KVP;

  /// Keep the smaller of the stored distance and the candidate pair's value.
  DI void operator()(LabelT rid, DataT* out, const KVP& other)
  {
    if (!(other.value < *out)) { return; }
    *out = other.value;
  }

  /// Initialize with the identity of the min-reduction.
  DI void init(DataT* out, DataT maxVal) { *out = maxVal; }
};
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
RAFT_KERNEL initKernel(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp)
{
  // One thread per output element; guard against the grid overshooting m.
  const auto tid = IdxT(blockIdx.x) * blockDim.x + threadIdx.x;
  if (tid >= m) { return; }
  redOp.init(min + tid, maxVal);
}
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT>
void initialize(OutT* min, IdxT m, DataT maxVal, ReduceOpT redOp, cudaStream_t stream)
{
  // Launch enough 256-thread blocks to cover all m output elements.
  const auto nblks = raft::ceildiv(m, 256);
  initKernel<DataT, OutT, IdxT><<<nblks, 256, 0, stream>>>(min, m, maxVal, redOp);
}
// TODO: specialize this function for MinAndDistanceReduceOp<int, float>
// with atomicCAS of 64 bit which will eliminate mutex and raft::shfls
/**
 * @brief Flush each thread's per-row partial nearest-neighbor results into
 * the global output array, serializing concurrent writers with a per-row
 * spin lock.
 *
 * @tparam P contraction policy supplying the accumulator tiling constants
 *
 * @param[in,out] mutex       one int lock per output row (0 = free, 1 = held)
 * @param[in,out] min         global output array of length m
 * @param[in]     val         this thread's P::AccRowsPerTh partial results
 * @param[in]     red_op      reduction merging a partial into the output slot
 * @param[in]     m           number of output rows
 * @param[in]     gridStrideY row offset of the current grid-stride tile
 */
template <typename P, typename OutT, typename IdxT, typename KVPair, typename ReduceOpT>
DI void updateReducedVal(
  int* mutex, OutT* min, KVPair* val, ReduceOpT red_op, IdxT m, IdxT gridStrideY)
{
  const auto lid      = threadIdx.x % raft::WarpSize;  // lane id within the warp
  const auto accrowid = threadIdx.x / P::AccThCols;

  // Update each output row in order within a warp. This will resolve hang
  // issues with pre-Volta architectures
#pragma unroll
  for (int j = 0; j < (raft::WarpSize / P::AccThCols); j++) {
    if (lid == j * P::AccThCols) {
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        auto rid = gridStrideY + accrowid + i * P::AccThRows;
        if (rid < m) {
          auto value = val[i];
          // Spin until this row's lock is acquired (0 -> 1).
          while (atomicCAS(mutex + rid, 0, 1) == 1)
            ;
          __threadfence();  // make prior writes visible before merging
          red_op(rid, min + rid, value);
          __threadfence();  // publish the merged result before unlocking
          atomicCAS(mutex + rid, 1, 0);  // release the lock (1 -> 0)
        }
      }
    }
  }
}
/**
 * @brief SIMT fallback kernel (compiled only for pre-SM80 architectures)
 * computing, for each row of x, the nearest row of y under the expanded L2
 * distance, fused with the min-reduction.
 *
 * Each thread keeps per-row partial key-value minima in registers
 * (`val[P::AccRowsPerTh]`), reduces them across lanes with warp shuffles in
 * `rowEpilog_lambda`, and finally merges them into the global output under a
 * per-row mutex via `updateReducedVal`.
 *
 * @param[out] min   output array (length m) of nearest-neighbor results
 * @param[in]  x,y   input matrices (m x k and n x k)
 * @param[in]  xn,yn squared L2 row norms of x and y
 * @param[in]  maxVal identity value for the min-reduction
 * @param[in]  mutex  one lock per output row (length m)
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename P,
          typename ReduceOpT,
          typename KVPReduceOpT,
          typename OpT,
          typename FinalLambda>
__launch_bounds__(P::Nthreads, 2) RAFT_KERNEL fusedL2NNkernel(OutT* min,
                                                              const DataT* x,
                                                              const DataT* y,
                                                              const DataT* xn,
                                                              const DataT* yn,
                                                              IdxT m,
                                                              IdxT n,
                                                              IdxT k,
                                                              DataT maxVal,
                                                              int* mutex,
                                                              ReduceOpT redOp,
                                                              KVPReduceOpT pairRedOp,
                                                              OpT distance_op,
                                                              FinalLambda fin_op)
{
// compile only if below non-ampere arch.
#if __CUDA_ARCH__ < 800
  extern __shared__ char smem[];

  // Fully qualify KeyValuePair: inside cuvs::distance::detail the unqualified
  // name does not resolve (every other use in this file is raft::KeyValuePair).
  typedef raft::KeyValuePair<IdxT, DataT> KVPair;
  KVPair val[P::AccRowsPerTh];
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    val[i] = {0, maxVal};
  }

  // epilogue operation lambda for final value calculation
  auto epilog_lambda = [n, pairRedOp, &val, maxVal] __device__(
                         DataT acc[P::AccRowsPerTh][P::AccColsPerTh],
                         DataT * regxn,
                         DataT * regyn,
                         IdxT gridStrideX,
                         IdxT gridStrideY) {
    KVPReduceOpT pairRed_op(pairRedOp);

    // intra thread reduce: fold each accumulator column into the per-row
    // running minimum, guarding against columns beyond n.
    const auto acccolid = threadIdx.x % P::AccThCols;
    const auto accrowid = threadIdx.x / P::AccThCols;
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto tmpkey = acccolid + j * P::AccThCols + gridStrideX;
        KVPair tmp  = {tmpkey, acc[i][j]};
        if (tmpkey < n) {
          val[i] = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]);
        }
      }
    }
  };

  auto rowEpilog_lambda =
    [m, mutex, min, pairRedOp, redOp, &val, maxVal] __device__(IdxT gridStrideY) {
      KVPReduceOpT pairRed_op(pairRedOp);
      ReduceOpT red_op(redOp);

      const auto accrowid = threadIdx.x / P::AccThCols;
      const auto lid      = raft::laneId();

    // reduce: butterfly shuffle reduction across the lanes sharing a row
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = P::AccThCols / 2; j > 0; j >>= 1) {
          // Actually, the srcLane (lid +j) should be (lid +j) % P:AccThCols,
          // but the raft::shfl op applies the modulo internally.
          auto tmpkey   = raft::shfl(val[i].key, lid + j, P::AccThCols);
          auto tmpvalue = raft::shfl(val[i].value, lid + j, P::AccThCols);
          KVPair tmp    = {tmpkey, tmpvalue};
          val[i]        = pairRed_op(accrowid + i * P::AccThRows + gridStrideY, tmp, val[i]);
        }
      }

      // Merge the warp-reduced partials into global output under the row lock.
      updateReducedVal<P, OutT, IdxT, KVPair, ReduceOpT>(mutex, min, val, red_op, m, gridStrideY);

    // reset the val array.
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        val[i] = {0, maxVal};
      }
    };

  IdxT lda = k, ldb = k, ldd = n;
  constexpr bool row_major = true;
  constexpr bool write_out = false;
  PairwiseDistances<DataT,
                    DataT,  // OutT (unused in PairwiseDistances)
                    IdxT,
                    P,
                    decltype(distance_op),
                    decltype(epilog_lambda),
                    FinalLambda,
                    decltype(rowEpilog_lambda),
                    row_major,
                    write_out>
    obj(x,
        y,
        m,
        n,
        k,
        lda,
        ldb,
        ldd,
        xn,
        yn,
        nullptr,  // Output pointer
        smem,
        distance_op,
        epilog_lambda,
        fin_op,
        rowEpilog_lambda);
  obj.run();
#endif
}
// cg::reduce functor for FusedDistanceNN used in its cutlass version
// to output the min distance value & key(loc id).
// This is used in fused_distance_nn/predicated_tile_iterator_reduced_vec.h
// store_with_byte_offset() passed to cg::reduce() & select_reduce.
template <typename AccType, typename Index, typename OutType>
struct kvp_cg_min_reduce_op {
  typedef typename raft::KeyValuePair<Index, AccType> KVP;

  __host__ __device__ kvp_cg_min_reduce_op() noexcept {};

  using AccTypeT = AccType;
  using IndexT   = Index;

  /// Min-reduce two key-value pairs; ties keep `b`.
  __host__ __device__ KVP operator()(KVP a, KVP b) const
  {
    if (a.value < b.value) { return a; }
    return b;
  }

  /// Min-reduce two scalar distances.
  __host__ __device__ AccType operator()(AccType a, AccType b) const { return min(a, b); }

  /// True iff `a` is strictly smaller than `b`.
  __host__ __device__ bool isAmin(AccType a, AccType b) const { return a < b; }
};
/**
 * @brief Host-side dispatcher for the fused L2 nearest-neighbor computation.
 *
 * Zeroes the per-row mutex workspace, optionally initializes the output
 * buffer, then dispatches to a CUTLASS-based kernel on SM80+ devices or to
 * the SIMT `fusedL2NNkernel` on older architectures.
 *
 * @param[out] min        output array (length m) of nearest-neighbor results
 * @param[in]  x          first matrix, row major, m x k
 * @param[in]  y          second matrix, row major, n x k
 * @param[in]  xn         squared L2 norms of the rows of x (length m)
 * @param[in]  yn         squared L2 norms of the rows of y (length n)
 * @param[in]  m,n,k      problem dimensions
 * @param[in]  workspace  device buffer of at least m ints, used as row locks
 * @param[in]  redOp      output reduction operator
 * @param[in]  pairRedOp  key-value-pair reduction operator
 * @param[in]  sqrt       if true, compute the actual (sqrt) L2 distance
 * @param[in]  initOutBuffer whether to initialize `min` before reducing
 * @param[in]  stream     CUDA stream on which all work is ordered
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename Policy,
          typename ReduceOpT,
          typename KVPReduceOpT>
void fusedL2NNImpl(OutT* min,
                   const DataT* x,
                   const DataT* y,
                   const DataT* xn,
                   const DataT* yn,
                   IdxT m,
                   IdxT n,
                   IdxT k,
                   int* workspace,
                   ReduceOpT redOp,
                   KVPReduceOpT pairRedOp,
                   bool sqrt,
                   bool initOutBuffer,
                   cudaStream_t stream)
{
  // The kernel policy is determined by fusedL2NN.
  typedef Policy P;

  dim3 blk(P::Nthreads);
  auto nblks            = raft::ceildiv<int>(m, P::Nthreads);
  constexpr auto maxVal = std::numeric_limits<DataT>::max();
  typedef raft::KeyValuePair<IdxT, DataT> KVPair;

  // Clear the per-row mutexes before any kernel uses them.
  RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int) * m, stream));
  if (initOutBuffer) {
    // Seed the output with the reduction identity (maxVal).
    initKernel<DataT, OutT, IdxT, ReduceOpT>
      <<<nblks, P::Nthreads, 0, stream>>>(min, m, maxVal, redOp);
    RAFT_CUDA_TRY(cudaGetLastError());
  }

  namespace arch = raft::util::arch;
  using AccT     = DataT;
  ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};

  raft::identity_op fin_op{};

  auto kernel = fusedL2NNkernel<DataT,
                                OutT,
                                IdxT,
                                P,
                                ReduceOpT,
                                KVPReduceOpT,
                                decltype(distance_op),
                                decltype(fin_op)>;

  // Get pointer to fp32 SIMT kernel to determine the best compute architecture
  // out of all for which the kernel was compiled for that matches closely
  // to the current device. Other methods to determine the architecture (that do not
  // require a pointer) can be error prone. See:
  // https://github.com/NVIDIA/cub/issues/545
  void* kernel_ptr  = reinterpret_cast<void*>(kernel);
  auto runtime_arch = arch::kernel_virtual_arch(kernel_ptr);
  auto cutlass_range = arch::SM_range(arch::SM_80(), arch::SM_future());

  if (cutlass_range.contains(runtime_arch)) {
    // If device is SM_80 or later, use CUTLASS-based kernel.
    using L2Op                  = cuvs::distance::detail::ops::l2_exp_cutlass_op<DataT, DataT>;
    using kvp_cg_min_reduce_op_ = kvp_cg_min_reduce_op<DataT, IdxT, OutT>;
    kvp_cg_min_reduce_op_ cg_reduce_op;
    L2Op L2_dist_op(sqrt);

    IdxT lda, ldb, ldd;
    lda = k, ldb = k, ldd = n;

    cutlassFusedDistanceNN<DataT,
                           DataT,
                           OutT,
                           IdxT,
                           P::Veclen,
                           kvp_cg_min_reduce_op_,
                           L2Op,
                           ReduceOpT,
                           KVPReduceOpT>(x,
                                         y,
                                         xn,
                                         yn,
                                         m,
                                         n,
                                         k,
                                         lda,
                                         ldb,
                                         ldd,
                                         min,
                                         workspace,
                                         cg_reduce_op,
                                         L2_dist_op,
                                         redOp,
                                         pairRedOp,
                                         stream);
  } else {
    // If device less than SM_80, use fp32 SIMT kernel.
    // Shared memory: policy scratch plus one norm value per tile row/column.
    constexpr size_t shmemSize = P::SmemSize + ((P::Mblk + P::Nblk) * sizeof(DataT));
    dim3 grid = launchConfigGenerator<P>(m, n, shmemSize, kernel);

    kernel<<<grid, blk, shmemSize, stream>>>(
      min, x, y, xn, yn, m, n, k, maxVal, workspace, redOp, pairRedOp, distance_op, fin_op);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
}
} // namespace detail
} // namespace distance
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/masked_nn.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <stdint.h>
#include <cuvs/distance/detail/compress_to_bits.cuh>
#include <cuvs/distance/detail/fused_l2_nn.cuh>
#include <cuvs/distance/detail/masked_distance_base.cuh>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace distance {
namespace detail {
/**
 * @brief Fused kernel computing, for each row of x, the nearest row of y
 * among the groups its adjacency bitfield allows, under the (optionally
 * sqrt'ed) expanded L2 distance.
 *
 * Each thread keeps per-row partial key-value minima in registers, reduces
 * them across the warp with shuffles, and merges them into the global output
 * under a per-row mutex (see updateReducedVal).
 *
 * @param[out] min        output array (length m) of nearest-neighbor results
 * @param[in]  x,y        input matrices (m x k and n x k)
 * @param[in]  xn,yn      squared L2 row norms of x and y
 * @param[in]  adj        compressed adjacency bitfield (see compress_to_bits)
 * @param[in]  group_idxs exclusive end index of each group of y rows
 * @param[in]  sqrt       take the square root of the squared distance
 * @param[in]  maxVal     identity value for the min-reduction
 * @param[in]  mutex      one lock per output row (length m)
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename P,
          typename ReduceOpT,
          typename KVPReduceOpT,
          typename CoreLambda,
          typename FinalLambda>
__launch_bounds__(P::Nthreads, 2) RAFT_KERNEL masked_l2_nn_kernel(OutT* min,
                                                                  const DataT* x,
                                                                  const DataT* y,
                                                                  const DataT* xn,
                                                                  const DataT* yn,
                                                                  const uint64_t* adj,
                                                                  const IdxT* group_idxs,
                                                                  IdxT num_groups,
                                                                  IdxT m,
                                                                  IdxT n,
                                                                  IdxT k,
                                                                  bool sqrt,
                                                                  DataT maxVal,
                                                                  int* mutex,
                                                                  ReduceOpT redOp,
                                                                  KVPReduceOpT pairRedOp,
                                                                  CoreLambda core_op,
                                                                  FinalLambda fin_op)
{
  extern __shared__ char smem[];

  typedef raft::KeyValuePair<IdxT, DataT> KVPair;
  // Per-thread running minima; key -1 marks "no neighbor found yet".
  KVPair val[P::AccRowsPerTh];
#pragma unroll
  for (int i = 0; i < P::AccRowsPerTh; ++i) {
    val[i] = {-1, maxVal};
  }

  // epilogue operation lambda for final value calculation
  auto epilog_lambda = [pairRedOp, &val, maxVal, sqrt] __device__(
                         DataT acc[P::AccRowsPerTh][P::AccColsPerTh],
                         int thread_adj,
                         DataT* regxn,
                         DataT* regyn,
                         IdxT tile_idx_n,
                         IdxT tile_idx_m,
                         IdxT tile_end_n) {
    KVPReduceOpT pairRed_op(pairRedOp);

    // Expand dot products to squared L2: ||x||^2 + ||y||^2 - 2<x,y>.
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = regxn[i] + regyn[j] - (DataT)2.0 * acc[i][j];
      }
    }
    if (sqrt) {
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
          acc[i][j] = raft::sqrt(acc[i][j]);
        }
      }
    }

    // intra thread reduce
    const auto acccolid = threadIdx.x % P::AccThCols;
    const auto accrowid = threadIdx.x / P::AccThCols;
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      // thread_adj is a bitfield that contains a 1 at location i iff we must
      // compute row i of acc (the accumulator register tile). It is described in
      // more detail in the maskedDistances.run() method.
      const bool ignore = (thread_adj & (1 << i)) == 0;
      if (ignore) { continue; }
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto tmpkey = acccolid + j * P::AccThCols + tile_idx_n;
        if (tile_end_n <= tmpkey) {
          // Do not process beyond end of tile.
          continue;
        }
        KVPair tmp = {tmpkey, acc[i][j]};
        if (tmpkey < tile_end_n) {
          val[i] = pairRed_op(accrowid + i * P::AccThRows + tile_idx_m, tmp, val[i]);
        }
      }
    }
  };

  auto rowEpilog_lambda =
    [m, mutex, min, pairRedOp, redOp, &val, maxVal] __device__(IdxT tile_idx_m) {
      KVPReduceOpT pairRed_op(pairRedOp);
      ReduceOpT red_op(redOp);

      const auto accrowid = threadIdx.x / P::AccThCols;
      const auto lid      = raft::laneId();

    // reduce: butterfly shuffle reduction across the lanes sharing a row
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = P::AccThCols / 2; j > 0; j >>= 1) {
          auto tmpkey   = raft::shfl(val[i].key, lid + j);
          auto tmpvalue = raft::shfl(val[i].value, lid + j);
          KVPair tmp    = {tmpkey, tmpvalue};
          val[i]        = pairRed_op(accrowid + i * P::AccThRows + tile_idx_m, tmp, val[i]);
        }
      }

      // Merge warp-reduced partials into the global output under the row lock.
      updateReducedVal<P, OutT, IdxT, KVPair, ReduceOpT>(mutex, min, val, red_op, m, tile_idx_m);

      // reset the val array.
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        val[i] = {-1, maxVal};
      }
    };

  IdxT lda = k, ldb = k, ldd = n;
  MaskedDistances<true,
                  DataT,
                  DataT,
                  IdxT,
                  P,
                  CoreLambda,
                  decltype(epilog_lambda),
                  FinalLambda,
                  decltype(rowEpilog_lambda),
                  true>
    obj(x,
        y,
        m,
        n,
        k,
        lda,
        ldb,
        ldd,
        xn,
        yn,
        adj,
        group_idxs,
        num_groups,
        smem,
        core_op,
        epilog_lambda,
        fin_op,
        rowEpilog_lambda);
  obj.run();
}
/**
* @brief Wrapper for masked_l2_nn_kernel
*
* Responsibilities:
* - Allocate (and initialize) workspace memory for:
* - mutexes used in nearest neighbor update step
* - adjacency matrix bitfield
* - Compress adjacency matrix to bitfield
* - Initialize output buffer (conditional on `initOutBuffer`)
* - Specify core and final operations for the L2 norm
* - Determine optimal launch configuration for kernel.
* - Launch kernel and check for errors.
*
* @tparam DataT Input data-type (for x and y matrices).
* @tparam OutT Output data-type (for key-value pairs).
* @tparam IdxT Index data-type.
* @tparam ReduceOpT A struct to perform the final needed reduction
* operation and also to initialize the output array
* elements with the appropriate initial value needed for
* reduction.
* @tparam KVPReduceOpT Type of Reduction operation on key value pairs.
*
* @param handle RAFT handle for managing expensive resources
* @param[out] out Will contain reduced output (nn key-value pairs)
* @param[in] x First matrix. Row major. Dim = `m x k`. (on device)
* @param[in] y Second matrix. Row major. Dim = `n x k`. (on device)
* @param[in] xn L2 squared norm of `x`. Length = `m`.
* @param[in] yn L2 squared norm of `y`. Length = `n`.
* @param[in] adj A boolean adjacency matrix indicating for each
* row of `x` and each group in `y` whether to compute the
* distance. Dim = `m x num_groups`.
* @param[in] group_idxs An array containing the *end* indices of each group
* in `y`. The value of group_idxs[j] indicates the
* start of group j + 1, i.e., it is the inclusive
* scan of the group lengths. The first group is
* always assumed to start at index 0 and the last
* group typically ends at index `n`. Length =
* `num_groups`.
* @param[in] num_groups Length of `group_idxs`.
* @param m Rows of `x`.
* @param n Rows of `y`.
* @param k Cols of `x` and `y`.
* @param redOp Reduction operator in the epilogue
* @param pairRedOp Reduction operation on key value pairs
* @param sqrt Whether to compute the squared or actual (i.e. sqrt) L2 norm.
* @param initOutBuffer Whether to initialize the output buffer
*
*
*/
template <typename DataT, typename OutT, typename IdxT, typename ReduceOpT, typename KVPReduceOpT>
void masked_l2_nn_impl(raft::resources const& handle,
                       OutT* out,
                       const DataT* x,
                       const DataT* y,
                       const DataT* xn,
                       const DataT* yn,
                       const bool* adj,
                       const IdxT* group_idxs,
                       IdxT num_groups,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       ReduceOpT redOp,
                       KVPReduceOpT pairRedOp,
                       bool sqrt,
                       bool initOutBuffer)
{
  // Fully qualified: inside namespace cuvs the unqualified `linalg::` does not
  // resolve to raft::linalg (contractions.cuh provides raft::linalg::Policy4x4).
  typedef typename raft::linalg::Policy4x4<DataT, 1>::Policy P;

  static_assert(P::Mblk == 64, "masked_l2_nn_impl only supports a policy with 64 rows per block.");

  // Get stream and workspace memory resource
  rmm::mr::device_memory_resource* ws_mr =
    dynamic_cast<rmm::mr::device_memory_resource*>(raft::resource::get_workspace_resource(handle));
  // Fully qualified for consistency with raft::resource::get_workspace_resource
  // above; `resource::` alone does not resolve inside namespace cuvs.
  auto stream = raft::resource::get_cuda_stream(handle);

  // Acquire temporary buffers and initialize to zero:
  // 1) Adjacency matrix bitfield
  // 2) Workspace for fused nearest neighbor operation
  size_t m_div_64 = raft::ceildiv(m, IdxT(64));
  rmm::device_uvector<uint64_t> ws_adj64{m_div_64 * num_groups, stream, ws_mr};
  rmm::device_uvector<int> ws_fused_nn{size_t(m), stream, ws_mr};
  RAFT_CUDA_TRY(cudaMemsetAsync(ws_adj64.data(), 0, ws_adj64.size() * sizeof(uint64_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(ws_fused_nn.data(), 0, ws_fused_nn.size() * sizeof(int), stream));

  // Compress boolean adjacency matrix to bitfield.
  auto adj_view = raft::make_device_matrix_view<const bool, int>(adj, m, num_groups);
  auto adj64_view =
    raft::make_device_matrix_view<uint64_t, int>(ws_adj64.data(), m_div_64, num_groups);
  compress_to_bits(handle, adj_view, adj64_view);

  // Initialize output buffer with keyvalue pairs as determined by the reduction
  // operator (it will be called with maxVal).
  constexpr auto maxVal = std::numeric_limits<DataT>::max();
  if (initOutBuffer) {
    dim3 grid(raft::ceildiv<int>(m, P::Nthreads));
    dim3 block(P::Nthreads);

    initKernel<DataT, OutT, IdxT, ReduceOpT><<<grid, block, 0, stream>>>(out, m, maxVal, redOp);
    RAFT_CUDA_TRY(cudaGetLastError());
  }

  // Accumulation operation lambda
  auto core_lambda = [] __device__(DataT & acc, DataT & x, DataT & y) { acc += x * y; };
  auto fin_op      = raft::identity_op{};

  auto kernel = masked_l2_nn_kernel<DataT,
                                    OutT,
                                    IdxT,
                                    P,
                                    ReduceOpT,
                                    KVPReduceOpT,
                                    decltype(core_lambda),
                                    decltype(fin_op)>;
  // Shared memory: policy scratch plus one norm value per tile row/column.
  constexpr size_t smemSize = P::SmemSize + ((P::Mblk + P::Nblk) * sizeof(DataT));
  dim3 block(P::Nthreads);
  dim3 grid = launchConfigGenerator<P>(m, n, smemSize, kernel);

  kernel<<<grid, block, smemSize, stream>>>(out,
                                            x,
                                            y,
                                            xn,
                                            yn,
                                            ws_adj64.data(),
                                            group_idxs,
                                            num_groups,
                                            m,
                                            n,
                                            k,
                                            sqrt,
                                            maxVal,
                                            ws_fused_nn.data(),
                                            redOp,
                                            pairRedOp,
                                            core_lambda,
                                            fin_op);
  RAFT_CUDA_TRY(cudaGetLastError());
}
} // namespace detail
} // namespace distance
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_distance_epilogue_elementwise.h | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
/*! \file
\brief Functor performing distance operations used by epilogues of pairwise distance
* kernels.
* This is adapted from LinearCombinationBiasElementwise from CUTLASS 2.9.0
* customized for applying elementwise distance formula on accumulated GEMM value
* and applying user-defined final custom operation on the distance value.
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/epilogue/thread/activation.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          typename DistanceOp_,
          typename FinalOp_>
class PairwiseDistanceEpilogueElementwise {
 public:
  using ElementOutput      = ElementC_;
  using ElementC           = ElementC_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute     = ElementCompute_;
  using ElementZ           = ElementZ_;
  using ElementT           = ElementT_;
  static int const kElementsPerAccess = ElementsPerAccess;
  static int const kCount             = kElementsPerAccess;

  using DistanceOp = DistanceOp_;
  using FinalOp    = FinalOp_;

  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute     = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC           = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ           = Array<ElementZ, kElementsPerAccess>;
  using FragmentT           = Array<ElementT, kElementsPerAccess>;

  using FragmentOutput = FragmentZ;

  static bool const kIsHeavy = false;  // ElementwiseOp::kIsHeavy;

  /// The 'Z' tensor is never materialized by this epilogue...
  static bool const kStoreZ = false;

  /// ...the user-visible result is written through 'T' instead.
  static bool const kStoreT = true;

  /// Host-constructable parameters structure
  struct Params {
    FinalOp_ final_op_;
    DistanceOp_ dist_op_;

    //
    // Methods
    //
    CUTLASS_HOST_DEVICE
    Params(DistanceOp_ dist_op, FinalOp final_op) : final_op_(final_op), dist_op_(dist_op) {}

    CUTLASS_HOST_DEVICE
    Params() {}
  };

 private:
  //
  // Data members
  //
  FinalOp_ fin_op_;          // user-defined post-processing of each distance
  DistanceOp_ distance_op_;  // elementwise distance formula

 public:
  //
  // Methods
  //

  /// Constructor from Params
  CUTLASS_HOST_DEVICE
  PairwiseDistanceEpilogueElementwise(Params const& params)
    : fin_op_(params.final_op_), distance_op_(params.dist_op_)
  {
  }

  /// Returns true if source is needed
  CUTLASS_HOST_DEVICE
  bool is_source_needed() const
  {
    // we use for making sure C matrix path is used for A mat norm.
    return true;
  }

  /// Functionally required for serial reduction in the epilogue
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
    // Convert the GEMM accumulator and the C operand into the compute type.
    FragmentCompute compute_ab =
      NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
    FragmentCompute compute_c =
      NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);

    FragmentCompute distances;
    FragmentCompute finalized;

    // Per element: apply the distance formula, then the user's final op.
    CUTLASS_PRAGMA_UNROLL
    for (int idx = 0; idx < kElementsPerAccess; ++idx) {
      distances[idx] = distance_op_(compute_c[idx], V[idx], compute_ab[idx]);
      finalized[idx] = fin_op_(distances[idx], 0);
    }

    NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t;
    frag_T = convert_t(finalized);
  }

  /// Applies the operation when is_source_needed() is false
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/predicated_tile_iterator_normvec.h | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- Only the row index is used to load the data in load_with_byte_offset().
This way the same normalization data is used across all columns in a row.
*/
#pragma once
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_,        ///< Thread map (concept: OutputTileThreadMap)
          typename Element_,          ///< Element data type
          typename Layout_,           ///< Target tensor layout
          bool ScatterD     = false,  ///< Scatter D operand or not
          bool UseCUDAStore = false>  ///< Use plain CUDA stores instead of cutlass::arch::global_store
class PredicatedTileIteratorNormVec {
 public:
  using ThreadMap = ThreadMap_;
  using Shape     = typename ThreadMap::Shape;
  using Element   = Element_;

  using Layout         = Layout_;
  using TensorRef      = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;

  using Index       = typename Layout::Index;
  using LongIndex   = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;

  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads           = ThreadMap::kThreads;
  static int const kIterations        = ThreadMap::Count::kTile;

  static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
  static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
  static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
  static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");

  /// Fragment object: one access-wide element per (cluster, group, row, column) iteration.
  using Fragment = Array<Element,
                         ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
                           ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                           ThreadMap::kElementsPerAccess>;

  /// Memory access size
  using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;

  //
  // Parameters struct
  //

  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    CUTLASS_HOST_DEVICE
    Params() {}

    /// Derive the byte strides/increments from the tensor layout and the thread map.
    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };

  /// Mask object: one predicate per column iteration (guards out-of-bounds columns).
  struct Mask {
    static int const kCount = ThreadMap::Iterations::kColumn;

    /// Predicate state
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< Efficiently enables all accesses guarded by mask
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };

 private:
  //
  // Data members
  //

  /// Parameters structure containing reference and precomputed state.
  PredicatedTileIteratorParams params_;

  /// Byte-level pointer
  uint8_t* byte_pointer_;

  /// Array of boolean values to contain steady-state predicates
  Mask mask_;

  /// Extent of the matrix tile in rows
  Index extent_row_;

  /// Extent of the matrix tile in columns
  Index extent_column_;

  /// A thread's starting row position (assuming steady-state predicates have been computed)
  Index thread_start_row_;

  /// A thread's starting column
  Index thread_start_column_;

  /// Internal state counter (row / group / cluster position within the tile)
  int state_[3];

  /// Scatter indices (row remapping used only when ScatterD is true)
  int const* indices_;

  //
  // Static asserts about internal strides
  //
  static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");

 private:
  //
  // Methods
  //

 public:
  //
  // Methods
  //

  /// Constructor: computes this thread's initial offset, column predicates, and byte pointer.
  CUTLASS_DEVICE
  PredicatedTileIteratorNormVec(PredicatedTileIteratorParams const& params,
                                Element* pointer,
                                TensorCoord extent,
                                int thread_idx,
                                TensorCoord threadblock_offset = TensorCoord(),
                                int const* indices             = nullptr)
    : params_(params), indices_(indices)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;

    extent_row_    = extent.row();
    extent_column_ = extent.column();

    thread_start_row_    = thread_offset.row();
    thread_start_column_ = thread_offset.column();

    // Initialize predicates
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
      mask_.predicates[c] =
        ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
    }

    // Null pointer performs no accesses
    if (!pointer) { mask_.clear(); }

    // ScatterD without an index vector cannot address memory; disable all accesses.
    if (ScatterD && !indices) { mask_.clear(); }

    // Initialize pointer
    byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                    LongIndex(thread_offset.row()) * LongIndex(params_.stride);

    if (ScatterD) {
      // Row offset is resolved per access from indices_; only the column offset is baked in here.
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }

    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }

  /// Adds a pointer offset in units of Element
  CUTLASS_HOST_DEVICE
  void add_pointer_offset(LongIndex pointer_offset)
  {
    byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
  }

  /// Loads a fragment from memory.
  ///
  /// NormVec specialization: only column 0 of each row is actually read from global
  /// memory; the remaining column fragments of that row are copied from column 0.
  /// This broadcasts a per-row value (e.g. a row norm) across the whole tile width.
  CUTLASS_DEVICE
  void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr  = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          if (ScatterD && row_guard) {
            assert(indices_);

            memory_pointer = reinterpret_cast<AccessType*>(
              byte_pointer + byte_offset +
              LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
          }

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            if (column == 0) {
              // Single guarded global load for the row's leading access.
              cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
                (void*)&memory_pointer[0],
                guard);
            } else {
              // Broadcast column 0's value into the remaining column fragments.
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column] =
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn];
            }
          }

          if (row + 1 < ThreadMap::Iterations::kRow) {
            if (!ScatterD) { byte_pointer += params_.increment_row; }
          }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }

  /// Stores a fragment to memory. Every column is written (predicated), unlike load.
  CUTLASS_DEVICE
  void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
  {
    uint8_t* byte_pointer      = byte_pointer_;
    AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          if (ScatterD && row_guard) {
            assert(indices_);

            memory_pointer = reinterpret_cast<AccessType*>(
              byte_pointer + byte_offset +
              LongIndex(indices_[row_offset + thread_start_row_]) * LongIndex(params_.stride));
          }

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            if (UseCUDAStore) {
              if (guard) {
                memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess] =
                  frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column];
              }
            } else {
              cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
                frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
                (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
                guard);
            }
          }

          if (row + 1 < ThreadMap::Iterations::kRow) {
            if (!ScatterD) { byte_pointer += params_.increment_row; }
          }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Stores a fragment to memory
  CUTLASS_DEVICE
  void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }

  /// Loads a fragment from memory, remapping each output row to a 2x-downsampled
  /// input row for an NHWC convolution-shaped tensor.
  /// NOTE(review): the local `byte_offset` intentionally shadows the parameter, and
  /// the offset math assumes `sizeof(float)` elements — confirm against callers.
  CUTLASS_DEVICE
  void downsample_load_with_byte_offset(Fragment& frag,
                                        int64_t byte_offset,
                                        int convolution_P,
                                        int convolution_Q,
                                        int add_P,
                                        int add_Q,
                                        int problem_N) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr  = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          // Decompose the linear output row into (N, P, Q) coordinates.
          int output_row = row_offset + thread_start_row_;
          int output_N   = output_row / (convolution_P * convolution_Q);
          int output_PQ  = output_row % (convolution_P * convolution_Q);
          int output_P   = output_PQ / convolution_Q;
          int output_Q   = output_PQ % convolution_Q;

          // Map to the corresponding row of the 2x-larger input tensor.
          int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
                          (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + add_Q;

          int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
              (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
              guard);
          }

          if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// Loads a fragment from memory, remapping each output row to a 2x-upsampled
  /// input row for an NHWC convolution-shaped tensor.
  /// NOTE(review): the local `byte_offset` intentionally shadows the parameter, and
  /// the offset math assumes `sizeof(float)` elements — confirm against callers.
  CUTLASS_DEVICE
  void upsample_load_with_byte_offset(Fragment& frag,
                                      int64_t byte_offset,
                                      int convolution_P,
                                      int convolution_Q,
                                      int add_P,
                                      int add_Q,
                                      int problem_N) const
  {
    uint8_t* byte_pointer = byte_pointer_;
    AccessType* frag_ptr  = reinterpret_cast<AccessType*>(&frag);

    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));

          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;

          bool row_guard = ((row_offset + thread_start_row_) < extent_row_);

          // Decompose the linear output row into (N, P, Q) coordinates.
          int output_row = row_offset + thread_start_row_;
          int output_N   = output_row / (convolution_P * convolution_Q);
          int output_PQ  = output_row % (convolution_P * convolution_Q);
          int output_P   = output_PQ / convolution_Q;
          int output_Q   = output_PQ % convolution_Q;

          // Clamp the phase shift at the right/bottom edges of the output.
          int row_add_P = add_P;
          int row_add_Q = add_Q;
          if (output_P > convolution_P - 2) row_add_P = 0;
          if (output_Q > convolution_Q - 2) row_add_Q = 0;

          // Map to the corresponding row of the 2x-smaller input tensor.
          int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
                          ((output_P + row_add_P) / 2) * (convolution_Q / 2) +
                          (output_Q + row_add_Q) / 2;

          int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);

          AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer + byte_offset);

          CUTLASS_PRAGMA_UNROLL
          for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
            bool guard = row_guard && mask_.predicates[column];

            cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
              frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
              (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / kElementsPerAccess],
              guard);
          }

          if (row + 1 < ThreadMap::Iterations::kRow) { byte_pointer += params_.increment_row; }
        }

        if (group + 1 < ThreadMap::Iterations::kGroup) { byte_pointer += params_.increment_group; }
      }

      if (cluster + 1 < ThreadMap::Iterations::kCluster) {
        byte_pointer += params_.increment_cluster;
      }
    }
  }

  /// This thread's starting (row, column) coordinate within the tile.
  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }

  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }

  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }

  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }

  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }

  /// Advances to the next position to load or store.
  /// state_[0..2] track the row/group/cluster position; each carry advances the
  /// byte pointer and this thread's logical start row accordingly.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorNormVec& operator++()
  {
    ++state_[0];

    if (!ScatterD) { byte_pointer_ += params_.advance_row; }

    thread_start_row_ += ThreadMap::Shape::kRow;

    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;

      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;

      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;

        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;

        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }

    return *this;
  }

  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }

  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }

  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }

  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_distance_epilogue.h | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This is adapted from DefaultEpilogueWithBroadcastTensorOp from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/master/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h#L75)
This epilogue allows us to load norm buffers using PredicatedTileIteratorNormVec
and EpilogueWithBroadcast used for distances L2/cosine as well as applies user-define elementwise
operation.
-- A norm load is provided PredicatedTileIteratorNormVec
-- B norm load is provided by EpilogueWithBroadcast
-- elementwise operation is provided by OutputOp
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/gemm.h>
#include "./predicated_tile_iterator_normvec.h"
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <cutlass/epilogue/threadblock/epilogue_with_broadcast.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
template <typename Shape,            ///< Threadblock-level tile shape
          typename WarpMmaTensorOp,  ///< Warp-level tensor-op MMA
          int PartitionsK,           ///< Number of K partitions (split-K slices)
          typename ElementOutput,    ///< Output element type (also used for the A-norm load)
          typename ElementTensor,    ///< Element type of the elementwise-result tensor t
          typename ElementVector,    ///< Element type of the broadcast (B-norm) vector
          typename OutputOp,         ///< User-defined elementwise epilogue operation
          typename LayoutT,          ///< Output tensor layout
          int ElementsPerAccess,     ///< Vector width of each memory access
          bool ScatterD = false>     ///< Scatter rows of D via an index vector
struct PairwiseDistanceEpilogue {
  /// Use defaults related to the existing epilogue
  using Base =
    DefaultEpilogueTensorOp<Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess>;

  //
  // Stores the result z = (y = GEMM(A, B, C), broadcast)
  //
  // Per the file header: the A-norm load is provided by PredicatedTileIteratorNormVec,
  // which reads only column 0 of each row and broadcasts it across the tile.
  using OutputTileIterator = cutlass::epilogue::threadblock::
    PredicatedTileIteratorNormVec<typename Base::OutputTileThreadMap, ElementOutput, LayoutT>;

  //
  // Additional tensor tile iterator - stores t = Elementwise(z)
  //
  using TensorTileIterator =
    cutlass::epilogue::threadblock::PredicatedTileIterator<typename Base::OutputTileThreadMap,
                                                           ElementTensor>;

  /// Define the epilogue (EpilogueWithBroadcast supplies the B-norm broadcast vector)
  using Epilogue = EpilogueWithBroadcast<Shape,
                                         WarpMmaTensorOp,
                                         PartitionsK,
                                         OutputTileIterator,
                                         TensorTileIterator,
                                         ElementVector,
                                         typename Base::AccumulatorFragmentIterator,
                                         typename Base::WarpTileIterator,
                                         typename Base::SharedLoadIterator,
                                         OutputOp,
                                         typename Base::Padding,
                                         Base::kFragmentsPerIteration>;
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_distance_base.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/contractions.cuh> // raft::linalg::Contractions_NT
#include <raft/util/cuda_dev_essentials.cuh> // ceildiv
#include <raft/util/cuda_rt_essentials.hpp> // RAFT_CUDA_TRY
#include <cstddef> // size_t
namespace cuvs {
namespace distance {
namespace detail {
/**
* @brief Device class for L1, L2 and cosine distance metrics.
* @tparam DataT input data-type (for A and B matrices)
* @tparam AccT accumulation data-type
* @tparam OutT output data-type (for C and D matrices)
* @tparam IdxT index data-type
* @tparam Policy struct which tunes the Contraction kernel
* @tparam OpT A distance operation, e.g., cosine_distance_op.
* @tparam EpilogueLambda applies an elementwise function to compute final
values. Its signature is:
template <typename AccT, typename DataT> void epilogue_lambda
(AccT acc[][], DataT* regxn, DataT* regyn);
* @tparam FinalLambda the final lambda called on final distance value
* @param[in] x input matrix
* @param[in] y input matrix
* @param[in] m number of rows of A and C/D
* @param[in] n number of columns of B and C/D
* @param[in] k number of cols of A and rows of B
* @param[in] lda leading dimension of A
* @param[in] ldb leading dimension of B
* @param[in] ldd leading dimension of C/D
* @param[in] xn row norms of input matrix A. Required for expanded L2, cosine
* @param[in] yn row norms of input matrix B. Required for expanded L2, cosine
* @param[output] pD output matrix
* @param[in] smem shared mem buffer for intermediate storage of A, B, xn & yn.
* @param distance_op the distance operation, e.g. cosine_distance_op
* @param epilog_op the epilog operation lambda
* @param fin_op the final gemm epilogue lambda
* @param rowEpilog_op epilog lambda that executes when a full row has been processed
*/
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename Policy,
          typename OpT,
          typename EpilogueLambda,
          typename FinalLambda,
          typename rowEpilogueLambda,
          bool isRowMajor    = true,
          bool writeOut      = true,
          typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy, isRowMajor>>
struct PairwiseDistances : public BaseClass {
  // Get accumulation type from distance_op
  using AccT = typename OpT::AccT;

 private:
  typedef Policy P;
  const DataT* xn;           // row norms of x; read only when distance_op.use_norms
  const DataT* yn;           // row norms of y; read only when distance_op.use_norms
  const DataT* const yBase;  // copy of the original y pointer (not referenced in this block)
  OutT* dOutput;             // output distance matrix, row major, m x n
  char* smem;                // shared scratch; norm staging lives past the first P::SmemSize bytes
  OpT distance_op;
  EpilogueLambda epilog_op;
  FinalLambda fin_op;
  rowEpilogueLambda rowEpilog_op;

  // Tile-wise grid stride: each block walks Mblk x Nblk tiles, stepping by the
  // whole grid so any grid size covers the full m x n problem.
  const IdxT grid_stride_m;
  const IdxT grid_stride_n;
  const IdxT grid_offset_m;
  const IdxT grid_offset_n;

  // Per-thread register accumulator tile.
  AccT acc[P::AccRowsPerTh][P::AccColsPerTh];

 public:
  // Constructor
  DI PairwiseDistances(const DataT* _x,
                       const DataT* _y,
                       IdxT _m,
                       IdxT _n,
                       IdxT _k,
                       IdxT _lda,
                       IdxT _ldb,
                       IdxT _ldd,
                       const DataT* _xn,
                       const DataT* _yn,
                       OutT* _dOutput,
                       char* _smem,
                       OpT _distance_op,
                       EpilogueLambda _epilog_op,
                       FinalLambda _fin_op,
                       rowEpilogueLambda _rowEpilog_op)
    : BaseClass(_x, _y, _m, _n, _k, _lda, _ldb, _ldd, _smem),
      xn(_xn),
      yn(_yn),
      yBase(_y),
      dOutput(_dOutput),
      smem(_smem),
      distance_op(_distance_op),
      epilog_op(_epilog_op),
      fin_op(_fin_op),
      rowEpilog_op(_rowEpilog_op),
      grid_stride_m(P::Mblk * gridDim.y),
      grid_stride_n(P::Nblk * gridDim.x),
      grid_offset_m(P::Mblk * blockIdx.y),
      grid_offset_n(P::Nblk * blockIdx.x)
  {
  }

  // Main entry point: iterate over this block's tiles; for each tile run the
  // double-buffered k-loop, apply the distance/epilogue ops, and optionally
  // write results to global memory.
  DI void run()
  {
    for (auto tile_idx_m = grid_offset_m; tile_idx_m < this->m; tile_idx_m += grid_stride_m) {
      this->ldgXY(tile_idx_m, grid_offset_n, 0);
      for (auto tile_idx_n = grid_offset_n; tile_idx_n < this->n; tile_idx_n += grid_stride_n) {
        // Prolog:
        reset_accumulator();
        this->stsXY();
        __syncthreads();
        this->switch_write_buffer();

        // Main loop:
        for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
          this->ldgXY(tile_idx_m, tile_idx_n, kidx);
          // Process all data in shared memory (previous k-block) and
          // accumulate in registers.
          accumulate();
          this->stsXY();
          __syncthreads();
          this->switch_write_buffer();
          this->switch_read_buffer();
        }
        accumulate();  // last iteration
        // The pre-condition for the loop over tile_idx_n is that write_buffer
        // and read_buffer point to the same buffer. This flips read_buffer back
        // so that it satisfies the pre-condition of this loop.
        this->switch_read_buffer();

        // Epilog:
        if (distance_op.use_norms) {
          DataT regxn[P::AccRowsPerTh], regyn[P::AccColsPerTh];
          load_norms(tile_idx_m, tile_idx_n, regxn, regyn);
          // Overlap ldg with epilog computation
          ldgNextGridStride(tile_idx_m, tile_idx_n);
          // Calculate distance_op epilog.
          // Use .template to disambiguate (See:
          // https://en.cppreference.com/w/cpp/language/dependent_name)
          distance_op.template epilog<Policy>(acc, regxn, regyn, tile_idx_n, tile_idx_m);
          // And any possible additional epilogs
          epilog_op(acc, regxn, regyn, tile_idx_n, tile_idx_m);
        } else {
          // Overlap ldg with epilog computation
          ldgNextGridStride(tile_idx_m, tile_idx_n);
          // Calculate distance_op epilog.
          // Use .template to disambiguate (See:
          // https://en.cppreference.com/w/cpp/language/dependent_name)
          distance_op.template epilog<Policy>(acc, nullptr, nullptr, tile_idx_n, tile_idx_m);
          // And any possible additional epilogs
          epilog_op(acc, nullptr, nullptr, tile_idx_n, tile_idx_m);
        }
        if (writeOut) { store_output(tile_idx_m, tile_idx_n); }
      }
      rowEpilog_op(tile_idx_m);
    }
  }

 private:
  // Prefetch the first k-block of the NEXT tile (n advances first, then m),
  // so the global load overlaps the current tile's epilogue.
  DI void ldgNextGridStride(IdxT tile_idx_m, IdxT tile_idx_n)
  {
    // Fetch next grid stride ldg if within range
    const auto next_tile_tile_idx_n = tile_idx_n + grid_stride_n;
    const auto next_tile_tile_idx_m = tile_idx_m + grid_stride_m;
    if ((next_tile_tile_idx_n) < this->n) {
      this->ldgXY(tile_idx_m, next_tile_tile_idx_n, 0);
    } else if ((next_tile_tile_idx_m) < this->m) {
      this->ldgXY(next_tile_tile_idx_m, grid_offset_n, 0);
    }
  }

  DI void reset_accumulator()
  {
    // Reset accumulator registers to zero.
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = BaseClass::Zero;
      }
    }
  }

  // Apply distance_op.core to one register tile of x and y vectors.
  DI void accumulate_reg_tile(DataT (&reg_x)[P::AccRowsPerTh][P::Veclen],
                              DataT (&reg_y)[P::AccColsPerTh][P::Veclen])
  {
#pragma unroll
    for (int v = 0; v < P::Veclen; ++v) {
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
          distance_op.core(acc[i][j], reg_x[i][v], reg_y[j][v]);
        }
      }
    }
  }

  DI void accumulate()
  {
    // We have a separate raft::ldsXY and accumulate_reg_tile outside the loop body,
    // so that these separated calls can be interspersed with preceding and
    // following instructions, thereby hiding latency.
    this->ldsXY(0);

    // If expensive inner loop, do not unroll loop.
    constexpr int num_iterations = P::Kblk / P::Veclen - 1;
    constexpr int unroll_count   = decltype(distance_op)::expensive_inner_loop ? 1 : num_iterations;
#pragma unroll unroll_count
    for (int ki = P::Veclen; ki < P::Kblk; ki += P::Veclen) {
      accumulate_reg_tile(this->regx, this->regy);
      this->ldsXY(ki);
    }

    // Accumulate last loaded tile.
    accumulate_reg_tile(this->regx, this->regy);
  }

  // Stage the x/y row norms for this tile into shared memory (past P::SmemSize),
  // then read each thread's slice into registers. Out-of-range rows get norm 0.
  DI void load_norms(IdxT tile_idx_m,
                     IdxT tile_idx_n,
                     DataT (&regxn)[P::AccRowsPerTh],
                     DataT (&regyn)[P::AccColsPerTh])
  {
    DataT* sxNorm = (DataT*)(&smem[P::SmemSize]);
    DataT* syNorm = (&sxNorm[P::Mblk]);

    // Load x & y norms required by this threadblock in shmem buffer
    // (x norms only need (re)loading at the start of each row of tiles).
    if (tile_idx_n == blockIdx.x * P::Nblk) {
      for (int i = threadIdx.x; i < P::Mblk; i += P::Nthreads) {
        auto idx  = tile_idx_m + i;
        sxNorm[i] = idx < this->m ? xn[idx] : 0;
      }
    }

    for (int i = threadIdx.x; i < P::Nblk; i += P::Nthreads) {
      auto idx  = tile_idx_n + i;
      syNorm[i] = idx < this->n ? yn[idx] : 0;
    }
    __syncthreads();

#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      regxn[i] = sxNorm[i * P::AccThRows + (threadIdx.x / P::AccThCols)];
    }
#pragma unroll
    for (int i = 0; i < P::AccColsPerTh; ++i) {
      regyn[i] = syNorm[i * P::AccThCols + (threadIdx.x % P::AccThCols)];
    }
  }

  // Apply fin_op to each accumulator and write the in-bounds results to dOutput.
  DI void store_output(IdxT tile_idx_m, IdxT tile_idx_n)
  {
    IdxT starty = tile_idx_m + this->accrowid;
    IdxT startx = tile_idx_n + this->acccolid;

#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      auto rowId = starty + i * P::AccThRows;
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto colId = startx + j * P::AccThCols;
        if (rowId < this->m && colId < this->n) {
          // Promote to 64 bit index for final write, as output array can be > 2^31
          dOutput[std::size_t(rowId) * this->n + colId] = fin_op(acc[i][j], 0);
        }
      }
    }
  }
};  // struct PairwiseDistances
/**
 * @brief Compute a launch grid for a pairwise-distance kernel.
 *
 * Targets roughly `numSMs * maxActiveBlocksPerSM` resident blocks: grid.y covers
 * the m dimension first (capped at the occupancy target), then grid.x is grown
 * over the n dimension until the target is met (capped at the number of n tiles).
 *
 * @tparam P    contraction policy providing Mblk, Nblk and Nthreads
 * @tparam IdxT index type of the problem size
 * @tparam T    kernel function type
 * @param[in] m        number of rows of the output
 * @param[in] n        number of columns of the output
 * @param[in] sMemSize dynamic shared memory per block, in bytes
 * @param[in] func     the kernel to be launched (used for the occupancy query)
 * @return grid dimensions (block dimensions are P::Nthreads, chosen by the caller)
 */
template <typename P, typename IdxT, typename T>
dim3 launchConfigGenerator(IdxT m, IdxT n, std::size_t sMemSize, T func)
{
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int numSMs;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, devId));

  int numBlocksPerSm = 0;
  dim3 grid;

  RAFT_CUDA_TRY(
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, func, P::Nthreads, sMemSize));
  std::size_t minGridSize = numSMs * numBlocksPerSm;
  std::size_t yChunks     = raft::ceildiv<int>(m, P::Mblk);
  std::size_t xChunks     = raft::ceildiv<int>(n, P::Nblk);
  grid.y                  = yChunks > minGridSize ? minGridSize : yChunks;
  // If grid.y already saturates the occupancy target, a single column of blocks
  // suffices. NOTE: the previous form `(minGridSize - grid.y) <= 0` operated on
  // unsigned values, so it could never be negative and was effectively `== 0`;
  // it only behaved correctly because grid.y <= minGridSize always holds.
  // Compare directly to make the intent explicit and underflow-proof.
  grid.x = (minGridSize <= grid.y) ? 1 : xChunks;
  if (grid.x != 1) {
    // Find the smallest x multiplier that reaches the occupancy target,
    // without exceeding the number of n tiles.
    std::size_t i = 1;
    while (grid.y * i < minGridSize) {
      i++;
    }
    grid.x = i >= xChunks ? xChunks : i;
  }

  return grid;
}
}; // namespace detail
}; // namespace distance
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/compress_to_bits.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
namespace cuvs::distance::detail {
/**
* @brief Compress 2D boolean matrix to bitfield
*
* Utility kernel for masked_l2_nn.
*
* @tparam T
*
* @parameter[in] in An `m x n` boolean matrix. Row major.
* @parameter[out] out An `(m / bits_per_elem) x n` matrix with elements of
* type T, where T is of size `bits_per_elem` bits.
* Note: the division (`/`) is a ceilDiv.
*/
// Kernel: each thread owns one column of a (tile_dim_m x tile_dim_n) boolean tile
// and folds its tile_dim_m (== bits_per_element) row values into a single T.
// Launch: 1D grid of 128-thread blocks; tiles are distributed via a grid-stride
// loop over the linearized tile index.
template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>>
RAFT_KERNEL compress_to_bits_kernel(
  raft::device_matrix_view<const bool, int, raft::layout_c_contiguous> in,
  raft::device_matrix_view<T, int, raft::layout_c_contiguous> out)
{
  constexpr int bits_per_element = 8 * sizeof(T);
  constexpr int tile_dim_m       = bits_per_element;
  constexpr int nthreads         = 128;
  constexpr int tile_dim_n       = nthreads;  // read 128 bools at once = 1 sector

  // Tile in shared memory is transposed: smem[n][m], so each thread drains a
  // contiguous row of tile_dim_m bools below.
  __shared__ bool smem[tile_dim_n][tile_dim_m];

  const int num_tiles_per_m = raft::ceildiv(in.extent(0), tile_dim_m);
  const int num_tiles_per_n = raft::ceildiv(in.extent(1), tile_dim_n);

  for (int lin_tile_idx = blockIdx.x; true; lin_tile_idx += gridDim.x) {
    const int tile_idx_n = tile_dim_n * (lin_tile_idx % num_tiles_per_n);
    const int tile_idx_m = tile_dim_m * (lin_tile_idx / num_tiles_per_n);

    if (in.extent(0) <= tile_idx_m) { break; }
    // Fill shared memory tile; out-of-bounds positions read as false.
    bool reg_buf[tile_dim_m];
#pragma unroll
    for (int i = 0; i < tile_dim_m; ++i) {
      const int in_m       = tile_idx_m + i;
      const int in_n       = tile_idx_n + threadIdx.x;
      bool in_bounds       = in_m < in.extent(0) && in_n < in.extent(1);
      reg_buf[i]           = in_bounds ? in(in_m, in_n) : false;
      smem[threadIdx.x][i] = reg_buf[i];
    }
    __syncthreads();

    // Drain memory tile into single output element out_elem.
    // BUGFIX: iterate over bits_per_element (== tile_dim_m, the extent of smem's
    // second dimension), not tile_dim_n. The previous bound (tile_dim_n == 128)
    // read past the end of smem[threadIdx.x][] and shifted T(1) beyond the width
    // of T, which is undefined behavior.
    T out_elem{0};
#pragma unroll
    for (int j = 0; j < bits_per_element; ++j) {
      if (smem[threadIdx.x][j]) { out_elem |= T(1) << j; }
    }
    __syncthreads();

    // Write output: row out_m packs input rows [tile_idx_m, tile_idx_m + 63] of
    // column out_n, bit j corresponding to input row tile_idx_m + j.
    int out_m = tile_idx_m / bits_per_element;
    int out_n = tile_idx_n + threadIdx.x;

    if (out_m < out.extent(0) && out_n < out.extent(1)) { out(out_m, out_n) = out_elem; }
  }
}
/**
* @brief Compress 2D boolean matrix to bitfield
*
* Utility kernel for masked_l2_nn.
*
* @tparam T
*
* @parameter[in] in An `m x n` boolean matrix. Row major.
* @parameter[out] out An `(m / bits_per_elem) x n` matrix with elements of
* type T, where T is of size `bits_per_elem` bits.
* Note: the division (`/`) is a ceilDiv.
*/
// Host launcher: validates shapes, sizes the grid for full occupancy, and runs
// compress_to_bits_kernel on the handle's stream.
template <typename T = uint64_t, typename = std::enable_if_t<std::is_integral<T>::value>>
void compress_to_bits(raft::resources const& handle,
                      raft::device_matrix_view<const bool, int, raft::layout_c_contiguous> in,
                      raft::device_matrix_view<T, int, raft::layout_c_contiguous> out)
{
  // Fully qualify: this file is in namespace cuvs::distance::detail, where an
  // unqualified `resource::` does not resolve (all other raft names here are
  // raft::-qualified).
  auto stream                    = raft::resource::get_cuda_stream(handle);
  constexpr int bits_per_element = 8 * sizeof(T);

  RAFT_EXPECTS(raft::ceildiv(in.extent(0), bits_per_element) == out.extent(0),
               "Number of output rows must be ceildiv(input rows, bits_per_elem)");
  RAFT_EXPECTS(in.extent(1) == out.extent(1), "Number of output columns must equal input columns.");

  // Size the grid to the maximum number of resident blocks on this device.
  const int num_SMs = raft::getMultiProcessorCount();
  int blocks_per_sm = 0;
  constexpr int num_threads   = 128;
  constexpr int dyn_smem_size = 0;
  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &blocks_per_sm, compress_to_bits_kernel<T>, num_threads, dyn_smem_size));
  dim3 grid(num_SMs * blocks_per_sm);
  dim3 block(num_threads);  // keep in sync with the occupancy query above

  compress_to_bits_kernel<<<grid, block, 0, stream>>>(in, out);
  RAFT_CUDA_TRY(cudaGetLastError());
}
}; // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/masked_distance_base.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/pairwise_distance_base.cuh>
#include <raft/linalg/contractions.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cstddef>
namespace cuvs {
namespace distance {
namespace detail {
/**
* @brief Device class for masked nearest neighbor computations.
*
* @tparam useNorms whether norms are needed
* @tparam DataT input data-type (for x and y matrices)
* @tparam AccT accumulation data-type
* @tparam IdxT index data-type
* @tparam Policy struct which tunes the Contraction kernel
* @tparam CoreLambda tells how to accumulate an x and y into
acc. its signature:
template <typename AccT, typename DataT> void core_lambda(AccT& acc,
const DataT& x, const DataT& y)
* @tparam EpilogueLambda applies an elementwise function to compute final
values. Its signature is:
template <typename AccT, typename DataT> void epilogue_lambda
(AccT acc[][], DataT* regxn, DataT* regyn);
* @tparam FinalLambda the final lambda called on final distance value
* @tparam rowEpilogueLambda epilog lambda that executes when a full row has
* been processed.
*
* @param[in] x input matrix
* @param[in] y input matrix
* @param[in] m number of rows of x
* @param[in] n number of columns of y
* @param[in] k number of cols of x and y
* @param[in] lda leading dimension of x
* @param[in] ldb leading dimension of y
 * @param[in] ldd leading-dimension argument forwarded to the Contractions_NT base class
* @param[in] xn row norms of input matrix A. Required for expanded L2, cosine
* @param[in] yn row norms of input matrix B. Required for expanded L2, cosine
* @param[in] adj An adjacency matrix encoded as a bitfield indicating for each
* row of `x` and each group in `y` whether to compute the
* distance. Dim = `(m / 64) x num_groups`.
* @param[in] group_idxs An array containing the *end* indices of each group
* in `y`. The value of group_idxs[j] indicates the
* start of group j + 1, i.e., it is the inclusive
* scan of the group lengths. The first group is
* always assumed to start at index 0 and the last
* group typically ends at index `n`. Length =
* `num_groups`.
* @param[in] num_groups The number of groups in group_idxs.
* @param[in] smem shared mem buffer for intermediate storage of x, y, xn & yn.
* @param core_op the core accumulation operation lambda
* @param epilog_op the epilog operation lambda
* @param fin_op the final gemm epilogue lambda
* @param rowEpilog_op epilog lambda that executes when a full row has been processed.
*/
template <bool useNorms,
          typename DataT,
          typename AccT,
          typename IdxT,
          typename Policy,
          typename CoreLambda,
          typename EpilogueLambda,
          typename FinalLambda,
          typename rowEpilogueLambda,
          bool isRowMajor    = true,
          typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy, isRowMajor>>
struct MaskedDistances : public BaseClass {
 private:
  typedef Policy P;
  // Row norms of x and y; only read when useNorms is true.
  const DataT* xn;
  const DataT* yn;
  // Base pointer of y as passed to the constructor.
  // NOTE(review): not referenced anywhere in this struct — presumably kept for
  // debugging or future use; confirm before removing.
  const DataT* const yBase;
  // Adjacency bitfield: one 64-bit word per (row tile, group) pair; see
  // get_block_adjacency() for the indexing scheme.
  const uint64_t* adj;
  // Inclusive scan of group lengths in y (end index of each group), length num_groups.
  const IdxT* group_idxs;
  IdxT num_groups;
  // Dynamic shared memory; the region past P::SmemSize is used by load_norms().
  char* smem;
  CoreLambda core_op;
  EpilogueLambda epilog_op;
  // NOTE(review): fin_op is stored but not invoked in this struct; presumably
  // consumed by epilog_op or kernel-side code — confirm.
  FinalLambda fin_op;
  rowEpilogueLambda rowEpilog_op;

  // Per-thread register tile of accumulators.
  AccT acc[P::AccRowsPerTh][P::AccColsPerTh];

 public:
  // Constructor
  DI MaskedDistances(const DataT* _x,
                     const DataT* _y,
                     IdxT _m,
                     IdxT _n,
                     IdxT _k,
                     IdxT _lda,
                     IdxT _ldb,
                     IdxT _ldd,
                     const DataT* _xn,
                     const DataT* _yn,
                     const uint64_t* _adj,
                     const IdxT* _group_idxs,
                     IdxT _num_groups,
                     char* _smem,
                     CoreLambda _core_op,
                     EpilogueLambda _epilog_op,
                     FinalLambda _fin_op,
                     rowEpilogueLambda _rowEpilog_op)
    : BaseClass(_x, _y, _m, _n, _k, _lda, _ldb, _ldd, _smem),
      xn(_xn),
      yn(_yn),
      yBase(_y),
      adj(_adj),
      group_idxs(_group_idxs),
      num_groups(_num_groups),
      smem(_smem),
      core_op(_core_op),
      epilog_op(_epilog_op),
      fin_op(_fin_op),
      rowEpilog_op(_rowEpilog_op)
  {
  }

  // Main entry point: grid-strides row tiles of x over gridDim.y and groups of
  // y over gridDim.x, skipping any (tile, group) pair that the adjacency
  // bitfield masks out.
  DI void run()
  {
    const auto grid_stride_m = (P::Mblk * gridDim.y);
    const auto grid_offset_m = (P::Mblk * blockIdx.y);
    const auto grid_stride_g = gridDim.x;
    const auto grid_offset_g = blockIdx.x;
    for (auto tile_idx_m = grid_offset_m; tile_idx_m < this->m; tile_idx_m += grid_stride_m) {
      // Start loop over groups
      for (auto idx_g = grid_offset_g; idx_g < this->num_groups; idx_g += grid_stride_g) {
        const uint64_t block_adj = get_block_adjacency(adj, tile_idx_m, idx_g);
        // block_adj is a bitfield that contains a 1 if a row is adjacent to the
        // current group. All zero means we can skip this group.
        if (block_adj == 0) { continue; }

        // thread_adj is a bitfield that contains a 1 at location i iff we must
        // compute row i of acc (the accumulator register tile). That is,
        // for i = 0,.., AccRowsPerTh and j = 0,.., AccColsPerTh:
        //
        //   ((1 << i) & thread_adj) > 0 <=> acc[i][j] must be computed.
        //
        // We precompute this information because it is used in various
        // locations to skip thread-local computations, specifically:
        //
        // 1. To skip computations if thread_adj == 0, i.e., none of the values
        //    of `acc` have to be computed.
        //
        // 2. In epilog_op, to consider only values of `acc` to be reduced that
        //    are not masked off.
        //
        // Note 1: Even when the computation can be skipped for a specific thread,
        // the thread still participates in synchronization operations.
        //
        // Note 2: In theory, it should be possible to skip computations for
        // specific rows of `acc`. In practice, however, this does not improve
        // performance.
        int thread_adj = compute_thread_adjacency(block_adj);

        // Group idx_g occupies columns [group_idxs[idx_g - 1], group_idxs[idx_g])
        // of y; the first group starts at 0.
        auto tile_idx_n        = idx_g == 0 ? 0 : group_idxs[idx_g - 1];
        const auto group_end_n = group_idxs[idx_g];
        for (; tile_idx_n < group_end_n; tile_idx_n += P::Nblk) {
          // We provide group_end_n to limit the number of unnecessary data
          // points that are loaded from y.
          this->ldgXY(tile_idx_m, tile_idx_n, 0, group_end_n);

          reset_accumulator();
          this->stsXY();
          __syncthreads();
          this->switch_write_buffer();

          // Double-buffered main loop: load k-block kidx from global memory
          // while the previous k-block is consumed from shared memory.
          for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
            this->ldgXY(tile_idx_m, tile_idx_n, kidx, group_end_n);
            // Process all data in shared memory (previous k-block) and
            // accumulate in registers.
            if (thread_adj != 0) { accumulate(); }
            this->stsXY();
            __syncthreads();
            this->switch_write_buffer();
            this->switch_read_buffer();
          }
          if (thread_adj != 0) {
            accumulate();  // last iteration
          }
          // The pre-condition for the loop over tile_idx_n is that write_buffer
          // and read_buffer point to the same buffer. This flips read_buffer
          // back so that it satisfies the pre-condition of this loop.
          this->switch_read_buffer();

          if (useNorms) {
            DataT regxn[P::AccRowsPerTh], regyn[P::AccColsPerTh];
            load_norms(tile_idx_m, tile_idx_n, group_end_n, regxn, regyn);
            if (thread_adj != 0) {
              epilog_op(acc, thread_adj, regxn, regyn, tile_idx_n, tile_idx_m, group_end_n);
            }
          } else {
            if (thread_adj != 0) {
              epilog_op(acc, thread_adj, nullptr, nullptr, tile_idx_n, tile_idx_m, group_end_n);
            }
          }
        }  // tile_idx_n
      }    // idx_g
      rowEpilog_op(tile_idx_m);
    }  // tile_idx_m
  }

 private:
  // Returns the 64-bit adjacency word for (row tile tile_idx_m, group idx_group).
  DI uint64_t get_block_adjacency(const uint64_t* adj, IdxT tile_idx_m, IdxT idx_group)
  {
    // A single element of `adj` contains exactly enough bits to indicate which
    // rows in the current tile to skip and which to compute.
    static_assert(P::Mblk == 8 * sizeof(adj[0]),
                  "masked_l2_nn only supports a policy with 64 rows per block.");
    IdxT block_flag_idx = tile_idx_m / P::Mblk;
    // Index into adj at row tile_idx_m / 64 and column idx_group.
    return adj[block_flag_idx * this->num_groups + idx_group];
  }

  // Extracts from block_adj the per-thread mask; only bits
  // 0..AccRowsPerTh-1 of the return value are ever set.
  DI uint32_t compute_thread_adjacency(const uint64_t block_adj)
  {
    // thread_adj is a bitfield that contains a 1 at location i iff we must
    // compute row i of acc (the accumulator register tile). It is described in
    // more detail in the run() method.
    uint32_t thread_adj = 0;
#pragma unroll
    for (int thread_row_idx = 0; thread_row_idx < P::AccRowsPerTh; ++thread_row_idx) {
      // Index `thread_row_idx` refers to a row of the current threads' register
      // tile `acc`, i.e., acc[i][:]. Index `block_row_idx` refers to the
      // corresponding row of the current block tile in shared memory.
      const int block_row_idx = this->accrowid + thread_row_idx * P::AccThRows;

      // block_row_is_adjacent is true if the current block_row_idx is adjacent
      // to the current group.
      const uint64_t block_mask        = 1ull << block_row_idx;
      const bool block_row_is_adjacent = (block_adj & block_mask) != 0;
      if (block_row_is_adjacent) {
        // If block row is adjacent, write a 1 bit to thread_adj at location
        // `thread_row_idx`.
        const uint32_t thread_mask = 1 << thread_row_idx;
        thread_adj |= thread_mask;
      }
    }
    return thread_adj;
  }

  DI void reset_accumulator()
  {
    // Reset accumulator registers to zero.
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = BaseClass::Zero;
      }
    }
  }

  // Consumes the current shared-memory k-block: feeds every (x, y) element
  // pair through core_op into the register accumulators.
  DI void accumulate()
  {
#pragma unroll
    for (int ki = 0; ki < P::Kblk; ki += P::Veclen) {
      this->ldsXY(ki);
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
#pragma unroll
          for (int v = 0; v < P::Veclen; ++v) {
            core_op(acc[i][j], this->regx[i][v], this->regy[j][v]);
          }
        }
      }
    }
  }

  // Stages the x/y norms needed by this tile into the shared-memory region
  // past P::SmemSize, then distributes them into per-thread registers.
  DI void load_norms(IdxT tile_idx_m,
                     IdxT tile_idx_n,
                     IdxT end_n,
                     DataT (&regxn)[P::AccRowsPerTh],
                     DataT (&regyn)[P::AccColsPerTh])
  {
    DataT* sxNorm = (DataT*)(&smem[P::SmemSize]);
    DataT* syNorm = (&sxNorm[P::Mblk]);

    // Load x & y norms required by this threadblock in shmem buffer
    for (int i = threadIdx.x; i < P::Mblk; i += P::Nthreads) {
      auto idx  = tile_idx_m + i;
      sxNorm[i] = idx < this->m ? xn[idx] : 0;
    }

    for (int i = threadIdx.x; i < P::Nblk; i += P::Nthreads) {
      auto idx  = tile_idx_n + i;
      syNorm[i] = idx < end_n ? yn[idx] : 0;
    }
    __syncthreads();

#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      regxn[i] = sxNorm[i * P::AccThRows + (threadIdx.x / P::AccThCols)];
    }
#pragma unroll
    for (int i = 0; i < P::AccColsPerTh; ++i) {
      regyn[i] = syNorm[i * P::AccThCols + (threadIdx.x % P::AccThCols)];
    }
  }
};  // struct MaskedDistances
}; // namespace detail
}; // namespace distance
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/canberra.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::abs
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief The canberra distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sum_k |x_ik - y_kj| / ( |x_ik| + |y_kj| )
*/
template <typename DataType, typename AccType, typename IdxType>
struct canberra_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Canberra needs no precomputed row norms.
  static constexpr bool use_norms = false;
  // The per-element computation is instruction-heavy, so ask the kernel to
  // dial back loop unrolling and keep compile times in check.
  static constexpr bool expensive_inner_loop = true;

  // Shared memory requirement comes straight from the kernel policy.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const auto abs_diff = raft::abs(x - y);
    const auto denom    = raft::abs(x) + raft::abs(y);
    // Branch-free guard against a zero denominator: when denom == 0 the term
    // is forced to 0/1 instead of 0/0.
    acc += ((denom != 0) * abs_diff / (denom + (denom == 0)));
  };

  // No epilogue work: the accumulators already hold the final distances.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/l2_exp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/math.hpp>
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* Reserve 1 digit of precision from each floating-point type
* for round-off error tolerance.
* @tparam DataT
*/
template <typename DataT>
__device__ constexpr DataT get_clamp_precision()
{
  // One decimal digit of precision is reserved per floating-point width as a
  // round-off tolerance; unknown widths get no tolerance at all.
  if (sizeof(DataT) == 2) { return 1e-3; }   // half
  if (sizeof(DataT) == 4) { return 1e-6; }   // float
  if (sizeof(DataT) == 8) { return 1e-15; }  // double
  return 0;
}
// Epilogue operator for CUTLASS based kernel
template <typename DataT, typename AccT>
struct l2_exp_cutlass_op {
bool sqrt;
__device__ l2_exp_cutlass_op() noexcept : sqrt(false) {}
__device__ l2_exp_cutlass_op(bool isSqrt) noexcept : sqrt(isSqrt) {}
inline __device__ AccT operator()(DataT aNorm, DataT bNorm, DataT accVal) const noexcept
{
AccT outVal = aNorm + bNorm - DataT(2.0) * accVal;
/**
* Self-neighboring points should have (aNorm == bNorm) == accVal and the dot product (accVal)
* can sometimes have round-off errors, which will cause (aNorm == bNorm) ~ accVal instead.
*/
outVal = outVal * !((outVal * outVal < get_clamp_precision<DataT>()) * (aNorm == bNorm));
return sqrt ? raft::sqrt(outVal * (outVal > 0)) : outVal;
}
__device__ AccT operator()(DataT aData) const noexcept { return aData; }
};
/**
* @brief the expanded euclidean distance matrix calculation
*
* It computes the following equation:
*
* c_ij = - 2 sum_k x_ik * y_kj + ||x_i.||_2 + ||y_.j||_2
*
*/
template <typename DataType, typename AccType, typename IdxType>
struct l2_exp_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // When true, the epilogue takes the square root of each distance.
  const bool sqrt;

  l2_exp_distance_op(bool sqrt_) noexcept : sqrt(sqrt_) {}

  // Load norms of input data
  static constexpr bool use_norms = true;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more. The extra
  // (Mblk + Nblk) * sizeof(DataT) region holds the x and y row norms.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + ((Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }

  // Accumulate the dot product only; the norm terms are folded in during the
  // epilogue below.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        DataT accVal = acc[i][j];
        // ||x - y||^2 = ||x||^2 + ||y||^2 - 2 <x, y>
        DataT val = regxn[i] + regyn[j] - (DataT)2.0 * accVal;

        /**
         * Self-neighboring points should have (aNorm == bNorm) == accVal and the dot product
         * (accVal) can sometimes have round-off errors, which will cause (aNorm == bNorm) ~ accVal
         * instead. Clamp negative and near-zero self-pair residues to exactly zero.
         */
        acc[i][j] =
          val * (val > 0) * !((val * val < get_clamp_precision<DataT>()) * (regxn[i] == regyn[j]));
      }
    }
    if (sqrt) {
#pragma unroll
      for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < Policy::AccColsPerTh; ++j) {
          acc[i][j] = raft::sqrt(acc[i][j]);
        }
      }
    }
  }

  // CUTLASS epilogue performing the same norm combination and clamping.
  constexpr l2_exp_cutlass_op<DataT, AccT> get_cutlass_op() const
  {
    return l2_exp_cutlass_op<DataT, AccT>(sqrt);
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/l1.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <raft/core/operators.hpp>            // raft::abs
#include <raft/util/cuda_dev_essentials.cuh>  // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the L1 distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sum_k abs(x_ik - y_kj)
*/
template <typename DataType, typename AccType, typename IdxType>
struct l1_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // L1 never needs precomputed row norms.
  static constexpr bool use_norms = false;
  // The inner loop is a single abs/add, so full unrolling stays cheap.
  static constexpr bool expensive_inner_loop = false;

  // Shared memory requirement comes straight from the kernel policy.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate the absolute component-wise difference.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    acc += raft::abs(x - y);
  };

  // No epilogue work: the accumulators already hold the final distances.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/all_ops.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// Defines a named requirement "has_cutlass_op"
#include <cuvs/distance/detail/distance_ops/cutlass.cuh>
// The distance operations:
#include <cuvs/distance/detail/distance_ops/canberra.cuh>
#include <cuvs/distance/detail/distance_ops/correlation.cuh>
#include <cuvs/distance/detail/distance_ops/cosine.cuh>
#include <cuvs/distance/detail/distance_ops/hamming.cuh>
#include <cuvs/distance/detail/distance_ops/hellinger.cuh>
#include <cuvs/distance/detail/distance_ops/jensen_shannon.cuh>
#include <cuvs/distance/detail/distance_ops/kl_divergence.cuh>
#include <cuvs/distance/detail/distance_ops/l1.cuh>
#include <cuvs/distance/detail/distance_ops/l2_exp.cuh>
#include <cuvs/distance/detail/distance_ops/l2_unexp.cuh>
#include <cuvs/distance/detail/distance_ops/l_inf.cuh>
#include <cuvs/distance/detail/distance_ops/lp_unexp.cuh>
#include <cuvs/distance/detail/distance_ops/russel_rao.cuh>
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/template.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
// Describes the computation the template distance
//
// Fill in the TODO items.
// NOTE: this is an intentionally non-compiling scaffold; replace every TODO
// before adding a new distance op.
template <typename DataType, typename AccType, typename IdxType>
struct template_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // TODO: state held by the op (remove if the distance needs no parameters).
  TODO member;

  template_distance_op(TODO member_) noexcept : member(member_) {}

  // Load norms of input data
  static constexpr bool use_norms = TODO;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + TODO;
  }

  // TODO: per-element accumulation of x and y into acc.
  DI void core(AccT& acc, DataT& x, DataT& y) const { TODO; };

  // TODO: final per-tile transformation of the accumulators.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    TODO;
  }

  // If exist, returns a cutlass op that performs the same operation.
  // See cosine and l2_exp distance ops for an example.
  constexpr l2_exp_cutlass_op<DataT, AccT> get_cutlass_op() const { TODO; }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/hamming.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the Hamming Unexpanded distance matrix calculation
* It computes the following equation:
*
* c_ij = sum_k (x_ik != y_kj) / k
*/
template <typename DataType, typename AccType, typename IdxType>
struct hamming_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Vector length, used to normalize the mismatch counts in the epilogue.
  IdxT k;

  hamming_distance_op(IdxT k_) noexcept : k(k_) {}

  // No row norms are involved in the Hamming distance.
  static constexpr bool use_norms = false;
  // The inner loop is a single compare/add, so full unrolling stays cheap.
  static constexpr bool expensive_inner_loop = false;

  // Shared memory requirement comes straight from the kernel policy.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Count mismatching components.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += (x != y); };

  // Normalize every mismatch count by the vector length k.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    const DataT inv_k = DataT(1.0) / k;
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] *= inv_k;
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/kl_divergence.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::log
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the KL Divergence distance matrix calculation
*
* It computes the following equation:
*
* c_ij = 0.5 * sum(x * log (x / y));
*/
template <typename DataType, typename AccType, typename IdxType>
struct kl_divergence_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Layout of the inputs; selects which operand plays the role of "x" in the
  // accumulation below.
  const bool is_row_major;
  // NOTE(review): presumably true when x and y refer to the same matrix
  // (self-distance case) — confirm against callers.
  const bool x_equal_y;

  kl_divergence_op(bool row_major_, bool x_equal_y_ = false) noexcept
    : is_row_major(row_major_), x_equal_y(x_equal_y_)
  {
  }

  // Load norms of input data
  static constexpr bool use_norms = false;
  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = true;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    // TODO: make sure that these branches get hoisted out of main loop.. Could
    // be quite expensive otherwise.
    // The `+ x_zero` / `+ y_zero` terms nudge zero inputs to 1 so the log is
    // finite; the `(!*_zero)` factor then drops the corresponding term.
    if (x_equal_y) {
      // Both operands are raw probabilities: apply log to both in-kernel.
      if (is_row_major) {
        const bool x_zero = (x == 0);
        const bool y_zero = (y == 0);
        acc += x * (raft::log(x + x_zero) - (!y_zero) * raft::log(y + y_zero));
      } else {
        const bool y_zero = (y == 0);
        const bool x_zero = (x == 0);
        acc += y * (raft::log(y + y_zero) - (!x_zero) * raft::log(x + x_zero));
      }
    } else {
      // NOTE(review): in this branch the second operand is used without
      // raft::log — it appears to be pre-log-transformed by the caller;
      // confirm before relying on this.
      if (is_row_major) {
        const bool x_zero = (x == 0);
        acc += x * (raft::log(x + x_zero) - y);
      } else {
        const bool y_zero = (y == 0);
        acc += y * (raft::log(y + y_zero) - x);
      }
    }
  };

  // Apply the 0.5 factor from the distance definition to every accumulator.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        acc[i][j] = (0.5f * acc[i][j]);
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/lp_unexp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::pow, raft::abs
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the unexpanded Lp (Minkowski) distance matrix calculation
*
* It computes the following equation:
*
* c_ij = (sum_k |x_ik - y_jk|^p)^(1/p)
*/
template <typename DataType, typename AccType, typename IdxType>
struct lp_unexp_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Minkowski exponent.
  DataT p;

  lp_unexp_distance_op(DataT p_) noexcept : p(p_) {}

  // Row norms are not used by the unexpanded Minkowski distance.
  static constexpr bool use_norms = false;
  // pow() in the inner loop is costly, so request reduced unrolling to keep
  // compile times in check.
  static constexpr bool expensive_inner_loop = true;

  // Shared memory requirement comes straight from the kernel policy.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate |x - y|^p.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    acc += raft::pow(raft::abs(x - y), p);
  };

  // Apply the final 1/p root to every accumulator.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    const auto inv_p = 1.0f / p;
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] = raft::pow(acc[row][col], inv_p);
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/cutlass.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <type_traits> // std::false_type
#include <utility> // std::declval
namespace cuvs::distance::detail::ops {
// This file defines the named requirement "has_cutlass_op" that can be used to
// determine if a distance operation has a CUTLASS op that can be used to pass
// to CUTLASS. Examples of distance operations that satisfy this requirement are
// cosine_distance_op and l2_exp_distance_op.
// Primary template handles types that do not support CUTLASS.
// This pattern is described in:
// https://en.cppreference.com/w/cpp/types/void_t
// Primary template: by default a distance op is assumed to have no CUTLASS
// counterpart.
template <typename, typename = void>
struct has_cutlass_op : std::false_type {};

// Specialization selected via the void_t detection idiom when OpT provides a
// callable get_cutlass_op() member (e.g. cosine, l2_exp).
template <typename OpT>
struct has_cutlass_op<OpT, std::void_t<decltype(std::declval<OpT>().get_cutlass_op())>>
  : std::true_type {};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/correlation.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/** @brief The correlation distance
*
* It computes the following equation:
*
* d(x, y) = ((x - mean(x)) ⋅ (y - mean(y)))
* /
* (|| x - mean(x) ||_2 || y - mean(y) ||_2)
*/
template <typename DataType, typename AccType, typename IdxType>
struct correlation_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Per-row statistics of x and y, one value per row of the respective matrix.
  // NOTE(review): from the epilog formula these look like per-row sums of
  // squared elements (sum x^2) — confirm against the launching kernel.
  const DataT* x2n;
  const DataT* y2n;
  IdxT m;  // rows of x (bounds-check for sx2Norm loads)
  IdxT n;  // rows of y (bounds-check for sy2Norm loads)
  IdxT k;  // feature dimension, used to scale the epilog formula

  correlation_distance_op(
    bool is_row_major, const DataT* x2n_, const DataT* y2n_, IdxT m_, IdxT n_, IdxT k_) noexcept
    : x2n(x2n_), y2n(y2n_), m(m_), n(n_), k(k_)
  {
    // The distance op is typically created before the row-major/col-major
    // swapping has been done. So we do it here.
    if (!is_row_major) {
      std::swap<const DataT*>(x2n, y2n);
      std::swap(m, n);
    }
  }

  // Load norms of input data (delivered to epilog() as regxn/regyn).
  static constexpr bool use_norms = true;

  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  static constexpr bool expensive_inner_loop = false;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // this op uses more: one (Mblk + Nblk) chunk for the regular norms the
  // kernel stages, plus a second (Mblk + Nblk) chunk for the squared norms
  // staged by epilog() — hence the factor 2 (see the offset used in epilog()).
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + (2 * (Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }

  // Accumulate the raw dot product; all correction terms are applied in epilog().
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    // Note how we can sneakily get a pointer to shared memory here, to store
    // more data. If the implementation of PairwiseDistanceMatKernel ever
    // changes, this will be where we find the bugs.
    extern __shared__ char smem[];

    DataT regx2n[Policy::AccRowsPerTh], regy2n[Policy::AccColsPerTh];

    // Squared-norm staging area: right after the policy's smem plus the
    // (Mblk + Nblk) regular-norm buffer (matches shared_mem_size() above).
    DataT* sx2Norm =
      (DataT*)(&smem[Policy::SmemSize + (Policy::Mblk + Policy::Nblk) * sizeof(DataT)]);
    DataT* sy2Norm = (&sx2Norm[Policy::Mblk]);

    // Load x & y norms required by this threadblock in shmem buffer.
    // The x side only needs (re)loading on the first grid stride in the X
    // direction; it stays valid while the block sweeps across columns.
    if (gridStrideX == blockIdx.x * Policy::Nblk) {
      for (int i = threadIdx.x; i < Policy::Mblk; i += Policy::Nthreads) {
        auto idx   = gridStrideY + i;
        sx2Norm[i] = idx < m ? x2n[idx] : 0;  // zero-pad past the matrix edge
      }
    }

    for (int i = threadIdx.x; i < Policy::Nblk; i += Policy::Nthreads) {
      auto idx   = gridStrideX + i;
      sy2Norm[i] = idx < n ? y2n[idx] : 0;  // zero-pad past the matrix edge
    }
    __syncthreads();  // staged norms must be visible to the whole block

#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
      regx2n[i] = sx2Norm[i * Policy::AccThRows + (threadIdx.x / Policy::AccThCols)];
    }
#pragma unroll
    for (int i = 0; i < Policy::AccColsPerTh; ++i) {
      regy2n[i] = sy2Norm[i * Policy::AccThCols + (threadIdx.x % Policy::AccThCols)];
    }

#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Pearson correlation assembled from raw sums (assuming regxn/regyn
        // hold per-row element sums — confirm in the launching kernel):
        //   numer   = k*Sum(xy) - Sum(x)*Sum(y)   (scaled covariance)
        //   Q_denom = k*Sum(x^2) - Sum(x)^2       (scaled variance of x)
        //   R_denom = k*Sum(y^2) - Sum(y)^2       (scaled variance of y)
        //   dist    = 1 - numer / sqrt(Q_denom * R_denom)
        auto numer   = k * acc[i][j] - (regxn[i] * regyn[j]);
        auto Q_denom = k * regx2n[i] - (regxn[i] * regxn[i]);
        auto R_denom = k * regy2n[j] - (regyn[j] * regyn[j]);

        acc[i][j] = 1 - (numer / raft::sqrt(Q_denom * R_denom));
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/russel_rao.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the Russell Rao distance matrix calculation
*
* It computes the following equation:
*
* c_ij = (k - (sum_k x_ik * y_kj)) / k
*/
template <typename DataType, typename AccType, typename IdxType>
struct russel_rao_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  IdxT k;                  // feature dimension
  const float one_over_k;  // precomputed reciprocal, avoids a per-element divide

  russel_rao_distance_op(IdxT k_) noexcept : k(k_), one_over_k(1.0f / k_) {}

  // This op works directly on the dot product; no input norms required.
  static constexpr bool use_norms = false;

  // The inner loop is a single FMA, so aggressive unrolling is affordable.
  static constexpr bool expensive_inner_loop = false;

  // No extra shared memory beyond what the kernel policy already reserves.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate the dot product of the two inputs.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  // Turn each accumulated dot product into the Russell-Rao distance:
  //   c_ij = (k - sum_k x_ik * y_kj) / k
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int row = 0; row < Policy::AccRowsPerTh; ++row) {
#pragma unroll
      for (int col = 0; col < Policy::AccColsPerTh; ++col) {
        acc[row][col] = (k - acc[row][col]) * one_over_k;
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/hellinger.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the Hellinger distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sqrt(1 - sum_k sqrt(x_ik * y_kj))
*
*/
template <typename DataType, typename AccType, typename IdxType>
struct hellinger_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Hellinger works directly on the (pre-square-rooted) inputs; no norms needed.
  static constexpr bool use_norms = false;

  // The inner loop is a single FMA, so aggressive unrolling is affordable.
  static constexpr bool expensive_inner_loop = false;

  // No extra shared memory beyond what the kernel policy already reserves.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate x * y. The inputs are expected to already hold sqrt(x), sqrt(y),
  // so this product is sqrt(x) * sqrt(y).
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  // Finalize: c_ij = sqrt(1 - sum_k sqrt(x_ik * y_kj)).
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Rounding can push the sum slightly above 1, making the sqrt argument
        // negative. Multiplying by the sign test zeroes such inputs so the
        // sqrt never produces NaN.
        const auto gap       = (1 - acc[i][j]);
        const auto is_nonneg = (!signbit(gap));
        acc[i][j]            = raft::sqrt(is_nonneg * gap);
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/jensen_shannon.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp> // raft::log
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
// Describes the computation the jensen_shannon distance
/**
* @brief the Jensen Shannon distance matrix calculation
*
* It computes the following equation:
*
* c_ij = sqrt(0.5 * sum( -x_i * (log(0.5 * (x_i + y_i)) - log(x_i))
* + (-y_i * (log(0.5 * (x_i + y_i)) - log(y_i)))))
*/
template <typename DataType, typename AccType, typename IdxType>
struct jensen_shannon_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // JS divergence works directly on the inputs; no norms required.
  static constexpr bool use_norms = false;

  // Whether the core function requires so many instructions that it makes sense
  // to reduce loop unrolling, etc. We do this to keep compile times in check.
  // Here core() issues several logs, so unrolling is reduced.
  static constexpr bool expensive_inner_loop = true;

  // Size of shared memory. This is normally decided by the kernel policy, but
  // some ops such as correlation_distance_op use more.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate one term of the (unscaled) JS divergence:
  //   -x * (log(m) - log(x)) - y * (log(m) - log(y)),  m = (x + y) / 2.
  // Zero inputs are handled branchlessly: adding the `== 0` bool shifts the
  // log argument to 1 (log(1) == 0), and multiplying logM by (!m_zero)
  // zeroes that term entirely when both inputs are zero — avoiding log(0)
  // without divergent branches.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const DataT m     = 0.5f * (x + y);
    const bool m_zero = (m == 0);
    const auto logM   = (!m_zero) * raft::log(m + m_zero);

    const bool x_zero = (x == 0);
    const bool y_zero = (y == 0);
    acc += (-x * (logM - raft::log(x + x_zero))) + (-y * (logM - raft::log(y + y_zero)));
  };

  // Finalize: c_ij = sqrt(0.5 * accumulated divergence).
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        acc[i][j] = raft::sqrt(0.5 * acc[i][j]);
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/cosine.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
// Epilogue operator for CUTLASS based kernel
template <typename DataT, typename AccT>
struct cosine_cutlass_op {
  __device__ cosine_cutlass_op() noexcept {}

  // Finalize one output element: 1 - <a, b> / (||a|| * ||b||).
  __device__ AccT operator()(DataT& aNorm, const DataT& bNorm, DataT& accVal) const noexcept
  {
    const AccT similarity = static_cast<AccT>(accVal / (aNorm * bNorm));
    return static_cast<AccT>(1.0) - similarity;
  }

  // Single-argument overload: identity conversion of the input value.
  __device__ AccT operator()(DataT aData) const noexcept { return aData; }
};
/**
* @brief the expanded cosine distance matrix calculation
*
* It computes the following equation:
*
* d(x, y) = 1 - (x ⋅ y) / ( ||x||_2 ||y||_2)
*/
/**
 * @brief the expanded cosine distance matrix calculation
 *
 * It computes the following equation:
 *
 *   d(x, y) = 1 - (x ⋅ y) / ( ||x||_2 ||y||_2)
 *
 * core() accumulates the dot products; epilog() divides by the product of the
 * row/column L2 norms supplied by the caller via regxn/regyn.
 */
template <typename DataType, typename AccType, typename IdxType>
struct cosine_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // The kernel must load the L2 norms of the input data for the epilogue.
  static constexpr bool use_norms = true;

  // The inner loop is a single FMA; full unrolling keeps it fast without
  // blowing up compile times.
  static constexpr bool expensive_inner_loop = false;

  // Shared memory: kernel policy requirement plus one norm value per tile
  // row and per tile column.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize + ((Policy::Mblk + Policy::Nblk) * sizeof(DataT));
  }

  // Accumulate the dot product.
  DI void core(AccT& acc, DataT& x, DataT& y) const { acc += x * y; };

  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        // Use an AccT-typed one rather than the double literal `1.0`: with
        // AccT = float the double literal would promote every element to
        // fp64 and back, which is costly on GPUs with low fp64 throughput.
        // This also matches cosine_cutlass_op, which uses
        // static_cast<AccT>(1.0).
        acc[i][j] = AccT(1.0) - (acc[i][j] / (regxn[i] * regyn[j]));
      }
    }
  }

  // Expose the CUTLASS epilogue functor; makes this op satisfy has_cutlass_op.
  constexpr cosine_cutlass_op<DataT, AccT> get_cutlass_op() const
  {
    return cosine_cutlass_op<DataT, AccT>();
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/l_inf.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the L_inf (Chebyshev) distance matrix calculation
*
* It computes the following equation:
*
* c_ij = max_k | x_ik - y_kj |
*/
template <typename DataType, typename AccType, typename IdxType>
struct l_inf_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // Chebyshev distance works on raw differences; no input norms required.
  static constexpr bool use_norms = false;

  // The per-element work (abs + max) is cheap; keep full unrolling.
  static constexpr bool expensive_inner_loop = false;

  // No extra shared memory beyond what the kernel policy already reserves.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Track the running maximum absolute difference: c_ij = max_k |x_ik - y_kj|.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    acc = raft::max(acc, raft::abs(x - y));
  };

  // The accumulator already holds the final distance; nothing to finalize.
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/distance_ops/l2_unexp.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace cuvs::distance::detail::ops {
/**
* @brief the unexpanded euclidean distance matrix calculation
*
* It computes the following equation:
*
* c_ij = optional_sqrt ( sum_k (x_ik - y_kj)^2 )
*/
template <typename DataType, typename AccType, typename IdxType>
struct l2_unexp_distance_op {
  using DataT = DataType;
  using AccT  = AccType;
  using IdxT  = IdxType;

  // When true, epilog() converts squared distances to Euclidean distances.
  bool sqrt;

  l2_unexp_distance_op(bool sqrt_) noexcept : sqrt(sqrt_) {}

  // Unexpanded L2 works directly on element differences; no norms required.
  static constexpr bool use_norms = false;

  // The inner loop is a subtract plus FMA; keep full unrolling.
  static constexpr bool expensive_inner_loop = false;

  // No extra shared memory beyond what the kernel policy already reserves.
  template <typename Policy>
  static constexpr size_t shared_mem_size()
  {
    return Policy::SmemSize;
  }

  // Accumulate the squared difference of one element pair.
  DI void core(AccT& acc, DataT& x, DataT& y) const
  {
    const auto delta = x - y;
    acc += delta * delta;
  };

  // Optionally apply the square root: c_ij = optional_sqrt(sum_k (x_ik - y_kj)^2).
  template <typename Policy>
  DI void epilog(AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
                 DataT* regxn,
                 DataT* regyn,
                 IdxT gridStrideX,
                 IdxT gridStrideY) const
  {
    if (!sqrt) { return; }
#pragma unroll
    for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < Policy::AccColsPerTh; ++j) {
        acc[i][j] = raft::sqrt(acc[i][j]);
      }
    }
  }
};
} // namespace cuvs::distance::detail::ops
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/epilogue_elementwise.cuh | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
/*! \file
\brief Functor performing distance operations used by epilogues of pairwise distance
* kernels.
* This is adapted from LinearCombinationBiasElementwise from CUTLASS 2.9.0
* customized for applying elementwise distance formula on accumulated GEMM value
* and applying user-defined operation which can convert distance values to key-value pair.
* .
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/functional.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/epilogue/thread/activation.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace thread {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          typename DistanceOp_,
          typename CGReduceOp_,
          typename ReduceOpT_,
          typename KVPReduceOpT_>
class FusedDistanceNNEpilogueElementwise {
 public:
  using ElementOutput      = ElementC_;
  using ElementC           = ElementC_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute     = ElementCompute_;
  using ElementZ           = ElementZ_;
  using ElementT           = ElementT_;

  static int const kElementsPerAccess = ElementsPerAccess;
  static int const kCount             = kElementsPerAccess;

  using DistanceOp = DistanceOp_;
  using CGReduceOp = CGReduceOp_;

  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute     = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC           = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ           = Array<ElementZ, kElementsPerAccess>;
  using OutValT             = typename CGReduceOp::AccTypeT;
  using FragmentT           = Array<OutValT, kElementsPerAccess>;

  using FragmentOutput = FragmentZ;

  static bool const kIsHeavy = true;  // ElementwiseOp::kIsHeavy;

  /// If true, the 'Z' tensor is stored
  static bool const kStoreZ = false;  // We don't store anything in Z,

  /// If true, the 'T' tensor is stored
  static bool const kStoreT = true;  // this is our final output storage.

  /// Host-constructable parameters structure
  struct Params {
    CGReduceOp_ cg_reduce_op;    // cooperative-groups reduction functor
    DistanceOp_ dist_op_;        // elementwise distance finalization op
    KVPReduceOpT_ pair_redop_;   // key-value-pair reduction functor
    ReduceOpT_ red_op_;          // scalar reduction functor
    // NOTE(review): presumably per-output-row locks used by the reduction —
    // confirm against the kernel that consumes this epilogue.
    int* mutexes_;
    using CGReduceT = CGReduceOp_;

    //
    // Methods
    //
    CUTLASS_HOST_DEVICE
    Params(DistanceOp_ dist_op,
           CGReduceOp cg_reduce_op,
           ReduceOpT_ red_op,
           KVPReduceOpT_ pair_redop,
           int* mutexes)
      : cg_reduce_op(cg_reduce_op),
        dist_op_(dist_op),
        pair_redop_(pair_redop),
        red_op_(red_op),
        mutexes_(mutexes)
    {
    }

    CUTLASS_HOST_DEVICE
    Params() {}
  };

 private:
  //
  // Data members
  //
  DistanceOp_ elementwise_op;
  KVPReduceOpT_ pair_redop;

 public:
  ReduceOpT_ red_op;

  //
  // Methods
  //

  /// Constructor from Params
  CUTLASS_HOST_DEVICE
  FusedDistanceNNEpilogueElementwise(Params const& params)
    : elementwise_op(params.dist_op_), pair_redop(params.pair_redop_), red_op(params.red_op_)
  {
  }

  /// Returns true if source is needed
  CUTLASS_HOST_DEVICE
  bool is_source_needed() const
  {
    // we use for making sure C matrix is used for A mat norm.
    return true;
  }

  /// Functionally required for serial reduction in the epilogue
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true.
  /// Converts the accumulators (AB) and the C fragment to the compute type,
  /// applies the elementwise distance op against the broadcast vector V, and
  /// writes the results into frag_T (the final output fragment, see kStoreT).
  /// (An unused FragmentCompute temporary from the original was removed.)
  CUTLASS_HOST_DEVICE
  void operator()(FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
    FragmentCompute tmp_Accum =
      NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
    FragmentCompute tmp_C =
      NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C);

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kElementsPerAccess; ++i) {
      ElementCompute res_Z = elementwise_op(tmp_C[i], V[i], tmp_Accum[i]);
      frag_T[i]            = res_Z;
    }
  }

  /// Applies the operation when is_source_needed() is false. Intentionally a
  /// no-op: this epilogue always requires the source (see is_source_needed()).
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/custom_epilogue_with_broadcast.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
The epilogue rearranges the result of a matrix product through shared memory to match canonical
tensor layouts in global memory. Epilogues support conversion and reduction operations.
This file contains a customized version of EpilogueWithBroadcast from CUTLASS 2.9.1
(https://github.com/NVIDIA/cutlass/blob/v2.9.1/include/cutlass/epilogue/threadblock/epilogue_with_broadcast.h)
Changes:
- customized the compute_source_needed_() and apply_output_operator_() to suit the needs of per row
reduction
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/utility>
#else
#include <assert.h>
#include <utility>
#endif
#include <cutlass/aligned_buffer.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/fast_math.h>
#include <cutlass/functional.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/layout/vector.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_coord.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/regular_tile_iterator.h>
#include <cutlass/epilogue/threadblock/epilogue_base.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
#include <cutlass/numeric_types.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This base class is meant to define the concept required of the
/// EpilogueWithBroadcast::OutputOp
template <typename ElementC_,
          typename ElementAccumulator_,
          typename ElementCompute_,
          typename ElementZ_,
          typename ElementT_,
          int ElementsPerAccess,
          bool StoreZ = true,
          bool StoreT = true>
struct EpilogueWithBroadcastOpBaseCustom {
  using ElementOutput      = ElementC_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute     = ElementCompute_;
  using ElementZ           = ElementZ_;
  using ElementT           = ElementT_;

  static int const kElementsPerAccess = ElementsPerAccess;

  using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
  using FragmentCompute     = Array<ElementCompute, kElementsPerAccess>;
  using FragmentC           = Array<ElementOutput, kElementsPerAccess>;
  using FragmentZ           = Array<ElementZ, kElementsPerAccess>;
  using FragmentT           = Array<ElementT, kElementsPerAccess>;

  /// If true, the 'Z' tensor is stored
  static bool const kStoreZ = StoreZ;

  /// If true, the 'T' tensor is stored
  static bool const kStoreT = StoreT;

  /// Parameters structure - required
  struct Params {};

  //
  // Methods
  //

  /// Constructor from Params
  EpilogueWithBroadcastOpBaseCustom(Params const& params_) {}

  /// Determine if the source (C) tensor is needed. May return false if the
  /// output op never reads frag_C; this base always reports true.
  bool is_source_needed() const { return true; }

  /// Functionally required for serial reduction in the epilogue; no-op here.
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {}

  /// Applies the operation when is_source_needed() is true.
  /// Concept-only placeholder: a conforming OutputOp computes Z and T from
  /// the accumulators AB, the source fragment frag_C, and the broadcast
  /// vector V.
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentC const& frag_C,
                  FragmentCompute const& V) const
  {
  }

  /// Applies the operation when is_source_needed() is false.
  /// Concept-only placeholder for the source-free variant.
  CUTLASS_HOST_DEVICE
  void operator()(FragmentZ& frag_Z,
                  FragmentT& frag_T,
                  FragmentAccumulator const& AB,
                  FragmentCompute const& V) const
  {
  }
};
////////////////////////////////////////////////////////////////////////////////
/// Epilogue operator with bias vector broadcast over columns.
///
/// Computes the following:
///
///
/// Z, T = OutputOp(AB, C, Broadcast)
///
/// if (ElementwiseOp::kStoreZ) {
/// store(converted_u);
/// }
///
/// if (ElementwiseOp::kStoreT) {
/// store(v);
/// }
///
template <
  typename Shape_,               ///< Shape of threadblock tile (concept: GemmShape)
  typename WarpMmaOperator_,     ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp)
  int PartitionsK,               ///< Number of partitions of the K dimension
  typename OutputTileIterator_,  ///< Tile iterator reading and writing output tensors (z)
  typename TensorTileIterator_,  ///< Additional tile iterator for tensor-valued operands (t)
  typename ElementVector_,       ///< Pointer to broadcast vector
  typename AccumulatorFragmentIterator_,  ///< Fragment iterator selecting accumulators
  typename WarpTileIterator_,    ///< Warp-scoped tile iterator writing accumulators to SMEM
  typename SharedLoadIterator_,  ///< Threadblock-scoped tile iterator loading from SMEM
  typename OutputOp_,            ///< Output operator - concept is EpilogueWithBroadcastOp
  typename Padding_,  ///< Padding added to SMEM allocation to avoid bank conflicts (concept:
                      ///< MatrixShape)
  int FragmentsPerPartition = 1,  ///< Used to coarsen the epilogue granularity
  int IterationsUnroll =          ///< Used to reduce binary size when epilogue op is large
  (!IsEpilogueFunctorHeavy<OutputOp_>::value)>
class EpilogueWithBroadcastCustom : public EpilogueBase<Shape_,
                                                        typename WarpMmaOperator_::Shape,
                                                        PartitionsK,
                                                        AccumulatorFragmentIterator_,
                                                        WarpTileIterator_,
                                                        Padding_,
                                                        FragmentsPerPartition> {
 public:
  using Base = EpilogueBase<Shape_,
                            typename WarpMmaOperator_::Shape,
                            PartitionsK,
                            AccumulatorFragmentIterator_,
                            WarpTileIterator_,
                            Padding_,
                            FragmentsPerPartition>;

  using Shape                   = Shape_;
  using WarpMmaOperator         = WarpMmaOperator_;
  static int const kPartitionsK = PartitionsK;
  using OutputTileIterator      = OutputTileIterator_;
  using TensorTileIterator      = TensorTileIterator_;
  using ElementVector           = ElementVector_;
  using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
  using WarpTileIterator            = WarpTileIterator_;
  using SharedLoadIterator          = SharedLoadIterator_;
  using OutputOp                    = OutputOp_;
  using Padding                     = Padding_;

  using Layout    = layout::RowMajor;
  using LongIndex = typename Layout::LongIndex;

  /// The complete warp-level accumulator tile
  using AccumulatorTile = typename Base::AccumulatorTile;

  /// Accumulator element
  using ElementAccumulator = typename WarpTileIterator::Element;

  /// Compute data type produced by the output op
  using ElementCompute = typename OutputOp::ElementCompute;

  /// Compute fragment
  using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>;

  /// Thread map used by output tile iterators
  using ThreadMap = typename OutputTileIterator::ThreadMap;

  /// Fragment object used to store the broadcast values
  using BroadcastFragment =
    Array<ElementCompute, ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>;

  /// Output element
  using ElementOutput = typename OutputTileIterator::Element;

  /// Data type of additional tensor
  using ElementTensor = typename TensorTileIterator::Element;

  /// Output access size
  static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;

  /// Tensor reference to destination tensor
  using TensorRef = typename OutputTileIterator::TensorRef;

  /// Tensor reference to sync tensor
  using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;

  /// Const tensor reference to source tensor
  using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;

  /// Array type used to output
  using OutputAccessType =
    Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;

  /// Array type used by output functor
  using AccumulatorAccessType =
    Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;

  /// Array type used by output functor
  using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>;

  /// Tensor access type
  using TensorAccessType = Array<ElementTensor, OutputTileIterator::kElementsPerAccess>;

  /// Number of warps
  using WarpCount = typename Base::WarpCount;

  /// Shared memory allocation from epilogue base class
  using BaseSharedStorage = typename Base::SharedStorage;

  static int constexpr kSmemTiles =
    Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
  static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles;

  /// Used for the broadcast
  struct BroadcastDetail {
    /// Number of threads per warp
    static int const kWarpSize = 32;

    static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;

    /// Number of distinct scalar column indices handled by each thread
    static int const kColumnsPerThread =
      ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess;

    /// Number of distinct scalar row indices handled by each thread
    static int const kRowsPerThread =
      ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn;

    /// Number of threads per threadblock
    static int const kThreadCount = kWarpSize * WarpCount::kCount;

    /// Number of distinct threads per row of output tile
    static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread);

    /// Number of distinct threads which must be reduced during the final reduction phase within the
    /// threadblock.
    static int const kThreadRows = kThreadCount / kThreadsPerRow;

    /// Number of accesses each thread must make so the whole threadblock covers
    /// one row of Shape::kN columns (at least one).
    static int const kThreadAccessesPerRow =
      const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount);

    /// Shape of the shared memory allocation for the epilogue
    using StorageShape = MatrixShape<kThreadRows, Shape::kN>;

    /// Debug printing
    CUTLASS_DEVICE
    static void print()
    {
#if 0
      printf("BroadcastDetail {\n");
      printf(
        "  kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n"
        "kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n",
        kColumnsPerThread,
        kRowsPerThread,
        kThreadCount,
        kThreadsPerRow,
        kThreadRows,
        kThreadAccessesPerRow,
        StorageShape::kRow,
        StorageShape::kColumn,
        StorageShape::kCount
      );
      printf("};\n");
#endif
    }
  };

  /// Shared storage structure (shadows base) with additional SMEM buffer for reduction
  struct SharedStorage {
    union {
      BaseSharedStorage base;
    };

    CUTLASS_HOST_DEVICE
    SharedStorage() {}
  };

 public:
  static_assert(SharedLoadIterator::Fragment::kElements == TensorTileIterator::Fragment::kElements,
                "Mismatch between shared load iterator and tensor tile iterator.");

  static_assert(OutputTileIterator::kElementsPerAccess,
                "OutputTileIterator::kElementsPerAccess must not be zero.");

  static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess),
                "Divisibility");

 private:
  /// Loads fragment from shared memory aligned with output tensor
  SharedLoadIterator shared_load_iterator_;

  /// Thread index within the threadblock
  int thread_idx_;

 public:
  /// Constructor
  CUTLASS_DEVICE
  EpilogueWithBroadcastCustom(SharedStorage& shared_storage,  ///< Shared storage object
                              int thread_idx,  ///< ID of a thread within the threadblock
                              int warp_idx,    ///< ID of warp within threadblock
                              int lane_idx     ///< Id of thread within warp
                              )
    : Base(shared_storage.base, thread_idx, warp_idx, lane_idx),
      shared_load_iterator_(shared_storage.base.reference(), thread_idx),
      thread_idx_(thread_idx)
  {
  }

  /// Streams the result to global memory.
  /// This customized epilogue always treats the source as needed: it loads the
  /// broadcast vector, then routes through compute_source_needed_().
  CUTLASS_DEVICE
  void operator()(
    OutputOp const& output_op,            ///< Output operator
    ElementVector const* broadcast_ptr,   ///< Broadcast vector
    AccumulatorTile const& accumulators,  ///< Complete warp-level accumulator tile
    OutputTileIterator source_iterator,   ///< Tile iterator for source accumulator matrix
    TensorTileIterator
      tensor_iterator,  ///< Threadblock tile iterator for additional tensor operand
    MatrixCoord const&
      problem_size =  ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord(Shape::kM, Shape::kN),
    MatrixCoord const&
      threadblock_offset =  ///< Threadblock's initial offset within the problem size space
    MatrixCoord())
  {
    BroadcastFragment broadcast_fragment;

    load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset);

    compute_source_needed_(
      output_op, broadcast_fragment, accumulators, source_iterator, tensor_iterator);
  }

 private:
  /// Loads this thread's slice of the broadcast vector into registers,
  /// guarding against out-of-bounds columns (which are read as zero).
  CUTLASS_DEVICE
  void load_broadcast_fragment_(
    BroadcastFragment&
      broadcast_fragment,  ///< Fragment containing the accumulated partial reduction over columns
    ElementVector const* broadcast_ptr,  ///< Broadcast vector
    MatrixCoord const&
      problem_size,  ///< Problem size needed to guard against out-of-bounds accesses
    MatrixCoord const&
      threadblock_offset  ///< Threadblock's initial offset within the problem size space
  )
  {
    broadcast_fragment.clear();

    // If no pointer is supplied, set with all zeros and avoid memory accesses
    if (!broadcast_ptr) { return; }

    int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column();

    int thread_column_idx = threadblock_offset.column() + thread_initial_column;
    broadcast_ptr += thread_initial_column;

    NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess>
      converter;
    using AccessType          = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>;
    using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>;

    ComputeFragmentType* frag_ptr = reinterpret_cast<ComputeFragmentType*>(&broadcast_fragment);

    CUTLASS_PRAGMA_UNROLL
    for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) {
      AccessType loaded;

      loaded.clear();

      // Predicated vector load; out-of-range columns keep the cleared value.
      if (thread_column_idx < problem_size.column()) {
        loaded = *reinterpret_cast<AccessType const*>(broadcast_ptr);
      }

      ComputeFragmentType cvt = converter(loaded);
      frag_ptr[j]             = cvt;

      thread_column_idx += ThreadMap::Delta::kColumn;
      broadcast_ptr += ThreadMap::Delta::kColumn;
    }
  }

  template <class Seq>
  struct acc2smem_source_not_needed;

  /// Copies accumulator fragments to shared memory for the "source not needed"
  /// path; Seq expands to one helper instantiation per iteration index.
  template <size_t... Seq>
  struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
    template <int Advance>
    CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
                                      WarpTileIterator& warp_tile_iterator)
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < Advance; i++) {
        ++accum_fragment_iterator;
      }

      CUTLASS_PRAGMA_UNROLL
      for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
        typename AccumulatorFragmentIterator::Fragment accum_fragment;

        accum_fragment_iterator.load(accum_fragment);
        ++accum_fragment_iterator;

        warp_tile_iterator.store(accum_fragment);
        if (p < Base::kFragmentsPerIteration - 1) {
          warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
        }
      }

      // Rewind the SMEM pointer to its original position.
      if (Base::kFragmentsPerIteration > 1) {
        warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
                                              (1 - Base::kFragmentsPerIteration));
      }
    }

    CUTLASS_DEVICE
    static void push(size_t pos,
                     AccumulatorFragmentIterator const& iterator_begin,
                     WarpTileIterator& warp_tile_iterator)
    {
      // Fold expansion: exactly one (pos == ...) comparison fires and runs helper<>.
      int dummy[] = {
        (pos == (Seq * Base::kFragmentsPerIteration)) &&
        (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...};

      CUTLASS_UNUSED(dummy[0]);
    }
  };

  /// Intentionally a no-op: this customized epilogue's operator() always
  /// consumes the source via compute_source_needed_().
  CUTLASS_DEVICE
  void compute_source_not_needed_(
    OutputOp const& output_op,  ///< Output operator
    BroadcastFragment const&
      broadcast_fragment,  ///< Fragment containing the accumulated partial reduction over columns
    OutputTileIterator destination_iterator,  ///< Tile iterator for destination
    AccumulatorTile const& accumulators,      ///< Complete warp-level accumulator tile
    TensorTileIterator tensor_iterator  ///< Threadblock tile iterator for additional tensor operand
  )
  {
  }

  template <class Seq>
  struct acc2smem_source_needed;

  /// Copies one accumulator fragment per iteration to shared memory for the
  /// "source needed" path; Seq expands to one helper instantiation per index.
  template <size_t... Seq>
  struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
    template <int Advance>
    CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
                                      WarpTileIterator& warp_tile_iterator)
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < Advance; i++) {
        ++accum_fragment_iterator;
      }

      typename AccumulatorFragmentIterator::Fragment accum_fragment;
      accum_fragment_iterator.load(accum_fragment);
      warp_tile_iterator.store(accum_fragment);
    }

    CUTLASS_DEVICE
    static void push(size_t pos,
                     AccumulatorFragmentIterator const& iterator_begin,
                     WarpTileIterator& warp_tile_iterator)
    {
      // Fold expansion: exactly one (pos == Seq) comparison fires and runs helper<Seq>.
      int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};

      // Silence unused-variable warnings (consistent with
      // acc2smem_source_not_needed::push above).
      CUTLASS_UNUSED(dummy[0]);
    }
  };

  /// Streams the result to global memory: for each output tile iteration,
  /// stages accumulators through SMEM, loads them back aligned with the output
  /// layout, loads the source fragment, applies the output operator and
  /// conditionally stores the T tensor.
  CUTLASS_DEVICE
  void compute_source_needed_(
    OutputOp const& output_op,  ///< Output operator
    BroadcastFragment const&
      broadcast_fragment,  ///< Fragment containing the accumulated partial reduction over columns
    AccumulatorTile const& accumulators,  ///< Complete warp-level accumulator tile
    OutputTileIterator
      source_iterator,  ///< Threadblock tile coordinate in GEMM (in units of threadblock tiles)
    TensorTileIterator tensor_iterator  ///< Threadblock tile iterator for additional tensor operand
  )
  {
    typename OutputTileIterator::Fragment source_fragment;
    source_fragment.clear();

    //
    // Iterator over warp-level accumulator fragment
    //

    AccumulatorFragmentIterator accum_fragment_iterator(accumulators);

    //
    // Iterate over accumulator tile
    //

#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
    for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
      //
      // Convert and store fragment
      //

      //__syncthreads();

      acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push(
        iter, accum_fragment_iterator, this->warp_tile_iterator_);

      // All warps must finish writing SMEM before any thread reads it back.
      __syncthreads();

      //
      // Load fragments from shared memory
      //

      typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];

      shared_load_iterator_.load(aligned_accum_fragment[0]);

      //
      // Apply output operation
      //

      typename TensorTileIterator::Fragment frag_T;

      //
      // Load the source
      //

      source_iterator.load(source_fragment);
      ++source_iterator;

      apply_output_operator_(
        frag_T, output_op, aligned_accum_fragment[0], source_fragment, broadcast_fragment);

      //
      // Conditionally store fragments
      //

      if (OutputOp::kStoreT) {
        tensor_iterator.store(frag_T);
        ++tensor_iterator;
      }
    }
  }

  /// Helper to invoke the output functor over each vector of output.
  /// C is indexed per row of accesses and the broadcast per column, matching
  /// the row-major traversal where the column index varies fastest.
  CUTLASS_DEVICE
  void apply_output_operator_(typename TensorTileIterator::Fragment& frag_T,
                              OutputOp const& output_op,
                              typename SharedLoadIterator::Fragment const& frag_AB,
                              typename OutputTileIterator::Fragment const& frag_C,
                              BroadcastFragment const& frag_Broadcast)
  {
    using AccessTypeT         = Array<typename TensorTileIterator::OutValT, kElementsPerAccess>;
    using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>;

    AccessTypeT* frag_T_ptr = reinterpret_cast<AccessTypeT*>(&frag_T);

    AccumulatorAccessType const* frag_AB_ptr =
      reinterpret_cast<AccumulatorAccessType const*>(&frag_AB);

    OutputAccessType const* frag_C_ptr = reinterpret_cast<OutputAccessType const*>(&frag_C);

    AccessTypeBroadcast const* frag_Broadcast_ptr =
      reinterpret_cast<AccessTypeBroadcast const*>(&frag_Broadcast);

    int const kOutputOpIterations =
      TensorTileIterator::Fragment::kElements / TensorTileIterator::kElementsPerAccess;

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < kOutputOpIterations; ++i) {
      output_op(frag_T_ptr[i],
                frag_AB_ptr[i],
                frag_C_ptr[(i / ThreadMap::Iterations::kColumn)],
                frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]);
    }
  }

  /// Intentionally a no-op: the source-not-needed path is unused by this
  /// customized epilogue (see operator()).
  CUTLASS_DEVICE
  void apply_output_operator_source_not_needed_(
    typename OutputTileIterator::Fragment& frag_Z,
    typename TensorTileIterator::Fragment& frag_T,
    OutputOp const& output_op,
    typename SharedLoadIterator::Fragment const& frag_AB,
    BroadcastFragment const& frag_Broadcast)
  {
  }
};
////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/cutlass_base.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wtautological-compare"
// We define CUTLASS_NAMESPACE in case
// RAFT cmake is not used
#ifndef CUTLASS_NAMESPACE
#define cutlass raft_cutlass
#endif
#include <cutlass/cutlass.h>
#include <cutlass/gemm/device/gemm.h>
#include <cutlass/gemm/device/gemm_grouped.h>
#include <cutlass/gemm/device/gemm_universal_adapter.h>
#include <rmm/device_uvector.hpp>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/tensor_view.h>
#include <cuvs/distance/detail/fused_distance_nn/epilogue_elementwise.cuh> // FusedDistanceNNEpilogueElementwise
#include <cuvs/distance/detail/fused_distance_nn/gemm.h> // FusedDistanceNNGemm
#include <raft/util/cudart_utils.hpp> // getMultiProcessorCount
#include <raft/util/cutlass_utils.cuh> // RAFT_CUTLASS_TRY
namespace cuvs {
namespace distance {
namespace detail {
/**
 * Launches the CUTLASS grouped-GEMM based fused distance + nearest-neighbor
 * reduction kernel.
 *
 * Builds the elementwise epilogue from the user-supplied functors, sizes the
 * launch grid from the device's occupancy, then runs the grouped GEMM on
 * `stream`. Inputs x (m x k) and y (n x k) with precomputed norms xn/yn;
 * the reduced result is written to dOutput, with `mutexes` serializing
 * cross-block global-memory reductions.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          typename CGReduceOpT,
          typename DistanceFn,
          typename ReduceOpT,
          typename KVPReduceOpT>
void cutlassFusedDistanceNN(const DataT* x,
                            const DataT* y,
                            const DataT* xn,
                            const DataT* yn,
                            IdxT m,
                            IdxT n,
                            IdxT k,
                            IdxT lda,
                            IdxT ldb,
                            IdxT ldd,
                            OutT* dOutput,
                            int* mutexes,
                            CGReduceOpT cg_reduce_op,
                            DistanceFn dist_op,
                            ReduceOpT redOp,
                            KVPReduceOpT pairRedOp,
                            cudaStream_t stream)
{
  // Epilogue applying the distance op + reductions elementwise (scalar access).
  using EpilogueOp = cutlass::epilogue::thread::FusedDistanceNNEpilogueElementwise<
    DataT,  // ElementC_
    AccT,   // ElementAccumulator_
    DataT,  // ElementCompute_
    AccT,   // ElementZ_
    OutT,   // ElementT_
    // 128 / cutlass::sizeof_bits<DataT>::value,
    1,      // Elements per access 1
    DistanceFn,
    CGReduceOpT,
    ReduceOpT,
    KVPReduceOpT>;

  // Row-major kernel, 3 pipeline stages, alignment given by VecLen.
  using GemmKernel_ =
    typename cutlass::gemm::kernel::FusedDistanceNNGemm<DataT,
                                                        VecLen,  // Alignment A
                                                        DataT,
                                                        VecLen,  // Alignment B
                                                        AccT,
                                                        AccT,
                                                        EpilogueOp,
                                                        3,     // pipeline stages
                                                        true>  // row major
    ::GemmKernel;
  using DeviceGemm = cutlass::gemm::device::GemmGrouped<GemmKernel_>;

  typename EpilogueOp::Params epilogue_params(dist_op, cg_reduce_op, redOp, pairRedOp, mutexes);

  auto problem = cutlass::gemm::GemmCoord(m, n, k);

  // One "full wave" = max resident threadblocks across all SMs.
  int const full_wave = DeviceGemm::maximum_active_blocks() * raft::getMultiProcessorCount();

  // Tile counts along each dimension of the output.
  constexpr int kTileM = GemmKernel_::Mma::Shape::kM;
  constexpr int kTileN = GemmKernel_::Mma::Shape::kN;
  int const tiles_n    = (problem.n() + kTileN - 1) / kTileN;
  int const tiles_m    = (problem.m() + kTileM - 1) / kTileM;
  int const tiles_all  = tiles_m * tiles_n;

  // Prefer one block per row-tile when that already saturates the device;
  // otherwise cap the grid at one full wave (or the total tile count if smaller).
  int launch_blocks;
  if (tiles_m < full_wave) {
    launch_blocks = (tiles_all < full_wave) ? tiles_all : full_wave;
  } else {
    launch_blocks = tiles_m;
  }

  typename DeviceGemm::Arguments args{
    problem,
    1,  // num of problems (batch count).
    launch_blocks,
    epilogue_params,
    x,
    y,
    xn,           // C matrix eq vector param, which here is A norm
    (DataT*)yn,   // this is broadcast vec, which is required to be non-const param
    dOutput,      // Output distance matrix
    (int64_t)lda, // stride A
    (int64_t)ldb, // stride B
    (int64_t)1,   // stride A norm
    (int64_t)ldd  // stride Output matrix
  };

  // Workspace for the grouped GEMM (sized by the arguments).
  rmm::device_uvector<uint8_t> workspace(DeviceGemm::get_workspace_size(args), stream);

  DeviceGemm gemm_op;
  // Verify the problem configuration is supported, then initialize and launch.
  RAFT_CUTLASS_TRY(gemm_op.can_implement(args));
  RAFT_CUTLASS_TRY(gemm_op.initialize(args, workspace.data(), stream));
  RAFT_CUTLASS_TRY(gemm_op.run(stream));
}
}; // namespace detail
}; // namespace distance
}; // namespace cuvs
#pragma GCC diagnostic pop
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/predicated_tile_iterator_reduced_vec.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- PredicatedTileIteratorParams() is customized to not stride by layout.stride(0).
- makes use of `SharedStorage` to store reduced values across warps to gmem in coalesced manner.
- customized the store_with_byte_offset() to perform reduction per row and write final value to
gmem.
- customized the Params() struct to take user inputs from epilogueOp params.
*/
#pragma once
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
namespace cg = cooperative_groups;
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_, ///< Thread map (conept: OutputTileThreadMap)
typename Element_, ///< Element data type
typename Layout_,
typename EpilogueOpParams_,
bool ScatterD = false, ///< Scatter D operand or not
bool UseCUDAStore = false>
class PredicatedTileIteratorReducedVec {
public:
using ThreadMap = ThreadMap_;
using Shape = typename ThreadMap::Shape;
using Element = Element_;
using Layout = Layout_;
using TensorRef = TensorRef<Element, Layout>;
using ConstTensorRef = typename TensorRef::ConstTensorRef;
using Index = typename Layout::Index;
using LongIndex = typename Layout::LongIndex;
using TensorCoord = MatrixCoord;
using EpilogueOpParams = EpilogueOpParams_;
using OutIdxT = typename EpilogueOpParams::CGReduceT::IndexT;
using OutValT = typename EpilogueOpParams::CGReduceT::AccTypeT;
static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
static int const kThreads = ThreadMap::kThreads;
static int const kIterations = ThreadMap::Count::kTile;
static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
static_assert(!UseCUDAStore, "UseCUDAStore path is not supported");
static int const total_rows = ThreadMap::kWarpCount * ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
ThreadMap::Count::kTile * ThreadMap::Delta::kRow;
/// Fragment object
using Fragment =
Array<OutValT,
ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster * kElementsPerAccess>;
// Memory access size
using AccessType = AlignedArray<Element, kElementsPerAccess>;
using AccessTypeValT = AlignedArray<OutValT, kElementsPerAccess>;
//
// Parameters struct
//
/// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;

    // User-supplied epilogue parameters (reduction functors, gmem mutexes, ...)
    // carried along so the iterator's store path can perform the fused reduction.
    EpilogueOpParams user_param;
    CUTLASS_HOST_DEVICE
    Params() {}

    // Stock construction: stride scaled by layout.stride(0), as in the
    // original CUTLASS PredicatedTileIterator.
    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }

    // Customized construction: deliberately does NOT multiply by
    // layout.stride(0) (see the file-level change list) — the output holds one
    // reduced value per row rather than a full row of elements.
    CUTLASS_HOST_DEVICE
    Params(Layout const& layout, EpilogueOpParams const& user_param_)
      : PredicatedTileIteratorParams(int(sizeof(AccessType)) / kElementsPerAccess,
                                     make_OutputTileThreadMapDesc<ThreadMap>()),
        user_param(user_param_)
    {
    }

    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };
/// Mask object
  struct Mask {
    // static int const kCount = ThreadMap::Iterations::kColumn;
    // One predicate per *scalar* column handled by this thread (hence the
    // extra kElementsPerAccess factor versus the stock iterator).
    static int const kCount = ThreadMap::Iterations::kColumn * kElementsPerAccess;

    /// Predicate state: predicates[c] is true when scalar column c of this
    /// thread's assignment lies inside the problem extent.
    bool predicates[kCount];

    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }

    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }

    ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };
/// Shared storage allocation needed by the predicated tile
// iterator for reduction.
  struct SharedStorage {
    //
    // Type definitions
    //
    using Shape = MatrixShape<total_rows, 1>;

    /// Shape of the shared memory allocation for the reduced values store
    using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>;

    //
    // Data members
    //

    // Methods
    //
    // One slot per output row covered by this threadblock; holds the running
    // per-row reduction shared across all warps of the block.
    AlignedBuffer<Element, StorageShape::kCount> storage;

    CUTLASS_DEVICE
    Element* data() { return storage.data(); }

    SharedStorage() {}

    /// Seeds every per-row slot via the user's red_op_.init with
    /// numeric_limits<OutValT>::max() — presumably the identity of a
    /// min-style reduction (cf. isAmin in select_reduce). Uses a block-stride
    /// loop so all threads of the block share the initialization work.
    CUTLASS_DEVICE
    void initSmem(EpilogueOpParams const& user_params)
    {
      Element* shared_elem_arr = data();
      constexpr auto maxVal    = std::numeric_limits<OutValT>::max();

      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        user_params.red_op_.init(&shared_elem_arr[row], maxVal);
      }
    }
  };
  // Warp-scoped reduction helper. The primary template reduces plain scalar
  // values; the partial specializations below handle raft::KeyValuePair
  // outputs, where only the `.value` member participates in the
  // cooperative-groups reduction and the lane holding the winning value
  // writes the full key/value pair.
  template <typename cg_reduce_op_t,
            typename cg_group_t,
            typename IdxT,
            typename ValT,
            typename OutT>
  struct select_reduce {
    /// Performs warp level reduction and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(OutT value,
                  ValT prev_red_val,
                  cg_reduce_op_t reduce_op,
                  cg_group_t cg_warp_group,
                  OutT& shmem_ptr)
    {
      // Skip the reduction entirely unless some lane can improve on the
      // previously reduced value (isAmin appears to test exactly that —
      // TODO(review): confirm against the reduce-op implementation).
      if (cg_warp_group.any(reduce_op.isAmin(value, prev_red_val))) {
        OutT reduced_val = cg::reduce(cg_warp_group, value, reduce_op);
        // A single lane commits the reduced result.
        if (cg_warp_group.thread_rank() == 0) { shmem_ptr = reduced_val; }
      }
    }
  };

  // KeyValuePair<IdxT, float> specialization.
  template <typename cg_reduce_op_t, typename cg_group_t, typename IdxT>
  struct select_reduce<cg_reduce_op_t, cg_group_t, IdxT, float, raft::KeyValuePair<IdxT, float>> {
    using ValT = float;
    using Ty = raft::KeyValuePair<IdxT, ValT>;

    /// Performs warp level reduction of key value pair and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(Ty val_to_red,
                  float prev_red_val,
                  cg_reduce_op_t cg_reduce_op,
                  cg_group_t cg_warp_group,
                  Ty& shmem_ptr)
    {
      ValT val = val_to_red.value;

      if (cg_warp_group.any(cg_reduce_op.isAmin(val, prev_red_val))) {
        // Reduce on the value only; the key travels with the winning lane.
        ValT reduced_val = cg::reduce(cg_warp_group, val, cg_reduce_op);
        bool pred = (reduced_val == val);
        // Partition lanes by whether they hold the winning value; rank 0 of
        // the winning partition stores the pair (picks one deterministic lane
        // among ties).
        auto subTile = cg::binary_partition(cg_warp_group, pred);
        if (pred) {
          if (subTile.thread_rank() == 0) { shmem_ptr = val_to_red; }
        }
      }
    }
  };

  // KeyValuePair<IdxT, double> specialization (identical logic to the float
  // specialization above).
  template <typename cg_reduce_op_t, typename cg_group_t, typename IdxT>
  struct select_reduce<cg_reduce_op_t, cg_group_t, IdxT, double, raft::KeyValuePair<IdxT, double>> {
    using ValT = double;
    using Ty = raft::KeyValuePair<IdxT, ValT>;

    /// Performs warp level reduction of key value pair and stores a reduced output to memory
    CUTLASS_DEVICE
    select_reduce(Ty val_to_red,
                  double prev_red_val,
                  cg_reduce_op_t cg_reduce_op,
                  cg_group_t cg_warp_group,
                  Ty& shmem_ptr)
    {
      ValT val = val_to_red.value;

      if (cg_warp_group.any(cg_reduce_op.isAmin(val, prev_red_val))) {
        ValT reduced_val = cg::reduce(cg_warp_group, val, cg_reduce_op);
        bool pred = (reduced_val == val);
        auto subTile = cg::binary_partition(cg_warp_group, pred);
        if (pred) {
          if (subTile.thread_rank() == 0) { shmem_ptr = val_to_red; }
        }
      }
    }
  };
private:
//
// Data members
//
/// Parameters structure containing reference and precomputed state.
Params params_;
/// Byte-level pointer
uint8_t* byte_pointer_;
/// Byte-level pointer first tile offset of this threadblock.
uint8_t* first_tile_byte_pointer_;
/// Array of boolean values to contain steady-state predicates
Mask mask_;
/// Extent of the matrix tile in rows
Index extent_row_;
/// Extent of the matrix tile in rows
Index extent_column_;
/// A thread's starting row position (assuming steady-state predicates have been computed)
Index thread_start_row_;
Index block_start_row_first_tile_;
/// A thread's starting column
Index thread_start_column_;
/// Internal state counter
int state_[3];
// mutable int shared_tile_id;
/// Scatter indices
int const* indices_;
//
// Static asserts about internal strides
//
static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
static_assert(sizeof(Params::stride) == 8, "Expected 64b strides");
protected:
SharedStorage& shared_storage_;
const bool& do_gmem_reduction_;
private:
//
// Methods
//
public:
//
// Methods
//
  /// Constructor
  ///
  /// Computes this thread's starting row/column, builds the per-scalar-column
  /// predicate mask, seeds the shared-memory reduction buffer (only in
  /// threadblocks whose tile starts at column 0), and precomputes the global
  /// pointer of the block's first tile used by the destructor's gmem flush.
  CUTLASS_DEVICE
  PredicatedTileIteratorReducedVec(SharedStorage& shared_storage,
                                   Params const& params,
                                   Element* pointer,
                                   TensorCoord extent,
                                   int thread_idx,
                                   const bool& do_gmem_reduction,
                                   TensorCoord threadblock_offset = TensorCoord(),
                                   int const* indices = nullptr)
    : params_(params),
      indices_(indices),
      shared_storage_(shared_storage),
      do_gmem_reduction_(do_gmem_reduction)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;

    extent_row_ = extent.row();
    extent_column_ = extent.column();

    thread_start_row_ = thread_offset.row();
    thread_start_column_ = thread_offset.column();

    // Row of the threadblock's very first tile (thread 0's initial offset);
    // base row for the shared->global reduction in the destructor.
    TensorCoord block_offset = ThreadMap::initial_offset(0) + threadblock_offset;
    block_start_row_first_tile_ = block_offset.row();

    // Initialize predicates: one per scalar column this thread touches
    // (vector access index x element-within-access).
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn * kElementsPerAccess; ++c) {
      int columnPerAccess = (c / kElementsPerAccess);
      int columnWithinPerAccess = c % kElementsPerAccess;
      mask_.predicates[c] = ((thread_offset.column() + ThreadMap::Delta::kColumn * columnPerAccess +
                              columnWithinPerAccess) < extent.column());
    }

    // Only threadblocks covering the leftmost column initialize the shared
    // reduction buffer (one initialization per block row).
    if (threadblock_offset.column() == 0) {
      EpilogueOpParams const& user_params = params_.user_param;
      shared_storage_.initSmem(user_params);
    }

    // Null pointer performs no accesses
    if (!pointer) { mask_.clear(); }

    if (ScatterD && !indices) { mask_.clear(); }

    // Initialize pointer to the first tile's row in global memory.
    first_tile_byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                               LongIndex(block_offset.row()) * LongIndex(params_.stride);

    if (ScatterD) {
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }

    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }
  /// Destructor
  ///
  /// Flushes the per-row partial reductions accumulated in shared memory out
  /// to global memory. When the grid is not sized exactly one block per chunk
  /// of `total_rows` output rows, several blocks may target the same rows, so
  /// a global-memory mutex serializes the read-modify-write.
  CUTLASS_DEVICE
  ~PredicatedTileIteratorReducedVec()
  {
    if (do_gmem_reduction_) {
      EpilogueOpParams const& user_params = params_.user_param;
      auto gmem_ptr = reinterpret_cast<Element*>(first_tile_byte_pointer_);
      Element* shared_elem_arr = shared_storage_.data();
      // One mutex per chunk of `total_rows` output rows.
      const uint32_t mutex_id = (block_start_row_first_tile_ / total_rows);
      bool useGmemMutex = (gridDim.x != ((extent_row_ - 1 + total_rows) / total_rows));
      // If this is not optimal grid size perform mutex based gmem reduce.
      if (useGmemMutex) {
        // single lock per block for multiple rows
        if (threadIdx.x == 0 && block_start_row_first_tile_ < extent_row_) {
          // acquire mutex lock: spin with exponential backoff capped at 256ns.
          // __nanosleep requires SM70+.
          unsigned int ns = 8;
          while (atomicCAS(user_params.mutexes_ + mutex_id, 0, 1) == 1) {
            __nanosleep(ns);
            if (ns < 256) { ns *= 2; }
          }
        }
      }
      // All threads wait until thread 0 holds the lock before touching gmem.
      __syncthreads();
      // Fold each shared-memory partial into its global-memory row result
      // using the user-provided reduction operator.
      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        if (block_start_row_first_tile_ + row < extent_row_) {
          user_params.red_op_(
            block_start_row_first_tile_ + row, &gmem_ptr[row], shared_elem_arr[row]);
        }
      }
      if (useGmemMutex) {
        // Make this block's gmem writes visible device-wide before unlocking.
        __threadfence();
        __syncthreads();
        if (threadIdx.x == 0 && block_start_row_first_tile_ < extent_row_) {
          // release mutex lock.
          atomicExch(user_params.mutexes_ + mutex_id, 0);
        }
      }
    }
  }
/// Adds a pointer offset in units of Element
CUTLASS_HOST_DEVICE
void add_pointer_offset(LongIndex pointer_offset)
{
byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
}
  /// Performs reduction and Stores a reduced output to memory
  ///
  /// For each row covered by this thread's fragment, the predicated columns
  /// are first reduced thread-locally via `red_op_`, then reduced across the
  /// cooperative-groups partition into the row's shared-memory slot via
  /// `select_reduce` (which uses `cg_reduce_op` at the warp level).
  CUTLASS_DEVICE
  void store_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    AccessTypeValT* frag_ptr = reinterpret_cast<AccessTypeValT*>(&frag);
    cg::thread_block cta = cg::this_thread_block();
    // tile_width 16 is required if kElementPerAccess > 1
    // NOTE(review): `(32 / ThreadMap::Delta::kColumn)` is non-zero for any
    // Delta::kColumn <= 32, so this condition selects 32 in nearly all cases;
    // confirm the intended test (it reads like a divisibility/equality check
    // was meant).
    constexpr int tile_width = (32 / ThreadMap::Delta::kColumn) ? 32 : 16;
    cg::thread_block_tile<tile_width> tile32 = cg::tiled_partition<tile_width>(cta);
    EpilogueOpParams const& user_params = params_.user_param;
    using cg_reduce_t = decltype(user_params.cg_reduce_op);
    using tile32_t = decltype(tile32);
    Element* shared_elem_arr = shared_storage_.data();
    // Identity value for the reduction (rows start at "max" / worst key).
    constexpr auto maxVal = std::numeric_limits<OutValT>::max();
    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          // Linearized fragment row index and its logical row in the output.
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;
          const OutIdxT row_id = row_offset + thread_start_row_;
          bool row_guard = (row_id < extent_row_);
          const int frag_idx = frag_row_idx * ThreadMap::Iterations::kColumn * kElementsPerAccess;
          Element red_val;
          user_params.red_op_.init(&red_val, maxVal);
          if (row_guard) {
            // Shared-memory slot for this row within the block's row chunk.
            const int iter_row = (row_id % total_rows);
            const auto prev_red_val = user_params.red_op_.get_value(shared_elem_arr[iter_row]);
            // Thread-local reduction over all predicated columns of this row.
            CUTLASS_PRAGMA_UNROLL
            for (int column = 0; column < ThreadMap::Iterations::kColumn * kElementsPerAccess;
                 ++column) {
              int columnPerAccess = column / kElementsPerAccess;
              int columnWithPerAccess = column % kElementsPerAccess;
              bool guard = mask_.predicates[column];
              if (guard) {
                // Global column index becomes the key attached to the value.
                const OutIdxT key_id = thread_start_column_ +
                                       ThreadMap::Delta::kColumn * columnPerAccess +
                                       columnWithPerAccess;
                const int frag_col_idx = frag_idx + column;
                Element this_val;
                user_params.red_op_.init(&this_val, (*frag_ptr)[frag_col_idx]);
                user_params.red_op_.init_key(this_val, key_id);
                user_params.red_op_(row_id, &red_val, this_val);
              }
            }
            // select_reduce doesn't need to use `red_op_` as at the warp level we use cg_reduce_op,
            // this satisfies the requirement of mst/single linkage of checking colors buffer.
            select_reduce<cg_reduce_t, tile32_t, OutIdxT, OutValT, Element> red_obj(
              red_val, prev_red_val, user_params.cg_reduce_op, tile32, shared_elem_arr[iter_row]);
          }
        }
      }
    }
  }
/// Stores a fragment to memory
CUTLASS_DEVICE
void store(Fragment& frag) const { store_with_byte_offset(frag, 0); }
  /// This thread's starting (row, column) coordinate in the output tile.
  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }
  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }
  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }
  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }
  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }
  /// Advances to the next position to load or store.
  ///
  /// state_[0..2] count row, group, and cluster iterations respectively; each
  /// rollover resets the counter, bumps the next-larger unit's counter, and
  /// advances both the byte pointer and the logical start row accordingly.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorReducedVec& operator++()
  {
    ++state_[0];
    // ScatterD recomputes addresses from indices, so no pointer advance.
    if (!ScatterD) { byte_pointer_ += params_.advance_row; }
    thread_start_row_ += ThreadMap::Shape::kRow;
    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;
      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;
        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }
    return *this;
  }
  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/epilogue.cuh | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This is adapted from DefaultEpilogueWithBroadcastTensorOp from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/master/include/cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h#L75)
This epilogue allows us to load norm buffers using PredicatedTileIteratorNormVec
and EpilogueWithBroadcast used for distances L2/cosine as well as applies user-define elementwise
operation.
-- A norm load is provided PredicatedTileIteratorNormVec
-- B norm load is provided by EpilogueWithBroadcast
-- elementwise operation is provided by OutputOp
*/
#pragma once
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_types.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
#include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
#include <cutlass/epilogue/threadblock/epilogue.h>
#include <cuvs/distance/detail/fused_distance_nn/custom_epilogue_with_broadcast.h>
#include <cuvs/distance/detail/fused_distance_nn/predicated_tile_iterator_normvec_smem.h>
#include <cuvs/distance/detail/fused_distance_nn/predicated_tile_iterator_reduced_vec.h>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Defines sensible defaults for epilogues for TensorOps.
///
/// Type-assembly helper: reuses DefaultEpilogueTensorOp's thread map and warp
/// iterators, but swaps in the two custom tile iterators used by the fused
/// distance-NN kernel (a shared-memory row-norm loader and a reducing output
/// writer), then composes them into EpilogueWithBroadcastCustom.
template <typename Shape,
          typename WarpMmaTensorOp,
          int PartitionsK,
          typename ElementOutput,
          typename ElementTensor,
          typename ElementVector,
          typename OutputOp,
          typename LayoutT,
          int ElementsPerAccess,
          bool ScatterD = false>
struct FusedDistanceNNEpilogue {
  /// Use defaults related to the existing epilogue
  using Base =
    DefaultEpilogueTensorOp<Shape, WarpMmaTensorOp, PartitionsK, OutputOp, ElementsPerAccess>;
  //
  // Stores the result z = (y = GEMM(A, B, C), broadcast)
  //
  // Loads the per-row norm once per row via shared memory (same value reused
  // across all columns of the row).
  using RowNormTileIterator = cutlass::epilogue::threadblock::
    PredicatedTileIteratorNormVecSmem<typename Base::OutputTileThreadMap, ElementOutput, LayoutT>;
  //
  // Additional tensor tile iterator - stores t = Elementwise(z)
  //
  // Performs the user-provided per-row reduction and writes the reduced
  // output vector.
  using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorReducedVec<
    typename Base::OutputTileThreadMap,
    ElementTensor,
    LayoutT,
    typename OutputOp::Params>;
  /// Define the epilogue
  using Epilogue = cutlass::epilogue::threadblock::EpilogueWithBroadcastCustom<
    Shape,
    WarpMmaTensorOp,
    PartitionsK,
    RowNormTileIterator,
    OutputTileIterator,
    ElementVector,
    typename Base::AccumulatorFragmentIterator,
    typename Base::WarpTileIterator,
    typename Base::SharedLoadIterator,
    OutputOp,
    typename Base::Padding,
    Base::kFragmentsPerIteration>;
};
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/persistent_gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Problem visitor for grouped GEMMs
This file contains heavily customized version of GemmGrouped from CUTLASS 2.10.0
(https://github.com/NVIDIA/cutlass/blob/v2.10.0/include/cutlass/gemm/kernel/gemm_grouped.h)
Changes:
- adds support for only single problem size to be launched persistently
where each threablock processes more than one tile of the same problem.
*/
#pragma once
#include <cutlass/complex.h>
#include <cutlass/cutlass.h>
#include <cutlass/fast_math.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/matrix_coord.h>
#include <cutlass/semaphore.h>
#include <cutlass/gemm/kernel/gemm_grouped_problem_visitor.h>
#include <cutlass/gemm/kernel/gemm_transpose_operands.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/trace.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Persistent fused-distance-NN GEMM kernel.
///
/// Unlike the grouped GEMM it was adapted from, a single problem size is
/// launched with a fixed grid, and each threadblock loops over a contiguous
/// chunk of output tiles ("persistent" style), flushing its shared-memory
/// reduction to global memory at the end of each output-tile row.
template <typename Mma_,                 ///! Threadblock-scoped matrix multiply-accumulate
          typename Epilogue_,            ///! Epilogue
          typename ThreadblockSwizzle_,  ///! Threadblock swizzling function
          GroupScheduleMode GroupScheduleMode_,  ///! Type of scheduling to perform
          bool Transposed = false>
struct FusedDistanceNNPersistent {
 public:
  using Mma = Mma_;
  using Epilogue = Epilogue_;
  using EpilogueOutputOp = typename Epilogue::OutputOp;
  using ThreadblockSwizzle = ThreadblockSwizzle_;
  static GroupScheduleMode const kGroupScheduleMode = GroupScheduleMode_;
  static bool const kTransposed = Transposed;
  // Optional transpose
  using MapArguments = kernel::detail::MapArguments<typename Mma::IteratorA::Element,
                                                    typename Mma::IteratorA::Layout,
                                                    Mma::kTransformA,
                                                    Mma::IteratorA::AccessType::kElements,
                                                    typename Mma::IteratorB::Element,
                                                    typename Mma::IteratorB::Layout,
                                                    Mma::kTransformB,
                                                    Mma::IteratorB::AccessType::kElements,
                                                    typename Mma::LayoutC,
                                                    kTransposed>;
  // Public-facing type definitions related to operand element type, layout, and complex conjugate
  // operation. Must interact with the 'kTransposed' notion.
  using ElementA = typename MapArguments::ElementA;
  using LayoutA = typename MapArguments::LayoutA;
  using ElementB = typename MapArguments::ElementB;
  using LayoutB = typename MapArguments::LayoutB;
  using ElementC = typename Epilogue::OutputTileIterator::Element;
  using LayoutC = typename MapArguments::LayoutC;
  static ComplexTransform const kTransformA = MapArguments::kTransformA;
  static ComplexTransform const kTransformB = MapArguments::kTransformB;
  // Type definitions about the mainloop.
  using Operator = typename Mma::Operator;
  using OperatorClass = typename Mma::Operator::OperatorClass;
  using ThreadblockShape = typename Mma::Shape;
  using WarpShape = typename Mma::Operator::Shape;
  using InstructionShape = typename Mma::Policy::Operator::InstructionShape;
  using ArchTag = typename Mma::ArchTag;
  static int const kStages = Mma::kStages;
  static int const kAlignmentA = MapArguments::kAlignmentA;
  static int const kAlignmentB = MapArguments::kAlignmentB;
  static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess;
  /// Warp count (concept: GemmShape)
  using WarpCount = typename Mma::WarpCount;
  static int const kThreadCount = 32 * WarpCount::kCount;
  using ProblemVisitor = GemmGroupedProblemVisitor<ThreadblockShape,
                                                   kGroupScheduleMode,
                                                   kThreadCount,
                                                   kThreadCount,
                                                   kTransposed>;
  //
  // Structures
  //
  // Minimal stand-in for the grouped problem visitor: only the problem count
  // is needed since exactly one problem size is launched.
  struct temp_problem_visitor {
    int problem_count;
    CUTLASS_HOST_DEVICE temp_problem_visitor() : problem_count(0){};
    CUTLASS_HOST_DEVICE temp_problem_visitor(int problem_count_) : problem_count(problem_count_){};
  };
  /// Argument structure
  struct Arguments {
    //
    // Data members
    //
    GemmCoord problem_sizes;
    temp_problem_visitor problem_visitor;
    // NOTE(review): this member is never assigned by either constructor (the
    // ctor parameter of the same name goes into problem_visitor) — confirm it
    // is unused by callers.
    int problem_count;
    int threadblock_count;
    typename EpilogueOutputOp::Params output_op;
    void const* ptr_A;
    void const* ptr_B;
    void const* ptr_C;
    void* ptr_Vector;
    void* ptr_Tensor;
    typename LayoutA::Stride::Index lda;
    typename LayoutB::Stride::Index ldb;
    typename LayoutC::Stride::Index ldc;
    typename LayoutC::Stride::Index ldt;
    // Only used by device-level operator
    GemmCoord* host_problem_sizes;
    //
    // Methods
    //
    /// Default ctor
    CUTLASS_HOST_DEVICE
    Arguments()
      : // problem_count(0),
        threadblock_count(0),
        ptr_A(nullptr),
        ptr_B(nullptr),
        ptr_C(nullptr),
        ptr_Vector(nullptr),
        ptr_Tensor(nullptr),
        lda(0),
        ldb(0),
        ldc(0),
        ldt(0),
        host_problem_sizes(nullptr)
    {
    }
    /// Ctor
    CUTLASS_HOST_DEVICE
    Arguments(GemmCoord problem_sizes,
              int problem_count,
              int threadblock_count,
              typename EpilogueOutputOp::Params output_op,
              void const* ptr_A,
              void const* ptr_B,
              void const* ptr_C,
              void* ptr_Vector,
              void* ptr_Tensor,
              typename LayoutA::Stride::Index lda,
              typename LayoutB::Stride::Index ldb,
              typename LayoutC::Stride::Index ldc,
              typename LayoutC::Stride::Index ldt,
              GemmCoord* host_problem_sizes = nullptr)
      : problem_sizes(problem_sizes),
        threadblock_count(threadblock_count),
        output_op(output_op),
        ptr_A(ptr_A),
        ptr_B(ptr_B),
        ptr_C(ptr_C),
        ptr_Vector(ptr_Vector),
        ptr_Tensor(ptr_Tensor),
        lda(lda),
        ldb(ldb),
        ldc(ldc),
        ldt(ldt),
        host_problem_sizes(host_problem_sizes)
    {
      problem_visitor.problem_count = problem_count;
    }
  };
  //
  // Structure for precomputing values in host memory and passing to kernels
  //
  /// Parameters structure
  struct Params {
    // typename ProblemVisitor::Params problem_visitor;
    temp_problem_visitor problem_visitor;
    int threadblock_count;
    typename Mma::IteratorA::Params params_A;
    typename Mma::IteratorB::Params params_B;
    typename Epilogue::OutputTileIterator::Params params_C;
    typename Epilogue::TensorTileIterator::Params params_Tensor;
    typename EpilogueOutputOp::Params output_op;
    void* ptr_A;
    void* ptr_B;
    void* ptr_C;
    void* ptr_Vector;
    void* ptr_Tensor;
    GemmCoord problem_size;
    typename LayoutA::Stride::Index lda;
    typename LayoutB::Stride::Index ldb;
    typename LayoutC::Stride::Index ldc;
    typename LayoutC::Stride::Index ldt;
    //
    // Methods
    //
    CUTLASS_HOST_DEVICE
    Params()
      : params_A(0),
        params_B(0),
        params_C(0),
        ptr_A(nullptr),
        ptr_B(nullptr),
        ptr_C(nullptr),
        ptr_Vector(nullptr),
        ptr_Tensor(nullptr),
        lda(0),
        ldb(0),
        ldc(0),
        ldt(0)
    {
    }
    CUTLASS_HOST_DEVICE
    Params(Arguments const& args, void* workspace = nullptr, int tile_count = 0)
      : problem_size(args.problem_sizes),
        threadblock_count(args.threadblock_count),
        output_op(args.output_op),
        params_A(args.lda),
        params_B(args.ldb),
        params_C(args.ldc),
        // Here we pass additional user args via args.output_op
        // to the reduction output tile iterator
        params_Tensor(args.ldt, args.output_op),
        ptr_A(const_cast<void*>(args.ptr_A)),
        ptr_B(const_cast<void*>(args.ptr_B)),
        ptr_C(const_cast<void*>(args.ptr_C)),
        ptr_Vector(args.ptr_Vector),
        ptr_Tensor(args.ptr_Tensor),
        lda(args.lda),
        ldb(args.ldb),
        ldc(args.ldc),
        ldt(args.ldt)
    {
      problem_visitor.problem_count = args.problem_visitor.problem_count;
    }
    /// Refreshes pointers/strides in place for a new set of arguments.
    CUTLASS_HOST_DEVICE
    void update(Arguments const& args, void* workspace = nullptr, int tile_count = 0)
    {
      threadblock_count = args.threadblock_count;
      output_op = args.output_op;
      ptr_A = const_cast<void*>(args.ptr_A);
      ptr_B = const_cast<void*>(args.ptr_B);
      ptr_C = const_cast<void*>(args.ptr_C);
      ptr_Vector = args.ptr_Vector;
      ptr_Tensor = args.ptr_Tensor;
      lda = args.lda;
      ldb = args.ldb;
      ldc = args.ldc;
      ldt = args.ldt;
      problem_size = args.problem_sizes;
    }
  };
  /// Shared memory storage structure
  struct SharedStorage {
    // Mainloop and epilogue storage may alias (they are used in different
    // phases); the reduction and row-norm buffers persist across both, so
    // they live outside the union.
    union {
      typename Mma::SharedStorage main_loop;
      typename Epilogue::SharedStorage epilogue;
    } kernel;
    typename Epilogue::TensorTileIterator::SharedStorage reduced_store;
    typename Epilogue::OutputTileIterator::SharedStorage rownorm_store;
  };
 public:
  //
  // Methods
  //
  CUTLASS_DEVICE
  FusedDistanceNNPersistent() {}
  /// Determines whether kernel satisfies alignment
  static Status can_implement(cutlass::gemm::GemmCoord const& problem_size)
  {
    return Status::kSuccess;
  }
  static Status can_implement(Arguments const& args) { return Status::kSuccess; }
  static size_t get_extra_workspace_size(Arguments const& args,
                                         cutlass::gemm::GemmCoord const& grid_tiled_shape)
  {
    return 0;
  }
  /// Total number of output tiles in the grid.
  CUTLASS_DEVICE
  static uint32_t tile_count(const cutlass::MatrixCoord& grid)
  {
    return grid.row() * grid.column();
  }
  /// Get the grid shape
  ///
  /// Ceiling division of the problem extent by the threadblock tile shape.
  CUTLASS_DEVICE
  static cutlass::MatrixCoord grid_shape(const cutlass::gemm::GemmCoord& problem)
  {
    return cutlass::MatrixCoord(((problem.m() - 1 + ThreadblockShape::kM) / ThreadblockShape::kM),
                                ((problem.n() - 1 + ThreadblockShape::kN) / ThreadblockShape::kN));
  }
  /// Executes one GEMM
  CUTLASS_DEVICE
  void operator()(Params const& params, SharedStorage& shared_storage)
  {
#if __CUDA_ARCH__ >= 800
    //
    // These types shadow the type-level definitions and support the ability to implement
    // a 'transposed' GEMM that computes the transposed problems.
    //
    using ElementA = typename Mma::IteratorA::Element;
    using LayoutA = typename Mma::IteratorA::Layout;
    using ElementB = typename Mma::IteratorB::Element;
    using LayoutB = typename Mma::IteratorB::Layout;
    using ElementC = typename Epilogue::OutputTileIterator::Element;
    using LayoutC = typename Epilogue::OutputTileIterator::Layout;
    const GemmCoord& problem_size = params.problem_size;
    const auto grid_shape_ = grid_shape(problem_size);
    // Each block owns a contiguous chunk of `problem_chunk` tiles (row-major
    // tile order), ceil-divided across the grid.
    const uint32_t problem_chunk = (tile_count(grid_shape_) - 1 + gridDim.x) / gridDim.x;
    const uint32_t problem_chunk_end = blockIdx.x * problem_chunk + problem_chunk;
    typename LayoutB::Index column =
      ((blockIdx.x * problem_chunk) % grid_shape_.column()) * Mma::Shape::kN;
    typename LayoutB::Index row =
      ((blockIdx.x * problem_chunk) / grid_shape_.column()) * Mma::Shape::kM;
    // Blocks whose first tile is not at column 0 must initialize the shared
    // buffers here; column-0 tiles do it inside the iterator constructors.
    if (column) {
      shared_storage.reduced_store.initSmem(params.output_op);
      shared_storage.rownorm_store.initSmem(params.ptr_C, problem_size.m(), row, sizeof(ElementC));
    }
    // Outer 'persistent' loop to iterate over tiles
    for (uint32_t tile_idx = blockIdx.x * problem_chunk; tile_idx < problem_chunk_end; tile_idx++) {
      // (shadows the outer grid_shape_ with the same value)
      const auto grid_shape_ = grid_shape(problem_size);
      cutlass::MatrixCoord threadblock_offset(
        int(tile_idx / grid_shape_.column()) * Mma::Shape::kM,
        int(tile_idx % grid_shape_.column()) * Mma::Shape::kN);
      const bool isNextTile = ((tile_idx + 1) < problem_chunk_end);
      const bool doesRowChange =
        ((threadblock_offset.column() + Mma::Shape::kN) >= problem_size.n());
      // Flush the shared-memory reduction to gmem when this is the last
      // column tile of its row, or the last tile of this block's chunk.
      const bool do_gmem_reduce = (doesRowChange || !isNextTile) ? true : false;
      ElementA* ptr_A = static_cast<ElementA*>(params.ptr_A);
      ElementB* ptr_B = static_cast<ElementB*>(params.ptr_B);
      // Compute initial location in logical coordinates
      cutlass::MatrixCoord tb_offset_A{threadblock_offset.row(), 0};
      cutlass::MatrixCoord tb_offset_B{0, threadblock_offset.column()};
      // Compute position within threadblock
      int thread_idx = threadIdx.x;
      // Construct iterators to A and B operands
      typename Mma::IteratorA iterator_A(
        params.params_A, ptr_A, {problem_size.m(), problem_size.k()}, thread_idx, tb_offset_A);
      typename Mma::IteratorB iterator_B(
        params.params_B, ptr_B, {problem_size.k(), problem_size.n()}, thread_idx, tb_offset_B);
      // Broadcast the warp_id computed by lane 0 to ensure dependent code
      // is compiled as warp-uniform.
      int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0);
      int lane_idx = threadIdx.x % 32;
      //
      // Matrix multiply phase
      //
      // Construct thread-scoped matrix multiply
      Mma mma(shared_storage.kernel.main_loop, thread_idx, warp_idx, lane_idx);
      typename Mma::FragmentC accumulators;
      accumulators.clear();
      // Compute threadblock-scoped matrix multiply-add
      int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
      // Wait for all threads to finish their epilogue phases from the previous tile.
      //__syncthreads();
      // Compute threadblock-scoped matrix multiply-add
      mma(gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators);
      //
      // Epilogue
      //
      EpilogueOutputOp output_op(params.output_op);
      ElementC* ptr_C = static_cast<ElementC*>(params.ptr_C);
      typename Epilogue::ElementTensor* ptr_Tensor =
        static_cast<typename Epilogue::ElementTensor*>(params.ptr_Tensor);
      // Define the reduction output pointer and move to the appropriate place
      typename Epilogue::ElementVector* ptr_Vector =
        static_cast<typename Epilogue::ElementVector*>(params.ptr_Vector);
      // Tile iterator loading from source tensor.
      typename Epilogue::OutputTileIterator iterator_rownorm(shared_storage.rownorm_store,
                                                             params.params_C,
                                                             ptr_C,
                                                             problem_size.mn(),
                                                             thread_idx,
                                                             threadblock_offset);
      // Additional tensor to load from
      typename Epilogue::TensorTileIterator tensor_iterator(shared_storage.reduced_store,
                                                            params.params_Tensor,
                                                            // Only the final block outputs Tensor
                                                            ptr_Tensor,
                                                            problem_size.mn(),
                                                            thread_idx,
                                                            do_gmem_reduce,
                                                            threadblock_offset);
      Epilogue epilogue(shared_storage.kernel.epilogue, thread_idx, warp_idx, lane_idx);
      // Execute the epilogue operator to update the destination tensor.
      // Move to appropriate location for this output tile
      if (ptr_Vector) { ptr_Vector += threadblock_offset.column(); }
      // Execute the epilogue operator to update the destination tensor.
      epilogue(output_op,
               ptr_Vector,
               // iterator_D,
               accumulators,
               iterator_rownorm,
               tensor_iterator,
               problem_size.mn(),
               threadblock_offset);
    }
#endif
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/predicated_tile_iterator_normvec_smem.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
This file contains a customized version of PredicatedTileIterator from CUTLASS 2.9.0
(https://github.com/NVIDIA/cutlass/blob/v2.9.0/include/cutlass/epilogue/threadblock/predicated_tile_iterator.h#L75)
Changes:
- added `Layout_` template param
- Only the row index is used to load the data in load_with_byte_offset().
This way the same normalization data is used across all columns in a row.
*/
#pragma once
#include <cutlass/arch/arch.h>
#include <cutlass/arch/memory.h>
#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/epilogue/threadblock/output_tile_thread_map.h>
#include <cutlass/epilogue/threadblock/predicated_tile_iterator_params.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/tensor_ref.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <raft/util/device_loads_stores.cuh>
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////
namespace epilogue {
namespace threadblock {
////////////////////////////////////////////////////////////////////////////////
/// Tile iterator used to load and store output tile from global memory in epilogue.
///
/// Satisfies: ReadableTileIterator | PredicatedTileIterator | ForwardTileIterator
///
template <typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
          typename Element_, ///< Element data type
          typename Layout_, ///< Layout of the row data that is staged into shared memory
          bool ScatterD = false, ///< Scatter D operand or not
          bool UseCUDAStore = false>
class PredicatedTileIteratorNormVecSmem {
 public:
  using ThreadMap = ThreadMap_;
  using Shape = typename ThreadMap::Shape;
  using Element = Element_;
  using Layout = Layout_;
  using TensorRef = TensorRef<Element, Layout>;
  using ConstTensorRef = typename TensorRef::ConstTensorRef;
  using Index = typename Layout::Index;
  using LongIndex = typename Layout::LongIndex;
  using TensorCoord = MatrixCoord;
  static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
  static int const kThreads = ThreadMap::kThreads;
  static int const kIterations = ThreadMap::Count::kTile;
  /// Total number of rows covered by one threadblock across all tile
  /// iterations; this is also the element count of the shared-memory
  /// staging buffer (see SharedStorage).
  static int const total_rows = ThreadMap::kWarpCount * ThreadMap::Iterations::kRow *
                                ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
                                ThreadMap::Count::kTile * ThreadMap::Delta::kRow;
  static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
  static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
  static_assert(ThreadMap::Iterations::kCluster > 0, "ThreadMap::Iterations::kCluster must be > 0");
  static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
  /// Fragment type filled by load(). Note there is no kColumn factor in the
  /// size: the same per-row value is reused across all columns of a row.
  using Fragment = Array<Element,
                         ThreadMap::Iterations::kRow * ThreadMap::Iterations::kGroup *
                           ThreadMap::Iterations::kCluster * ThreadMap::kElementsPerAccess>;
  /// Memory access size
  using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
  //
  // Parameters struct
  //
  /// Uses a non-template class
  struct Params : PredicatedTileIteratorParams {
    using Base = PredicatedTileIteratorParams;
    CUTLASS_HOST_DEVICE
    Params() {}
    /// Derives the byte stride between rows from the tensor layout.
    CUTLASS_HOST_DEVICE
    Params(Layout const& layout)
      : PredicatedTileIteratorParams(
          layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
          make_OutputTileThreadMapDesc<ThreadMap>())
    {
    }
    CUTLASS_HOST_DEVICE
    Params(Base const& base) : Base(base) {}
  };
  /// Mask object
  struct Mask {
    static int const kCount = ThreadMap::Iterations::kColumn;
    /// Predicate state
    bool predicates[kCount];
    //
    // Mask
    //
    CUTLASS_HOST_DEVICE
    Mask() { enable(); }
    ///< Efficiently disables all accesses guarded by mask
    CUTLASS_HOST_DEVICE void clear()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = false;
      }
    }
    ///< Efficiently enables all accesses guarded by mask
    CUTLASS_DEVICE void enable()
    {
      CUTLASS_PRAGMA_UNROLL
      for (int i = 0; i < kCount; ++i) {
        predicates[i] = true;
      }
    }
  };
  /// Shared storage allocation needed by the predicated tile
  // iterator for storing rowNorm chunk.
  struct SharedStorage {
    //
    // Type definitions
    //
    using Shape = MatrixShape<total_rows, 1>;
    /// Shape of the shared memory allocation
    using StorageShape = MatrixShape<Shape::kRow, Shape::kColumn>;
    //
    // Data members
    //
    // Methods
    //
    AlignedBuffer<Element, StorageShape::kCount> storage;
    CUTLASS_DEVICE
    Element* data() { return storage.data(); }
    SharedStorage() {}
    /// Cooperatively copies `total_rows` elements, starting at global row
    /// `tb_row_offset`, from global memory into the shared staging buffer.
    /// Rows beyond `num_rows` are predicated off via the cp_async guard.
    /// NOTE(review): cp_async_wait<0> is issued inside the loop (after every
    /// copy) rather than once after all copies — confirm this is intentional.
    CUTLASS_DEVICE
    void initSmem(void* pointer,
                  const Index& num_rows,
                  const Index& tb_row_offset,
                  const LongIndex& stride)
    {
      Element* shared_elem_arr = data();
      // Advance to this threadblock's first row; `stride` is in bytes.
      uint8_t* first_tile_byte_pointer_ =
        reinterpret_cast<uint8_t*>(pointer) + LongIndex(tb_row_offset) * LongIndex(stride);
      const auto gmem_ptr = reinterpret_cast<Element*>(first_tile_byte_pointer_);
      // Block-strided loop: consecutive threads copy consecutive rows.
      for (int row = threadIdx.x; row < total_rows; row += blockDim.x) {
        bool guard = (tb_row_offset + row) < num_rows;
        cutlass::arch::cp_async<sizeof(Element)>(shared_elem_arr + row, gmem_ptr + row, guard);
        cutlass::arch::cp_async_wait<0>();
      }
    }
  };
 private:
  //
  // Data members
  //
  /// Parameters structure containing reference and precomputed state.
  PredicatedTileIteratorParams params_;
  /// Byte-level pointer
  uint8_t* byte_pointer_;
  /// Array of boolean values to contain steady-state predicates
  Mask mask_;
  /// Extent of the matrix tile in rows
  Index extent_row_;
  /// Extent of the matrix tile in columns
  Index extent_column_;
  /// A thread's starting row position (assuming steady-state predicates have been computed)
  Index thread_start_row_;
  /// A thread's starting column
  Index thread_start_column_;
  /// Internal state counter
  int state_[3];
  /// Scatter indices
  int const* indices_;
  //
  // Static asserts about internal strides
  //
  static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
  static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
 private:
  //
  // Methods
  //
 protected:
  /// Reference to the block-shared staging buffer (owned by the epilogue).
  SharedStorage& shared_storage_;
 public:
  //
  // Methods
  //
  /// Constructor. Computes this thread's start coordinates, sets up the
  /// column predicates, and (for the threadblock starting at column 0)
  /// stages the row data into shared memory.
  CUTLASS_DEVICE
  PredicatedTileIteratorNormVecSmem(SharedStorage& shared_storage,
                                    PredicatedTileIteratorParams const& params,
                                    Element* pointer,
                                    TensorCoord extent,
                                    int thread_idx,
                                    TensorCoord& threadblock_offset,
                                    int const* indices = nullptr)
    : params_(params), indices_(indices), shared_storage_(shared_storage)
  {
    TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
    extent_row_ = extent.row();
    extent_column_ = extent.column();
    thread_start_row_ = thread_offset.row();
    thread_start_column_ = thread_offset.column();
    // Initialize predicates
    CUTLASS_PRAGMA_UNROLL
    for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
      mask_.predicates[c] =
        ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
    }
    // Null pointer performs no accesses
    if (!pointer) {
      mask_.clear();
      return;
    }
    // Scatter mode requires an index vector; disable all accesses otherwise.
    if (ScatterD && !indices) { mask_.clear(); }
    // Initialize pointer
    byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                    LongIndex(thread_offset.row()) * LongIndex(params_.stride);
    if (ScatterD) {
      byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
                      LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
    }
    // Stage this threadblock's chunk of row data into shared memory.
    // NOTE(review): only done when the threadblock starts at column 0 —
    // presumably the kernel arranges for every threadblock that reads this
    // iterator to satisfy that; verify against the callers.
    if (threadblock_offset.column() == 0) {
      shared_storage_.initSmem(pointer, extent_row_, threadblock_offset.row(), params_.stride);
    }
    // Initialize internal state counter
    state_[0] = state_[1] = state_[2] = 0;
  }
  /// Adds a pointer offset in units of Element
  CUTLASS_HOST_DEVICE
  void add_pointer_offset(LongIndex pointer_offset)
  {
    byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
  }
  /// Loads a fragment from shared memory. Only the row index is used (see the
  /// file header); the `byte_offset` argument is accepted for interface
  /// compatibility but not used, so the same per-row value is served for
  /// every column in a row.
  CUTLASS_DEVICE
  void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
  {
    AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
    Element* shared_elem_arr = shared_storage_.data();
    CUTLASS_PRAGMA_UNROLL
    for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
      CUTLASS_PRAGMA_UNROLL
      for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
        CUTLASS_PRAGMA_UNROLL
        for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
          // Linearized fragment index of this (cluster, group, row) position.
          int frag_row_idx =
            (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster));
          int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup +
                           cluster * ThreadMap::Delta::kCluster;
          // Wrap into the shared-memory staging buffer of `total_rows` entries.
          int iter_row = ((row_offset + thread_start_row_) % total_rows);
          Element val = shared_elem_arr[iter_row];
          // Broadcast the row value into the fragment.
          // NOTE(review): entries are written at frag_row_idx + i, which
          // overlaps between consecutive rows when kElementsPerAccess > 1 —
          // confirm the intended fragment indexing.
          CUTLASS_PRAGMA_UNROLL
          for (int i = 0; i < kElementsPerAccess; ++i) {
            (*frag_ptr)[frag_row_idx + i] = val;
          }
        }
      }
    }
  }
  /// Loads a fragment from memory
  CUTLASS_DEVICE
  void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }
  /// Returns this thread's starting (row, column) coordinate.
  CUTLASS_DEVICE
  MatrixCoord thread_start() const { return MatrixCoord(thread_start_row_, thread_start_column_); }
  /// Need to get the thread start row from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_row() const { return thread_start_row_; }
  /// Need to get the thread start column from the tile iterator
  CUTLASS_DEVICE
  int32_t thread_start_column() const { return thread_start_column_; }
  /// Extent of the matrix in rows
  CUTLASS_DEVICE
  Index extent_row() const { return extent_row_; }
  /// Extent of the matrix in columns
  CUTLASS_DEVICE
  Index extent_column() const { return extent_column_; }
  /// Advances to the next position to load or store, updating the row/group/
  /// cluster/tile state counters and the byte pointer accordingly.
  CUTLASS_HOST_DEVICE
  PredicatedTileIteratorNormVecSmem& operator++()
  {
    ++state_[0];
    if (!ScatterD) { byte_pointer_ += params_.advance_row; }
    thread_start_row_ += ThreadMap::Shape::kRow;
    if (state_[0] == ThreadMap::Count::kRow) {
      state_[0] = 0;
      ++state_[1];
      byte_pointer_ += params_.advance_group;
      thread_start_row_ +=
        (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
      if (state_[1] == ThreadMap::Count::kGroup) {
        state_[1] = 0;
        ++state_[2];
        byte_pointer_ += params_.advance_cluster;
        thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
                             ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
        if (state_[2] == ThreadMap::Count::kCluster) {
          state_[2] = 0;
          byte_pointer_ += params_.advance_tile;
        }
      }
    }
    return *this;
  }
  ///< Efficiently disables all accesses guarded by mask
  CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
  ///< Efficiently enables all accesses guarded by mask
  CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
  ///< Gets the mask
  CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
  ///< Sets the mask
  CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/fused_distance_nn/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <cutlass/gemm/kernel/default_gemm_universal.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/layout/tensor.h>
#include <cuvs/distance/detail/fused_distance_nn/epilogue.cuh>
#include <cuvs/distance/detail/fused_distance_nn/persistent_gemm.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace kernel {
/////////////////////////////////////////////////////////////////////////////////////////////////
/*
 * Configuration for float inputs with veclen (kAlignmentA/B) of 2 or 4.
 * The 32x256x16 threadblock tile is ideal for these cases: it is the
 * fastest shape that does not spill registers at those vector lengths.
 */
template <
  /// Element type for A matrix operand
  typename ElementA_,
  /// Alignment (veclen) of A matrix operand
  int kAlignmentA,
  /// Element type for B matrix operand
  typename ElementB_,
  /// Alignment (veclen) of B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Whether the input data layout is row major (true) or column major (false)
  bool isRowMajor>
struct FusedDistanceNNGemm {
  // This primary template is specialized for fp32/3xTF32.
  /// Threadblock-level tile size: M = 32, N = 256, K = 16.
  // Most performant shape; note that with veclen = 1 it has register spills
  // (the veclen = 1 case is covered by a dedicated specialization below).
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 16>;
  // Alternative with higher occupancy but lower perf and *no* spills for any
  // veclen (1, 2, 4):
  // using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
  /// Warp-level tile size: M = 32, N = 64, K = 16 (faster for veclen 2, 4).
  using WarpShape = cutlass::gemm::GemmShape<32, 64, 16>;
  // Higher-occupancy companion of the 32x128x16 threadblock shape:
  // using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  /// MMA instruction shape: M = 16, N = 8, K = 4.
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>;
  /// Operation performed by the GEMM mainloop (3xTF32 float path).
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // using Operator = cutlass::arch::OpMultiplyAdd; // 1xTF32 alternative for float inputs
  /// Use tensor cores rather than SIMT cores.
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  /// Target CUDA SM architecture.
  using ArchTag = cutlass::arch::Sm80;
  /// Threadblock-level swizzling operator (threadblock scheduling on the GPU).
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// Final output matrix layout; kept row major even for column major inputs.
  using LayoutOutput = cutlass::layout::RowMajor;
  /// Layout of the row-norm vector, following the input layout.
  using NormXLayout =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutA_ =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutB_ =
    std::conditional_t<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>;
  /// Default CUTLASS universal GEMM kernel; its mainloop is reused below.
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentA,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 kAlignmentB,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  /// Replacement epilogue fusing the distance computation and NN reduction.
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  /// Composed kernel: default mainloop + fused epilogue, persistent scheduling.
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
/*
 * Configuration for float inputs with veclen (kAlignmentA/B) = 1.
 * The 32x128x16 threadblock tile is ideal here: larger tiles spill
 * registers at this vector length.
 */
template <
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Whether the input data layout is row major (true) or column major (false)
  bool isRowMajor>
struct FusedDistanceNNGemm<float, /// Element type for A matrix operand
                           1,     /// Alignment (veclen) of A matrix operand
                           float, /// Element type for B matrix operand
                           1,     /// Alignment (veclen) of B matrix operand
                           ElementC_,
                           ElementAccumulator,
                           EpilogueOutputOp,
                           Stages,
                           isRowMajor> {
  // Specialization for fp32/3xTF32 at veclen = 1.
  using ElementA_ = float;
  using ElementB_ = float;
  /// Threadblock-level tile size: M = 32, N = 128, K = 16.
  // High occupancy and no register spills for veclen = 1.
  using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
  /// Warp-level tile size: M = 32, N = 32, K = 16.
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  /// MMA instruction shape: M = 16, N = 8, K = 4.
  using InstructionShape = cutlass::gemm::GemmShape<16, 8, 4>;
  /// Operation performed by the GEMM mainloop (3xTF32 float path).
  using Operator = cutlass::arch::OpMultiplyAddFastF32;
  // using Operator = cutlass::arch::OpMultiplyAdd; // 1xTF32 alternative for float inputs
  /// Use tensor cores rather than SIMT cores.
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  /// Target CUDA SM architecture.
  using ArchTag = cutlass::arch::Sm80;
  /// Threadblock-level swizzling operator (threadblock scheduling on the GPU).
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// Final output matrix layout; kept row major even for column major inputs.
  using LayoutOutput = cutlass::layout::RowMajor;
  /// Layout of the row-norm vector, following the input layout.
  using NormXLayout =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutA_ =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutB_ =
    std::conditional_t<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>;
  /// Default CUTLASS universal GEMM kernel; its mainloop is reused below.
  using GemmBase = typename DefaultGemmUniversal<ElementA_,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementB_,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  /// Replacement epilogue fusing the distance computation and NN reduction.
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementAccumulator,
    typename EpilogueOutputOp::ElementT,
    ElementAccumulator,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  /// Composed kernel: default mainloop + fused epilogue, persistent scheduling.
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
/*
 * Configuration for double-precision inputs (any veclen).
 */
template <
  /// Alignment (veclen) of A matrix operand
  int kAlignmentA,
  /// Alignment (veclen) of B matrix operand
  int kAlignmentB,
  /// Element type for C and D matrix operands
  typename ElementC_,
  /// Element type for internal accumulation
  typename ElementAccumulator,
  /// Epilogue output operator - must satisfy concept of 'EpilogueWithBroadcastOp'
  typename EpilogueOutputOp,
  /// Number of stages used in the pipelined mainloop
  int Stages,
  /// Whether the input data layout is row major (true) or column major (false)
  bool isRowMajor>
struct FusedDistanceNNGemm<double,
                           kAlignmentA,
                           double,
                           kAlignmentB,
                           ElementC_,
                           ElementAccumulator,
                           EpilogueOutputOp,
                           Stages,
                           isRowMajor> {
  /// Threadblock-level tile size: M = 64, N = 64, K = 16.
  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 16>;
  // using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
  /// Warp-level tile size: M = 32, N = 32, K = 16.
  using WarpShape = cutlass::gemm::GemmShape<32, 32, 16>;
  // using WarpShape = cutlass::gemm::GemmShape<16, 32, 16>;
  /// MMA instruction shape for fp64 tensor cores: M = 8, N = 8, K = 4.
  using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
  /// Operation performed by the GEMM mainloop.
  using Operator = cutlass::arch::OpMultiplyAdd;
  /// Use tensor cores rather than SIMT cores.
  using OperatorClass = cutlass::arch::OpClassTensorOp;
  /// Target CUDA SM architecture.
  using ArchTag = cutlass::arch::Sm80;
  /// Threadblock-level swizzling operator (threadblock scheduling on the GPU).
  using ThreadblockSwizzle = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
  /// Final output matrix layout; kept row major even for column major inputs.
  using LayoutOutput = cutlass::layout::RowMajor;
  /// Layout of the row-norm vector, following the input layout.
  using NormXLayout =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutA_ =
    std::conditional_t<isRowMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor>;
  using LayoutB_ =
    std::conditional_t<isRowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor>;
  /// Default CUTLASS universal GEMM kernel; its mainloop is reused below.
  using GemmBase = typename DefaultGemmUniversal<double,
                                                 LayoutA_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 double,
                                                 LayoutB_,
                                                 cutlass::ComplexTransform::kNone,
                                                 1,
                                                 ElementC_,
                                                 LayoutOutput,
                                                 ElementAccumulator,
                                                 OperatorClass,
                                                 ArchTag,
                                                 ThreadblockShape,
                                                 WarpShape,
                                                 InstructionShape,
                                                 EpilogueOutputOp,
                                                 ThreadblockSwizzle,
                                                 Stages,
                                                 Operator>::GemmKernel;
  /// Replacement epilogue fusing the distance computation and NN reduction.
  // Note: this double specialization passes ElementC_ (not ElementAccumulator)
  // for the epilogue's accumulator slots, matching the original configuration.
  using Epilogue = typename cutlass::epilogue::threadblock::FusedDistanceNNEpilogue<
    typename GemmBase::Epilogue::Shape,
    typename GemmBase::Epilogue::WarpMmaOperator,
    GemmBase::Epilogue::kPartitionsK,
    ElementC_,
    typename EpilogueOutputOp::ElementT,
    ElementC_,
    EpilogueOutputOp,
    NormXLayout,
    GemmBase::Epilogue::kElementsPerAccess>::Epilogue;
  /// Composed kernel: default mainloop + fused epilogue, persistent scheduling.
  using GemmKernel = FusedDistanceNNPersistent<typename GemmBase::Mma,
                                               Epilogue,
                                               ThreadblockSwizzle,
                                               GroupScheduleMode::kDeviceOnly>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace gemm
} // namespace cutlass | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch_sm60.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <cuvs/distance/detail/pairwise_matrix/dispatch_layout.cuh> // dispatch_layout
#include <cuvs/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
namespace cuvs::distance::detail {
/**
 * @brief Builds the SM60 pairwise-matrix kernel wrapper for the given
 * distance op and parameters, selecting the kernel policy (layout, veclen)
 * at compile time based on the runtime layout and pointer alignment.
 *
 * @param distance_op     Distance operation functor.
 * @param params          Kernel parameters (matrix pointers, extents, layout).
 * @param sm_compat_range SM architecture range this kernel supports.
 * @return Wrapper holding the kernel function pointer and launch parameters.
 */
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT> pairwise_matrix_sm60_get_wrapper(
  OpT distance_op,
  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
  SM_compat_t sm_compat_range)
{
  // Widest vector load width (1, 2 or 4 elements) permitted by the byte
  // alignment of the x and y matrices.
  int vec_len = determine_vec_len(params);
  // f takes compile-time constants row_major and vec_len aligned and returns
  // the corresponding kernel wrapper. The wrapper contains the launch
  // parameters of the kernel: a pointer to the kernel function, grid size,
  // block size, and shared memory size.
  auto f = [&](auto row_major, auto vec_len_aligned) {
    // row_major and vec_len are std::integral_constants of type bool and int
    // respectively.
    // To keep compile times in check, we only specialize on veclen > 1 when
    // the inner loop is relatively cheap (< 5 flops).
    constexpr int vec_len_op = distance_op.expensive_inner_loop ? 1 : vec_len_aligned();
    // Prevent double, vec_len=4 combination (this is not supported):
    // 16 / sizeof(DataT) caps the veclen at 2 for 8-byte element types.
    // Note: this compile-time `vec_len` intentionally shadows the runtime
    // `vec_len` declared above.
    constexpr int vec_len = std::min(vec_len_op, static_cast<int>(16 / sizeof(DataT)));
    using RowPolicy = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;
    using ColPolicy = typename raft::linalg::Policy4x4<DataT, vec_len>::ColPolicy;
    using Policy = typename std::conditional<row_major(), RowPolicy, ColPolicy>::type;
    auto wrapper =
      make_pairwise_matrix_sm60_wrapper<Policy, row_major()>(distance_op, params, sm_compat_range);
    return wrapper;
  };
  // Dispatch_layout calls f with appropriate compile time constants based on
  // the runtime values of params.is_row_major and vec_len.
  return dispatch_layout(params.is_row_major, vec_len, f);
}
/**
 * @brief Selects and launches the SM60 pairwise-matrix kernel.
 *
 * Builds the kernel wrapper matching the runtime layout and alignment of
 * `params`, then launches it on `stream`.
 *
 * @param distance_op     Distance operation functor.
 * @param params          Kernel parameters (matrix pointers, extents, layout).
 * @param sm_compat_range SM architecture range this kernel supports.
 * @param stream          CUDA stream on which the kernel is launched.
 */
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
void pairwise_matrix_sm60_dispatch(OpT distance_op,
                                   pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
                                   SM_compat_t sm_compat_range,
                                   cudaStream_t stream)
{
  pairwise_matrix_sm60_get_wrapper(distance_op, params, sm_compat_range)
    .launch(distance_op, params, stream);
}
} // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch_layout.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <cstdint> // size_t
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/core/error.hpp> // RAFT_EXPECTS
#include <type_traits> // std::integral_constant
namespace cuvs::distance::detail {
/**
 * @brief: Computes minimal common alignment of the rows in a 2D array in bytes
 *
 * The 2D matrix `x` is assumed to be row-major. This function computes the
 * minimal alignment in bytes of the first elements of each row.
 * Output can be 16, 8, 4, 2, 1.
 *
 * @param x Base pointer of row-major input matrix
 * @param stride Stride in number of element between consecutive rows.
 * @return Largest power-of-two byte alignment (<= 16) shared by the base
 *         pointer and the row stride.
 */
template <typename DataT>
size_t alignment_of_2d_array(const DataT* x, size_t stride)
{
  auto base = reinterpret_cast<uintptr_t>(x);
  size_t stride_bytes = sizeof(DataT) * stride;
  // Try the largest alignment first. The loop bound is `align >= 1` (not
  // `>= 0`): align == 1 always matches, and letting align reach 0 would make
  // the modulo below undefined behavior.
  for (size_t align = 16; align >= 1; align /= 2) {
    bool base_aligned = base % align == 0;
    bool stride_aligned = stride_bytes % align == 0;
    if (base_aligned && stride_aligned) { return align; }
  }
  return 1;
}
/**
 * @brief: Computes the vec_len parameter kernel policy parameter
 *
 * Derives the widest safe vector load width (in elements, capped at 4) from
 * the common byte alignment of the x and y matrices.
 *
 * @param params Kernel parameters
 * @return Vector length in elements (1, 2, or 4).
 */
template <typename IdxT, typename DataT, typename OutT, typename FinOpT>
int determine_vec_len(pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params)
{
  size_t align_x = alignment_of_2d_array(params.x, params.ldx);
  size_t align_y = alignment_of_2d_array(params.y, params.ldy);
  // Qualified std::min (from <algorithm>, included at the top of this file)
  // instead of the unqualified `min`, which could resolve to a different
  // overload in CUDA translation units.
  size_t byte_alignment = std::min(align_x, align_y);
  // Since alignment is in bytes, it could be smaller than sizeof(DataT).
  // Handle this (unlikely) case here.
  RAFT_EXPECTS(sizeof(DataT) <= byte_alignment,
               "Input matrix must be aligned to size of elements.");
  // Compute number of elements that can be loaded in one instruction
  // without causing misalignent errors.
  int vec_len_aligned = (byte_alignment % sizeof(DataT) == 0) ? byte_alignment / sizeof(DataT) : 1;
  // In the future, pairwise_matrix might support `int8_t` input. In that case,
  // byte_alignment / sizeof(DataT) might exceed 4. We maximize at 4 here, to
  // prevent adding more cases in dispatch_layout below (which are expensive to
  // compile).
  vec_len_aligned = std::min(vec_len_aligned, 4);
  return vec_len_aligned;
}
/// Compile-time integer wrapper used to pass a veclen as a template argument.
template <int n>
using vec_len_constant = std::integral_constant<int, n>;
/**
 * @brief: Converts run-time arguments to compile-time arguments
 *
 * Maps the runtime values of `row_major` and `vec_len` onto
 * std::integral_constant tags and invokes `f` with them, so `f` can select
 * kernel policies at compile time. Unrecognized vec_len values fall back to 1.
 *
 * @tparam F Type of lambda f.
 * @param row_major Boolean indicating whether input arrays have row-major layout.
 * @param vec_len Integer value 1, 2, or 4 specifying the Veclen template parameter of
 * the KernelPolicy.
 * @param f Lambda that takes two std::integral_constant parameters representing
 * row_major and vec_len.
 */
template <typename F>
auto dispatch_layout(bool row_major, int vec_len, F&& f)
{
  // For a fixed layout tag, translate the runtime vec_len into its
  // compile-time counterpart and call f.
  auto with_layout = [&](auto layout_tag) {
    switch (vec_len) {
      case 4: return f(layout_tag, vec_len_constant<4>());
      case 2: return f(layout_tag, vec_len_constant<2>());
      default: return f(layout_tag, vec_len_constant<1>());
    }
  };
  return row_major ? with_layout(std::true_type()) : with_layout(std::false_type());
}
}; // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "dispatch-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "dispatch-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch-ext.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <cuvs/distance/detail/distance_ops/cutlass.cuh> // ops::has_cutlass_op
#include <cuvs/distance/detail/kernels/rbf_fin_op.cuh> // rbf_fin_op
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs::distance::detail {
/**
 * @brief Compute the pairwise distance matrix between the m rows of `x` and
 * the n rows of `y` (each row has k features), applying `fin_op` to every
 * output element before writing it to `out`.
 *
 * Declaration only: when RAFT_EXPLICIT_INSTANTIATE_ONLY is defined the body is
 * unavailable (RAFT_EXPLICIT) and callers must use one of the extern template
 * instantiations declared below.
 */
template <typename OpT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename FinOpT,
          typename IdxT = int>
void pairwise_matrix_dispatch(OpT distance_op,
                              IdxT m,
                              IdxT n,
                              IdxT k,
                              const DataT* x,
                              const DataT* y,
                              const DataT* x_norm,
                              const DataT* y_norm,
                              OutT* out,
                              FinOpT fin_op,
                              cudaStream_t stream,
                              bool is_row_major) RAFT_EXPLICIT;
};  // namespace cuvs::distance::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
// Declares (extern template) one explicit instantiation of
// pairwise_matrix_dispatch; the matching definitions are compiled in
// src/distance/detail/pairwise_matrix/ (see the hierarchy comment below).
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch(                     \
  OpT, DataT, AccT, OutT, FinOpT, IdxT)                                                \
  extern template void cuvs::distance::detail::                                        \
    pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
      OpT<DataT, AccT, IdxT> distance_op,                                              \
      IdxT m,                                                                          \
      IdxT n,                                                                          \
      IdxT k,                                                                          \
      const DataT* x,                                                                  \
      const DataT* y,                                                                  \
      const DataT* x_norm,                                                             \
      const DataT* y_norm,                                                             \
      OutT* out,                                                                       \
      FinOpT fin_op,                                                                   \
      cudaStream_t stream,                                                             \
      bool is_row_major)
/*
* Hierarchy of instantiations:
*
* This file defines extern template instantiations of the distance kernels. The
* instantiation of the public API is handled in cuvs/distance/distance-ext.cuh.
*
* After adding an instance here, make sure to also add the instance there.
*/
// The following two instances are used in the RBF kernel object. Note the use of int64_t for the
// index type.
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_unexp_distance_op,
float,
float,
float,
cuvs::distance::kernels::detail::rbf_fin_op<float>,
int64_t);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_unexp_distance_op,
double,
double,
double,
cuvs::distance::kernels::detail::rbf_fin_op<double>,
int64_t);
// Rest of instances
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::canberra_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::canberra_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::correlation_distance_op,
float,
float,
float,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::correlation_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::cosine_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::cosine_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::hamming_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::hamming_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::hellinger_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::hellinger_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::jensen_shannon_distance_op,
float,
float,
float,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::jensen_shannon_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::kl_divergence_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::kl_divergence_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l1_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l1_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_exp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_exp_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_unexp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l2_unexp_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l_inf_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::l_inf_distance_op, double, double, double, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::lp_unexp_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::lp_unexp_distance_op,
double,
double,
double,
raft::identity_op,
int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::russel_rao_distance_op, float, float, float, raft::identity_op, int);
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
cuvs::distance::detail::ops::russel_rao_distance_op,
double,
double,
double,
raft::identity_op,
int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch-inl.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/* This file has two responsibilities:
*
* 1. Dispatch to the correct implementation of a kernel based on the
* architecture of the device on which the kernel will be launched. For
* instance, the cosine distance has a CUTLASS-based implementation that can
* be used on SM80+ and the normal implementation that is used on older
* architectures.
*
* 2. Provide concise function templates that can be instantiated in
* src/distance/detail/pairwise_matrix/. Previously,
* cuvs::distance::detail::distance was instantiated. The function
* necessarily required a large set of include files, which slowed down the
* build. The cuvs::distance::detail::pairwise_matrix_arch_dispatch functions
* do not require as large an include files set, which speeds up the build.
*/
#include <cuvs/distance/detail/distance_ops/cutlass.cuh> // ops::has_cutlass_op
#include <cuvs/distance/detail/pairwise_matrix/dispatch_sm60.cuh> // dispatch_sm60
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/util/arch.cuh> // raft::util::arch::SM_*
// NOTE: to minimize compile times, we do not include dispatch_sm80.cuh.
// Including dispatch_sm80.cuh can slow down compile times (due to CUTLASS).
// Therefore, it is the including file's responsibility to include the correct
// dispatch_smXX.cuh headers, as is done in cuvs/distance/detail/distance.cuh
// and src/distance/detail/pairwise_matrix/dispatch_*.cu.
namespace cuvs::distance::detail {
// This forward-declaration ensures that we do not need to include
// dispatch_sm80.cuh if we are not calling it in practice. This makes compiling
// all the non-CUTLASS based distance instantiations faster. For CUTLASS-based
// distances, dispatch_sm80.cuh has to be included by the file including this
// file.
// Forward declaration of the CUTLASS-based dispatcher defined in
// dispatch_sm80.cuh. Arguments: distance op, packed kernel parameters, the SM
// compatibility range to target, and the CUDA stream to launch on.
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
void pairwise_matrix_sm80_dispatch(OpT,
                                   pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>,
                                   SM_compat_t,
                                   cudaStream_t);
template <typename OpT,
          typename DataT,
          typename AccT,
          typename OutT,
          typename FinOpT,
          typename IdxT = int>
void pairwise_matrix_dispatch(OpT distance_op,
                              IdxT m,
                              IdxT n,
                              IdxT k,
                              const DataT* x,
                              const DataT* y,
                              const DataT* x_norm,
                              const DataT* y_norm,
                              OutT* out,
                              FinOpT fin_op,
                              cudaStream_t stream,
                              bool is_row_major)
{
  // Pack all arguments into the kernel parameter struct. Leading dimensions
  // follow from the layout; for column-major inputs x and y are flipped so
  // the kernels can treat the data uniformly.
  IdxT ldx    = is_row_major ? k : m;
  IdxT ldy    = is_row_major ? k : n;
  IdxT ld_out = is_row_major ? n : m;
  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params{
    m, n, k, ldx, ldy, ld_out, x, y, x_norm, y_norm, out, fin_op, is_row_major};
  if (!params.is_row_major) { params.flip_x_and_y(); }

  // Dispatch rule:
  // - ops with a CUTLASS implementation run it on SM_80 and newer;
  // - everything else (and older devices) runs the legacy SM60 kernels.
  namespace arch = raft::util::arch;

  if constexpr (ops::has_cutlass_op<OpT>()) {
    auto cutlass_range = arch::SM_range(arch::SM_80(), arch::SM_future());
    auto legacy_range  = arch::SM_range(arch::SM_min(), arch::SM_80());

    // Obtain the SM60 kernel wrapper first: its kernel pointer is used to
    // query the virtual architecture the kernel was actually compiled for,
    // which is the most reliable way to pick a code path (see
    // https://github.com/NVIDIA/cub/issues/545).
    auto legacy_wrapper = pairwise_matrix_sm60_get_wrapper(distance_op, params, legacy_range);
    void* kernel_ptr    = reinterpret_cast<void*>(legacy_wrapper.kernel_ptr);
    auto device_arch    = arch::kernel_virtual_arch(kernel_ptr);

    if (cutlass_range.contains(device_arch)) {
      // SM_80 or newer: take the CUTLASS path.
      pairwise_matrix_sm80_dispatch(distance_op, params, cutlass_range, stream);
    } else {
      // Older device: reuse the wrapper obtained above, avoiding a second
      // dispatch.
      legacy_wrapper.launch(distance_op, params, stream);
    }
  } else {
    // No CUTLASS implementation for this op: always use the legacy kernels.
    auto full_range = arch::SM_range(arch::SM_min(), arch::SM_future());
    pairwise_matrix_sm60_dispatch(distance_op, params, full_range, stream);
  }
}
}; // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/params.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cuvs::distance::detail {
template <typename IdxT, typename DataT, typename OutT, typename FinOpT>
struct pairwise_matrix_params {
  IdxT m;               // number of rows of x (rows of the output)
  IdxT n;               // number of rows of y (columns of the output)
  IdxT k;               // number of features (columns) of x and y
  IdxT ldx;             // leading dimension of x
  IdxT ldy;             // leading dimension of y
  IdxT ld_out;          // leading dimension of out
  const DataT* x;       // first input matrix
  const DataT* y;       // second input matrix
  const DataT* x_norm;  // precomputed row norms of x (consumed when the distance op uses norms)
  const DataT* y_norm;  // precomputed row norms of y (consumed when the distance op uses norms)
  OutT* out;            // output distance matrix
  FinOpT fin_op;        // final element-wise operation applied to each output value
  bool is_row_major;    // layout of x, y, and out

  /// @brief: Flips the x and y input and corresponding sizes
  void flip_x_and_y()
  {
    // Flip m, n; ldx, ldy; x, y; x_norm, y_norm. Written with explicit
    // temporaries instead of std::swap: this header includes nothing, so the
    // original std::swap calls compiled only when <utility> happened to be
    // pulled in transitively.
    IdxT tmp_size = m;
    m             = n;
    n             = tmp_size;
    tmp_size      = ldx;
    ldx           = ldy;
    ldy           = tmp_size;
    const DataT* tmp_ptr = x;
    x                    = y;
    y                    = tmp_ptr;
    tmp_ptr              = x_norm;
    x_norm               = y_norm;
    y_norm               = tmp_ptr;
  }
};
} // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/dispatch_sm80.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm> // std::min
#include <cuvs/distance/detail/pairwise_distance_cutlass_base.cuh> // cutlassDistanceKernel
#include <cuvs/distance/detail/pairwise_matrix/dispatch_layout.cuh> // dispatch_layout
namespace cuvs::distance::detail {
/**
 * @brief Launch the CUTLASS-based pairwise distance kernel (SM80+).
 *
 * Picks compile-time (row_major, vector length) variants matching the runtime
 * layout and alignment of the inputs, then launches cutlassDistanceKernel.
 *
 * @param distance_op     distance operation (must provide a CUTLASS op)
 * @param params          packed kernel parameters (sizes, strides, pointers)
 * @param sm_compat_range SM architectures this kernel may run on
 * @param stream          CUDA stream to launch on
 */
template <typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
void pairwise_matrix_sm80_dispatch(OpT distance_op,
                                   pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
                                   SM_compat_t sm_compat_range,
                                   cudaStream_t stream)
{
  // Runtime vector length supported by the data alignment.
  int vec_len = determine_vec_len(params);

  // f takes compile-time constants row_major and vec_len aligned and runs the
  // corresponding cutlass launch code.
  auto f = [&](auto row_major, auto vec_len_aligned) {
    // row_major and vec_len_aligned are std::integral_constants of type bool
    // and int respectively.

    // Clamp to at most 16 bytes per vectorized access; this prevents the
    // unsupported (double, vec_len=4) combination. Named vec_len_op so it
    // does not shadow the runtime vec_len captured above.
    constexpr int vec_len_op = std::min(vec_len_aligned(), static_cast<int>(16 / sizeof(DataT)));
    using AccT               = typename OpT::AccT;
    cutlassDistanceKernel<DataT, AccT, OutT, IdxT, vec_len_op, FinOpT, OpT, row_major()>(
      params.x,
      params.y,
      params.x_norm,
      params.y_norm,
      params.m,
      params.n,
      params.k,
      params.ldx,
      params.ldy,
      params.ld_out,
      params.out,
      params.fin_op,
      distance_op,
      stream);
  };

  // Dispatch_layout calls f with appropriate compile time constants based on
  // the runtime values of params.is_row_major and vec_len.
  dispatch_layout(params.is_row_major, vec_len, f);
}
}; // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/pairwise_matrix/kernel_sm60.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert> // assert
#include <cuvs/distance/detail/pairwise_distance_base.cuh> // PairwiseDistances
#include <cuvs/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <raft/core/operators.hpp> // raft::void_op
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace cuvs::distance::detail {
// The generic (non-CUTLASS) pairwise distance kernel. One instantiation
// exists per (Policy, row_major, SM range, op, types) combination; the body
// compiles to an immediate trap+return when the target virtual architecture
// is outside sm_compat_range, keeping dead instantiations tiny.
template <typename Policy,
          bool row_major,
          typename SM_compat_t,
          typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL
  pairwise_matrix_kernel(OpT distance_op, pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params)
{
  // Early exit to minimize the size of the kernel when it is not supposed to be compiled.
  constexpr SM_compat_t sm_compat_range{};
  if constexpr (!sm_compat_range.contains(raft::util::arch::SM_compute_arch())) {
    assert(false);
    return;
  }

  // Dynamic shared memory; its size is supplied at launch time (see the
  // smem_size field of pairwise_matrix_sm60_wrapper).
  extern __shared__ char smem[];

  // The epilog is already provided by distance_op. Do not provide additional
  // epilogs.
  auto epilog_op = raft::void_op();
  // No support for row_epilog_op.
  auto row_epilog_op = raft::void_op();

  // Always write output
  constexpr bool write_out = true;
  // Whether the op consumes the precomputed row norms in params.
  constexpr bool use_norms = distance_op.use_norms;
  PairwiseDistances<DataT,
                    OutT,
                    IdxT,
                    Policy,
                    decltype(distance_op),
                    decltype(epilog_op),
                    decltype(params.fin_op),
                    decltype(row_epilog_op),
                    row_major,
                    write_out>
    obj(params.x,
        params.y,
        params.m,
        params.n,
        params.k,
        params.ldx,
        params.ldy,
        params.ld_out,
        params.x_norm,
        params.y_norm,
        params.out,
        smem,
        distance_op,
        epilog_op,
        params.fin_op,
        row_epilog_op);
  obj.run();
}
// The type of a pointer to the pairwise matrix kernel. The following template
// arguments are type-erased (they are baked into the pointed-to kernel and do
// not appear in the pointer type):
//
// - The kernel policy
// - row_major
// - SM_compat_t
template <typename OpT, typename IdxT, typename DataT, typename OutT, typename FinOpT>
using pairwise_matrix_kernel_t = void (*)(OpT, pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>);
// A wrapper for the pairwise matrix kernel launch. Includes kernel launch
// parameters. Obtained via make_pairwise_matrix_sm60_wrapper below.
template <typename OpT, typename IdxT, typename DataT, typename OutT, typename FinOpT>
struct pairwise_matrix_sm60_wrapper {
  dim3 grid;      // grid dimensions to launch with
  dim3 block;     // block dimensions to launch with
  int smem_size;  // dynamic shared memory the kernel requires, in bytes
  pairwise_matrix_kernel_t<OpT, IdxT, DataT, OutT, FinOpT> kernel_ptr;  // type-erased kernel

  // Launch the wrapped kernel on `stream`, surfacing any launch error.
  void launch(OpT distance_op,
              pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
              cudaStream_t stream)
  {
    kernel_ptr<<<grid, block, smem_size, stream>>>(distance_op, params);
    RAFT_CUDA_TRY(cudaGetLastError());
  }
};
/** @brief: Create kernel launch wrapper for pairwise matrix kernel
*
* This can be used to type-erase the kernel execution policy, row_major, and SM
* compatibility range.
*
* @tparam Policy: Kernel execution policy
* @tparam row_major: Indicates whether input matrices are row major
* @tparam OpT: Type of distance operation
* @tparam IdxT: Index type
* @tparam DataT: Data type
* @tparam OutT: Output data type
* @tparam FinOpT: Final operation type
* @tparam SM_compat_t: Type of the SM architecture compatibility
*
* @param distance_op: Distance operation
* @param params: Parameters
* @param sm_compat_range: Which SM architectures to compile for.
*/
template <typename Policy,
          bool row_major,
          typename OpT,
          typename IdxT,
          typename DataT,
          typename OutT,
          typename FinOpT,
          typename SM_compat_t>
pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT> make_pairwise_matrix_sm60_wrapper(
  OpT distance_op,
  pairwise_matrix_params<IdxT, DataT, OutT, FinOpT> params,
  SM_compat_t sm_compat_range)
{
  dim3 block(Policy::Nthreads);
  // Use ::template to disambiguate (See:
  // https://en.cppreference.com/w/cpp/language/dependent_name)
  int smem_size = OpT::template shared_mem_size<Policy>();
  // Obtain function pointer to kernel. Taking the address here fixes the
  // (Policy, row_major, SM_compat_t) combination that the wrapper type-erases.
  auto kernel =
    pairwise_matrix_kernel<Policy, row_major, SM_compat_t, OpT, IdxT, DataT, OutT, FinOpT>;
  // Grid size is derived from the output extents and the kernel's occupancy.
  dim3 grid = launchConfigGenerator<Policy>(params.m, params.n, smem_size, kernel);
  return pairwise_matrix_sm60_wrapper<OpT, IdxT, DataT, OutT, FinOpT>{
    grid, block, smem_size, kernel};
}
}; // namespace cuvs::distance::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/kernels/kernel_matrices.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "gram_matrix.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <cuvs/distance/detail/kernels/rbf_fin_op.cuh>
#include <cuvs/distance/distance.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/sparse/linalg/norm.cuh>
#include <raft/util/cuda_utils.cuh>
namespace cuvs::distance::kernels::detail {
/** Epiloge function for polynomial kernel without padding.
* Calculates output = (gain*in + offset)^exponent
* @param inout device vector in column major format, size [len]
* @param len array length
* @param exponent
* @param gain
* @param offset
*/
template <typename math_t, typename exp_t>
RAFT_KERNEL polynomial_kernel_nopad(
  math_t* inout, size_t len, exp_t exponent, math_t gain, math_t offset)
{
  // Grid-stride loop over the flat buffer: every thread starts at its global
  // index and advances by the total thread count, so any launch size works.
  const size_t start  = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t stride = blockDim.x * gridDim.x;
  for (size_t idx = start; idx < len; idx += stride) {
    inout[idx] = pow(gain * inout[idx] + offset, exponent);
  }
}
/** Epiloge function for polynomial kernel with padding.
* Calculates output = (gain*input + offset)^exponent
* @param inout device vector in column major format, size [ld * cols]
* @param ld leading dimension of the inout buffer
* @param rows number of rows (rows <= ld)
* @param cols number of columns
* @param exponent
* @param gain
* @param offset
*/
template <typename math_t, typename exp_t>
RAFT_KERNEL polynomial_kernel(
  math_t* inout, int ld, int rows, int cols, exp_t exponent, math_t gain, math_t offset)
{
  // 2D grid-stride loops over the padded column-major buffer; elements in the
  // padding region (rows <= index < ld) are never touched.
  for (size_t tidy = threadIdx.y + blockIdx.y * blockDim.y; tidy < cols;
       tidy += blockDim.y * gridDim.y)
    for (size_t tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < rows;
         tidx += blockDim.x * gridDim.x) {
      inout[tidx + tidy * ld] = pow(gain * inout[tidx + tidy * ld] + offset, exponent);
    }
}
/** Epiloge function for tanh kernel without padding.
* Calculates output = tanh(gain*input + offset)
* @param inout device vector, size [len]
* @param len length of the input vector
* @param gain
* @param offset
*/
template <typename math_t>
RAFT_KERNEL tanh_kernel_nopad(math_t* inout, size_t len, math_t gain, math_t offset)
{
  // Grid-stride loop over the contiguous buffer: every thread starts at its
  // global index and advances by the total thread count.
  const size_t start  = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t stride = blockDim.x * gridDim.x;
  for (size_t idx = start; idx < len; idx += stride) {
    inout[idx] = tanh(gain * inout[idx] + offset);
  }
}
/** Epiloge function for tanh kernel without padding.
* Calculates output = tanh(gain*input + offset)
* @param inout device vector in column major format, size [ld * cols]
* @param ld leading dimension of the inout buffer
* @param rows number of rows (rows <= ld)
* @param cols number of columns
* @param gain
* @param offset
*/
template <typename math_t>
RAFT_KERNEL tanh_kernel(math_t* inout, int ld, int rows, int cols, math_t gain, math_t offset)
{
  // 2D grid-stride loops over the padded column-major buffer; elements in the
  // padding region (rows <= index < ld) are never touched.
  for (size_t tidy = threadIdx.y + blockIdx.y * blockDim.y; tidy < cols;
       tidy += blockDim.y * gridDim.y)
    for (size_t tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < rows;
         tidx += blockDim.x * gridDim.x) {
      inout[tidx + tidy * ld] = tanh(gain * inout[tidx + tidy * ld] + offset);
    }
}
/** Epiloge function for rbf kernel using expansion.
*
* Calculates output_ij = exp(-gain * (norm_x_i + norm_y_j - 2*input_ij));
*
* Intended usage
* - input is the product of two matrices X and Y input_ij = sum_k X_ik * Y_jk
* - norm_x_i = l2_norm(x_i), where x_i is the i-th row of matrix X
* - norm_y_j = l2_norm(y_j), where y_j is the j-th row of matrix Y
*
* @param inout device vector in column major format, size [ld * cols]
* @param ld leading dimension of the inout buffer
* @param rows number of rows (rows <= ld)
* @param cols number of columns
* @param norm_x l2-norm of X's rows
* @param norm_y l2-norm of Y's rows
* @param gain
*/
template <typename math_t>
RAFT_KERNEL rbf_kernel_expanded(
  math_t* inout, int ld, int rows, int cols, math_t* norm_x, math_t* norm_y, math_t gain)
{
  // 2D grid-stride loops: tidy walks columns, tidx walks rows.
  for (size_t tidy = threadIdx.y + blockIdx.y * blockDim.y; tidy < cols;
       tidy += blockDim.y * gridDim.y) {
    // Hoist the column norm: it is reused for every row this thread handles.
    math_t norm_y_val = norm_y[tidy];
    for (size_t tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < rows;
         tidx += blockDim.x * gridDim.x) {
      // ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * <x_i, y_j>
      inout[tidx + tidy * ld] =
        exp(-1.0 * gain * (norm_x[tidx] + norm_y_val - inout[tidx + tidy * ld] * 2));
    }
  }
}
namespace {
// Build the (grid, block) launch configuration for the 2D element-wise
// epilogue kernels in this file. Blocks are 32x4 threads; the grid's y extent
// is clamped to 65535 (the hardware limit for gridDim.y). The kernels above
// use grid-stride loops, so a grid that does not fully cover the problem
// (because of the clamp, or because each y-block covers only 4 rows while the
// grid is sized with ceildiv(n2, 32)) still produces correct results.
// NOTE(review): an unnamed namespace in a header gives every translation unit
// its own copy of this function; `inline` may be intended instead — confirm
// against project convention.
std::tuple<dim3, dim3> generateLaunchConfig2dElementwiseOp(int n1, int n2)
{
  dim3 block_shape = dim3(32, 4);
  const int num_blocks_x = raft::ceildiv(n1, 32);
  const int num_blocks_y = std::min(raft::ceildiv(n2, 32), (1 << 16) - 1);
  dim3 grid_shape = dim3(num_blocks_x, num_blocks_y);
  return std::make_tuple(grid_shape, block_shape);
}
}  // namespace
/**
* Create a kernel matrix using polynomial kernel function.
*/
template <typename math_t, typename exp_t>
class PolynomialKernel : public GramMatrixBase<math_t> {
  exp_t exponent;  // kernel exponent
  math_t gain;     // multiplicative gain applied to the dot product
  math_t offset;   // additive offset applied before exponentiation

  /** Apply the polynomial epilogue out = (gain * out + offset)^exponent
   *  element-wise, choosing the flat kernel for contiguous buffers and the
   *  padding-aware 2D kernel otherwise. */
  void applyKernel(
    math_t* inout, int ld, int rows, int cols, bool is_row_major, cudaStream_t stream)
  {
    const int n_minor = is_row_major ? cols : rows;
    if (ld == n_minor) {
      // No padding: treat the matrix as a flat vector.
      polynomial_kernel_nopad<<<raft::ceildiv<size_t>((size_t)rows * cols, 128), 128, 0, stream>>>(
        inout, rows * cols, exponent, gain, offset);
    } else {
      // Padded leading dimension: use the 2D kernel that skips the padding.
      int n1 = is_row_major ? cols : rows;
      int n2 = is_row_major ? rows : cols;
      auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
      polynomial_kernel<<<grid_shape, block_shape, 0, stream>>>(
        inout, ld, n1, n2, exponent, gain, offset);
    }
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

  /** Shared implementation of the evaluate() overloads below: compute the
   *  linear Gram matrix into `out`, then apply the polynomial epilogue
   *  in-place. (Previously this body was duplicated in all three overloads.) */
  template <typename XViewT, typename YViewT>
  void evaluate_impl(raft::resources const& handle,
                     XViewT x1,
                     YViewT x2,
                     dense_output_matrix_view_t<math_t> out)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                is_row_major,
                resource::get_cuda_stream(handle));
  }

 public:
  /**
   * Constructs a polynomial kernel object.
   * It evaluates the kernel matrix using the following formula:
   * K_ij = (gain*<x1_i, x2_k> + offset)^exponent
   *
   * @tparam math_t floating point type
   * @tparam exp_t type of exponent
   * @param exponent
   * @param gain
   * @param offset
   */
  PolynomialKernel(exp_t exponent, math_t gain, math_t offset)
    : GramMatrixBase<math_t>(), exponent(exponent), gain(gain), offset(offset)
  {
  }

  /** Legacy constructor taking a cuBLAS handle; prefer the handle-free ctor. */
  [[deprecated]] PolynomialKernel(exp_t exponent, math_t gain, math_t offset, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), exponent(exponent), gain(gain), offset(offset)
  {
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                dense_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    evaluate_impl(handle, x1, x2, out);
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    evaluate_impl(handle, x1, x2, out);
  }

  /** Evaluate kernel matrix using polynomial kernel.
   *
   * output[i,k] = (gain*<x1_i, x2_k> + offset)^exponent,
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and < , > denotes dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                csr_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    evaluate_impl(handle, x1, x2, out);
  }

  /** Evaluate the Gram matrix using the legacy interface.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   * major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] void evaluate(const math_t* x1,
                               int n1,
                               int n_cols,
                               const math_t* x2,
                               int n2,
                               math_t* out,
                               bool is_row_major,
                               cudaStream_t stream,
                               int ld1,
                               int ld2,
                               int ld_out)
  {
    ASSERT(GramMatrixBase<math_t>::legacy_interface,
           "Legacy interface can only be used with legacy ctor.");
    GramMatrixBase<math_t>::linear(
      x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
    applyKernel(out, ld_out, n1, n2, is_row_major, stream);
  }
};
/**
* Create a kernel matrix using tanh kernel function.
*/
template <typename math_t>
class TanhKernel : public GramMatrixBase<math_t> {
math_t gain, offset;
  // Apply the tanh epilogue out = tanh(gain * out + offset) element-wise,
  // using the flat 1D kernel when the buffer is contiguous (ld equals the
  // minor dimension) and the padding-aware 2D kernel otherwise.
  void applyKernel(
    math_t* inout, int ld, int rows, int cols, bool is_row_major, cudaStream_t stream)
  {
    const int n_minor = is_row_major ? cols : rows;
    if (ld == n_minor) {
      tanh_kernel_nopad<<<raft::ceildiv<size_t>((size_t)rows * cols, 128), 128, 0, stream>>>(
        inout, rows * cols, gain, offset);
    } else {
      // n1 is the minor extent, n2 the major extent for the 2D kernel.
      int n1 = is_row_major ? cols : rows;
      int n2 = is_row_major ? rows : cols;
      auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
      tanh_kernel<<<grid_shape, block_shape, 0, stream>>>(inout, ld, n1, n2, gain, offset);
    }
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
public:
/**
* Constructs a tanh kernel object.
* It evaluates the kernel matrix using the following formula:
* K_ij = tanh(gain*<x1_i, x2_k> + offset)
*
* @tparam math_t floating point type
* @param gain
* @param offset
*/
  TanhKernel(math_t gain, math_t offset) : GramMatrixBase<math_t>(), gain(gain), offset(offset) {}

  // Legacy constructor taking a cuBLAS handle; prefer the handle-free ctor.
  [[deprecated]] TanhKernel(math_t gain, math_t offset, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), gain(gain), offset(offset)
  {
  }
/** Evaluate kernel matrix using tanh kernel.
*
* output_[i + k*n1] = (gain*<x1_i, x2_k> + offset)^exponent,
* where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
* in the x2 set, and < , > denotes dot product.
*
* @param [in] handle raft handle
* @param [in] x1 dense device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
void evaluate(raft::resources const& handle,
dense_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
int ld_out = is_row_major ? out.stride(0) : out.stride(1);
GramMatrixBase<math_t>::linear(handle, x1, x2, out);
applyKernel(out.data_handle(),
ld_out,
out.extent(0),
out.extent(1),
is_row_major,
resource::get_cuda_stream(handle));
}
/** Evaluate kernel matrix using tanh kernel.
*
* output_[i + k*n1] = (gain*<x1_i, x2_k> + offset)^exponent,
* where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
* in the x2 set, and < , > denotes dot product.
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 dense device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
void evaluate(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
dense_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
int ld_out = is_row_major ? out.stride(0) : out.stride(1);
GramMatrixBase<math_t>::linear(handle, x1, x2, out);
applyKernel(out.data_handle(),
ld_out,
out.extent(0),
out.extent(1),
is_row_major,
resource::get_cuda_stream(handle));
}
/** Evaluate kernel matrix using tanh kernel.
*
* output_[i + k*n1] = (gain*<x1_i, x2_k> + offset)^exponent,
* where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
* in the x2 set, and < , > denotes dot product.
*
* @param [in] handle raft handle
* @param [in] x1 csr device matrix view, size [n1*n_cols]
* @param [in] x2 csr device matrix view, size [n2*n_cols]
* @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
* @param norm_x1 unused.
* @param norm_x2 unused.
*/
void evaluate(raft::resources const& handle,
csr_input_matrix_view_t<math_t> x1,
csr_input_matrix_view_t<math_t> x2,
dense_output_matrix_view_t<math_t> out,
math_t* norm_x1,
math_t* norm_x2)
{
bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
int ld_out = is_row_major ? out.stride(0) : out.stride(1);
GramMatrixBase<math_t>::linear(handle, x1, x2, out);
applyKernel(out.data_handle(),
ld_out,
out.extent(0),
out.extent(1),
is_row_major,
resource::get_cuda_stream(handle));
}
/** Evaluate the Gram matrix using the legacy interface.
*
* @param [in] x1 device array of vectors, size [n1*n_cols]
* @param [in] n1 number vectors in x1
* @param [in] n_cols number of columns (features) in x1 and x2
* @param [in] x2 device array of vectors, size [n2*n_cols]
* @param [in] n2 number vectors in x2
* @param [out] out device buffer to store the Gram matrix, size [n1*n2]
* @param [in] is_row_major whether the input and output matrices are in row
* major format
* @param [in] stream cuda stream
* @param ld1 leading dimension of x1 (usually it is n1)
* @param ld2 leading dimension of x2 (usually it is n2)
* @param ld_out leading dimension of out (usually it is n1)
*/
[[deprecated]] void evaluate(const math_t* x1,
int n1,
int n_cols,
const math_t* x2,
int n2,
math_t* out,
bool is_row_major,
cudaStream_t stream,
int ld1,
int ld2,
int ld_out)
{
ASSERT(GramMatrixBase<math_t>::legacy_interface,
"Legacy interface can only be used with legacy ctor.");
GramMatrixBase<math_t>::linear(
x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
applyKernel(out, ld_out, n1, n2, is_row_major, stream);
}
};
/**
* Create a kernel matrix using RBF kernel function.
*/
template <typename math_t>
class RBFKernel : public GramMatrixBase<math_t> {
  math_t gain;

  /** Transform a matrix of pairwise dot products into RBF kernel values,
   * in place.
   *
   * The device kernel combines the dot products with the per-row L2 norms of
   * x1 and x2 to form the expanded squared euclidean distance, producing
   * K_ij = exp(-gain*|x1_i - x2_k|^2) (the class contract).
   *
   * @param inout device buffer holding <x1_i, x2_k>; overwritten with kernel
   *   values
   * @param ld leading dimension of inout
   * @param rows number of rows of inout (n1)
   * @param cols number of columns of inout (n2)
   * @param norm_x1 device array with the norms of x1's rows, size [rows]
   * @param norm_x2 device array with the norms of x2's rows, size [cols]
   * @param is_row_major storage order of inout
   * @param stream cuda stream for the kernel launch
   */
  void applyKernel(math_t* inout,
                   int ld,
                   int rows,
                   int cols,
                   math_t* norm_x1,
                   math_t* norm_x2,
                   bool is_row_major,
                   cudaStream_t stream)
  {
    // The elementwise device kernel addresses the matrix in storage order:
    // n1 is the contiguous (minor) extent, so the norm arrays have to be
    // swapped along with the extents for row-major storage.
    int n1          = is_row_major ? cols : rows;
    int n2          = is_row_major ? rows : cols;
    math_t* norm_n1 = is_row_major ? norm_x2 : norm_x1;
    math_t* norm_n2 = is_row_major ? norm_x1 : norm_x2;
    auto [grid_shape, block_shape] = generateLaunchConfig2dElementwiseOp(n1, n2);
    rbf_kernel_expanded<<<grid_shape, block_shape, 0, stream>>>(
      inout, ld, n1, n2, norm_n1, norm_n2, gain);
    // Surface launch-configuration errors immediately; this was missing here
    // while present in the analogous TanhKernel::applyKernel.
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

 public:
  /**
   * Constructs a RBF kernel object.
   * It evaluates the kernel matrix using the following formula:
   * K_ij = exp(-gain*|x1_i- x2_k|^2)
   *
   * @tparam math_t floating point type
   * @param gain
   */
  RBFKernel(math_t gain) : GramMatrixBase<math_t>(), gain(gain) {}

  [[deprecated]] RBFKernel(math_t gain, cublasHandle_t handle)
    : GramMatrixBase<math_t>(handle), gain(gain)
  {
  }

  /** Compute the L2 norm of every row of a dense matrix into `target`.
   *
   * Only supports unpadded matrices (ld equal to the minor extent).
   *
   * @param [in] handle raft handle
   * @param [in] matrix dense device matrix view
   * @param [out] target device array, one norm per row of `matrix`
   */
  void matrixRowNormL2(raft::resources const& handle,
                       dense_input_matrix_view_t<math_t> matrix,
                       math_t* target)
  {
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(matrix);
    int minor         = is_row_major ? matrix.extent(1) : matrix.extent(0);
    int ld            = is_row_major ? matrix.stride(0) : matrix.stride(1);
    ASSERT(ld == minor, "RBF Kernel lazy rowNorm compute does not support ld parameter");
    raft::linalg::rowNorm(target,
                          matrix.data_handle(),
                          matrix.extent(1),
                          matrix.extent(0),
                          raft::linalg::NormType::L2Norm,
                          is_row_major,
                          resource::get_cuda_stream(handle));
  }

  /** Compute the L2 norm of every row of a CSR matrix into `target`.
   *
   * @param [in] handle raft handle
   * @param [in] matrix csr device matrix view
   * @param [out] target device array, one norm per row of `matrix`
   */
  void matrixRowNormL2(raft::resources const& handle,
                       csr_input_matrix_view_t<math_t> matrix,
                       math_t* target)
  {
    auto matrix_structure = matrix.structure_view();
    raft::sparse::linalg::rowNormCsr(handle,
                                     matrix_structure.get_indptr().data(),
                                     matrix.get_elements().data(),
                                     matrix_structure.get_nnz(),
                                     matrix_structure.get_n_rows(),
                                     target,
                                     raft::linalg::NormType::L2Norm);
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                dense_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute the norms if the caller did not provide them. `reserve`
    // allocates the backing memory; the raw pointer is used directly, so the
    // vector's size() staying 0 is fine here.
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.extent(0), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.extent(0), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }

    // Compute the dot products, then expand them into L2 distances and
    // exponentiate in place.
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                dense_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute missing norms (see the dense/dense overload for the
    // reserve()/data() rationale).
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.structure_view().get_n_rows(), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.extent(0), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }

    // compute L2expanded
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate kernel matrix using RBF kernel.
   *
   * output_[i + k*n1] = exp(-gain*|x1_i - x2_k|^2),
   * where x1_i is the i-th vector from the x1 set, and x2_k is k-th vector
   * in the x2 set, and | | euclidean distance.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void evaluate(raft::resources const& handle,
                csr_input_matrix_view_t<math_t> x1,
                csr_input_matrix_view_t<math_t> x2,
                dense_output_matrix_view_t<math_t> out,
                math_t* norm_x1,
                math_t* norm_x2)
  {
    cudaStream_t stream = resource::get_cuda_stream(handle);
    // Lazily compute missing norms (see the dense/dense overload for the
    // reserve()/data() rationale).
    rmm::device_uvector<math_t> tmp_norm_x1(0, stream);
    rmm::device_uvector<math_t> tmp_norm_x2(0, stream);
    if (norm_x1 == nullptr) {
      tmp_norm_x1.reserve(x1.structure_view().get_n_rows(), stream);
      norm_x1 = tmp_norm_x1.data();
      matrixRowNormL2(handle, x1, norm_x1);
    }
    if (norm_x2 == nullptr) {
      tmp_norm_x2.reserve(x2.structure_view().get_n_rows(), stream);
      norm_x2 = tmp_norm_x2.data();
      matrixRowNormL2(handle, x2, norm_x2);
    }

    // compute L2expanded
    bool is_row_major = GramMatrixBase<math_t>::get_is_row_major(out);
    int ld_out        = is_row_major ? out.stride(0) : out.stride(1);
    GramMatrixBase<math_t>::linear(handle, x1, x2, out);
    applyKernel(out.data_handle(),
                ld_out,
                out.extent(0),
                out.extent(1),
                norm_x1,
                norm_x2,
                is_row_major,
                resource::get_cuda_stream(handle));
  }

  /** Evaluate the Gram matrix using the legacy interface.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] void evaluate(const math_t* x1,
                               int n1,
                               int n_cols,
                               const math_t* x2,
                               int n2,
                               math_t* out,
                               bool is_row_major,
                               cudaStream_t stream,
                               int ld1,
                               int ld2,
                               int ld_out)
  {
    ASSERT(GramMatrixBase<math_t>::legacy_interface,
           "Legacy interface can only be used with legacy ctor.");
    // The fused distance path below only supports unpadded inputs/output.
    int minor1    = is_row_major ? n_cols : n1;
    int minor2    = is_row_major ? n_cols : n2;
    int minor_out = is_row_major ? n2 : n1;
    ASSERT(ld1 == minor1, "RBF Kernel distance does not support ld1 parameter");
    ASSERT(ld2 == minor2, "RBF Kernel distance does not support ld2 parameter");
    ASSERT(ld_out == minor_out, "RBF Kernel distance does not support ld_out parameter");

    // Capture gain by value for the device-side final op.
    math_t gain = this->gain;
    using index_t = int64_t;
    rbf_fin_op fin_op{gain};

    raft::resources handle;
    resource::set_cuda_stream(handle, stream);

    // Compute the unexpanded L2 distances and apply exp(-gain * d) as the
    // final op in a single fused pass.
    cuvs::distance::distance<cuvs::distance::DistanceType::L2Unexpanded,
                             math_t,
                             math_t,
                             math_t,
                             decltype(fin_op),
                             index_t>(handle,
                                      const_cast<math_t*>(x1),
                                      const_cast<math_t*>(x2),
                                      out,
                                      n1,
                                      n2,
                                      n_cols,
                                      NULL,
                                      0,
                                      fin_op,
                                      is_row_major);
  }
};
}; // end namespace cuvs::distance::kernels::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/kernels/rbf_fin_op.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
* This file defines rbf_fin_op, which is used in GramMatrixBase.
*
* This struct has been moved to a separate file, so that it is cheap to include
* in distance/distance-ext.cuh, where an instance of cuvs::distance::distance
* with the rbf_fin_op is instantiated.
*
*/
#include <raft/core/math.hpp> // raft::exp
#include <raft/util/cuda_dev_essentials.cuh> // HD
namespace cuvs::distance::kernels::detail {
/** @brief: Final op for Gram matrix with RBF kernel.
*
* Calculates output = e^(-gain * in)
*
*/
template <typename OutT>
struct rbf_fin_op {
  OutT gain;  // RBF gain factor applied to the incoming distance

  explicit HD rbf_fin_op(OutT gain_) noexcept : gain(gain_) {}

  // Map a (squared) distance value d_val to exp(-gain * d_val); trailing
  // arguments supplied by the distance epilogue are accepted and ignored.
  template <typename... Args>
  HDI OutT operator()(OutT d_val, Args... unused_args)
  {
    const OutT scaled = -gain * d_val;
    return raft::exp(scaled);
  }
};  // struct rbf_fin_op
} // namespace cuvs::distance::kernels::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/kernels/kernel_factory.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "gram_matrix.cuh"
#include "kernel_matrices.cuh"
#include <cuvs/distance/distance_types.hpp>
#include <raft/util/cudart_utils.hpp>
namespace cuvs::distance::kernels::detail {
template <typename math_t>
class KernelFactory {
 public:
  /** Instantiate the Gram-matrix evaluator matching `params.kernel`.
   *
   * Ownership of the returned object is transferred to the caller.
   *
   * @param params kernel type and its (untemplated) hyper-parameters
   * @return heap-allocated kernel evaluator
   * @throws raft::exception for unsupported kernel types
   */
  static GramMatrixBase<math_t>* create(KernelParams params)
  {
    // KernelParams is not templated, we convert the parameters to math_t here:
    const math_t coef0 = params.coef0;
    const math_t gamma = params.gamma;
    switch (params.kernel) {
      case LINEAR: return new GramMatrixBase<math_t>();
      case POLYNOMIAL: return new PolynomialKernel<math_t, int>(params.degree, gamma, coef0);
      case TANH: return new TanhKernel<math_t>(gamma, coef0);
      case RBF: return new RBFKernel<math_t>(gamma);
      default: throw raft::exception("Kernel not implemented");
    }
  }

  /** Legacy variant of create() that wires a caller-owned cublas handle into
   * the evaluator; only usable with the deprecated evaluate() overloads.
   */
  [[deprecated]] static GramMatrixBase<math_t>* create(KernelParams params, cublasHandle_t handle)
  {
    // KernelParams is not templated, we convert the parameters to math_t here:
    const math_t coef0 = params.coef0;
    const math_t gamma = params.gamma;
    switch (params.kernel) {
      case LINEAR: return new GramMatrixBase<math_t>(handle);
      case POLYNOMIAL: return new PolynomialKernel<math_t, int>(params.degree, gamma, coef0, handle);
      case TANH: return new TanhKernel<math_t>(gamma, coef0, handle);
      case RBF: return new RBFKernel<math_t>(gamma, handle);
      default: throw raft::exception("Kernel not implemented");
    }
  }
};
}; // end namespace cuvs::distance::kernels::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/distance/detail/kernels/gram_matrix.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/device_csr_matrix.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
// #include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/sparse/distance/distance.cuh>
#include <raft/sparse/linalg/spmm.cuh>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/gemm.cuh>
namespace cuvs::distance::kernels::detail {
template <typename math_t>
using dense_input_matrix_view_t = raft::device_matrix_view<const math_t, int, layout_stride>;
template <typename math_t>
using dense_output_matrix_view_t = raft::device_matrix_view<math_t, int, layout_stride>;
template <typename math_t>
using csr_input_matrix_view_t = raft::device_csr_matrix_view<const math_t, int, int, int>;
/**
* Base class for general Gram matrices
 * A Gram matrix is the Hermitian matrix of inner products G_ik = <x_i, x_k>
* Here, the inner product is evaluated for all elements from vectors sets X1,
* and X2.
*
* To be more precise, on exit the output buffer will store:
* - if is_row_major == true: out[j+k*n1] = <x1_j, x2_k>,
* - if is_row_major == false: out[j*n2 + k] = <x1_j, x2_k>,
* where x1_j is the j-th vector from the x1 set and x2_k is the k-th vector
* from the x2 set.
*/
template <typename math_t>
class GramMatrixBase {
 protected:
  // Only meaningful when constructed via the deprecated cublas ctor;
  // the default ctor leaves it uninitialized, and legacy_interface == false
  // guards every code path that would use it.
  cublasHandle_t cublas_handle;
  bool legacy_interface;

 public:
  GramMatrixBase() : legacy_interface(false){};
  [[deprecated]] GramMatrixBase(cublasHandle_t cublas_handle)
    : cublas_handle(cublas_handle), legacy_interface(true){};

  virtual ~GramMatrixBase(){};

  /** Convenience function to evaluate the Gram matrix for two vector sets.
   *  Vector sets are provided in Matrix format
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void operator()(raft::resources const& handle,
                  dense_input_matrix_view_t<math_t> x1,
                  dense_input_matrix_view_t<math_t> x2,
                  dense_output_matrix_view_t<math_t> out,
                  math_t* norm_x1 = nullptr,
                  math_t* norm_x2 = nullptr)
  {
    evaluate(handle, x1, x2, out, norm_x1, norm_x2);
  }

  /** Convenience function to evaluate the Gram matrix for two vector sets.
   *  Vector sets are provided in Matrix format
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void operator()(raft::resources const& handle,
                  csr_input_matrix_view_t<math_t> x1,
                  dense_input_matrix_view_t<math_t> x2,
                  dense_output_matrix_view_t<math_t> out,
                  math_t* norm_x1 = nullptr,
                  math_t* norm_x2 = nullptr)
  {
    evaluate(handle, x1, x2, out, norm_x1, norm_x2);
  }

  /** Convenience function to evaluate the Gram matrix for two vector sets.
   *  Vector sets are provided in Matrix format
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 optional L2-norm of x1's rows for computation within RBF.
   * @param norm_x2 optional L2-norm of x2's rows for computation within RBF.
   */
  void operator()(raft::resources const& handle,
                  csr_input_matrix_view_t<math_t> x1,
                  csr_input_matrix_view_t<math_t> x2,
                  dense_output_matrix_view_t<math_t> out,
                  math_t* norm_x1 = nullptr,
                  math_t* norm_x2 = nullptr)
  {
    evaluate(handle, x1, x2, out, norm_x1, norm_x2);
  }

  // unfortunately, 'evaluate' cannot be templatized as it needs to be virtual

  /** Evaluate the Gram matrix for two vector sets using simple dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  virtual void evaluate(raft::resources const& handle,
                        dense_input_matrix_view_t<math_t> x1,
                        dense_input_matrix_view_t<math_t> x2,
                        dense_output_matrix_view_t<math_t> out,
                        math_t* norm_x1,
                        math_t* norm_x2)
  {
    linear(handle, x1, x2, out);
  }

  /** Evaluate the Gram matrix for two vector sets using simple dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  virtual void evaluate(raft::resources const& handle,
                        csr_input_matrix_view_t<math_t> x1,
                        dense_input_matrix_view_t<math_t> x2,
                        dense_output_matrix_view_t<math_t> out,
                        math_t* norm_x1,
                        math_t* norm_x2)
  {
    linear(handle, x1, x2, out);
  }

  /** Evaluate the Gram matrix for two vector sets using simple dot product.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   * @param norm_x1 unused.
   * @param norm_x2 unused.
   */
  virtual void evaluate(raft::resources const& handle,
                        csr_input_matrix_view_t<math_t> x1,
                        csr_input_matrix_view_t<math_t> x2,
                        dense_output_matrix_view_t<math_t> out,
                        math_t* norm_x1,
                        math_t* norm_x2)
  {
    linear(handle, x1, x2, out);
  }

  /** Evaluate the Gram matrix for two vector sets using simple dot product.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1 (usually it is n1)
   * @param ld2 leading dimension of x2 (usually it is n2)
   * @param ld_out leading dimension of out (usually it is n1)
   */
  [[deprecated]] virtual void evaluate(const math_t* x1,
                                       int n1,
                                       int n_cols,
                                       const math_t* x2,
                                       int n2,
                                       math_t* out,
                                       bool is_row_major,
                                       cudaStream_t stream,
                                       int ld1,
                                       int ld2,
                                       int ld_out)
  {
    linear(x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
  }

  /** Convenience function to evaluate the Gram matrix for two vector sets.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1
   * @param ld2 leading dimension of x2
   * @param ld_out leading dimension of out
   */
  [[deprecated]] void operator()(const math_t* x1,
                                 int n1,
                                 int n_cols,
                                 const math_t* x2,
                                 int n2,
                                 math_t* out,
                                 bool is_row_major,
                                 cudaStream_t stream,
                                 int ld1    = 0,
                                 int ld2    = 0,
                                 int ld_out = 0)
  {
    ASSERT(legacy_interface, "Legacy interface can only be used with legacy ctor.");
    // Default the leading dimensions to tightly-packed storage when the
    // caller passes 0 (or a non-positive value).
    if (ld1 <= 0) { ld1 = is_row_major ? n_cols : n1; }
    if (ld2 <= 0) { ld2 = is_row_major ? n_cols : n2; }
    if (ld_out <= 0) { ld_out = is_row_major ? n2 : n1; }
    evaluate(x1, n1, n_cols, x2, n2, out, is_row_major, stream, ld1, ld2, ld_out);
  }

 protected:
  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out = x1 * x2
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] x1 device array of vectors, size [n1*n_cols]
   * @param [in] n1 number vectors in x1
   * @param [in] n_cols number of columns (features) in x1 and x2
   * @param [in] x2 device array of vectors, size [n2*n_cols]
   * @param [in] n2 number vectors in x2
   * @param [out] out device buffer to store the Gram matrix, size [n1*n2]
   * @param [in] is_row_major whether the input and output matrices are in row
   *        major format
   * @param [in] stream cuda stream
   * @param ld1 leading dimension of x1
   * @param ld2 leading dimension of x2
   * @param ld_out leading dimension of out
   */
  [[deprecated]] void linear(const math_t* x1,
                             int n1,
                             int n_cols,
                             const math_t* x2,
                             int n2,
                             math_t* out,
                             bool is_row_major,
                             cudaStream_t stream,
                             int ld1,
                             int ld2,
                             int ld_out)
  {
    math_t alpha = 1.0;
    math_t beta  = 0.0;
    // cuBLAS gemm is column-major; for row-major data the operand order and
    // transpose flags are swapped so the column-major product writes `out`
    // in the requested layout.
    if (is_row_major) {
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                       CUBLAS_OP_T,
                                                       CUBLAS_OP_N,
                                                       n2,
                                                       n1,
                                                       n_cols,
                                                       &alpha,
                                                       x2,
                                                       ld2,
                                                       x1,
                                                       ld1,
                                                       &beta,
                                                       out,
                                                       ld_out,
                                                       stream));
    } else {
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_T,
                                                       n1,
                                                       n2,
                                                       n_cols,
                                                       &alpha,
                                                       x1,
                                                       ld1,
                                                       x2,
                                                       ld2,
                                                       &beta,
                                                       out,
                                                       ld_out,
                                                       stream));
    }
  }

 protected:
  // Layout probes: a matrix is row major iff its column stride is 1, and
  // col major iff its row stride is 1 (a vector/1-column matrix can be both).
  bool get_is_row_major(dense_output_matrix_view_t<math_t> matrix)
  {
    return (matrix.stride(1) == 1);
  }

  bool get_is_row_major(dense_input_matrix_view_t<math_t> matrix)
  {
    return (matrix.stride(1) == 1);
  }

  bool get_is_col_major(dense_output_matrix_view_t<math_t> matrix)
  {
    return (matrix.stride(0) == 1);
  }

  bool get_is_col_major(dense_input_matrix_view_t<math_t> matrix)
  {
    return (matrix.stride(0) == 1);
  }

  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out = x1 * x2
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 dense device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              dense_input_matrix_view_t<math_t> x1,
              dense_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check is_row_major consistency
    bool is_row_major = get_is_row_major(x1) && get_is_row_major(x2) && get_is_row_major(out);
    bool is_col_major = get_is_col_major(x1) && get_is_col_major(x2) && get_is_col_major(out);
    ASSERT(is_row_major || is_col_major,
           "GramMatrix leading dimensions for x1, x2 and out do not match");

    // check dimensions
    int n1     = out.extent(0);
    int n2     = out.extent(1);
    int n_cols = x1.extent(1);
    ASSERT(x1.extent(0) == n1, "GramMatrix input matrix dimensions for x1 and out do not match");
    ASSERT(x2.extent(0) == n2, "GramMatrix input matrix dimensions for x2 and out do not match");
    ASSERT(x2.extent(1) == n_cols, "GramMatrix input matrix dimensions for x1 and x2 do not match");

    // extract major stride
    int ld1    = is_row_major ? x1.stride(0) : x1.stride(1);
    int ld2    = is_row_major ? x2.stride(0) : x2.stride(1);
    int ld_out = is_row_major ? out.stride(0) : out.stride(1);

    math_t alpha = 1.0;
    math_t beta  = 0.0;
    // gemm is column-major: for row-major data swap the operands and flip
    // the transpose flags so `out` is written in its own layout.
    if (is_row_major) {
      // #TODO: Use mdspan-based API when stride-capable
      // https://github.com/rapidsai/raft/issues/875
      raft::linalg::gemm(handle,
                         true,
                         false,
                         n2,
                         n1,
                         n_cols,
                         &alpha,
                         x2.data_handle(),
                         ld2,
                         x1.data_handle(),
                         ld1,
                         &beta,
                         out.data_handle(),
                         ld_out,
                         resource::get_cuda_stream(handle));
    } else {
      // #TODO: Use mdspan-based API when stride-capable
      // https://github.com/rapidsai/raft/issues/875
      raft::linalg::gemm(handle,
                         false,
                         true,
                         n1,
                         n2,
                         n_cols,
                         &alpha,
                         x1.data_handle(),
                         ld1,
                         x2.data_handle(),
                         ld2,
                         &beta,
                         out.data_handle(),
                         ld_out,
                         resource::get_cuda_stream(handle));
    }
  }

  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out = x1 * x2
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 dense device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              csr_input_matrix_view_t<math_t> x1,
              dense_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check is_row_major consistency
    bool is_row_major = get_is_row_major(x2) && get_is_row_major(out);
    bool is_col_major = get_is_col_major(x2) && get_is_col_major(out);
    ASSERT(is_row_major || is_col_major,
           "GramMatrix leading dimensions for x2 and out do not match");

    // check dimensions
    auto x1_structure = x1.structure_view();
    ASSERT(x1_structure.get_n_rows() == out.extent(0),
           "GramMatrix input matrix dimensions for x1 and out do not match");
    ASSERT(x2.extent(0) == out.extent(1),
           "GramMatrix input matrix dimensions for x2 and out do not match");
    ASSERT(x2.extent(1) == x1_structure.get_n_cols(),
           "GramMatrix input matrix dimensions for x1 and x2 do not match");

    math_t alpha = 1.0;
    math_t beta  = 0.0;

    // out = x1 * x2^T  (transpose_a=false, transpose_b=true)
    raft::sparse::linalg::spmm(handle, false, true, &alpha, x1, x2, &beta, out);
  }

  /** Calculates the Gram matrix using simple dot product between vector sets.
   *
   * out = x1 * x2
   *
   * Can be used as a building block for more complex kernel functions.
   *
   * @param [in] handle raft handle
   * @param [in] x1 csr device matrix view, size [n1*n_cols]
   * @param [in] x2 csr device matrix view, size [n2*n_cols]
   * @param [out] out dense device matrix view for the Gram matrix, size [n1*n2]
   */
  void linear(raft::resources const& handle,
              csr_input_matrix_view_t<math_t> x1,
              csr_input_matrix_view_t<math_t> x2,
              dense_output_matrix_view_t<math_t> out)
  {
    // check layout consistency (w.r.t. strides a matrix might be both row & col major)
    bool is_row_major_nopad = get_is_row_major(out) && out.stride(0) == out.extent(1);
    bool is_col_major_nopad = get_is_col_major(out) && out.stride(1) == out.extent(0);

    ASSERT(is_row_major_nopad || is_col_major_nopad,
           "Sparse linear Kernel distance does not support ld_out parameter");

    // The sparse pairwise-distance path writes row-major output only; for a
    // col-major request, swap x1/x2 and view `out` transposed so the
    // row-major write lands in the requested layout.
    if (is_col_major_nopad) {
      auto out_row_major = raft::make_device_matrix_view<math_t, int, raft::row_major>(
        out.data_handle(), out.extent(1), out.extent(0));
      raft::sparse::distance::pairwise_distance(
        handle, x2, x1, out_row_major, cuvs::distance::DistanceType::InnerProduct, 0.0);
    } else {
      auto out_row_major = raft::make_device_matrix_view<math_t, int, raft::row_major>(
        out.data_handle(), out.extent(0), out.extent(1));
      raft::sparse::distance::pairwise_distance(
        handle, x1, x2, out_row_major, cuvs::distance::DistanceType::InnerProduct, 0.0);
    }
  }
};
}; // end namespace cuvs::distance::kernels::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ball_cover-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // uint32_t
#include <cuvs/distance/distance_types.hpp> // cuvs::distance::DistanceType
#include <cuvs/neighbors/ball_cover_types.hpp> // BallCoverIndex
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs::neighbors::ball_cover {

// Forward declarations only: with RAFT_EXPLICIT_INSTANTIATE_ONLY defined, the
// definitions are compiled separately and only the explicitly instantiated
// type combinations (see the macro below) are available to users.

// Builds and populates a previously unbuilt BallCoverIndex.
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void build_index(raft::resources const& handle,
                 BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index) RAFT_EXPLICIT;

// All-neighbors knn over the index's own points (raw-pointer outputs).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   int_t k,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight = 1.0) RAFT_EXPLICIT;

// All-neighbors knn over the index's own points (mdspan outputs).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds,
                   raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists,
                   int_t k,
                   bool perform_post_filtering = true,
                   float weight = 1.0) RAFT_EXPLICIT;

// knn over a separate query set (raw-pointer API).
template <typename idx_t, typename value_t, typename int_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t>& index,
               int_t k,
               const value_t* query,
               int_t n_query_pts,
               idx_t* inds,
               value_t* dists,
               bool perform_post_filtering = true,
               float weight = 1.0) RAFT_EXPLICIT;

// knn over a separate query set (mdspan API).
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
               raft::device_matrix_view<const value_t, matrix_idx_t, raft::row_major> query,
               raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds,
               raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists,
               int_t k,
               bool perform_post_filtering = true,
               float weight = 1.0) RAFT_EXPLICIT;
}  // namespace cuvs::neighbors::ball_cover
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
// Declares `extern template` for every ball-cover entry point above, so that
// translation units including this header do not instantiate the (heavy)
// templates themselves; the definitions are compiled once in the library.
#define instantiate_raft_neighbors_ball_cover(idx_t, value_t, int_t, matrix_idx_t) \
  extern template void \
  cuvs::neighbors::ball_cover::build_index<idx_t, value_t, int_t, matrix_idx_t>( \
    raft::resources const& handle, \
    cuvs::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index); \
  \
  extern template void \
  cuvs::neighbors::ball_cover::all_knn_query<idx_t, value_t, int_t, matrix_idx_t>( \
    raft::resources const& handle, \
    cuvs::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index, \
    int_t k, \
    idx_t* inds, \
    value_t* dists, \
    bool perform_post_filtering, \
    float weight); \
  \
  extern template void \
  cuvs::neighbors::ball_cover::all_knn_query<idx_t, value_t, int_t, matrix_idx_t>( \
    raft::resources const& handle, \
    cuvs::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index, \
    raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds, \
    raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists, \
    int_t k, \
    bool perform_post_filtering, \
    float weight); \
  \
  extern template void cuvs::neighbors::ball_cover::knn_query<idx_t, value_t, int_t>( \
    raft::resources const& handle, \
    const cuvs::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t>& index, \
    int_t k, \
    const value_t* query, \
    int_t n_query_pts, \
    idx_t* inds, \
    value_t* dists, \
    bool perform_post_filtering, \
    float weight); \
  \
  extern template void \
  cuvs::neighbors::ball_cover::knn_query<idx_t, value_t, int_t, matrix_idx_t>( \
    raft::resources const& handle, \
    const cuvs::neighbors::ball_cover::BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index, \
    raft::device_matrix_view<const value_t, matrix_idx_t, raft::row_major> query, \
    raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds, \
    raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists, \
    int_t k, \
    bool perform_post_filtering, \
    float weight);
// The only pre-built combination shipped by the library.
instantiate_raft_neighbors_ball_cover(int64_t, float, uint32_t, uint32_t);
#undef instantiate_raft_neighbors_ball_cover
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/refine-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/detail/refine.cuh>
#include <cuvs/spatial/knn/detail/ann_utils.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace cuvs::neighbors {
/**
* @defgroup ann_refine Approximate Nearest Neighbors Refinement
* @{
*/
/**
 * @brief Refine nearest neighbor search.
 *
 * Refinement is an operation that follows an approximate NN search. The approximate search has
 * already selected n_candidates neighbor candidates for each query. We narrow it down to k
 * neighbors. For each query, we calculate the exact distance between the query and its
 * n_candidates neighbor candidates, and select the k nearest ones.
 *
 * The k nearest neighbors and distances are returned.
 *
 * Example usage
 * @code{.cpp}
 * using namespace cuvs::neighbors;
 * // use default index parameters
 * ivf_pq::index_params index_params;
 * // create and fill the index from a [N, D] dataset
 * auto index = ivf_pq::build(handle, index_params, dataset, N, D);
 * // use default search parameters
 * ivf_pq::search_params search_params;
 * // search m = 4 * k nearest neighbours for each of the N queries
 * ivf_pq::search(handle, search_params, index, queries, N, 4 * k, neighbor_candidates,
 * out_dists_tmp);
 * // refine it to the k nearest one
 * refine(handle, dataset, queries, neighbor_candidates, out_indices, out_dists,
 * index.metric());
 * @endcode
 *
 *
 * @param[in] handle the raft handle
 * @param[in] dataset device matrix that stores the dataset [n_rows, dims]
 * @param[in] queries device matrix of the queries [n_queries, dims]
 * @param[in] neighbor_candidates indices of candidate vectors [n_queries, n_candidates], where
 * n_candidates >= k
 * @param[out] indices device matrix that stores the refined indices [n_queries, k]
 * @param[out] distances device matrix that stores the refined distances [n_queries, k]
 * @param[in] metric distance metric to use. Euclidean (L2) is used by default
 */
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> dataset,
            raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> queries,
            raft::device_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates,
            raft::device_matrix_view<idx_t, matrix_idx, raft::row_major> indices,
            raft::device_matrix_view<distance_t, matrix_idx, raft::row_major> distances,
            distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
{
  // Thin wrapper: all extent validation and the GPU kernels live in the detail layer.
  detail::refine_device(handle, dataset, queries, neighbor_candidates, indices, distances, metric);
}
/** Same as above, but all input and output data is in host memory.
 * @param[in] handle the raft handle (not forwarded to the host implementation below)
 * @param[in] dataset host matrix that stores the dataset [n_rows, dims]
 * @param[in] queries host matrix of the queries [n_queries, dims]
 * @param[in] neighbor_candidates host matrix with indices of candidate vectors [n_queries,
 * n_candidates], where n_candidates >= k
 * @param[out] indices host matrix that stores the refined indices [n_queries, k]
 * @param[out] distances host matrix that stores the refined distances [n_queries, k]
 * @param[in] metric distance metric to use. Euclidean (L2) is used by default
 */
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
            raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> dataset,
            raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> queries,
            raft::host_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates,
            raft::host_matrix_view<idx_t, matrix_idx, raft::row_major> indices,
            raft::host_matrix_view<distance_t, matrix_idx, raft::row_major> distances,
            distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
{
  // NOTE: the host-side implementation does not take `handle`; computation runs on the CPU.
  detail::refine_host(dataset, queries, neighbor_candidates, indices, distances, metric);
}
/** @} */ // end group ann_refine
} // namespace cuvs::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_pq-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/detail/ivf_pq_build.cuh>
#include <cuvs/neighbors/detail/ivf_pq_search.cuh>
#include <cuvs/neighbors/ivf_pq_serialize.cuh>
#include <cuvs/neighbors/ivf_pq_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resources.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <memory> // shared_ptr
namespace cuvs::neighbors::ivf_pq {
/**
* @defgroup ivf_pq IVF PQ Algorithm
* @{
*/
/**
* @brief Build the index from the dataset for efficient search.
*
* NB: Currently, the following distance metrics are supported:
* - L2Expanded
* - L2Unexpanded
* - InnerProduct
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] params configure the index building
* @param[in] dataset a device matrix view to a row-major matrix [n_rows, dim]
*
* @return the constructed ivf-pq index
*/
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& handle,
                  const index_params& params,
                  raft::device_matrix_view<const T, IdxT, raft::row_major> dataset)
{
  // Unpack the mdspan into a raw pointer plus extents and delegate to the
  // pointer-based implementation.
  return detail::build(
    handle, params, dataset.data_handle(), dataset.extent(0), dataset.extent(1));
}
/**
* @brief Extend the index with the new data.
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] new_vectors a device matrix view to a row-major matrix [n_rows, idx.dim()]
* @param[in] new_indices a device vector view to a vector of indices [n_rows].
* If the original index is empty (`idx.size() == 0`), you can pass `std::nullopt`
* here to imply a continuous range `[0...n_rows)`.
* @param[inout] idx
*/
template <typename T, typename IdxT>
index<IdxT> extend(raft::resources const& handle,
                   raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,
                   std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
                   const index<IdxT>& idx)
{
  ASSERT(new_vectors.extent(1) == idx.dim(),
         "new_vectors should have the same dimension as the index");
  const IdxT n_rows = new_vectors.extent(0);
  // Resolve the optional index vector to a raw pointer (nullptr implies the
  // continuous range [0...n_rows)).
  const IdxT* indices_ptr = nullptr;
  if (new_indices.has_value()) {
    ASSERT(n_rows == new_indices.value().extent(0),
           "new_vectors and new_indices have different number of rows");
    indices_ptr = new_indices.value().data_handle();
  }
  return detail::extend(handle, idx, new_vectors.data_handle(), indices_ptr, n_rows);
}
/**
* @brief Extend the index with the new data.
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] new_vectors a device matrix view to a row-major matrix [n_rows, idx.dim()]
* @param[in] new_indices a device vector view to a vector of indices [n_rows].
* If the original index is empty (`idx.size() == 0`), you can pass `std::nullopt`
* here to imply a continuous range `[0...n_rows)`.
* @param[inout] idx
*/
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,
            std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,
            index<IdxT>* idx)
{
  ASSERT(new_vectors.extent(1) == idx->dim(),
         "new_vectors should have the same dimension as the index");
  const IdxT n_rows = new_vectors.extent(0);
  // Resolve the optional index vector to a raw pointer (nullptr implies the
  // continuous range [0...n_rows)).
  const IdxT* indices_ptr = nullptr;
  if (new_indices.has_value()) {
    ASSERT(n_rows == new_indices.value().extent(0),
           "new_vectors and new_indices have different number of rows");
    indices_ptr = new_indices.value().data_handle();
  }
  // Build the extended index and replace the caller's copy in place.
  *idx = detail::extend(handle, *idx, new_vectors.data_handle(), indices_ptr, n_rows);
}
/**
* @brief Search ANN using the constructed index with the given filter.
*
* See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`.
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam IvfSampleFilterT Device filter function, with the signature
* `(uint32_t query_ix, uint32 cluster_ix, uint32_t sample_ix) -> bool` or
* `(uint32_t query_ix, uint32 sample_ix) -> bool`
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] idx ivf-pq constructed index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
* @param[in] sample_filter a device filter function that greenlights samples for a given query.
*/
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<IdxT>& idx,
                           raft::device_matrix_view<const T, uint32_t, raft::row_major> queries,
                           raft::device_matrix_view<IdxT, uint32_t, raft::row_major> neighbors,
                           raft::device_matrix_view<float, uint32_t, raft::row_major> distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  // Validate that the three views agree on batch size, k and dimensionality
  // before handing raw pointers to the implementation.
  const uint32_t n_queries = queries.extent(0);
  RAFT_EXPECTS(
    n_queries == neighbors.extent(0) && n_queries == distances.extent(0),
    "Number of rows in output neighbors and distances matrices must equal the number of queries.");
  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1),
               "Number of columns in output neighbors and distances matrices must equal k");
  RAFT_EXPECTS(queries.extent(1) == idx.dim(),
               "Number of query dimensions should equal number of dimensions in the index.");
  detail::search(handle,
                 params,
                 idx,
                 queries.data_handle(),
                 n_queries,
                 neighbors.extent(1),  // k
                 neighbors.data_handle(),
                 distances.data_handle(),
                 sample_filter);
}
/**
* @brief Search ANN using the constructed index.
*
* See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`.
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] idx ivf-pq constructed index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
*/
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<IdxT>& idx,
            raft::device_matrix_view<const T, uint32_t, raft::row_major> queries,
            raft::device_matrix_view<IdxT, uint32_t, raft::row_major> neighbors,
            raft::device_matrix_view<float, uint32_t, raft::row_major> distances)
{
  // Unfiltered search is filtered search with the pass-through ("none") filter.
  search_with_filtering(handle,
                        params,
                        idx,
                        queries,
                        neighbors,
                        distances,
                        cuvs::neighbors::filtering::none_ivf_sample_filter{});
}
/** @} */ // end group ivf_pq
/**
* @brief Build the index from the dataset for efficient search.
*
* NB: Currently, the following distance metrics are supported:
* - L2Expanded
* - L2Unexpanded
* - InnerProduct
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* // use default index parameters
* ivf_pq::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = ivf_pq::build(handle, index_params, dataset, N, D);
* // use default search parameters
* ivf_pq::search_params search_params;
* // search K nearest neighbours for each of the N queries
* ivf_pq::search(handle, search_params, index, queries, N, K, out_inds, out_dists);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[in] params configure the index building
* @param[in] dataset a device/host pointer to a row-major matrix [n_rows, dim]
* @param[in] n_rows the number of samples
* @param[in] dim the dimensionality of the data
*
* @return the constructed ivf-pq index
*/
template <typename T, typename IdxT = uint32_t>
auto build(raft::resources const& handle,
           const index_params& params,
           const T* dataset,
           IdxT n_rows,
           uint32_t dim) -> index<IdxT>
{
  // Thin wrapper: the whole build pipeline lives in detail::build.
  return detail::build(handle, params, dataset, n_rows, dim);
}
/**
* @brief Build a new index containing the data of the original plus new extra vectors.
*
* Implementation note:
* The new data is clustered according to existing kmeans clusters, the cluster
* centers are unchanged.
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* ivf_pq::index_params index_params;
* index_params.add_data_on_build = false; // don't populate index on build
* index_params.kmeans_trainset_fraction = 1.0; // use whole dataset for kmeans training
* // train the index from a [N, D] dataset
* auto index_empty = ivf_pq::build(handle, index_params, dataset, N, D);
* // fill the index with the data
* auto index = ivf_pq::extend(handle, index_empty, dataset, nullptr, N);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[inout] idx original index
* @param[in] new_vectors a device/host pointer to a row-major matrix [n_rows, idx.dim()]
* @param[in] new_indices a device/host pointer to a vector of indices [n_rows].
* If the original index is empty (`idx.size() == 0`), you can pass `nullptr`
* here to imply a continuous range `[0...n_rows)`.
* @param[in] n_rows the number of samples
*
* @return the constructed extended ivf-pq index
*/
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            const index<IdxT>& idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) -> index<IdxT>
{
  // Returns a new, extended index; `idx` itself is not modified here.
  return detail::extend(handle, idx, new_vectors, new_indices, n_rows);
}
/**
* @brief Extend the index with the new data.
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] handle
* @param[inout] idx
* @param[in] new_vectors a device/host pointer to a row-major matrix [n_rows, idx.dim()]
* @param[in] new_indices a device/host pointer to a vector of indices [n_rows].
* If the original index is empty (`idx.size() == 0`), you can pass `nullptr`
* here to imply a continuous range `[0...n_rows)`.
* @param[in] n_rows the number of samples
*/
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            index<IdxT>* idx,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows)
{
  // In-place variant: the detail layer updates `*idx` directly.
  detail::extend(handle, idx, new_vectors, new_indices, n_rows);
}
/**
* @brief Search ANN using the constructed index with the given filter.
*
* See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // use default search parameters
* ivf_pq::search_params search_params;
* filtering::none_ivf_sample_filter filter;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_pq::search_with_filtering(
* handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, filter);
* ivf_pq::search_with_filtering(
* handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, filter);
* ivf_pq::search_with_filtering(
* handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, nfilter);
* ...
* @endcode
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam IvfSampleFilterT Device filter function, with the signature
* `(uint32_t query_ix, uint32 cluster_ix, uint32_t sample_ix) -> bool` or
* `(uint32_t query_ix, uint32 sample_ix) -> bool`
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] idx ivf-pq constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[in] n_queries the batch size
* @param[in] k the number of neighbors to find for each query.
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
* @param[in] sample_filter a device filter function that greenlights samples for a given query
*/
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
                           const search_params& params,
                           const index<IdxT>& idx,
                           const T* queries,
                           uint32_t n_queries,
                           uint32_t k,
                           IdxT* neighbors,
                           float* distances,
                           IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  // Raw-pointer entry point: no extent checks are possible here, so the caller
  // must guarantee the buffer shapes documented above.
  detail::search(handle, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
}
/**
* This function is deprecated and will be removed in a future.
* Please drop the `mr` argument and use `raft::resource::set_workspace_resource` instead.
*/
template <typename T, typename IdxT, typename IvfSampleFilterT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search_with_filtering(raft::resources const& handle,
                      const search_params& params,
                      const index<IdxT>& idx,
                      const T* queries,
                      uint32_t n_queries,
                      uint32_t k,
                      IdxT* neighbors,
                      float* distances,
                      rmm::mr::device_memory_resource* mr,
                      IvfSampleFilterT sample_filter = IvfSampleFilterT{})
{
  if (mr != nullptr) {
    // Shallow copy of the resource with the automatic lifespan:
    // change the workspace resource temporarily.
    // Fix: `resource::set_workspace_resource` and `void_op` were unqualified, which does not
    // resolve from namespace cuvs::neighbors::ivf_pq (both live in `raft::`); qualify them.
    raft::resources res_local(handle);
    // Non-owning shared_ptr: the no-op deleter (raft::void_op) leaves `mr`'s
    // lifetime to the caller.
    raft::resource::set_workspace_resource(
      res_local, std::shared_ptr<rmm::mr::device_memory_resource>{mr, raft::void_op{}});
    return search_with_filtering(
      res_local, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
  } else {
    return search_with_filtering(
      handle, params, idx, queries, n_queries, k, neighbors, distances, sample_filter);
  }
}
/**
* @brief Search ANN using the constructed index.
*
* See the [ivf_pq::build](#ivf_pq::build) documentation for a usage example.
*
* Note, this function requires a temporary buffer to store intermediate results between cuda kernel
* calls, which may lead to undesirable allocations and slowdown. To alleviate the problem, you can
* pass a pool memory resource or a large enough pre-allocated memory resource to reduce or
* eliminate entirely allocations happening within `search`:
* @code{.cpp}
* ...
* // Create a pooling memory resource with a pre-defined initial size.
* rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> mr(
* rmm::mr::get_current_device_resource(), 1024 * 1024);
* // use default search parameters
* ivf_pq::search_params search_params;
* // Use the same allocator across multiple searches to reduce the number of
* // cuda memory allocations
* ivf_pq::search(handle, search_params, index, queries1, N1, K, out_inds1, out_dists1, &mr);
* ivf_pq::search(handle, search_params, index, queries2, N2, K, out_inds2, out_dists2, &mr);
* ivf_pq::search(handle, search_params, index, queries3, N3, K, out_inds3, out_dists3, &mr);
* ...
* @endcode
* The exact size of the temporary buffer depends on multiple factors and is an implementation
* detail. However, you can safely specify a small initial size for the memory pool, so that only a
* few allocations happen to grow it during the first invocations of the `search`.
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle
* @param[in] params configure the search
* @param[in] idx ivf-pq constructed index
* @param[in] queries a device pointer to a row-major matrix [n_queries, index->dim()]
* @param[in] n_queries the batch size
* @param[in] k the number of neighbors to find for each query.
* @param[out] neighbors a device pointer to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device pointer to the distances to the selected neighbors [n_queries, k]
*/
template <typename T, typename IdxT>
void search(raft::resources const& handle,
            const search_params& params,
            const index<IdxT>& idx,
            const T* queries,
            uint32_t n_queries,
            uint32_t k,
            IdxT* neighbors,
            float* distances)
{
  // Unfiltered search is filtered search with the pass-through ("none") filter.
  return search_with_filtering(handle,
                               params,
                               idx,
                               queries,
                               n_queries,
                               k,
                               neighbors,
                               distances,
                               cuvs::neighbors::filtering::none_ivf_sample_filter{});
}
/**
* This function is deprecated and will be removed in a future.
* Please drop the `mr` argument and use `raft::resource::set_workspace_resource` instead.
*/
template <typename T, typename IdxT>
[[deprecated(
  "Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search(raft::resources const& handle,
       const search_params& params,
       const index<IdxT>& idx,
       const T* queries,
       uint32_t n_queries,
       uint32_t k,
       IdxT* neighbors,
       float* distances,
       rmm::mr::device_memory_resource* mr)
{
  // Forwards to the deprecated filtering overload, which temporarily installs
  // `mr` as the workspace resource; the filter used is the pass-through one.
  return search_with_filtering(handle,
                               params,
                               idx,
                               queries,
                               n_queries,
                               k,
                               neighbors,
                               distances,
                               mr,
                               cuvs::neighbors::filtering::none_ivf_sample_filter{});
}
} // namespace cuvs::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ball_cover-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __BALL_COVER_H
#define __BALL_COVER_H
#pragma once
#include <cstdint>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ball_cover_types.hpp>
#include <cuvs/spatial/knn/detail/ball_cover.cuh>
#include <cuvs/spatial/knn/detail/ball_cover/common.cuh>
#include <thrust/transform.h>
namespace cuvs::neighbors::ball_cover {
/**
* @defgroup random_ball_cover Random Ball Cover algorithm
* @{
*/
/**
* Builds and populates a previously unbuilt BallCoverIndex
*
* Usage example:
* @code{.cpp}
*
* #include <raft/core/resources.hpp>
* #include <cuvs/neighbors/ball_cover.cuh>
* #include <cuvs/distance/distance_types.hpp>
* using namespace cuvs::neighbors;
*
* raft::resources handle;
* ...
* auto metric = cuvs::distance::DistanceType::L2Expanded;
* BallCoverIndex index(handle, X, metric);
*
* ball_cover::build_index(handle, index);
* @endcode
*
* @tparam idx_t knn index type
* @tparam value_t knn value type
* @tparam int_t integral type for knn params
* @tparam matrix_idx_t matrix indexing type
* @param[in] handle library resource management handle
* @param[inout] index an empty (and not previous built) instance of BallCoverIndex
*/
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void build_index(raft::resources const& handle,
                 BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Dispatch on the metric, passing the matching distance functor to the
  // detail implementation.
  if (index.metric == cuvs::distance::DistanceType::Haversine) {
    cuvs::spatial::knn::detail::rbc_build_index(
      handle, index, spatial::knn::detail::HaversineFunc<value_t, int_t>());
  } else if (index.metric == cuvs::distance::DistanceType::L2SqrtExpanded ||
             index.metric == cuvs::distance::DistanceType::L2SqrtUnexpanded) {
    cuvs::spatial::knn::detail::rbc_build_index(
      handle, index, spatial::knn::detail::EuclideanFunc<value_t, int_t>());
  } else {
    // Fixed message: was "Metric not support"; now grammatical and consistent
    // with the error raised by all_knn_query.
    RAFT_FAIL("Metric not supported");
  }
  // Mark the index as trained so subsequent knn_query calls accept it.
  index.set_index_trained();
}
/** @} */ // end group random_ball_cover
/**
* Performs a faster exact knn in metric spaces using the triangle
* inequality with a number of landmark points to reduce the
* number of distance computations from O(n^2) to O(sqrt(n)). This
* performs an all neighbors knn, which can reuse memory when
* the index and query are the same array. This function will
* build the index and assumes rbc_build_index() has not already
* been called.
* @tparam idx_t knn index type
* @tparam value_t knn distance type
* @tparam int_t type for integers, such as number of rows/cols
* @param[in] handle raft handle for resource management
* @param[inout] index ball cover index which has not yet been built
* @param[in] k number of nearest neighbors to find
* @param[in] perform_post_filtering if this is false, only the closest k landmarks
* are considered (which will return approximate
* results).
* @param[out] inds output knn indices
* @param[out] dists output knn distances
* @param[in] weight a weight for overlap between the closest landmark and
* the radius of other landmarks when pruning distances.
* Setting this value below 1 can effectively turn off
* computing distances against many other balls, enabling
* approximate nearest neighbors. Recall can be adjusted
* based on how many relevant balls are ignored. Note that
* many datasets can still have great recall even by only
* looking in the closest landmark.
*/
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   int_t k,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  // The current implementation only supports low-dimensional inputs.
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Dispatch to the detail implementation with the distance functor that
  // corresponds to the configured metric.
  switch (index.metric) {
    case cuvs::distance::DistanceType::Haversine:
      cuvs::spatial::knn::detail::rbc_all_knn_query(
        handle,
        index,
        k,
        inds,
        dists,
        spatial::knn::detail::HaversineFunc<value_t, int_t>(),
        perform_post_filtering,
        weight);
      break;
    case cuvs::distance::DistanceType::L2SqrtExpanded:
    case cuvs::distance::DistanceType::L2SqrtUnexpanded:
      cuvs::spatial::knn::detail::rbc_all_knn_query(
        handle,
        index,
        k,
        inds,
        dists,
        spatial::knn::detail::EuclideanFunc<value_t, int_t>(),
        perform_post_filtering,
        weight);
      break;
    default: RAFT_FAIL("Metric not supported");
  }
  // all_knn_query builds the index as a side effect of the query.
  index.set_index_trained();
}
/**
* @ingroup random_ball_cover
* @{
*/
/**
* Performs a faster exact knn in metric spaces using the triangle
* inequality with a number of landmark points to reduce the
* number of distance computations from O(n^2) to O(sqrt(n)). This
* performs an all neighbors knn, which can reuse memory when
* the index and query are the same array. This function will
* build the index and assumes rbc_build_index() has not already
* been called.
*
* Usage example:
* @code{.cpp}
*
* #include <raft/core/resources.hpp>
* #include <cuvs/neighbors/ball_cover.cuh>
* #include <cuvs/distance/distance_types.hpp>
* using namespace cuvs::neighbors;
*
* raft::resources handle;
* ...
* auto metric = cuvs::distance::DistanceType::L2Expanded;
*
* // Construct a ball cover index
* BallCoverIndex index(handle, X, metric);
*
* // Perform all neighbors knn query
* ball_cover::all_knn_query(handle, index, inds, dists, k);
* @endcode
*
* @tparam idx_t knn index type
* @tparam value_t knn distance type
* @tparam int_t type for integers, such as number of rows/cols
* @tparam matrix_idx_t matrix indexing type
*
* @param[in] handle raft handle for resource management
* @param[in] index ball cover index which has not yet been built
* @param[out] inds output knn indices
* @param[out] dists output knn distances
* @param[in] k number of nearest neighbors to find
* @param[in] perform_post_filtering if this is false, only the closest k landmarks
* are considered (which will return approximate
* results).
* @param[in] weight a weight for overlap between the closest landmark and
* the radius of other landmarks when pruning distances.
* Setting this value below 1 can effectively turn off
* computing distances against many other balls, enabling
* approximate nearest neighbors. Recall can be adjusted
* based on how many relevant balls are ignored. Note that
* many datasets can still have great recall even by only
* looking in the closest landmark.
*/
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void all_knn_query(raft::resources const& handle,
                   BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                   raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds,
                   raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists,
                   int_t k,
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  // Validate shapes up front, then delegate to the raw-pointer overload.
  const auto k_cols       = static_cast<matrix_idx_t>(k);
  const auto n_index_rows = index.get_X().extent(0);
  RAFT_EXPECTS(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  RAFT_EXPECTS(k <= index.m,
               "k must be less than or equal to the number of data points in the index");
  RAFT_EXPECTS(inds.extent(1) == dists.extent(1) && dists.extent(1) == k_cols,
               "Number of columns in output indices and distances matrices must be equal to k");
  RAFT_EXPECTS(inds.extent(0) == dists.extent(0) && dists.extent(0) == n_index_rows,
               "Number of rows in output indices and distances matrices must equal number of rows "
               "in index matrix.");
  all_knn_query(
    handle, index, k, inds.data_handle(), dists.data_handle(), perform_post_filtering, weight);
}
/** @} */
/**
* Performs a faster exact knn in metric spaces using the triangle
* inequality with a number of landmark points to reduce the
* number of distance computations from O(n^2) to O(sqrt(n)). This
* function does not build the index and assumes rbc_build_index() has
* already been called. Use this function when the index and
* query arrays are different, otherwise use rbc_all_knn_query().
* @tparam idx_t index type
* @tparam value_t distances type
* @tparam int_t integer type for size info
* @param[in] handle raft handle for resource management
 * @param[inout] index ball cover index which has already been built
* @param[in] k number of nearest neighbors to find
 * @param[in] query the query data points
* @param[in] perform_post_filtering if this is false, only the closest k landmarks
* are considered (which will return approximate
* results).
* @param[out] inds output knn indices
* @param[out] dists output knn distances
* @param[in] weight a weight for overlap between the closest landmark and
* the radius of other landmarks when pruning distances.
* Setting this value below 1 can effectively turn off
* computing distances against many other balls, enabling
* approximate nearest neighbors. Recall can be adjusted
* based on how many relevant balls are ignored. Note that
* many datasets can still have great recall even by only
* looking in the closest landmark.
* @param[in] n_query_pts number of query points
*/
template <typename idx_t, typename value_t, typename int_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t>& index,
               int_t k,
               const value_t* query,
               int_t n_query_pts,
               idx_t* inds,
               value_t* dists,
               bool perform_post_filtering = true,
               float weight = 1.0)
{
  // The current implementation only supports low-dimensional inputs.
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  // Dispatch to the detail implementation with the distance functor that
  // corresponds to the configured metric.
  switch (index.metric) {
    case cuvs::distance::DistanceType::Haversine:
      cuvs::spatial::knn::detail::rbc_knn_query(handle,
                                                index,
                                                k,
                                                query,
                                                n_query_pts,
                                                inds,
                                                dists,
                                                spatial::knn::detail::HaversineFunc<value_t, int_t>(),
                                                perform_post_filtering,
                                                weight);
      break;
    case cuvs::distance::DistanceType::L2SqrtExpanded:
    case cuvs::distance::DistanceType::L2SqrtUnexpanded:
      cuvs::spatial::knn::detail::rbc_knn_query(handle,
                                                index,
                                                k,
                                                query,
                                                n_query_pts,
                                                inds,
                                                dists,
                                                spatial::knn::detail::EuclideanFunc<value_t, int_t>(),
                                                perform_post_filtering,
                                                weight);
      break;
    default: RAFT_FAIL("Metric not supported");
  }
}
/**
* @ingroup random_ball_cover
* @{
*/
/**
* Performs a faster exact knn in metric spaces using the triangle
* inequality with a number of landmark points to reduce the
* number of distance computations from O(n^2) to O(sqrt(n)). This
* function does not build the index and assumes rbc_build_index() has
* already been called. Use this function when the index and
* query arrays are different, otherwise use rbc_all_knn_query().
*
* Usage example:
* @code{.cpp}
*
* #include <raft/core/resources.hpp>
* #include <cuvs/neighbors/ball_cover.cuh>
* #include <cuvs/distance/distance_types.hpp>
* using namespace cuvs::neighbors;
*
* raft::resources handle;
* ...
* auto metric = cuvs::distance::DistanceType::L2Expanded;
*
* // Build a ball cover index
* BallCoverIndex index(handle, X, metric);
* ball_cover::build_index(handle, index);
*
* // Perform all neighbors knn query
 * ball_cover::knn_query(handle, index, query, inds, dists, k);
* @endcode
*
* @tparam idx_t index type
* @tparam value_t distances type
* @tparam int_t integer type for size info
* @tparam matrix_idx_t
* @param[in] handle raft handle for resource management
 * @param[in] index ball cover index which has already been built
* @param[in] query device matrix containing query data points
* @param[out] inds output knn indices
* @param[out] dists output knn distances
* @param[in] k number of nearest neighbors to find
* @param[in] perform_post_filtering if this is false, only the closest k landmarks
* are considered (which will return approximate
* results).
* @param[in] weight a weight for overlap between the closest landmark and
* the radius of other landmarks when pruning distances.
* Setting this value below 1 can effectively turn off
* computing distances against many other balls, enabling
* approximate nearest neighbors. Recall can be adjusted
* based on how many relevant balls are ignored. Note that
* many datasets can still have great recall even by only
* looking in the closest landmark.
*/
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void knn_query(raft::resources const& handle,
               const BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
               raft::device_matrix_view<const value_t, matrix_idx_t, raft::row_major> query,
               raft::device_matrix_view<idx_t, matrix_idx_t, raft::row_major> inds,
               raft::device_matrix_view<value_t, matrix_idx_t, raft::row_major> dists,
               int_t k,
               bool perform_post_filtering = true,
               float weight = 1.0)
{
  RAFT_EXPECTS(k <= index.m,
               "k must be less than or equal to the number of data points in the index");
  // NB: extents are of type matrix_idx_t, so `k` must be cast to matrix_idx_t
  // (not idx_t) to avoid a mixed-type comparison. This matches the equivalent
  // check in the mdspan overload of all_knn_query().
  RAFT_EXPECTS(inds.extent(1) == dists.extent(1) && dists.extent(1) == static_cast<matrix_idx_t>(k),
               "Number of columns in output indices and distances matrices must be equal to k");
  RAFT_EXPECTS(inds.extent(0) == dists.extent(0) && dists.extent(0) == query.extent(0),
               "Number of rows in output indices and distances matrices must equal number of rows "
               "in search matrix.");
  RAFT_EXPECTS(query.extent(1) == index.get_X().extent(1),
               "Number of columns in query and index matrices must match.");
  // Delegate to the raw-pointer overload, which dispatches on the index metric.
  knn_query(handle,
            index,
            k,
            query.data_handle(),
            query.extent(0),
            inds.data_handle(),
            dists.data_handle(),
            perform_post_filtering,
            weight);
}
/** @} */
// TODO: implement functions for:
// 4. rbc_eps_neigh() - given a populated index, perform query against different query array
// 5. rbc_all_eps_neigh() - populate a BallCoverIndex and query against training data
} // namespace cuvs::neighbors::ball_cover
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/sample_filter_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>

#include <cstddef>
#include <cstdint>
#include <type_traits>  // std::false_type, std::true_type, std::void_t
#include <utility>      // std::declval
namespace cuvs::neighbors::filtering {
/* A filter that filters nothing. This is the default behavior. */
struct none_ivf_sample_filter {
  /**
   * Greenlight every sample.
   *
   * @param query_ix index of the query
   * @param cluster_ix index of the current inverted list
   * @param sample_ix position of the sample inside the current inverted list
   * @return always true (no filtering)
   */
  inline _RAFT_HOST_DEVICE bool operator()(const uint32_t query_ix,
                                           const uint32_t cluster_ix,
                                           const uint32_t sample_ix) const
  {
    return true;
  }
};
/* A filter that filters nothing. This is the default behavior. */
struct none_cagra_sample_filter {
  /**
   * Greenlight every sample.
   *
   * @param query_ix index of the query
   * @param sample_ix index of the current sample
   * @return always true (no filtering)
   */
  inline _RAFT_HOST_DEVICE bool operator()(const uint32_t query_ix, const uint32_t sample_ix) const
  {
    return true;
  }
};
// Detection trait: true when `filter_t` is invocable with three uint32_t
// arguments (query_ix, cluster_ix, sample_ix), i.e. it is an IVF-style filter
// rather than a two-argument (query_ix, sample_ix) filter.
template <typename filter_t, typename = void>
struct takes_three_args : std::false_type {};
// Specialization selected (via SFINAE on the call expression) only when the
// three-argument call is well-formed.
template <typename filter_t>
struct takes_three_args<
  filter_t,
  std::void_t<decltype(std::declval<filter_t>()(uint32_t{}, uint32_t{}, uint32_t{}))>>
  : std::true_type {};
/**
* @brief Filter used to convert the cluster index and sample index
* of an IVF search into a sample index. This can be used as an
* intermediate filter.
*
* @tparam index_t Indexing type
* @tparam filter_t
*/
template <typename index_t, typename filter_t>
struct ivf_to_sample_filter {
  // For each inverted list, the array mapping the position inside the list to
  // the index of the sample in the source dataset.
  const index_t* const* inds_ptrs_;
  // The wrapped filter that receives the (possibly translated) arguments.
  const filter_t next_filter_;
  ivf_to_sample_filter(const index_t* const* inds_ptrs, const filter_t next_filter)
    : inds_ptrs_{inds_ptrs}, next_filter_{next_filter}
  {
  }
  /** If the original filter takes three arguments, then don't modify the arguments.
   * If the original filter takes two arguments, then we are using `inds_ptr_` to obtain the sample
   * index.
   */
  inline _RAFT_HOST_DEVICE bool operator()(
    // query index
    const uint32_t query_ix,
    // the current inverted list index
    const uint32_t cluster_ix,
    // the index of the current sample inside the current inverted list
    const uint32_t sample_ix) const
  {
    // Compile-time branch: IVF-style filters get the raw triple; sample-index
    // filters get the dataset index looked up through `inds_ptrs_`.
    if constexpr (takes_three_args<filter_t>::value) {
      return next_filter_(query_ix, cluster_ix, sample_ix);
    } else {
      return next_filter_(query_ix, inds_ptrs_[cluster_ix][sample_ix]);
    }
  }
};
/**
* If the filtering depends on the index of a sample, then the following
* filter template can be used:
*
* template <typename IdxT>
* struct index_ivf_sample_filter {
* using index_type = IdxT;
*
* const index_type* const* inds_ptr = nullptr;
*
* index_ivf_sample_filter() {}
* index_ivf_sample_filter(const index_type* const* _inds_ptr)
* : inds_ptr{_inds_ptr} {}
* index_ivf_sample_filter(const index_ivf_sample_filter&) = default;
* index_ivf_sample_filter(index_ivf_sample_filter&&) = default;
* index_ivf_sample_filter& operator=(const index_ivf_sample_filter&) = default;
* index_ivf_sample_filter& operator=(index_ivf_sample_filter&&) = default;
*
* inline _RAFT_HOST_DEVICE bool operator()(
* const uint32_t query_ix,
* const uint32_t cluster_ix,
* const uint32_t sample_ix) const {
* index_type database_idx = inds_ptr[cluster_ix][sample_ix];
*
* // return true or false, depending on the database_idx
* return true;
* }
* };
*
* Initialize it as:
* using filter_type = index_ivf_sample_filter<idx_t>;
* filter_type filter(raft_ivfpq_index.inds_ptrs().data_handle());
*
* Use it as:
* cuvs::neighbors::ivf_pq::search_with_filtering<data_t, idx_t, filter_type>(
* ...regular parameters here...,
* filter
* );
*
* Another example would be the following filter that greenlights samples according
* to a contiguous bit mask vector.
*
* template <typename IdxT>
* struct bitmask_ivf_sample_filter {
* using index_type = IdxT;
*
* const index_type* const* inds_ptr = nullptr;
* const uint64_t* const bit_mask_ptr = nullptr;
* const int64_t bit_mask_stride_64 = 0;
*
* bitmask_ivf_sample_filter() {}
* bitmask_ivf_sample_filter(
* const index_type* const* _inds_ptr,
* const uint64_t* const _bit_mask_ptr,
* const int64_t _bit_mask_stride_64)
* : inds_ptr{_inds_ptr},
* bit_mask_ptr{_bit_mask_ptr},
* bit_mask_stride_64{_bit_mask_stride_64} {}
* bitmask_ivf_sample_filter(const bitmask_ivf_sample_filter&) = default;
* bitmask_ivf_sample_filter(bitmask_ivf_sample_filter&&) = default;
* bitmask_ivf_sample_filter& operator=(const bitmask_ivf_sample_filter&) = default;
* bitmask_ivf_sample_filter& operator=(bitmask_ivf_sample_filter&&) = default;
*
* inline _RAFT_HOST_DEVICE bool operator()(
* const uint32_t query_ix,
* const uint32_t cluster_ix,
* const uint32_t sample_ix) const {
* const index_type database_idx = inds_ptr[cluster_ix][sample_ix];
* const uint64_t bit_mask_element =
* bit_mask_ptr[query_ix * bit_mask_stride_64 + database_idx / 64];
* const uint64_t masked_bool =
* bit_mask_element & (1ULL << (uint64_t)(database_idx % 64));
* const bool is_bit_set = (masked_bool != 0);
*
* return is_bit_set;
* }
* };
*/
} // namespace cuvs::neighbors::filtering
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/nn_descent.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/nn_descent.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
namespace cuvs::neighbors::experimental::nn_descent {
/**
* @defgroup nn-descent CUDA gradient descent nearest neighbor
* @{
*/
/**
* @brief Build nn-descent Index with dataset in device memory
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors::experimental;
* // use default index parameters
* nn_descent::index_params index_params;
* // create and fill the index from a [N, D] raft::device_matrix_view dataset
 *   auto index = nn_descent::build(res, index_params, dataset);
* // index.graph() provides a raft::host_matrix_view of an
* // all-neighbors knn graph of dimensions [N, k] of the input
* // dataset
* @endcode
*
* @tparam T data-type of the input dataset
* @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
* @param[in] params an instance of nn_descent::index_params that are parameters
* to run the nn-descent algorithm
* @param[in] dataset raft::device_matrix_view input dataset expected to be located
* in device memory
* @return index<IdxT> index containing all-neighbors knn graph in host memory
*/
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& res,
                  index_params const& params,
                  raft::device_matrix_view<const T, int64_t, raft::row_major> dataset)
{
  // Thin wrapper: forwards to the detail implementation (device-memory input,
  // returns a newly constructed index).
  return detail::build<T, IdxT>(res, params, dataset);
}
/**
* @brief Build nn-descent Index with dataset in device memory
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors::experimental;
* // use default index parameters
* nn_descent::index_params index_params;
* // create and fill the index from a [N, D] raft::device_matrix_view dataset
* auto knn_graph = raft::make_host_matrix<uint32_t, int64_t>(N, D);
* auto index = nn_descent::index{res, knn_graph.view()};
 *   nn_descent::build(res, index_params, dataset, index);
* // index.graph() provides a raft::host_matrix_view of an
* // all-neighbors knn graph of dimensions [N, k] of the input
* // dataset
* @endcode
*
* @tparam T data-type of the input dataset
* @tparam IdxT data-type for the output index
 * @param res raft::resources is an object managing resources
* @param[in] params an instance of nn_descent::index_params that are parameters
* to run the nn-descent algorithm
* @param[in] dataset raft::device_matrix_view input dataset expected to be located
* in device memory
 * @param[out] idx cuvs::neighbors::experimental::nn_descent index containing all-neighbors knn
 * graph
* in host memory
*/
template <typename T, typename IdxT = uint32_t>
void build(raft::resources const& res,
           index_params const& params,
           raft::device_matrix_view<const T, int64_t, raft::row_major> dataset,
           index<IdxT>& idx)
{
  // Thin wrapper: forwards to the detail implementation (device-memory input,
  // fills the caller-provided index).
  detail::build<T, IdxT>(res, params, dataset, idx);
}
/**
* @brief Build nn-descent Index with dataset in host memory
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors::experimental;
* // use default index parameters
* nn_descent::index_params index_params;
* // create and fill the index from a [N, D] raft::host_matrix_view dataset
 *   auto index = nn_descent::build(res, index_params, dataset);
* // index.graph() provides a raft::host_matrix_view of an
* // all-neighbors knn graph of dimensions [N, k] of the input
* // dataset
* @endcode
*
* @tparam T data-type of the input dataset
* @tparam IdxT data-type for the output index
 * @param res raft::resources is an object managing resources
* @param[in] params an instance of nn_descent::index_params that are parameters
* to run the nn-descent algorithm
* @param[in] dataset raft::host_matrix_view input dataset expected to be located
* in host memory
* @return index<IdxT> index containing all-neighbors knn graph in host memory
*/
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& res,
                  index_params const& params,
                  raft::host_matrix_view<const T, int64_t, raft::row_major> dataset)
{
  // Thin wrapper: forwards to the detail implementation (host-memory input,
  // returns a newly constructed index).
  return detail::build<T, IdxT>(res, params, dataset);
}
/**
* @brief Build nn-descent Index with dataset in host memory
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors::experimental;
* // use default index parameters
* nn_descent::index_params index_params;
* // create and fill the index from a [N, D] raft::host_matrix_view dataset
* auto knn_graph = raft::make_host_matrix<uint32_t, int64_t>(N, D);
* auto index = nn_descent::index{res, knn_graph.view()};
 *   nn_descent::build(res, index_params, dataset, index);
* // index.graph() provides a raft::host_matrix_view of an
* // all-neighbors knn graph of dimensions [N, k] of the input
* // dataset
* @endcode
*
* @tparam T data-type of the input dataset
* @tparam IdxT data-type for the output index
 * @param[in] res raft::resources is an object managing resources
* @param[in] params an instance of nn_descent::index_params that are parameters
* to run the nn-descent algorithm
* @param[in] dataset raft::host_matrix_view input dataset expected to be located
* in host memory
 * @param[out] idx cuvs::neighbors::experimental::nn_descent index containing all-neighbors knn
 * graph
* in host memory
*/
template <typename T, typename IdxT = uint32_t>
void build(raft::resources const& res,
           index_params const& params,
           raft::host_matrix_view<const T, int64_t, raft::row_major> dataset,
           index<IdxT>& idx)
{
  // Thin wrapper: forwards to the detail implementation (host-memory input,
  // fills the caller-provided index).
  detail::build<T, IdxT>(res, params, dataset, idx);
}
/** @} */ // end group nn-descent
} // namespace cuvs::neighbors::experimental::nn_descent
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_pq_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ann_types.hpp>
#include <cuvs/neighbors/ivf_list_types.hpp>

#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/integer_utils.hpp>

#include <thrust/fill.h>

#include <cstdint>
#include <limits>  // std::numeric_limits (kOutOfBoundsRecord)
#include <memory>
#include <type_traits>
namespace cuvs::neighbors::ivf_pq {
/**
* @addtogroup ivf_pq
* @{
*/
/** A type for specifying how PQ codebooks are created. */
enum class codebook_gen { // NOLINT
  // One codebook per subspace (slice of the feature space); `pq_dim` codebooks total.
  PER_SUBSPACE = 0, // NOLINT
  // One codebook per first-level (IVF) cluster; `n_lists` codebooks total.
  PER_CLUSTER = 1, // NOLINT
};
struct index_params : ann::index_params {
  /**
   * The number of inverted lists (clusters)
   *
   * Hint: the number of vectors per cluster (`n_rows/n_lists`) should be approximately 1,000 to
   * 10,000.
   */
  uint32_t n_lists = 1024;
  /** The number of iterations searching for kmeans centers (index building). */
  uint32_t kmeans_n_iters = 20;
  /** The fraction of data to use during iterative kmeans building. */
  double kmeans_trainset_fraction = 0.5;
  /**
   * The bit length of the vector element after compression by PQ.
   *
   * Possible values: [4, 5, 6, 7, 8].
   *
   * Hint: the smaller the 'pq_bits', the smaller the index size and the better the search
   * performance, but the lower the recall.
   */
  uint32_t pq_bits = 8;
  /**
   * The dimensionality of the vector after compression by PQ. When zero, an optimal value is
   * selected using a heuristic.
   *
   * NB: `pq_dim * pq_bits` must be a multiple of 8.
   *
   * Hint: a smaller 'pq_dim' results in a smaller index size and better search performance, but
   * lower recall. If 'pq_bits' is 8, 'pq_dim' can be set to any number, but multiple of 8 are
   * desirable for good performance. If 'pq_bits' is not 8, 'pq_dim' should be a multiple of 8.
   * For good performance, it is desirable that 'pq_dim' is a multiple of 32. Ideally, 'pq_dim'
   * should be also a divisor of the dataset dim.
   */
  uint32_t pq_dim = 0;
  /** How PQ codebooks are created. */
  codebook_gen codebook_kind = codebook_gen::PER_SUBSPACE;
  /**
   * Apply a random rotation matrix on the input data and queries even if `dim % pq_dim == 0`.
   *
   * Note: if `dim` is not multiple of `pq_dim`, a random rotation is always applied to the input
   * data and queries to transform the working space from `dim` to `rot_dim`, which may be slightly
   * larger than the original space and is a multiple of `pq_dim` (`rot_dim % pq_dim == 0`).
   * However, this transform is not necessary when `dim` is multiple of `pq_dim`
   * (`dim == rot_dim`, hence no need in adding "extra" data columns / features).
   *
   * By default, if `dim == rot_dim`, the rotation transform is initialized with the identity
   * matrix. When `force_random_rotation == true`, a random orthogonal transform matrix is generated
   * regardless of the values of `dim` and `pq_dim`.
   */
  bool force_random_rotation = false;
  /**
   * By default, the algorithm allocates more space than necessary for individual clusters
   * (`list_data`). This allows to amortize the cost of memory allocation and reduce the number of
   * data copies during repeated calls to `extend` (extending the database).
   *
   * The alternative is the conservative allocation behavior; when enabled, the algorithm always
   * allocates the minimum amount of memory required to store the given number of records. Set this
   * flag to `true` if you prefer to use as little GPU memory for the database as possible.
   */
  bool conservative_memory_allocation = false;
};
struct search_params : ann::search_params {
  /** The number of clusters to search. */
  uint32_t n_probes = 20;
  /**
   * Data type of look up table to be created dynamically at search time.
   *
   * Possible values: [CUDA_R_32F, CUDA_R_16F, CUDA_R_8U]
   *
   * The use of low-precision types reduces the amount of shared memory required at search time, so
   * fast shared memory kernels can be used even for datasets with large dimensionality. Note that
   * the recall is slightly degraded when low-precision type is selected.
   */
  cudaDataType_t lut_dtype = CUDA_R_32F;
  /**
   * Storage data type for distance/similarity computed at search time.
   *
   * Possible values: [CUDA_R_16F, CUDA_R_32F]
   *
   * If the performance limiter at search time is device memory access, selecting FP16 will improve
   * performance slightly.
   */
  cudaDataType_t internal_distance_dtype = CUDA_R_32F;
  /**
   * Preferred fraction of SM's unified memory / L1 cache to be used as shared memory.
   *
   * Possible values: [0.0 - 1.0] as a fraction of the `sharedMemPerMultiprocessor`.
   *
   * One wants to increase the carveout to make sure a good GPU occupancy for the main search
   * kernel, but not to keep it too high to leave some memory to be used as L1 cache. Note, this
   * value is interpreted only as a hint. Moreover, a GPU usually allows only a fixed set of cache
   * configurations, so the provided value is rounded up to the nearest configuration. Refer to the
   * NVIDIA tuning guide for the target GPU architecture.
   *
   * Note, this is a low-level tuning parameter that can have drastic negative effects on the search
   * performance if tweaked incorrectly.
   */
  double preferred_shmem_carveout = 1.0;
};
// Both parameter structs must remain aggregate types (checked at compile time),
// so that brace/aggregate initialization keeps working for users.
static_assert(std::is_aggregate_v<index_params>);
static_assert(std::is_aggregate_v<search_params>);
/** Size of the interleaved group. */
constexpr static uint32_t kIndexGroupSize = 32;
/** Stride of the interleaved group for vectorized loads. */
constexpr static uint32_t kIndexGroupVecLen = 16;
/**
 * Default value returned by `search` when the `n_probes` is too small and top-k is too large.
 * One may encounter it if the combined size of probed clusters is smaller than the requested
 * number of results per query.
 */
template <typename IdxT>
constexpr static IdxT kOutOfBoundsRecord = std::numeric_limits<IdxT>::max();
template <typename SizeT, typename IdxT>
struct list_spec {
using value_type = uint8_t;
using index_type = IdxT;
/** PQ-encoded data stored in the interleaved format:
*
* [ raft::ceildiv(list_size, kIndexGroupSize)
* , raft::ceildiv(pq_dim, (kIndexGroupVecLen * 8u) / pq_bits)
* , kIndexGroupSize
* , kIndexGroupVecLen
* ].
*/
using list_extents =
extents<SizeT, raft::dynamic_extent, raft::dynamic_extent, kIndexGroupSize, kIndexGroupVecLen>;
SizeT align_max;
SizeT align_min;
uint32_t pq_bits;
uint32_t pq_dim;
constexpr list_spec(uint32_t pq_bits, uint32_t pq_dim, bool conservative_memory_allocation)
: pq_bits(pq_bits),
pq_dim(pq_dim),
align_min(kIndexGroupSize),
align_max(conservative_memory_allocation ? kIndexGroupSize : 1024)
{
}
// Allow casting between different size-types (for safer size and offset calculations)
template <typename OtherSizeT>
constexpr explicit list_spec(const list_spec<OtherSizeT, IdxT>& other_spec)
: pq_bits{other_spec.pq_bits},
pq_dim{other_spec.pq_dim},
align_min{other_spec.align_min},
align_max{other_spec.align_max}
{
}
/** Determine the extents of an array enough to hold a given amount of data. */
constexpr auto make_list_extents(SizeT n_rows) const -> list_extents
{
// how many elems of pq_dim fit into one kIndexGroupVecLen-byte chunk
auto pq_chunk = (kIndexGroupVecLen * 8u) / pq_bits;
return make_extents<SizeT>(div_rounding_up_safe<SizeT>(n_rows, kIndexGroupSize),
div_rounding_up_safe<SizeT>(pq_dim, pq_chunk),
kIndexGroupSize,
kIndexGroupVecLen);
}
};
/** Storage for a single IVF list holding interleaved, PQ-encoded data (see `list_spec`). */
template <typename IdxT, typename SizeT = uint32_t>
using list_data = ivf::list<list_spec, SizeT, IdxT>;
/**
* @brief IVF-PQ index.
*
* In the IVF-PQ index, a database vector y is approximated with two level quantization:
*
* y = Q_1(y) + Q_2(y - Q_1(y))
*
* The first level quantizer (Q_1), maps the vector y to the nearest cluster center. The number of
* clusters is n_lists.
*
* The second quantizer encodes the residual, and it is defined as a product quantizer [1].
*
* A product quantizer encodes a `dim` dimensional vector with a `pq_dim` dimensional vector.
* First we split the input vector into `pq_dim` subvectors (denoted by u), where each u vector
* contains `pq_len` distinct components of y
*
* y_1, y_2, ... y_{pq_len}, y_{pq_len+1}, ... y_{2*pq_len}, ... y_{dim-pq_len+1} ... y_{dim}
* \___________________/ \____________________________/ \______________________/
* u_1 u_2 u_{pq_dim}
*
* Then each subvector encoded with a separate quantizer q_i, end the results are concatenated
*
 * Q_2(y) = q_1(u_1),q_2(u_2),...,q_{pq_dim}(u_{pq_dim})
*
* Each quantizer q_i outputs a code with pq_bit bits. The second level quantizers are also defined
* by k-means clustering in the corresponding sub-space: the reproduction values are the centroids,
* and the set of reproduction values is the codebook.
*
* When the data dimensionality `dim` is not multiple of `pq_dim`, the feature space is transformed
* using a random orthogonal matrix to have `rot_dim = pq_dim * pq_len` dimensions
* (`rot_dim >= dim`).
*
* The second-level quantizers are trained either for each subspace or for each cluster:
* (a) codebook_gen::PER_SUBSPACE:
* creates `pq_dim` second-level quantizers - one for each slice of the data along features;
* (b) codebook_gen::PER_CLUSTER:
* creates `n_lists` second-level quantizers - one for each first-level cluster.
* In either case, the centroids are again found using k-means clustering interpreting the data as
* having pq_len dimensions.
*
* [1] Product quantization for nearest neighbor search Herve Jegou, Matthijs Douze, Cordelia Schmid
*
* @tparam IdxT type of the indices in the source dataset
*
*/
template <typename IdxT>
struct index : ann::index {
  // Cluster labels and in-list offsets are handled as uint32_t internally, so
  // the user-facing index type must be able to hold any uint32_t value.
  static_assert(!raft::is_narrowing_v<uint32_t, IdxT>,
                "IdxT must be able to represent all values of uint32_t");

 public:
  /** Total length of the index. */
  [[nodiscard]] constexpr inline auto size() const noexcept -> IdxT
  {
    // accum_sorted_sizes_ holds accumulated list sizes; its last element
    // (at position n_lists) is the total number of records in the index.
    return accum_sorted_sizes_(n_lists());
  }
  /** Dimensionality of the input data. */
  [[nodiscard]] constexpr inline auto dim() const noexcept -> uint32_t { return dim_; }
  /**
   * Dimensionality of the cluster centers:
   * input data dim extended with vector norms and padded to 8 elems.
   */
  [[nodiscard]] constexpr inline auto dim_ext() const noexcept -> uint32_t
  {
    // One extra element is reserved for the vector norm; the row length is
    // rounded up to a multiple of 8 for alignment.
    return raft::round_up_safe(dim() + 1, 8u);
  }
  /**
   * Dimensionality of the data after transforming it for PQ processing
   * (rotated and augmented to be a multiple of `pq_dim`).
   */
  [[nodiscard]] constexpr inline auto rot_dim() const noexcept -> uint32_t
  {
    return pq_len() * pq_dim();
  }
  /** The bit length of an encoded vector element after compression by PQ. */
  [[nodiscard]] constexpr inline auto pq_bits() const noexcept -> uint32_t { return pq_bits_; }
  /** The dimensionality of an encoded vector after compression by PQ. */
  [[nodiscard]] constexpr inline auto pq_dim() const noexcept -> uint32_t { return pq_dim_; }
  /** Dimensionality of a subspace, i.e. the number of vector components mapped to a subspace */
  [[nodiscard]] constexpr inline auto pq_len() const noexcept -> uint32_t
  {
    // Rounded up so that rot_dim() >= dim() when dim() is not divisible by pq_dim().
    return raft::div_rounding_up_unsafe(dim(), pq_dim());
  }
  /** The number of vectors in a PQ codebook (`1 << pq_bits`). */
  [[nodiscard]] constexpr inline auto pq_book_size() const noexcept -> uint32_t
  {
    return 1 << pq_bits();
  }
  /** Distance metric used for clustering. */
  [[nodiscard]] constexpr inline auto metric() const noexcept -> cuvs::distance::DistanceType
  {
    return metric_;
  }
  /** How PQ codebooks are created. */
  [[nodiscard]] constexpr inline auto codebook_kind() const noexcept -> codebook_gen
  {
    return codebook_kind_;
  }
  /** Number of clusters/inverted lists (first level quantization). */
  [[nodiscard]] constexpr inline auto n_lists() const noexcept -> uint32_t { return lists_.size(); }
  /**
   * Whether to use conservative memory allocation when extending the list (cluster) data
   * (see index_params.conservative_memory_allocation).
   */
  [[nodiscard]] constexpr inline auto conservative_memory_allocation() const noexcept -> bool
  {
    return conservative_memory_allocation_;
  }

  // Don't allow copying the index for performance reasons (try avoiding copying data)
  index(const index&)                    = delete;
  index(index&&)                         = default;
  auto operator=(const index&) -> index& = delete;
  auto operator=(index&&) -> index&      = default;
  ~index()                               = default;

  /** Construct an empty index. It needs to be trained and then populated. */
  index(raft::resources const& handle,
        cuvs::distance::DistanceType metric,
        codebook_gen codebook_kind,
        uint32_t n_lists,
        uint32_t dim,
        uint32_t pq_bits                    = 8,
        uint32_t pq_dim                     = 0,
        bool conservative_memory_allocation = false)
    : ann::index(),
      metric_(metric),
      codebook_kind_(codebook_kind),
      dim_(dim),
      pq_bits_(pq_bits),
      // pq_dim == 0 means "choose automatically from the data dimensionality".
      pq_dim_(pq_dim == 0 ? calculate_pq_dim(dim) : pq_dim),
      conservative_memory_allocation_(conservative_memory_allocation),
      pq_centers_{make_device_mdarray<float>(handle, make_pq_centers_extents())},
      lists_{n_lists},
      rotation_matrix_{make_device_matrix<float, uint32_t>(handle, this->rot_dim(), this->dim())},
      list_sizes_{make_device_vector<uint32_t, uint32_t>(handle, n_lists)},
      centers_{make_device_matrix<float, uint32_t>(handle, n_lists, this->dim_ext())},
      centers_rot_{make_device_matrix<float, uint32_t>(handle, n_lists, this->rot_dim())},
      data_ptrs_{make_device_vector<uint8_t*, uint32_t>(handle, n_lists)},
      inds_ptrs_{make_device_vector<IdxT*, uint32_t>(handle, n_lists)},
      accum_sorted_sizes_{make_host_vector<IdxT, uint32_t>(n_lists + 1)}
  {
    check_consistency();
    // The index is empty: the total size (the last accumulated value) is zero.
    accum_sorted_sizes_(n_lists) = 0;
  }

  /** Construct an empty index. It needs to be trained and then populated. */
  index(raft::resources const& handle, const index_params& params, uint32_t dim)
    : index(handle,
            params.metric,
            params.codebook_kind,
            params.n_lists,
            dim,
            params.pq_bits,
            params.pq_dim,
            params.conservative_memory_allocation)
  {
  }

  using pq_centers_extents = std::experimental::
    extents<uint32_t, raft::dynamic_extent, raft::dynamic_extent, raft::dynamic_extent>;
  /**
   * PQ cluster centers
   *
   *   - codebook_gen::PER_SUBSPACE: [pq_dim , pq_len, pq_book_size]
   *   - codebook_gen::PER_CLUSTER:  [n_lists, pq_len, pq_book_size]
   */
  inline auto pq_centers() noexcept
    -> raft::device_mdspan<float, pq_centers_extents, raft::row_major>
  {
    return pq_centers_.view();
  }
  [[nodiscard]] inline auto pq_centers() const noexcept
    -> raft::device_mdspan<const float, pq_centers_extents, raft::row_major>
  {
    return pq_centers_.view();
  }

  /** Lists' data and indices. */
  inline auto lists() noexcept -> std::vector<std::shared_ptr<list_data<IdxT>>>& { return lists_; }
  [[nodiscard]] inline auto lists() const noexcept
    -> const std::vector<std::shared_ptr<list_data<IdxT>>>&
  {
    return lists_;
  }

  /** Pointers to the inverted lists (clusters) data  [n_lists]. */
  inline auto data_ptrs() noexcept -> raft::device_vector_view<uint8_t*, uint32_t, raft::row_major>
  {
    return data_ptrs_.view();
  }
  [[nodiscard]] inline auto data_ptrs() const noexcept
    -> raft::device_vector_view<const uint8_t* const, uint32_t, raft::row_major>
  {
    // Re-wrap the storage to expose a deeply-const view (const pointers to const data).
    return make_mdspan<const uint8_t* const, uint32_t, raft::row_major, false, true>(
      data_ptrs_.data_handle(), data_ptrs_.extents());
  }

  /** Pointers to the inverted lists (clusters) indices  [n_lists]. */
  inline auto inds_ptrs() noexcept -> raft::device_vector_view<IdxT*, uint32_t, raft::row_major>
  {
    return inds_ptrs_.view();
  }
  [[nodiscard]] inline auto inds_ptrs() const noexcept
    -> raft::device_vector_view<const IdxT* const, uint32_t, raft::row_major>
  {
    // Re-wrap the storage to expose a deeply-const view (const pointers to const data).
    return make_mdspan<const IdxT* const, uint32_t, raft::row_major, false, true>(
      inds_ptrs_.data_handle(), inds_ptrs_.extents());
  }

  /** The transform matrix (original space -> rotated padded space) [rot_dim, dim] */
  inline auto rotation_matrix() noexcept
    -> raft::device_matrix_view<float, uint32_t, raft::row_major>
  {
    return rotation_matrix_.view();
  }
  [[nodiscard]] inline auto rotation_matrix() const noexcept
    -> raft::device_matrix_view<const float, uint32_t, raft::row_major>
  {
    return rotation_matrix_.view();
  }

  /**
   * Accumulated list sizes, sorted in descending order [n_lists + 1].
   * The last value contains the total length of the index.
   * The value at index zero is always zero.
   *
   * That is, the content of this span is as if the `list_sizes` was sorted and then accumulated.
   *
   * This span is used during search to estimate the maximum size of the workspace.
   */
  inline auto accum_sorted_sizes() noexcept
    -> raft::host_vector_view<IdxT, uint32_t, raft::row_major>
  {
    return accum_sorted_sizes_.view();
  }
  [[nodiscard]] inline auto accum_sorted_sizes() const noexcept
    -> raft::host_vector_view<const IdxT, uint32_t, raft::row_major>
  {
    return accum_sorted_sizes_.view();
  }

  /** Sizes of the lists [n_lists]. */
  inline auto list_sizes() noexcept -> raft::device_vector_view<uint32_t, uint32_t, raft::row_major>
  {
    return list_sizes_.view();
  }
  [[nodiscard]] inline auto list_sizes() const noexcept
    -> raft::device_vector_view<const uint32_t, uint32_t, raft::row_major>
  {
    return list_sizes_.view();
  }

  /** Cluster centers corresponding to the lists in the original space [n_lists, dim_ext] */
  inline auto centers() noexcept -> raft::device_matrix_view<float, uint32_t, raft::row_major>
  {
    return centers_.view();
  }
  [[nodiscard]] inline auto centers() const noexcept
    -> raft::device_matrix_view<const float, uint32_t, raft::row_major>
  {
    return centers_.view();
  }

  /** Cluster centers corresponding to the lists in the rotated space [n_lists, rot_dim] */
  inline auto centers_rot() noexcept -> raft::device_matrix_view<float, uint32_t, raft::row_major>
  {
    return centers_rot_.view();
  }
  [[nodiscard]] inline auto centers_rot() const noexcept
    -> raft::device_matrix_view<const float, uint32_t, raft::row_major>
  {
    return centers_rot_.view();
  }

  /** fetch size of a particular IVF list in bytes using the list extents.
   * Usage example:
   * @code{.cpp}
   *   raft::resources res;
   *   // use default index params
   *   ivf_pq::index_params index_params;
   *   // extend the IVF lists while building the index
   *   index_params.add_data_on_build = true;
   *   // create and fill the index from a [N, D] dataset
   *   auto index = cuvs::neighbors::ivf_pq::build<int64_t>(res, index_params, dataset, N, D);
   *   // Fetch the size of the fourth list
   *   uint32_t size = index.get_list_size_in_bytes(3);
   * @endcode
   *
   * @param[in] label list ID
   */
  inline auto get_list_size_in_bytes(uint32_t label) -> uint32_t
  {
    RAFT_EXPECTS(label < this->n_lists(),
                 "Expected label to be less than number of lists in the index");
    // The list data is stored as bytes (see data_ptrs), so the element count
    // of the data mdarray equals its size in bytes.
    auto list_data = this->lists()[label]->data;
    return list_data.size();
  }

 private:
  cuvs::distance::DistanceType metric_;
  codebook_gen codebook_kind_;
  uint32_t dim_;
  uint32_t pq_bits_;
  uint32_t pq_dim_;
  bool conservative_memory_allocation_;

  // Primary data members
  std::vector<std::shared_ptr<list_data<IdxT>>> lists_;
  raft::device_vector<uint32_t, uint32_t, raft::row_major> list_sizes_;
  raft::device_mdarray<float, pq_centers_extents, raft::row_major> pq_centers_;
  raft::device_matrix<float, uint32_t, raft::row_major> centers_;
  raft::device_matrix<float, uint32_t, raft::row_major> centers_rot_;
  raft::device_matrix<float, uint32_t, raft::row_major> rotation_matrix_;

  // Computed members for accelerating search.
  raft::device_vector<uint8_t*, uint32_t, raft::row_major> data_ptrs_;
  raft::device_vector<IdxT*, uint32_t, raft::row_major> inds_ptrs_;
  raft::host_vector<IdxT, uint32_t, raft::row_major> accum_sorted_sizes_;

  /** Throw an error if the index content is inconsistent. */
  void check_consistency()
  {
    RAFT_EXPECTS(pq_bits() >= 4 && pq_bits() <= 8,
                 "`pq_bits` must be within closed range [4,8], but got %u.",
                 pq_bits());
    // Each record must occupy a whole number of bytes.
    RAFT_EXPECTS((pq_bits() * pq_dim()) % 8 == 0,
                 "`pq_bits * pq_dim` must be a multiple of 8, but got %u * %u = %u.",
                 pq_bits(),
                 pq_dim(),
                 pq_bits() * pq_dim());
  }

  // The codebook layout depends on how the codebooks are generated (see pq_centers()).
  auto make_pq_centers_extents() -> pq_centers_extents
  {
    switch (codebook_kind()) {
      case codebook_gen::PER_SUBSPACE:
        return make_extents<uint32_t>(pq_dim(), pq_len(), pq_book_size());
      case codebook_gen::PER_CLUSTER:
        return make_extents<uint32_t>(n_lists(), pq_len(), pq_book_size());
      default: RAFT_FAIL("Unreachable code");
    }
  }

  // Heuristic default for pq_dim when the user passes pq_dim == 0.
  static inline auto calculate_pq_dim(uint32_t dim) -> uint32_t
  {
    // If the dimensionality is large enough, we can reduce it to improve performance
    if (dim >= 128) { dim /= 2; }
    // Round it down to 32 to improve performance.
    auto r = raft::round_down_safe<uint32_t>(dim, 32);
    if (r > 0) return r;
    // If the dimensionality is really low, round it to the closest power-of-two
    r = 1;
    while ((r << 1) <= dim) {
      r = r << 1;
    }
    return r;
  }
};
/** @} */
} // namespace cuvs::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/cagra.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/cagra/cagra_build.cuh"
#include "detail/cagra/cagra_search.cuh"
#include "detail/cagra/graph_core.cuh"
#include <cuvs/neighbors/cagra_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resources.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cuvs::neighbors::cagra {
/**
* @defgroup cagra CUDA ANN Graph-based nearest neighbor search
* @{
*/
/**
* @brief Build a kNN graph using IVF-PQ.
*
* The kNN graph is the first building block for CAGRA index.
*
* The output is a dense matrix that stores the neighbor indices for each point in the dataset.
* Each point has the same number of neighbors.
*
* See [cagra::build](#cagra::build) for an alternative method.
*
* The following distance metrics are supported:
* - L2Expanded
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* // use default index parameters
* ivf_pq::index_params build_params;
 *   ivf_pq::search_params search_params;
* auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
* // create knn graph
* cagra::build_knn_graph(res, dataset, knn_graph.view(), 2, build_params, search_params);
 *   auto optimized_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 64);
* cagra::optimize(res, dataset, knn_graph.view(), optimized_graph.view());
* // Construct an index from dataset and optimized knn_graph
* auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
* optimized_graph.view());
* @endcode
*
* @tparam DataT data element type
* @tparam IdxT type of the dataset vector indices
*
* @param[in] res raft resources
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
* @param[out] knn_graph a host matrix view to store the output knn graph [n_rows, graph_degree]
* @param[in] refine_rate (optional) refinement rate for ivf-pq search
* @param[in] build_params (optional) ivf_pq index building parameters for knn graph
* @param[in] search_params (optional) ivf_pq search parameters
*/
template <typename DataT, typename IdxT, typename accessor>
void build_knn_graph(
  raft::resources const& res,
  raft::mdspan<const DataT, raft::matrix_extent<int64_t>, raft::row_major, accessor> dataset,
  raft::host_matrix_view<IdxT, int64_t, raft::row_major> knn_graph,
  std::optional<float> refine_rate                   = std::nullopt,
  std::optional<ivf_pq::index_params> build_params   = std::nullopt,
  std::optional<ivf_pq::search_params> search_params = std::nullopt)
{
  // The detail implementation operates on the unsigned twin of IdxT; since the
  // types have the same width, an aliasing view over the caller's graph works.
  using unsigned_idx_t = typename std::make_unsigned<IdxT>::type;

  auto dataset_view =
    raft::mdspan<const DataT, raft::matrix_extent<int64_t>, raft::row_major, accessor>(
      dataset.data_handle(), dataset.extent(0), dataset.extent(1));
  auto graph_view = raft::make_host_matrix_view<unsigned_idx_t, int64_t>(
    reinterpret_cast<unsigned_idx_t*>(knn_graph.data_handle()),
    knn_graph.extent(0),
    knn_graph.extent(1));

  // Delegate the actual IVF-PQ based graph construction.
  cagra::detail::build_knn_graph(
    res, dataset_view, graph_view, refine_rate, build_params, search_params);
}
/**
* @brief Build a kNN graph using NN-descent.
*
* The kNN graph is the first building block for CAGRA index.
*
* The output is a dense matrix that stores the neighbor indices for each point in the dataset.
* Each point has the same number of neighbors.
*
* See [cagra::build](#cagra::build) for an alternative method.
*
* The following distance metrics are supported:
* - L2Expanded
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* using namespace cuvs::neighbors::experimental;
* // use default index parameters
* nn_descent::index_params build_params;
* build_params.graph_degree = 128;
* auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
* // create knn graph
* cagra::build_knn_graph(res, dataset, knn_graph.view(), build_params);
 *   auto optimized_graph = raft::make_host_matrix<IdxT, int64_t>(dataset.extent(0), 64);
* cagra::optimize(res, dataset, nn_descent_index.graph.view(), optimized_graph.view());
* // Construct an index from dataset and optimized knn_graph
* auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
* optimized_graph.view());
* @endcode
*
* @tparam DataT data element type
* @tparam IdxT type of the dataset vector indices
* @tparam accessor host or device accessor_type for the dataset
 * @param[in] res raft::resources is an object managing resources
 * @param[in] dataset input raft::host/device_matrix_view that can be located in
 * host or device memory
* @param[out] knn_graph a host matrix view to store the output knn graph [n_rows, graph_degree]
* @param[in] build_params an instance of experimental::nn_descent::index_params that are parameters
* to run the nn-descent algorithm
*/
template <typename DataT,
          typename IdxT = uint32_t,
          typename accessor =
            host_device_accessor<std::experimental::default_accessor<DataT>, memory_type::device>>
void build_knn_graph(
  raft::resources const& res,
  raft::mdspan<const DataT, raft::matrix_extent<int64_t>, raft::row_major, accessor> dataset,
  raft::host_matrix_view<IdxT, int64_t, raft::row_major> knn_graph,
  experimental::nn_descent::index_params build_params)
{
  // Thin forwarding wrapper: the NN-descent graph construction is implemented
  // in detail::build_knn_graph.
  detail::build_knn_graph<DataT, IdxT>(res, dataset, knn_graph, build_params);
}
/**
* @brief Sort a KNN graph index.
* Preprocessing step for `cagra::optimize`: If a KNN graph is not built using
* `cagra::build_knn_graph`, then it is necessary to call this function before calling
* `cagra::optimize`. If the graph is built by `cagra::build_knn_graph`, it is already sorted and
* you do not need to call this function.
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* cagra::index_params build_params;
* auto knn_graph = raft::make_host_matrix<IdxT, IdxT>(dataset.extent(0), 128);
* // build KNN graph not using `cagra::build_knn_graph`
* // build(knn_graph, dataset, ...);
* // sort graph index
* sort_knn_graph(res, dataset.view(), knn_graph.view());
* // optimize graph
* cagra::optimize(res, dataset, knn_graph.view(), optimized_graph.view());
* // Construct an index from dataset and optimized knn_graph
* auto index = cagra::index<T, IdxT>(res, build_params.metric(), dataset,
* optimized_graph.view());
* @endcode
*
* @tparam DataT type of the data in the source dataset
* @tparam IdxT type of the dataset vector indices
*
* @param[in] res raft resources
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
* @param[in,out] knn_graph a matrix view (host or device) of the input knn graph [n_rows,
* knn_graph_degree]
*/
template <typename DataT,
          typename IdxT = uint32_t,
          typename d_accessor =
            host_device_accessor<std::experimental::default_accessor<DataT>, memory_type::device>,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void sort_knn_graph(
  raft::resources const& res,
  raft::mdspan<const DataT, raft::matrix_extent<int64_t>, raft::row_major, d_accessor> dataset,
  raft::mdspan<IdxT, raft::matrix_extent<int64_t>, raft::row_major, g_accessor> knn_graph)
{
  // The detail sorter expects unsigned indices; build an aliasing view over
  // the caller's graph using the unsigned twin of IdxT (same width, same
  // layout, same memory type).
  using unsigned_idx_t = typename std::make_unsigned<IdxT>::type;
  using unsigned_g_accessor =
    host_device_accessor<std::experimental::default_accessor<unsigned_idx_t>, g_accessor::mem_type>;

  auto dataset_view =
    raft::mdspan<const DataT, raft::matrix_extent<int64_t>, raft::row_major, d_accessor>(
      dataset.data_handle(), dataset.extent(0), dataset.extent(1));
  auto graph_view = raft::
    mdspan<unsigned_idx_t, raft::matrix_extent<int64_t>, raft::row_major, unsigned_g_accessor>(
      reinterpret_cast<unsigned_idx_t*>(knn_graph.data_handle()),
      knn_graph.extent(0),
      knn_graph.extent(1));

  cagra::detail::graph::sort_knn_graph(res, dataset_view, graph_view);
}
/**
* @brief Prune a KNN graph.
*
* Decrease the number of neighbors for each node.
*
* See [cagra::build_knn_graph](#cagra::build_knn_graph) for usage example
*
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res raft resources
* @param[in] knn_graph a matrix view (host or device) of the input knn graph [n_rows,
* knn_graph_degree]
* @param[out] new_graph a host matrix view of the optimized knn graph [n_rows, graph_degree]
*/
template <typename IdxT = uint32_t,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void optimize(
  raft::resources const& res,
  raft::mdspan<IdxT, raft::matrix_extent<int64_t>, raft::row_major, g_accessor> knn_graph,
  raft::host_matrix_view<IdxT, int64_t, raft::row_major> new_graph)
{
  // Thin forwarding wrapper: the graph-pruning logic lives in detail::optimize.
  detail::optimize(res, knn_graph, new_graph);
}
/**
* @brief Build the index from the dataset for efficient search.
*
* The build consist of two steps: build an intermediate knn-graph, and optimize it to
* create the final graph. The index_params struct controls the node degree of these
* graphs.
*
* It is required that dataset and the optimized graph fit the GPU memory.
*
* To customize the parameters for knn-graph building and pruning, and to reuse the
* intermediate results, you could build the index in two steps using
* [cagra::build_knn_graph](#cagra::build_knn_graph) and [cagra::optimize](#cagra::optimize).
*
* The following distance metrics are supported:
* - L2
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* // use default index parameters
* cagra::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = cagra::build(res, index_params, dataset);
* // use default search parameters
* cagra::search_params search_params;
* // search K nearest neighbours
* auto neighbors = raft::make_device_matrix<uint32_t>(res, n_queries, k);
* auto distances = raft::make_device_matrix<float>(res, n_queries, k);
* cagra::search(res, search_params, index, queries, neighbors, distances);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices in the source dataset
*
* @param[in] res
* @param[in] params parameters for building the index
* @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim]
*
* @return the constructed cagra index
*/
template <typename T,
          typename IdxT = uint32_t,
          typename Accessor =
            host_device_accessor<std::experimental::default_accessor<T>, memory_type::host>>
index<T, IdxT> build(
  raft::resources const& res,
  const index_params& params,
  raft::mdspan<const T, raft::matrix_extent<int64_t>, raft::row_major, Accessor> dataset)
{
  // Thin forwarding wrapper: both steps (knn-graph construction and graph
  // optimization) are implemented in detail::build.
  return detail::build<T, IdxT, Accessor>(res, params, dataset);
}
/**
* @brief Search ANN using the constructed index.
*
* See the [cagra::build](#cagra::build) documentation for a usage example.
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] res raft resources
* @param[in] params configure the search
* @param[in] idx cagra index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
*/
/*
 * Plain (unfiltered) search. The previous implementation duplicated the whole
 * body of `search_with_filtering` (input validation, index-type conversion and
 * the `search_main` call); it is exactly equivalent to a filtered search with
 * the pass-through `none_cagra_sample_filter`, so delegate to keep the logic
 * in one place. The shape checks (rows of queries/neighbors/distances agree,
 * neighbors/distances agree on k, query width equals index dim) are performed
 * by the delegate.
 */
template <typename T, typename IdxT>
void search(raft::resources const& res,
            const search_params& params,
            const index<T, IdxT>& idx,
            raft::device_matrix_view<const T, int64_t, raft::row_major> queries,
            raft::device_matrix_view<IdxT, int64_t, raft::row_major> neighbors,
            raft::device_matrix_view<float, int64_t, raft::row_major> distances)
{
  // NOTE: `search_with_filtering` is defined later in this header; the
  // dependent call is resolved via ADL (the `idx` argument associates the
  // cuvs::neighbors::cagra namespace) at the point of instantiation.
  search_with_filtering(res,
                        params,
                        idx,
                        queries,
                        neighbors,
                        distances,
                        cuvs::neighbors::filtering::none_cagra_sample_filter());
}
/**
* @brief Search ANN using the constructed index with the given sample filter.
*
* Usage example:
* @code{.cpp}
* using namespace cuvs::neighbors;
* // use default index parameters
* cagra::index_params index_params;
* // create and fill the index from a [N, D] dataset
* auto index = cagra::build(res, index_params, dataset);
* // use default search parameters
* cagra::search_params search_params;
* // create a bitset to filter the search
* auto removed_indices = raft::make_device_vector<IdxT>(res, n_removed_indices);
* raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset(
* res, removed_indices.view(), dataset.extent(0));
* // search K nearest neighbours according to a bitset
* auto neighbors = raft::make_device_matrix<uint32_t>(res, n_queries, k);
* auto distances = raft::make_device_matrix<float>(res, n_queries, k);
* cagra::search_with_filtering(res, search_params, index, queries, neighbors, distances,
* filtering::bitset_filter(removed_indices_bitset.view()));
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
* @tparam CagraSampleFilterT Device filter function, with the signature
* `(uint32_t query ix, uint32_t sample_ix) -> bool`
*
* @param[in] res raft resources
* @param[in] params configure the search
* @param[in] idx cagra index
* @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
* @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
* [n_queries, k]
* @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
* k]
* @param[in] sample_filter a device filter function that greenlights samples for a given query
*/
template <typename T, typename IdxT, typename CagraSampleFilterT>
void search_with_filtering(raft::resources const& res,
                           const search_params& params,
                           const index<T, IdxT>& idx,
                           raft::device_matrix_view<const T, int64_t, raft::row_major> queries,
                           raft::device_matrix_view<IdxT, int64_t, raft::row_major> neighbors,
                           raft::device_matrix_view<float, int64_t, raft::row_major> distances,
                           CagraSampleFilterT sample_filter = CagraSampleFilterT())
{
  // Shape validation: the three matrices must agree on the number of queries,
  // the outputs must agree on k, and the query width must match the index.
  const auto n_queries = queries.extent(0);
  RAFT_EXPECTS(
    n_queries == neighbors.extent(0) && n_queries == distances.extent(0),
    "Number of rows in output neighbors and distances matrices must equal the number of queries.");
  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1),
               "Number of columns in output neighbors and distances matrices must equal k");
  RAFT_EXPECTS(queries.extent(1) == idx.dim(),
               "Number of query dimensions should equal number of dimensions in the index.");

  // The detail layer operates on the unsigned twin of IdxT; alias the output
  // buffer accordingly (same width, so the reinterpret is layout-compatible).
  using unsigned_idx_t = typename std::make_unsigned<IdxT>::type;
  auto queries_view    = raft::make_device_matrix_view<const T, int64_t, raft::row_major>(
    queries.data_handle(), queries.extent(0), queries.extent(1));
  auto neighbors_view = raft::make_device_matrix_view<unsigned_idx_t, int64_t, raft::row_major>(
    reinterpret_cast<unsigned_idx_t*>(neighbors.data_handle()),
    neighbors.extent(0),
    neighbors.extent(1));
  auto distances_view = raft::make_device_matrix_view<float, int64_t, raft::row_major>(
    distances.data_handle(), distances.extent(0), distances.extent(1));

  cagra::detail::search_main<T, unsigned_idx_t, CagraSampleFilterT, IdxT>(
    res, params, idx, queries_view, neighbors_view, distances_view, sample_filter);
}
/** @} */ // end group cagra
} // namespace cuvs::neighbors::cagra
// TODO: Remove deprecated experimental namespace in 23.12 release
namespace cuvs::neighbors::experimental::cagra {
// Backward-compatibility aliases: the API graduated from the experimental
// namespace, but the old names are kept until the deprecated namespace is
// removed (see the TODO above).
using cuvs::neighbors::cagra::build;
using cuvs::neighbors::cagra::build_knn_graph;
using cuvs::neighbors::cagra::optimize;
using cuvs::neighbors::cagra::search;
using cuvs::neighbors::cagra::sort_knn_graph;
}  // namespace cuvs::neighbors::experimental::cagra
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_list.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/ivf_list_types.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/serialize.hpp>
#include <raft/util/integer_utils.hpp>
#include <thrust/fill.h>
#include <fstream>
#include <memory>
#include <type_traits>
namespace cuvs::neighbors::ivf {
/** The data for a single IVF list. */
template <template <typename, typename...> typename SpecT,
          typename SizeT,
          typename... SpecExtraArgs>
list<SpecT, SizeT, SpecExtraArgs...>::list(raft::resources const& res,
                                           const spec_type& spec,
                                           size_type n_rows)
  : size{n_rows}, data{res}, indices{res}
{
  // Choose the allocated capacity: large lists are padded up to a multiple of
  // the maximum alignment; small lists grow to a power of two clamped into
  // [align_min, align_max].
  const auto capacity = [&]() -> SizeT {
    if (n_rows >= spec.align_max) { return raft::round_up_safe<SizeT>(n_rows, spec.align_max); }
    auto cap = bound_by_power_of_two<SizeT>(std::max<SizeT>(n_rows, spec.align_min));
    return std::min<SizeT>(cap, spec.align_max);
  }();
  try {
    data    = raft::make_device_mdarray<value_type>(res, spec.make_list_extents(capacity));
    indices = raft::make_device_vector<index_type, SizeT>(res, capacity);
  } catch (std::bad_alloc& e) {
    RAFT_FAIL(
      "ivf::list: failed to allocate a big enough list to hold all data "
      "(requested size: %zu records, selected capacity: %zu records). "
      "Allocator exception: %s",
      size_t(size),
      size_t(capacity),
      e.what());
  }
  // Fill the index buffer with a pre-defined marker for easier debugging
  thrust::fill_n(raft::resource::get_thrust_policy(res),
                 indices.data_handle(),
                 indices.size(),
                 ivf::kInvalidRecord<index_type>);
}
/**
* Resize a list by the given id, so that it can contain the given number of records;
* copy the data if necessary.
*/
template <typename ListT>
void resize_list(raft::resources const& res,
                 std::shared_ptr<ListT>& orig_list,  // NOLINT
                 const typename ListT::spec_type& spec,
                 typename ListT::size_type new_used_size,
                 typename ListT::size_type old_used_size)
{
  if (orig_list) {
    if (new_used_size <= orig_list->indices.extent(0)) {
      // The new size fits in the existing allocation. We can keep the list if
      // it either shrinks, or we win the race to publish the grown size
      // (i.e. nobody else has grown it concurrently).
      auto expected_size = old_used_size;
      if (new_used_size <= old_used_size ||
          orig_list->size.compare_exchange_strong(expected_size, new_used_size)) {
        return;
      }
    }
  } else {
    // Nothing to copy from.
    old_used_size = 0;
  }

  // Allocate a fresh list and migrate the previously used records into it.
  auto new_list = std::make_shared<ListT>(res, spec, new_used_size);
  if (old_used_size > 0) {
    auto copied_view = make_mdspan<typename ListT::value_type,
                                   typename ListT::size_type,
                                   raft::row_major,
                                   false,
                                   true>(new_list->data.data_handle(),
                                         spec.make_list_extents(old_used_size));
    copy(copied_view.data_handle(),
         orig_list->data.data_handle(),
         copied_view.size(),
         resource::get_cuda_stream(res));
    copy(new_list->indices.data_handle(),
         orig_list->indices.data_handle(),
         old_used_size,
         resource::get_cuda_stream(res));
  }
  // Publish the new list in place of the old one.
  new_list.swap(orig_list);
}
template <typename ListT>
auto serialize_list(const raft::resources& handle,
                    std::ostream& os,
                    const ListT& ld,
                    const typename ListT::spec_type& store_spec,
                    std::optional<typename ListT::size_type> size_override = std::nullopt)
  -> enable_if_valid_list_t<ListT>
{
  using size_type = typename ListT::size_type;

  // Number of records to store; the caller may choose to serialize fewer
  // records than the list currently holds.
  const size_type n_rows = size_override.value_or(ld.size.load());
  serialize_scalar(handle, os, n_rows);
  if (n_rows == 0) { return; }

  // Stage the device-side data and indices on the host before writing.
  auto host_data = raft::make_host_mdarray<typename ListT::value_type, size_type, raft::row_major>(
    store_spec.make_list_extents(n_rows));
  auto host_inds = raft::make_host_mdarray<typename ListT::index_type, size_type, raft::row_major>(
    make_extents<size_type>(n_rows));
  copy(host_data.data_handle(),
       ld.data.data_handle(),
       host_data.size(),
       resource::get_cuda_stream(handle));
  copy(host_inds.data_handle(),
       ld.indices.data_handle(),
       host_inds.size(),
       resource::get_cuda_stream(handle));
  resource::sync_stream(handle);

  serialize_mdspan(handle, os, host_data.view());
  serialize_mdspan(handle, os, host_inds.view());
}
/** Write one (possibly null) IVF list to an output stream; a null list is stored as size 0. */
template <typename ListT>
auto serialize_list(const raft::resources& handle,
                    std::ostream& os,
                    const std::shared_ptr<ListT>& ld,
                    const typename ListT::spec_type& store_spec,
                    std::optional<typename ListT::size_type> size_override = std::nullopt)
  -> enable_if_valid_list_t<ListT>
{
  // Guard clause: an absent list is encoded as a bare zero size marker.
  if (!ld) { return serialize_scalar(handle, os, typename ListT::size_type{0}); }
  return serialize_list<ListT>(handle, os, *ld, store_spec, size_override);
}
/**
 * Read one IVF list from an input stream into `ld`, replacing any previous content.
 * The record must have been produced by `serialize_list` with a compatible `store_spec`.
 */
template <typename ListT>
auto deserialize_list(const raft::resources& handle,
                      std::istream& is,
                      std::shared_ptr<ListT>& ld,
                      const typename ListT::spec_type& store_spec,
                      const typename ListT::spec_type& device_spec) -> enable_if_valid_list_t<ListT>
{
  using size_type = typename ListT::size_type;
  auto size       = deserialize_scalar<size_type>(handle, is);
  // A zero size marker denotes an absent list: drop the current one and stop.
  if (size == 0) { return ld.reset(); }
  // Allocate a fresh device list for the deserialized content.
  std::make_shared<ListT>(handle, device_spec, size).swap(ld);
  auto data_extents = store_spec.make_list_extents(size);
  // Read the stored data and indices into host staging buffers first.
  auto data_array =
    raft::make_host_mdarray<typename ListT::value_type, size_type, raft::row_major>(data_extents);
  auto inds_array = raft::make_host_mdarray<typename ListT::index_type, size_type, raft::row_major>(
    make_extents<size_type>(size))
  ;
  deserialize_mdspan(handle, is, data_array.view());
  deserialize_mdspan(handle, is, inds_array.view());
  copy(ld->data.data_handle(),
       data_array.data_handle(),
       data_array.size(),
       resource::get_cuda_stream(handle));
  // NB: copying exactly 'size' indices to leave the rest 'kInvalidRecord' intact.
  copy(
    ld->indices.data_handle(), inds_array.data_handle(), size, resource::get_cuda_stream(handle));
  // Make sure the data is copied from host to device before the host arrays get out of the scope.
  resource::sync_stream(handle);
}
} // namespace cuvs::neighbors::ivf
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/brute_force-ext.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <optional>
#include <cuvs/distance/distance_types.hpp> // cuvs::distance::DistanceType
#include <cuvs/neighbors/brute_force_types.hpp>
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/core/resources.hpp> // raft::resources
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs::neighbors::brute_force {
template <typename value_t, typename idx_t>
inline void knn_merge_parts(
raft::resources const& handle,
raft::device_matrix_view<const value_t, idx_t, raft::row_major> in_keys,
raft::device_matrix_view<const idx_t, idx_t, raft::row_major> in_values,
raft::device_matrix_view<value_t, idx_t, raft::row_major> out_keys,
raft::device_matrix_view<idx_t, idx_t, raft::row_major> out_values,
size_t n_samples,
std::optional<raft::device_vector_view<idx_t, idx_t>> translations = std::nullopt) RAFT_EXPLICIT;
template <typename T, typename Accessor>
index<T> build(
raft::resources const& res,
raft::mdspan<const T, raft::matrix_extent<int64_t>, raft::row_major, Accessor> dataset,
cuvs::distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
T metric_arg = 0.0) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void search(raft::resources const& res,
const index<T>& idx,
raft::device_matrix_view<const T, int64_t, raft::row_major> queries,
raft::device_matrix_view<IdxT, int64_t, raft::row_major> neighbors,
raft::device_matrix_view<T, int64_t, raft::row_major> distances) RAFT_EXPLICIT;
template <typename idx_t,
typename value_t,
typename matrix_idx,
typename index_layout,
typename search_layout,
typename epilogue_op = raft::identity_op>
void knn(raft::resources const& handle,
std::vector<raft::device_matrix_view<const value_t, matrix_idx, index_layout>> index,
raft::device_matrix_view<const value_t, matrix_idx, search_layout> search,
raft::device_matrix_view<idx_t, matrix_idx, raft::row_major> indices,
raft::device_matrix_view<value_t, matrix_idx, raft::row_major> distances,
distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
std::optional<float> metric_arg = std::make_optional<float>(2.0f),
std::optional<idx_t> global_id_offset = std::nullopt,
epilogue_op distance_epilogue = raft::identity_op()) RAFT_EXPLICIT;
template <typename value_t, typename idx_t, typename idx_layout, typename query_layout>
void fused_l2_knn(raft::resources const& handle,
raft::device_matrix_view<const value_t, idx_t, idx_layout> index,
raft::device_matrix_view<const value_t, idx_t, query_layout> query,
raft::device_matrix_view<idx_t, idx_t, raft::row_major> out_inds,
raft::device_matrix_view<value_t, idx_t, raft::row_major> out_dists,
cuvs::distance::DistanceType metric) RAFT_EXPLICIT;
} // namespace cuvs::neighbors::brute_force
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
// No extern template for cuvs::neighbors::brute_force::knn_merge_parts
#define instantiate_raft_neighbors_brute_force_knn( \
idx_t, value_t, matrix_idx, index_layout, search_layout, epilogue_op) \
extern template void cuvs::neighbors::brute_force:: \
knn<idx_t, value_t, matrix_idx, index_layout, search_layout, epilogue_op>( \
raft::resources const& handle, \
std::vector<raft::device_matrix_view<const value_t, matrix_idx, index_layout>> index, \
raft::device_matrix_view<const value_t, matrix_idx, search_layout> search, \
raft::device_matrix_view<idx_t, matrix_idx, raft::row_major> indices, \
raft::device_matrix_view<value_t, matrix_idx, raft::row_major> distances, \
cuvs::distance::DistanceType metric, \
std::optional<float> metric_arg, \
std::optional<idx_t> global_id_offset, \
epilogue_op distance_epilogue);
instantiate_raft_neighbors_brute_force_knn(
int64_t, float, uint32_t, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
int64_t, float, int64_t, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
int, float, int, raft::row_major, raft::row_major, raft::identity_op);
instantiate_raft_neighbors_brute_force_knn(
uint32_t, float, uint32_t, raft::row_major, raft::row_major, raft::identity_op);
#undef instantiate_raft_neighbors_brute_force_knn
namespace cuvs::neighbors::brute_force {
extern template void search<float, int>(
raft::resources const& res,
const cuvs::neighbors::brute_force::index<float>& idx,
raft::device_matrix_view<const float, int64_t, raft::row_major> queries,
raft::device_matrix_view<int, int64_t, raft::row_major> neighbors,
raft::device_matrix_view<float, int64_t, raft::row_major> distances);
extern template void search<float, int64_t>(
raft::resources const& res,
const cuvs::neighbors::brute_force::index<float>& idx,
raft::device_matrix_view<const float, int64_t, raft::row_major> queries,
raft::device_matrix_view<int64_t, int64_t, raft::row_major> neighbors,
raft::device_matrix_view<float, int64_t, raft::row_major> distances);
extern template cuvs::neighbors::brute_force::index<float> build<float>(
raft::resources const& res,
raft::device_matrix_view<const float, int64_t, raft::row_major> dataset,
cuvs::distance::DistanceType metric,
float metric_arg);
} // namespace cuvs::neighbors::brute_force
#define instantiate_raft_neighbors_brute_force_fused_l2_knn( \
value_t, idx_t, idx_layout, query_layout) \
extern template void cuvs::neighbors::brute_force::fused_l2_knn( \
raft::resources const& handle, \
raft::device_matrix_view<const value_t, idx_t, idx_layout> index, \
raft::device_matrix_view<const value_t, idx_t, query_layout> query, \
raft::device_matrix_view<idx_t, idx_t, raft::row_major> out_inds, \
raft::device_matrix_view<value_t, idx_t, raft::row_major> out_dists, \
cuvs::distance::DistanceType metric);
instantiate_raft_neighbors_brute_force_fused_l2_knn(float,
int64_t,
raft::row_major,
raft::row_major)
#undef instantiate_raft_neighbors_brute_force_fused_l2_knn
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/refine-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <cuvs/distance/distance_types.hpp> // cuvs::distance::DistanceType
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/host_mdspan.hpp> // raft::host_matrix_view
#include <raft/core/resources.hpp> // raft::resources
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs::neighbors {
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> dataset,
raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> queries,
raft::device_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates,
raft::device_matrix_view<idx_t, matrix_idx, raft::row_major> indices,
raft::device_matrix_view<distance_t, matrix_idx, raft::row_major> distances,
cuvs::distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
RAFT_EXPLICIT;
template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx>
void refine(raft::resources const& handle,
raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> dataset,
raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> queries,
raft::host_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates,
raft::host_matrix_view<idx_t, matrix_idx, raft::row_major> indices,
raft::host_matrix_view<distance_t, matrix_idx, raft::row_major> distances,
cuvs::distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
RAFT_EXPLICIT;
} // namespace cuvs::neighbors
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_neighbors_refine(idx_t, data_t, distance_t, matrix_idx) \
extern template void cuvs::neighbors::refine<idx_t, data_t, distance_t, matrix_idx>( \
raft::resources const& handle, \
raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> dataset, \
raft::device_matrix_view<const data_t, matrix_idx, raft::row_major> queries, \
raft::device_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates, \
raft::device_matrix_view<idx_t, matrix_idx, raft::row_major> indices, \
raft::device_matrix_view<distance_t, matrix_idx, raft::row_major> distances, \
cuvs::distance::DistanceType metric); \
\
extern template void cuvs::neighbors::refine<idx_t, data_t, distance_t, matrix_idx>( \
raft::resources const& handle, \
raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> dataset, \
raft::host_matrix_view<const data_t, matrix_idx, raft::row_major> queries, \
raft::host_matrix_view<const idx_t, matrix_idx, raft::row_major> neighbor_candidates, \
raft::host_matrix_view<idx_t, matrix_idx, raft::row_major> indices, \
raft::host_matrix_view<distance_t, matrix_idx, raft::row_major> distances, \
cuvs::distance::DistanceType metric);
instantiate_raft_neighbors_refine(int64_t, float, float, int64_t);
instantiate_raft_neighbors_refine(int64_t, int8_t, float, int64_t);
instantiate_raft_neighbors_refine(int64_t, uint8_t, float, int64_t);
#undef instantiate_raft_neighbors_refine
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_flat_serialize.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/ivf_flat_serialize.cuh"
namespace cuvs::neighbors::ivf_flat {
/**
* \defgroup ivf_flat_serialize IVF-Flat Serialize
* @{
*/
/**
* Write the index to an output stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an output stream
* std::ostream os(std::cout.rdbuf());
* // create an index with `auto index = ivf_flat::build(...);`
 * cuvs::neighbors::ivf_flat::serialize(handle, os, index);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] os output stream
* @param[in] index IVF-Flat index
*
*/
template <typename T, typename IdxT>
void serialize(raft::resources const& handle, std::ostream& os, const index<T, IdxT>& index)
{
  // Thin public wrapper: the (experimental) stream format is defined by the detail layer.
  detail::serialize(handle, os, index);
}
/**
* Save the index to file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* // create an index with `auto index = ivf_flat::build(...);`
 * cuvs::neighbors::ivf_flat::serialize(handle, filename, index);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] filename the file name for saving the index
* @param[in] index IVF-Flat index
*
*/
template <typename T, typename IdxT>
void serialize(raft::resources const& handle,
               const std::string& filename,
               const index<T, IdxT>& index)
{
  // Thin public wrapper: file creation and the on-disk format are handled by the detail layer.
  detail::serialize(handle, filename, index);
}
/**
* Load index from input stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an input stream
* std::istream is(std::cin.rdbuf());
* using T = float; // data element type
* using IdxT = int; // type of the index
 * auto index = cuvs::neighbors::ivf_flat::deserialize<T, IdxT>(handle, is);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] is input stream
*
* @return cuvs::neighbors::ivf_flat::index<T, IdxT>
*/
template <typename T, typename IdxT>
index<T, IdxT> deserialize(raft::resources const& handle, std::istream& is)
{
  // Thin public wrapper: parsing of the (experimental) stream format lives in the detail layer.
  return detail::deserialize<T, IdxT>(handle, is);
}
/**
* Load index from file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* using T = float; // data element type
* using IdxT = int; // type of the index
 * auto index = cuvs::neighbors::ivf_flat::deserialize<T, IdxT>(handle, filename);
* @endcode
*
* @tparam T data element type
* @tparam IdxT type of the indices
*
* @param[in] handle the raft handle
* @param[in] filename the name of the file that stores the index
*
* @return cuvs::neighbors::ivf_flat::index<T, IdxT>
*/
template <typename T, typename IdxT>
index<T, IdxT> deserialize(raft::resources const& handle, const std::string& filename)
{
  // Thin public wrapper: file reading and format parsing are handled by the detail layer.
  return detail::deserialize<T, IdxT>(handle, filename);
}
/**@}*/
} // namespace cuvs::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_flat.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "ivf_flat-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "ivf_flat-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_pq_serialize.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/ivf_pq_serialize.cuh"
namespace cuvs::neighbors::ivf_pq {
/**
* \defgroup ivf_pq_serialize IVF-PQ Serialize
* @{
*/
/**
* Write the index to an output stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an output stream
* std::ostream os(std::cout.rdbuf());
* // create an index with `auto index = ivf_pq::build(...);`
 * cuvs::neighbors::ivf_pq::serialize(handle, os, index);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] os output stream
* @param[in] index IVF-PQ index
*
*/
template <typename IdxT>
void serialize(raft::resources const& handle, std::ostream& os, const index<IdxT>& index)
{
  // Thin public wrapper: the (experimental) stream format is defined by the detail layer.
  detail::serialize(handle, os, index);
}
/**
* Save the index to file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* // create an index with `auto index = ivf_pq::build(...);`
 * cuvs::neighbors::ivf_pq::serialize(handle, filename, index);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] filename the file name for saving the index
* @param[in] index IVF-PQ index
*
*/
template <typename IdxT>
void serialize(raft::resources const& handle, const std::string& filename, const index<IdxT>& index)
{
  // Thin public wrapper: file creation and the on-disk format are handled by the detail layer.
  detail::serialize(handle, filename, index);
}
/**
* Load index from input stream
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create an input stream
* std::istream is(std::cin.rdbuf());
* using IdxT = int; // type of the index
 * auto index = cuvs::neighbors::ivf_pq::deserialize<IdxT>(handle, is);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] is input stream
*
* @return cuvs::neighbors::ivf_pq::index<IdxT>
*/
template <typename IdxT>
index<IdxT> deserialize(raft::resources const& handle, std::istream& is)
{
  // Thin public wrapper: parsing of the (experimental) stream format lives in the detail layer.
  return detail::deserialize<IdxT>(handle, is);
}
/**
* Load index from file.
*
* Experimental, both the API and the serialization format are subject to change.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
*
* raft::resources handle;
*
* // create a string with a filepath
* std::string filename("/path/to/index");
* using IdxT = int; // type of the index
 * auto index = cuvs::neighbors::ivf_pq::deserialize<IdxT>(handle, filename);
* @endcode
*
* @tparam IdxT type of the index
*
* @param[in] handle the raft handle
* @param[in] filename the name of the file that stores the index
*
* @return cuvs::neighbors::ivf_pq::index<IdxT>
*/
template <typename IdxT>
index<IdxT> deserialize(raft::resources const& handle, const std::string& filename)
{
  // Thin public wrapper: file reading and format parsing are handled by the detail layer.
  return detail::deserialize<IdxT>(handle, filename);
}
/**@}*/
} // namespace cuvs::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EPSILON_NEIGH_H
#define __EPSILON_NEIGH_H
#pragma once
#include <cuvs/spatial/knn/detail/epsilon_neighborhood.cuh>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
namespace cuvs::neighbors::epsilon_neighborhood {
/**
* @brief Computes epsilon neighborhood for the L2-Squared distance metric
*
* @tparam value_t IO and math type
* @tparam idx_t Index type
*
* @param[out] adj adjacency matrix [row-major] [on device] [dim = m x n]
* @param[out] vd vertex degree array [on device] [len = m + 1]
* `vd + m` stores the total number of edges in the adjacency
* matrix. Pass a nullptr if you don't need this info.
* @param[in] x first matrix [row-major] [on device] [dim = m x k]
* @param[in] y second matrix [row-major] [on device] [dim = n x k]
* @param[in] m number of rows in x
* @param[in] n number of rows in y
 * @param[in] k number of columns in x and y
* @param[in] eps defines epsilon neighborhood radius (should be passed as
* squared as we compute L2-squared distance in this method)
* @param[in] stream cuda stream
*/
template <typename value_t, typename idx_t>
void epsUnexpL2SqNeighborhood(bool* adj,
                              idx_t* vd,
                              const value_t* x,
                              const value_t* y,
                              idx_t m,
                              idx_t n,
                              idx_t k,
                              value_t eps,
                              cudaStream_t stream)
{
  // Raw-pointer API: forwards to the detail implementation that launches the kernel
  // on the given stream. Prefer the mdspan-based eps_neighbors_l2sq overload.
  spatial::knn::detail::epsUnexpL2SqNeighborhood<value_t, idx_t>(
    adj, vd, x, y, m, n, k, eps, stream);
}
/**
 * @defgroup epsilon_neighbors Epsilon Neighborhood Operations
* @{
*/
/**
* @brief Computes epsilon neighborhood for the L2-Squared distance metric and given ball size.
* The epsilon neighbors is represented by a dense boolean adjacency matrix of size m * n and
* an array of degrees for each vertex, which can be used as a compressed sparse row (CSR)
* indptr array.
*
* @code{.cpp}
* #include <cuvs/neighbors/epsilon_neighborhood.cuh>
* #include <raft/core/resources.hpp>
* #include <raft/core/device_mdarray.hpp>
* using namespace cuvs::neighbors;
 * raft::resources handle;
* ...
* auto adj = raft::make_device_matrix<bool>(handle, m * n);
* auto vd = raft::make_device_vector<int>(handle, m+1);
* epsilon_neighborhood::eps_neighbors_l2sq(handle, x, y, adj.view(), vd.view(), eps);
* @endcode
*
* @tparam value_t IO and math type
* @tparam idx_t Index type
* @tparam matrix_idx_t matrix indexing type
*
* @param[in] handle raft handle to manage library resources
* @param[in] x first matrix [row-major] [on device] [dim = m x k]
* @param[in] y second matrix [row-major] [on device] [dim = n x k]
* @param[out] adj adjacency matrix [row-major] [on device] [dim = m x n]
* @param[out] vd vertex degree array [on device] [len = m + 1]
* `vd + m` stores the total number of edges in the adjacency
* matrix. Pass a nullptr if you don't need this info.
* @param[in] eps defines epsilon neighborhood radius (should be passed as
* squared as we compute L2-squared distance in this method)
*/
/**
 * mdspan-based front end for the L2-squared epsilon-neighborhood computation.
 * Dimensions are taken from the input views: x is m x k, y is n x k, adj is m x n.
 */
template <typename value_t, typename idx_t, typename matrix_idx_t>
void eps_neighbors_l2sq(raft::resources const& handle,
                        raft::device_matrix_view<const value_t, matrix_idx_t, raft::row_major> x,
                        raft::device_matrix_view<const value_t, matrix_idx_t, raft::row_major> y,
                        raft::device_matrix_view<bool, matrix_idx_t, raft::row_major> adj,
                        raft::device_vector_view<idx_t, matrix_idx_t> vd,
                        value_t eps)
{
  // Derive the problem dimensions from the views, then delegate to the raw-pointer API.
  const auto n_rows_x = x.extent(0);
  const auto n_rows_y = y.extent(0);
  const auto n_cols   = x.extent(1);
  epsUnexpL2SqNeighborhood<value_t, idx_t>(adj.data_handle(),
                                           vd.data_handle(),
                                           x.data_handle(),
                                           y.data_handle(),
                                           n_rows_x,
                                           n_rows_y,
                                           n_cols,
                                           eps,
                                           resource::get_cuda_stream(handle));
}
/** @} */ // end group epsilon_neighbors
} // namespace cuvs::neighbors::epsilon_neighborhood
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_pq-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // int64_t
#include <cuvs/neighbors/ivf_pq_types.hpp> // cuvs::neighbors::ivf_pq::index
#include <raft/core/device_mdspan.hpp> // raft::device_matrix_view
#include <raft/core/resources.hpp> // raft::resources
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/mr/device/per_device_resource.hpp> // rmm::mr::device_memory_resource
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace cuvs::neighbors::ivf_pq {
template <typename T, typename IdxT = uint32_t>
index<IdxT> build(raft::resources const& handle,
const index_params& params,
raft::device_matrix_view<const T, IdxT, raft::row_major> dataset) RAFT_EXPLICIT;
template <typename T, typename IdxT>
index<IdxT> extend(
raft::resources const& handle,
raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,
std::optional<raft::device_vector_view<const IdxT, IdxT, raft::row_major>> new_indices,
const index<IdxT>& idx) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,
std::optional<raft::device_vector_view<const IdxT, IdxT, raft::row_major>> new_indices,
index<IdxT>* idx) RAFT_EXPLICIT;
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
const search_params& params,
const index<IdxT>& idx,
raft::device_matrix_view<const T, uint32_t, raft::row_major> queries,
raft::device_matrix_view<IdxT, uint32_t, raft::row_major> neighbors,
raft::device_matrix_view<float, uint32_t, raft::row_major> distances,
IvfSampleFilterT sample_filter) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void search(raft::resources const& handle,
const search_params& params,
const index<IdxT>& idx,
raft::device_matrix_view<const T, uint32_t, raft::row_major> queries,
raft::device_matrix_view<IdxT, uint32_t, raft::row_major> neighbors,
raft::device_matrix_view<float, uint32_t, raft::row_major> distances) RAFT_EXPLICIT;
template <typename T, typename IdxT = uint32_t>
auto build(raft::resources const& handle,
const index_params& params,
const T* dataset,
IdxT n_rows,
uint32_t dim) -> index<IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
const index<IdxT>& idx,
const T* new_vectors,
const IdxT* new_indices,
IdxT n_rows) -> index<IdxT> RAFT_EXPLICIT;
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
index<IdxT>* idx,
const T* new_vectors,
const IdxT* new_indices,
IdxT n_rows) RAFT_EXPLICIT;
template <typename T, typename IdxT, typename IvfSampleFilterT>
void search_with_filtering(raft::resources const& handle,
const cuvs::neighbors::ivf_pq::search_params& params,
const index<IdxT>& idx,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances,
IvfSampleFilterT sample_filter = IvfSampleFilterT{}) RAFT_EXPLICIT;
template <typename T, typename IdxT>
void search(raft::resources const& handle,
const cuvs::neighbors::ivf_pq::search_params& params,
const index<IdxT>& idx,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances) RAFT_EXPLICIT;
template <typename T, typename IdxT, typename IvfSampleFilterT>
[[deprecated(
"Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search_with_filtering(raft::resources const& handle,
const cuvs::neighbors::ivf_pq::search_params& params,
const index<IdxT>& idx,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances,
rmm::mr::device_memory_resource* mr,
IvfSampleFilterT sample_filter = IvfSampleFilterT{}) RAFT_EXPLICIT;
template <typename T, typename IdxT>
[[deprecated(
"Drop the `mr` argument and use `raft::resource::set_workspace_resource` instead")]] void
search(raft::resources const& handle,
const cuvs::neighbors::ivf_pq::search_params& params,
const index<IdxT>& idx,
const T* queries,
uint32_t n_queries,
uint32_t k,
IdxT* neighbors,
float* distances,
rmm::mr::device_memory_resource* mr) RAFT_EXPLICIT;
} // namespace cuvs::neighbors::ivf_pq
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_neighbors_ivf_pq_build(T, IdxT) \
extern template cuvs::neighbors::ivf_pq::index<IdxT> cuvs::neighbors::ivf_pq::build<T, IdxT>( \
raft::resources const& handle, \
const cuvs::neighbors::ivf_pq::index_params& params, \
raft::device_matrix_view<const T, IdxT, raft::row_major> dataset); \
\
extern template auto cuvs::neighbors::ivf_pq::build( \
raft::resources const& handle, \
const cuvs::neighbors::ivf_pq::index_params& params, \
const T* dataset, \
IdxT n_rows, \
uint32_t dim) \
->cuvs::neighbors::ivf_pq::index<IdxT>;
instantiate_raft_neighbors_ivf_pq_build(float, int64_t);
instantiate_raft_neighbors_ivf_pq_build(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_build(uint8_t, int64_t);
#undef instantiate_raft_neighbors_ivf_pq_build
// Explicit-instantiation declarations for the four ivf_pq::extend overloads
// (mdspan const-index / mdspan in-place, and raw-pointer const-index / in-place)
// for one (T, IdxT) pair. See the note on instantiate_raft_neighbors_ivf_pq_build above.
#define instantiate_raft_neighbors_ivf_pq_extend(T, IdxT)                                         \
  extern template cuvs::neighbors::ivf_pq::index<IdxT> cuvs::neighbors::ivf_pq::extend<T, IdxT>(  \
    raft::resources const& handle,                                                                \
    raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,                         \
    std::optional<raft::device_vector_view<const IdxT, IdxT, raft::row_major>> new_indices,       \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx);                                             \
                                                                                                  \
  extern template void cuvs::neighbors::ivf_pq::extend<T, IdxT>(                                  \
    raft::resources const& handle,                                                                \
    raft::device_matrix_view<const T, IdxT, raft::row_major> new_vectors,                         \
    std::optional<raft::device_vector_view<const IdxT, IdxT, raft::row_major>> new_indices,       \
    cuvs::neighbors::ivf_pq::index<IdxT>* idx);                                                   \
                                                                                                  \
  extern template auto cuvs::neighbors::ivf_pq::extend<T, IdxT>(                                  \
    raft::resources const& handle,                                                                \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx,                                              \
    const T* new_vectors,                                                                         \
    const IdxT* new_indices,                                                                      \
    IdxT n_rows)                                                                                  \
    ->cuvs::neighbors::ivf_pq::index<IdxT>;                                                       \
                                                                                                  \
  extern template void cuvs::neighbors::ivf_pq::extend<T, IdxT>(                                  \
    raft::resources const& handle,                                                                \
    cuvs::neighbors::ivf_pq::index<IdxT>* idx,                                                    \
    const T* new_vectors,                                                                         \
    const IdxT* new_indices,                                                                      \
    IdxT n_rows);

// The supported (element type, index type) combinations.
instantiate_raft_neighbors_ivf_pq_extend(float, int64_t);
instantiate_raft_neighbors_ivf_pq_extend(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_extend(uint8_t, int64_t);

#undef instantiate_raft_neighbors_ivf_pq_extend
// Explicit-instantiation declarations for the ivf_pq::search overloads (mdspan API, the
// deprecated raw-pointer + memory-resource API, and the raw-pointer API without `mr`).
// NOTE: unlike the build/extend macros above, this macro intentionally omits the trailing
// semicolon after the last declaration; each invocation line below supplies it. Adding one
// here would produce empty declarations (`;;`) at the call sites.
#define instantiate_raft_neighbors_ivf_pq_search(T, IdxT)                  \
  extern template void cuvs::neighbors::ivf_pq::search<T, IdxT>(           \
    raft::resources const& handle,                                         \
    const cuvs::neighbors::ivf_pq::search_params& params,                  \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx,                       \
    raft::device_matrix_view<const T, uint32_t, raft::row_major> queries,  \
    raft::device_matrix_view<IdxT, uint32_t, raft::row_major> neighbors,   \
    raft::device_matrix_view<float, uint32_t, raft::row_major> distances); \
                                                                           \
  extern template void cuvs::neighbors::ivf_pq::search<T, IdxT>(           \
    raft::resources const& handle,                                         \
    const cuvs::neighbors::ivf_pq::search_params& params,                  \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx,                       \
    const T* queries,                                                      \
    uint32_t n_queries,                                                    \
    uint32_t k,                                                            \
    IdxT* neighbors,                                                       \
    float* distances,                                                      \
    rmm::mr::device_memory_resource* mr);                                  \
                                                                           \
  extern template void cuvs::neighbors::ivf_pq::search<T, IdxT>(           \
    raft::resources const& handle,                                         \
    const cuvs::neighbors::ivf_pq::search_params& params,                  \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx,                       \
    const T* queries,                                                      \
    uint32_t n_queries,                                                    \
    uint32_t k,                                                            \
    IdxT* neighbors,                                                       \
    float* distances)

// The supported (element type, index type) combinations.
instantiate_raft_neighbors_ivf_pq_search(float, int64_t);
instantiate_raft_neighbors_ivf_pq_search(int8_t, int64_t);
instantiate_raft_neighbors_ivf_pq_search(uint8_t, int64_t);

#undef instantiate_raft_neighbors_ivf_pq_search
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ann_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
namespace cuvs::neighbors::ann {

/**
 * @defgroup ann_types Approximate Nearest Neighbors Types
 * @{
 */

/** The base for approximate KNN index structures. */
struct index {};

/** The base for KNN index parameters. */
struct index_params {
  /** Distance type. */
  cuvs::distance::DistanceType metric = distance::DistanceType::L2Expanded;
  /** The argument used by some distance metrics. */
  float metric_arg = 2.0f;
  /**
   * Whether to add the dataset content to the index, i.e.:
   *
   * - `true` means the index is filled with the dataset vectors and ready to search after calling
   * `build`.
   * - `false` means `build` only trains the underlying model (e.g. quantizer or clustering), but
   * the index is left empty; you'd need to call `extend` on the index afterwards to populate it.
   */
  bool add_data_on_build = true;
};

/** The base for KNN search parameters. */
struct search_params {};

/** @} */  // end group ann_types

}  // namespace cuvs::neighbors::ann
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/ivf_pq.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "ivf_pq-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "ivf_pq-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/neighbors/cagra_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/detail/cagra/utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/integer_utils.hpp>
#include <memory>
#include <optional>
#include <string>
#include <thrust/fill.h>
#include <type_traits>
#include <raft/core/logger.hpp>
namespace cuvs::neighbors::cagra {
/**
* @addtogroup cagra
* @{
*/
/**
 * @brief ANN algorithm used by CAGRA to build the intermediate all-neighbors knn graph.
 */
enum class graph_build_algo {
  /** Use IVF-PQ to build the all-neighbors knn graph. */
  IVF_PQ,
  /** Experimental: use NN-Descent to build the all-neighbors knn graph. */
  NN_DESCENT
};
/** Parameters controlling CAGRA index construction (graph build + pruning). */
struct index_params : ann::index_params {
  /** Degree of input graph for pruning. */
  size_t intermediate_graph_degree = 128;
  /** Degree of output graph. */
  size_t graph_degree = 64;
  /** ANN algorithm to build knn graph. */
  graph_build_algo build_algo = graph_build_algo::IVF_PQ;
  /** Number of iterations to run when building with NN_DESCENT; ignored otherwise. */
  size_t nn_descent_niter = 20;
};
/** Search kernel variant; AUTO lets the library pick one. */
enum class search_algo {
  /** For large batch sizes. */
  SINGLE_CTA,
  /** For small batch sizes. */
  MULTI_CTA,
  /** Multi-kernel implementation — TODO(review): document when this is preferable. */
  MULTI_KERNEL,
  /** Choose automatically. */
  AUTO
};

/** Hashmap variant used during search (see search_params::hashmap_* fields below). */
enum class hash_mode { HASH, SMALL, AUTO };
/** Parameters controlling a CAGRA graph search. */
struct search_params : ann::search_params {
  /** Maximum number of queries to search at the same time (batch size). Auto select when 0.*/
  size_t max_queries = 0;

  /** Number of intermediate search results retained during the search.
   *
   * This is the main knob to adjust trade off between accuracy and search speed.
   * Higher values improve the search accuracy.
   */
  size_t itopk_size = 64;

  /** Upper limit of search iterations. Auto select when 0.*/
  size_t max_iterations = 0;

  // In the following we list additional search parameters for fine tuning.
  // Reasonable default values are automatically chosen.

  /** Which search implementation to use. */
  search_algo algo = search_algo::AUTO;

  /** Number of threads used to calculate a single distance. 4, 8, 16, or 32. */
  size_t team_size = 0;

  /** Number of graph nodes to select as the starting point for the search in each iteration
   * (sometimes called the "search width"). */
  size_t search_width = 1;
  /** Lower limit of search iterations. */
  size_t min_iterations = 0;

  /** Thread block size. 0, 64, 128, 256, 512, 1024. Auto selection when 0. */
  size_t thread_block_size = 0;
  /** Hashmap type. Auto selection when AUTO. */
  hash_mode hashmap_mode = hash_mode::AUTO;
  /** Lower limit of hashmap bit length. More than 8. */
  size_t hashmap_min_bitlen = 0;
  /** Upper limit of hashmap fill rate. More than 0.1, less than 0.9.*/
  float hashmap_max_fill_rate = 0.5;

  /** Number of iterations of initial random seed node selection. 1 or more. */
  uint32_t num_random_samplings = 1;
  /** Bit mask used for initial random seed node selection. */
  uint64_t rand_xor_mask = 0x128394;
};
static_assert(std::is_aggregate_v<index_params>);
static_assert(std::is_aggregate_v<search_params>);
/**
 * @brief CAGRA index.
 *
 * The index stores the dataset and a kNN graph in device memory.
 *
 * Ownership model: the index either *owns* device copies of the dataset/graph (in `dataset_` /
 * `graph_`) or merely *references* caller-provided device arrays; in both cases searches go
 * through the non-owning views `dataset_view_` / `graph_view_`.
 *
 * @tparam T data element type
 * @tparam IdxT type of the vector indices (represent dataset.extent(0))
 *
 */
template <typename T, typename IdxT>
struct index : ann::index {
  // Graph neighbor ids are stored as uint32_t internally, so IdxT must not narrow them.
  static_assert(!raft::is_narrowing_v<uint32_t, IdxT>,
                "IdxT must be able to represent all values of uint32_t");

 public:
  /** Distance metric used for clustering. */
  [[nodiscard]] constexpr inline auto metric() const noexcept -> cuvs::distance::DistanceType
  {
    return metric_;
  }

  /** Total length of the index (number of vectors). */
  [[nodiscard]] constexpr inline auto size() const noexcept -> IdxT
  {
    return dataset_view_.extent(0);
  }

  /** Dimensionality of the data. */
  [[nodiscard]] constexpr inline auto dim() const noexcept -> uint32_t
  {
    return dataset_view_.extent(1);
  }
  /** Graph degree */
  [[nodiscard]] constexpr inline auto graph_degree() const noexcept -> uint32_t
  {
    return graph_view_.extent(1);
  }

  /** Dataset [size, dim] (strided view: rows may be padded for aligned access). */
  [[nodiscard]] inline auto dataset() const noexcept
    -> raft::device_matrix_view<const T, int64_t, raft::layout_stride>
  {
    return dataset_view_;
  }

  /** neighborhood graph [size, graph-degree] */
  [[nodiscard]] inline auto graph() const noexcept
    -> raft::device_matrix_view<const IdxT, int64_t, raft::row_major>
  {
    return graph_view_;
  }

  // Don't allow copying the index for performance reasons (try avoiding copying data)
  index(const index&)                    = delete;
  index(index&&)                         = default;
  auto operator=(const index&) -> index& = delete;
  auto operator=(index&&) -> index&      = default;
  ~index()                               = default;

  /** Construct an empty index. */
  index(raft::resources const& res,
        cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Expanded)
    : ann::index(),
      metric_(metric),
      dataset_(raft::make_device_matrix<T, int64_t>(res, 0, 0)),
      graph_(raft::make_device_matrix<IdxT, int64_t>(res, 0, 0))
  {
  }

  /** Construct an index from dataset and knn_graph arrays
   *
   * If the dataset and graph is already in GPU memory, then the index is just a thin wrapper around
   * these that stores a non-owning a reference to the arrays.
   *
   * The constructor also accepts host arrays. In that case they are copied to the device, and the
   * device arrays will be owned by the index.
   *
   * In case the dasates rows are not 16 bytes aligned, then we create a padded copy in device
   * memory to ensure alignment for vectorized load.
   *
   * Usage examples:
   *
   * - Cagra index is normally created by the cagra::build
   * @code{.cpp}
   *   using namespace cuvs::neighbors::experimental;
   *   auto dataset = raft::make_host_matrix<float, int64_t>(n_rows, n_cols);
   *   load_dataset(dataset.view());
   *   // use default index parameters
   *   cagra::index_params index_params;
   *   // create and fill the index from a [N, D] dataset
   *   auto index = cagra::build(res, index_params, dataset);
   *   // use default search parameters
   *   cagra::search_params search_params;
   *   // search K nearest neighbours
   *   auto neighbors = raft::make_device_matrix<uint32_t, int64_t>(res, n_queries, k);
   *   auto distances = raft::make_device_matrix<float, int64_t>(res, n_queries, k);
   *   cagra::search(res, search_params, index, queries, neighbors, distances);
   * @endcode
   *   In the above example, we have passed a host dataset to build. The returned index will own a
   * device copy of the dataset and the knn_graph. In contrast, if we pass the dataset as a
   * raft::device_mdspan to build, then it will only store a reference to it.
   *
   * - Constructing index using existing knn-graph
   * @code{.cpp}
   *   using namespace cuvs::neighbors::experimental;
   *
   *   auto dataset = raft::make_device_matrix<float, int64_t>(res, n_rows, n_cols);
   *   auto knn_graph = raft::make_device_matrix<uint32_n, int64_t>(res, n_rows, graph_degree);
   *
   *   // custom loading and graph creation
   *   // load_dataset(dataset.view());
   *   // create_knn_graph(knn_graph.view());
   *
   *   // Wrap the existing device arrays into an index structure
   *   cagra::index<T, IdxT> index(res, metric, raft::make_const_mdspan(dataset.view()),
   *                               raft::make_const_mdspan(knn_graph.view()));
   *
   *   // Both knn_graph and dataset objects have to be in scope while the index is used because
   *   // the index only stores a reference to these.
   *   cagra::search(res, search_params, index, queries, neighbors, distances);
   * @endcode
   *
   */
  template <typename data_accessor, typename graph_accessor>
  index(raft::resources const& res,
        cuvs::distance::DistanceType metric,
        raft::mdspan<const T, raft::matrix_extent<int64_t>, raft::row_major, data_accessor> dataset,
        raft::mdspan<const IdxT, raft::matrix_extent<int64_t>, raft::row_major, graph_accessor>
          knn_graph)
    : ann::index(),
      metric_(metric),
      dataset_(raft::make_device_matrix<T, int64_t>(res, 0, 0)),
      graph_(raft::make_device_matrix<IdxT, int64_t>(res, 0, 0))
  {
    RAFT_EXPECTS(dataset.extent(0) == knn_graph.extent(0),
                 "Dataset and knn_graph must have equal number of rows");
    update_dataset(res, dataset);
    update_graph(res, knn_graph);
    // Ensure any host-to-device copies issued by the update_* calls above have completed
    // before the caller's (possibly host-side) buffers go out of scope.
    raft::resource::sync_stream(res);
  }

  /**
   * Replace the dataset with a new dataset.
   *
   * If the new dataset rows are aligned on 16 bytes, then only a reference is stored to the
   * dataset. It is the caller's responsibility to ensure that dataset stays alive as long as the
   * index.
   */
  void update_dataset(raft::resources const& res,
                      raft::device_matrix_view<const T, int64_t, raft::row_major> dataset)
  {
    // 16-byte row alignment is required for vectorized loads; otherwise fall back to a padded
    // owned copy.
    if (dataset.extent(1) * sizeof(T) % 16 != 0) {
      RAFT_LOG_DEBUG("Creating a padded copy of CAGRA dataset in device memory");
      copy_padded(res, dataset);
    } else {
      dataset_view_ = raft::make_device_strided_matrix_view<const T, int64_t>(
        dataset.data_handle(), dataset.extent(0), dataset.extent(1), dataset.extent(1));
    }
  }

  /**
   * Replace the dataset with a new dataset.
   *
   * We create a copy of the dataset on the device. The index manages the lifetime of this copy.
   */
  void update_dataset(raft::resources const& res,
                      raft::host_matrix_view<const T, int64_t, raft::row_major> dataset)
  {
    RAFT_LOG_DEBUG("Copying CAGRA dataset from host to device");
    copy_padded(res, dataset);
  }

  /**
   * Replace the graph with a new graph.
   *
   * Since the new graph is a device array, we store a reference to that, and it is
   * the caller's responsibility to ensure that knn_graph stays alive as long as the index.
   */
  void update_graph(raft::resources const& res,
                    raft::device_matrix_view<const IdxT, int64_t, raft::row_major> knn_graph)
  {
    graph_view_ = knn_graph;
  }

  /**
   * Replace the graph with a new graph.
   *
   * We create a copy of the graph on the device. The index manages the lifetime of this copy.
   */
  void update_graph(raft::resources const& res,
                    raft::host_matrix_view<const IdxT, int64_t, raft::row_major> knn_graph)
  {
    RAFT_LOG_DEBUG("Copying CAGRA knn graph from host to device");

    if ((graph_.extent(0) != knn_graph.extent(0)) || (graph_.extent(1) != knn_graph.extent(1))) {
      // clear existing memory before allocating to prevent OOM errors on large graphs
      if (graph_.size()) { graph_ = raft::make_device_matrix<IdxT, int64_t>(res, 0, 0); }
      graph_ =
        raft::make_device_matrix<IdxT, int64_t>(res, knn_graph.extent(0), knn_graph.extent(1));
    }
    raft::copy(graph_.data_handle(),
               knn_graph.data_handle(),
               knn_graph.size(),
               raft::resource::get_cuda_stream(res));
    graph_view_ = graph_.view();
  }

 private:
  /** Create a device copy of the dataset, and pad it if necessary. */
  template <typename data_accessor>
  void copy_padded(
    raft::resources const& res,
    raft::mdspan<const T, raft::matrix_extent<int64_t>, raft::row_major, data_accessor> dataset)
  {
    detail::copy_with_padding(res, dataset_, dataset);

    // The view keeps the logical (unpadded) column count from the source, while the stride is
    // the padded row width of the owned copy — hence the mix of `dataset` and `dataset_` below.
    dataset_view_ = raft::make_device_strided_matrix_view<const T, int64_t>(
      dataset_.data_handle(), dataset_.extent(0), dataset.extent(1), dataset_.extent(1));
    RAFT_LOG_DEBUG("CAGRA dataset strided matrix view %zux%zu, stride %zu",
                   static_cast<size_t>(dataset_view_.extent(0)),
                   static_cast<size_t>(dataset_view_.extent(1)),
                   static_cast<size_t>(dataset_view_.stride(0)));
  }

  cuvs::distance::DistanceType metric_;
  // Owning device storage; empty (0x0) when the index only references external device arrays.
  raft::device_matrix<T, int64_t, raft::row_major> dataset_;
  raft::device_matrix<IdxT, int64_t, raft::row_major> graph_;
  // Non-owning views used by accessors/search; they point either at the owned matrices above or
  // at caller-provided device memory.
  raft::device_matrix_view<const T, int64_t, raft::layout_stride> dataset_view_;
  raft::device_matrix_view<const IdxT, int64_t, raft::row_major> graph_view_;
};
/** @} */
} // namespace cuvs::neighbors::cagra
// Deprecated compatibility aliases: the API graduated out of the `experimental` namespace.
// TODO: Remove deprecated experimental namespace in 23.12 release
namespace cuvs::neighbors::experimental::cagra {
using cuvs::neighbors::cagra::graph_build_algo;
using cuvs::neighbors::cagra::hash_mode;
using cuvs::neighbors::cagra::index;
using cuvs::neighbors::cagra::index_params;
using cuvs::neighbors::cagra::search_algo;
using cuvs::neighbors::cagra::search_params;
}  // namespace cuvs::neighbors::experimental::cagra
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.