repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/modularity_maximization.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MODULARITY_MAXIMIZATION_H
#define __MODULARITY_MAXIMIZATION_H
#pragma once
#include <tuple>
#include <raft/spectral/detail/modularity_maximization.hpp>
namespace raft {
namespace spectral {
// =========================================================
// Spectral modularity_maximization
// =========================================================
/** Compute partition for a weighted undirected graph via
 * spectral modularity maximization. The returned clustering
 * attempts to maximize the graph's modularity score.
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param eigen_solver Eigensolver implementation
* @param cluster_solver Cluster solver implementation
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param eigVals Output eigenvalue array pointer on device
* @param eigVecs Output eigenvector array pointer on device
 * @return statistics: number of eigensolver iterations, cluster-solver residual,
 * and number of cluster-solver iterations.
*/
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> modularity_maximization(
  raft::resources const& handle,
  matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  // Thin public shim: all of the work happens in the detail namespace.
  // Template arguments are deduced from the forwarded arguments
  // (vertex_t/weight_t from csr_m, the solver types from the solver objects).
  return raft::spectral::detail::modularity_maximization(
    handle, csr_m, eigen_solver, cluster_solver, clusters, eigVals, eigVecs);
}
//===================================================
// Analysis of graph partition
// =========================================================
/// Compute modularity
/** This function determines the modularity based on a graph and cluster assignments
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param nClusters Number of clusters.
* @param clusters (Input, device memory, n entries) Cluster assignments.
* @param modularity On exit, modularity
*/
template <typename vertex_t, typename weight_t>
void analyzeModularity(raft::resources const& handle,
                       matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                       vertex_t nClusters,
                       vertex_t const* __restrict__ clusters,
                       weight_t& modularity)
{
  // Public shim over the detail implementation; vertex_t and weight_t are
  // deduced from csr_m and the clusters pointer.
  raft::spectral::detail::analyzeModularity(handle, csr_m, nClusters, clusters, modularity);
}
} // namespace spectral
} // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/cluster_solvers_deprecated.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Note: This file is deprecated and will be removed in a future release
* Please use include/raft/cluster/kmeans.cuh instead
*/
#ifndef __CLUSTER_SOLVERS_deprecated_H
#define __CLUSTER_SOLVERS_deprecated_H
#pragma once
#include <raft/cluster/kmeans_deprecated.cuh>
#include <utility> // for std::pair
namespace raft {
namespace spectral {
using namespace matrix;
// aggregate of control params for the (deprecated) k-means Cluster Solver:
//
// Control parameters consumed by kmeans_solver_deprecated_t below.
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct cluster_solver_config_deprecated_t {
  // Number of clusters (k) to produce.
  size_type_t n_clusters;
  // Maximum number of k-means iterations.
  size_type_t maxIter;
  // Convergence tolerance passed to the k-means solver.
  value_type_t tol;
  // Seed for centroid initialization (default matches the historical value).
  unsigned long long seed{123456};
};
// Deprecated cluster-solver wrapper exposing raft::cluster::kmeans through the
// solve()/get_config() interface used by the spectral partitioners.
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct kmeans_solver_deprecated_t {
  explicit kmeans_solver_deprecated_t(
    cluster_solver_config_deprecated_t<index_type_t, value_type_t, size_type_t> const& config)
    : config_(config)
  {
  }

  // Cluster n_obs_vecs observation vectors of dimension dim (column-major in
  // obs) into config_.n_clusters groups, writing assignments to codes.
  // Returns (residual, number of iterations performed).
  std::pair<value_type_t, index_type_t> solve(raft::resources const& handle,
                                              size_type_t n_obs_vecs,
                                              size_type_t dim,
                                              value_type_t const* __restrict__ obs,
                                              index_type_t* __restrict__ codes) const
  {
    RAFT_EXPECTS(obs != nullptr, "Null obs buffer.");
    RAFT_EXPECTS(codes != nullptr, "Null codes buffer.");

    value_type_t final_residual{};
    index_type_t n_iters{};
    raft::cluster::kmeans(handle,
                          n_obs_vecs,
                          dim,
                          config_.n_clusters,
                          config_.tol,
                          config_.maxIter,
                          obs,
                          codes,
                          final_residual,
                          n_iters,
                          config_.seed);
    return {final_residual, n_iters};
  }

  // Read-only access to the configuration this solver was built with.
  auto const& get_config(void) const { return config_; }

 private:
  cluster_solver_config_deprecated_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/partition.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __PARTITION_H
#define __PARTITION_H
#pragma once
#include <tuple>
#include <raft/spectral/detail/partition.hpp>
namespace raft {
namespace spectral {
// =========================================================
// Spectral partitioner
// =========================================================
/// Compute spectral graph partition
/** Compute partition for a weighted undirected graph. This
* partition attempts to minimize the cost function:
* Cost = \f$sum_i\f$ (Edges cut by ith partition)/(Vertices in ith partition)
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param eigen_solver Eigensolver implementation
* @param cluster_solver Cluster solver implementation
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param eigVals Output eigenvalue array pointer on device
* @param eigVecs Output eigenvector array pointer on device
 * @return statistics: number of eigensolver iterations, cluster-solver residual,
 * and number of cluster-solver iterations.
*/
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> partition(
  raft::resources const& handle,
  matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  // Thin public shim: forward to the detail implementation and let the
  // template arguments be deduced from the forwarded arguments.
  return raft::spectral::detail::partition(
    handle, csr_m, eigen_solver, cluster_solver, clusters, eigVals, eigVecs);
}
// =========================================================
// Analysis of graph partition
// =========================================================
/// Compute cost function for partition
/** This function determines the edges cut by a partition and a cost
* function:
* Cost = \f$sum_i\f$ (Edges cut by ith partition)/(Vertices in ith partition)
* Graph is assumed to be weighted and undirected.
*
* @param handle raft handle for managing expensive resources
* @param csr_m Weighted graph in CSR format
* @param nClusters Number of partitions.
* @param clusters (Input, device memory, n entries) Partition
* assignments.
* @param edgeCut On exit, weight of edges cut by partition.
* @param cost On exit, partition cost function.
*/
template <typename vertex_t, typename weight_t>
void analyzePartition(raft::resources const& handle,
                      matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                      vertex_t nClusters,
                      const vertex_t* __restrict__ clusters,
                      weight_t& edgeCut,
                      weight_t& cost)
{
  // Public shim over the detail implementation; vertex_t and weight_t are
  // deduced from csr_m and the clusters pointer.
  raft::spectral::detail::analyzePartition(handle, csr_m, nClusters, clusters, edgeCut, cost);
}
} // namespace spectral
} // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/warn_dbg.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdexcept>
#include <string>
#include <raft/core/detail/macros.hpp>
// Debug-only logging helpers. When DEBUG is defined, WARNING(msg) streams a
// "Warning (<file>:<line>): <msg>" message to std::cerr; otherwise it expands
// to a no-op statement.
#ifdef DEBUG
// The macro bodies below use std::cout/std::cerr and std::stringstream, but
// this header did not include the required headers; include them here so that
// DEBUG builds actually compile.
#include <iostream>
#include <sstream>
#define COUT() (std::cout)
#define CERR() (std::cerr)
#define WARNING(message)                                                  \
  do {                                                                    \
    std::stringstream ss;                                                 \
    ss << "Warning (" << __FILE__ << ":" << __LINE__ << "): " << message; \
    CERR() << ss.str() << std::endl;                                      \
  } while (0)
#else  // DEBUG
// Expand to a harmless empty statement (instead of nothing) so that
// `WARNING(x);` remains a valid single statement in every control-flow
// position, e.g. as the sole body of an if/else branch.
#define WARNING(message) \
  do {                   \
  } while (0)
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/spectral_util.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/matrix_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
namespace raft {
namespace spectral {
// Scale each column of the m x n column-major matrix `obs` (device memory) by
// dividing it by a per-thread accumulated root-sum-of-squares (the column's
// Euclidean norm when each (blockIdx.y, threadIdx.y) pair handles exactly one
// column -- the launch configuration in scale_obs below guarantees
// blockDim.y * gridDim.y >= n, so the outer j-loop executes at most once per
// thread; TODO confirm no other caller launches this kernel differently).
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL scale_obs_kernel(index_type_t m, index_type_t n, value_type_t* obs)
{
  index_type_t i, j, k, index, mm;
  value_type_t alpha, v, last;
  bool valid;
  // ASSUMPTION: kernel is launched with either 2, 4, 8, 16 or 32 threads in x-dimension
  // compute alpha
  mm = (((m + blockDim.x - 1) / blockDim.x) * blockDim.x); // m in multiple of blockDim.x
  alpha = 0.0;
  for (j = threadIdx.y + blockIdx.y * blockDim.y; j < n; j += blockDim.y * gridDim.y) {
    // i ranges over the padded row count mm so every lane stays in the loop
    // and can participate in the *_sync shuffles below.
    for (i = threadIdx.x; i < mm; i += blockDim.x) {
      // check if the thread is valid
      valid = i < m;
      // get the value of the last thread
      last = __shfl_sync(warp_full_mask(), alpha, blockDim.x - 1, blockDim.x);
      // if you are valid read the value from memory, otherwise set your value to 0
      alpha = (valid) ? obs[i + j * m] : 0.0;
      alpha = alpha * alpha;
      // do prefix sum (of size warpSize=blockDim.x =< 32)
      for (k = 1; k < blockDim.x; k *= 2) {
        v = __shfl_up_sync(warp_full_mask(), alpha, k, blockDim.x);
        if (threadIdx.x >= k) alpha += v;
      }
      // shift by last
      // (carry the running total from the previous chunk of blockDim.x rows)
      alpha += last;
    }
  }
  // scale by alpha
  // (broadcast the final total from the last x-lane, then take the sqrt)
  alpha = __shfl_sync(warp_full_mask(), alpha, blockDim.x - 1, blockDim.x);
  alpha = raft::sqrt(alpha);
  for (j = threadIdx.y + blockIdx.y * blockDim.y; j < n; j += blockDim.y * gridDim.y) {
    for (i = threadIdx.x; i < m; i += blockDim.x) { // blockDim.x=32
      index = i + j * m;
      obs[index] = obs[index] / alpha;
    }
  }
}
// Round n up to the next power of two (returns n unchanged when n is already
// a power of two). Precondition: n >= 1; results for n <= 0 are unspecified,
// matching the original bit-twiddling implementation.
template <typename index_type_t>
index_type_t next_pow2(index_type_t n)
{
  // Reference:
  // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2Float
  index_type_t v = n - 1;
  // Smear the highest set bit into every lower bit position, then add one.
  // Driving the shift widths from the type's size generalizes the original
  // fixed (1,2,4,8,16) sequence -- which silently produced wrong results for
  // 64-bit index types -- while remaining identical for 32-bit types.
  for (unsigned int shift = 1; shift < sizeof(index_type_t) * 8; shift *= 2) {
    v |= v >> shift;
  }
  return v + 1;
}
// Scale each column of the m x n column-major device matrix `obs` by its norm
// (see scale_obs_kernel). Returns the CUDA runtime's launch status.
template <typename index_type_t, typename value_type_t>
cudaError_t scale_obs(index_type_t m, index_type_t n, value_type_t* obs)
{
  // find next power of 2 (the kernel requires blockDim.x in {2,4,8,16,32})
  index_type_t p2m = next_pow2<index_type_t>(m);

  // setup launch configuration: x-dimension covers rows (capped at one warp),
  // y-dimension covers columns, one column per y-thread.
  // Explicit template arguments on std::min/std::max keep this compiling for
  // index types other than int (the bare `2`/`32` literals are ints).
  auto xsize = static_cast<unsigned int>(
    std::max<index_type_t>(2, std::min<index_type_t>(p2m, 32)));
  dim3 nthreads{xsize, 256 / xsize, 1};
  dim3 nblocks{1, static_cast<unsigned int>((n + nthreads.y - 1) / nthreads.y), 1};

  // launch scaling kernel (scale each column of obs by its norm)
  scale_obs_kernel<index_type_t, value_type_t><<<nblocks, nthreads>>>(m, n, obs);

  // Previously this returned cudaSuccess unconditionally, which hid invalid
  // launch configurations from callers; report the runtime's actual status.
  return cudaGetLastError();
}
// Whiten the n x nEigVecs column-major eigenvector matrix in place (subtract
// each column's mean, divide by its scaled norm), then transpose it so that
// downstream k-means sees one nEigVecs-dimensional observation per graph
// vertex. All work runs on the handle's stream.
template <typename vertex_t, typename edge_t, typename weight_t>
void transform_eigen_matrix(raft::resources const& handle,
                            edge_t n,
                            vertex_t nEigVecs,
                            weight_t* eigVecs)
{
  auto stream = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);

  const weight_t zero{0.0};
  const weight_t one{1.0};

  // Whiten eigenvector matrix
  for (auto i = 0; i < nEigVecs; ++i) {
    weight_t mean, std;

    // Column i occupies [IDX(0, i, n), IDX(0, i + 1, n)) in column-major order.
    mean = thrust::reduce(thrust_exec_policy,
                          thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                          thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)));
    RAFT_CHECK_CUDA(stream);
    mean /= n;
    // Subtract the mean from every entry of column i.
    thrust::transform(thrust_exec_policy,
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)),
                      thrust::make_constant_iterator(mean),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::minus<weight_t>());
    RAFT_CHECK_CUDA(stream);

    // Column norm (host-side result via cuBLAS nrm2).
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasnrm2(cublas_h, n, eigVecs + IDX(0, i, n), 1, &std, stream));

    // Scale so the whitened column has standard deviation 1 (norm / sqrt(n)).
    std /= std::sqrt(static_cast<weight_t>(n));

    thrust::transform(thrust_exec_policy,
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i + 1, n)),
                      thrust::make_constant_iterator(std),
                      thrust::device_pointer_cast(eigVecs + IDX(0, i, n)),
                      thrust::divides<weight_t>());
    RAFT_CHECK_CUDA(stream);
  }

  // Transpose eigenvector matrix
  // TODO: in-place transpose
  {
    // Scratch buffer for the transposed matrix; freed when `work` leaves scope.
    raft::spectral::matrix::vector_t<weight_t> work(handle, nEigVecs * n);
    // Alpha/beta scalars (&one, &zero) live on the host, so force host
    // pointer mode before the geam call below.
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));
    // work = 1 * transpose(eigVecs) + 0 * (nothing): out-of-place transpose.
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgeam(cublas_h,
                                                     CUBLAS_OP_T,
                                                     CUBLAS_OP_N,
                                                     nEigVecs,
                                                     n,
                                                     &one,
                                                     eigVecs,
                                                     n,
                                                     &zero,
                                                     (weight_t*)NULL,
                                                     nEigVecs,
                                                     work.raw(),
                                                     nEigVecs,
                                                     stream));
    // Copy the transposed result back over the input buffer (async, same
    // stream, so ordering after geam is guaranteed).
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      eigVecs, work.raw(), nEigVecs * n * sizeof(weight_t), cudaMemcpyDeviceToDevice, stream));
  }
}
namespace {
/// Functor to generate indicator vectors
/** For use in Thrust transform
*/
template <typename index_type_t, typename value_type_t>
struct equal_to_i_op {
  // Partition index this functor tests cluster assignments against.
  const index_type_t i;

 public:
  equal_to_i_op(index_type_t _i) : i(_i) {}

  // t is a (cluster_assignment, output) zip tuple; writes 1.0 to the output
  // slot when the assignment equals i, else 0.0.
  template <typename Tuple_>
  __host__ __device__ void operator()(Tuple_ t)
  {
    thrust::get<1>(t) = (thrust::get<0>(t) == i) ? (value_type_t)1.0 : (value_type_t)0.0;
  }
};
} // namespace
// Construct indicator vector for ith partition
//
// Build the 0/1 indicator vector for partition `index` (part_i[k] = 1 iff
// clusters[k] == index), then compute:
//   clustersize = |{k : clusters[k] == index}|  (via dot(part_i, part_i))
//   partStats   = part_i' * B * part_i          (quadratic form through B.mv)
// Returns false when the partition is empty (clustersize rounds to < 0.5),
// leaving partStats unset in that case.
template <typename vertex_t, typename edge_t, typename weight_t>
bool construct_indicator(raft::resources const& handle,
                         edge_t index,
                         edge_t n,
                         weight_t& clustersize,
                         weight_t& partStats,
                         vertex_t const* __restrict__ clusters,
                         raft::spectral::matrix::vector_t<weight_t>& part_i,
                         raft::spectral::matrix::vector_t<weight_t>& Bx,
                         raft::spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> const& B)
{
  auto stream = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);

  // Fill part_i with the indicator of cluster `index`.
  thrust::for_each(
    thrust_exec_policy,
    thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(clusters),
                                                 thrust::device_pointer_cast(part_i.raw()))),
    thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(clusters + n),
                                                 thrust::device_pointer_cast(part_i.raw() + n))),
    equal_to_i_op<vertex_t, weight_t>(index));
  RAFT_CHECK_CUDA(stream);

  // Compute size of ith partition
  // TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasdot(
    cublas_h, n, part_i.raw(), 1, part_i.raw(), 1, &clustersize, stream));

  // Round away floating-point noise; a true count is always an integer.
  clustersize = round(clustersize);
  if (clustersize < 0.5) { return false; }

  // Compute part stats: Bx = B * part_i, then partStats = dot(Bx, part_i).
  B.mv(1, part_i.raw(), 0, Bx.raw());
  // TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasdot(cublas_h, n, Bx.raw(), 1, part_i.raw(), 1, &partStats, stream));

  return true;
}
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/lapack.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusolverDn.h>
#include <raft/core/error.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/detail/cusolver_wrappers.hpp>
// for now; TODO: check if/where this `define` should be;
//
#define USE_LAPACK
namespace raft {
// Translate a LAPACK `info` return code into a C++ error: negative values
// indicate an illegal argument (thrown as raft::exception with the argument
// number), positive values indicate an internal LAPACK failure (RAFT_FAIL),
// and zero means success (no-op). Wrapped in do/while(0) so that
// `lapackCheckError(info);` is a single well-formed statement in every
// control-flow position (the original brace-block form broke if/else chains).
#define lapackCheckError(status)                                                     \
  do {                                                                               \
    if (status < 0) {                                                                \
      std::stringstream ss;                                                          \
      ss << "Lapack error: argument number " << -status << " had an illegal value."; \
      throw exception(ss.str());                                                     \
    } else if (status > 0) {                                                         \
      RAFT_FAIL("Lapack error: internal error.");                                    \
    }                                                                                \
  } while (0)
extern "C" void sgeqrf_(
int* m, int* n, float* a, int* lda, float* tau, float* work, int* lwork, int* info);
extern "C" void dgeqrf_(
int* m, int* n, double* a, int* lda, double* tau, double* work, int* lwork, int* info);
extern "C" void sormqr_(char* side,
char* trans,
int* m,
int* n,
int* k,
float* a,
int* lda,
const float* tau,
float* c,
int* ldc,
float* work,
int* lwork,
int* info);
extern "C" void dormqr_(char* side,
char* trans,
int* m,
int* n,
int* k,
double* a,
int* lda,
const double* tau,
double* c,
int* ldc,
double* work,
int* lwork,
int* info);
extern "C" int dgeev_(char* jobvl,
char* jobvr,
int* n,
double* a,
int* lda,
double* wr,
double* wi,
double* vl,
int* ldvl,
double* vr,
int* ldvr,
double* work,
int* lwork,
int* info);
extern "C" int sgeev_(char* jobvl,
char* jobvr,
int* n,
float* a,
int* lda,
float* wr,
float* wi,
float* vl,
int* ldvl,
float* vr,
int* ldvr,
float* work,
int* lwork,
int* info);
extern "C" cusolverStatus_t cusolverDnSgemmHost(cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const float* alpha,
const float* A,
int lda,
const float* B,
int ldb,
const float* beta,
float* C,
int ldc);
extern "C" cusolverStatus_t cusolverDnDgemmHost(cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
const double* alpha,
const double* A,
int lda,
const double* B,
int ldb,
const double* beta,
double* C,
int ldc);
extern "C" cusolverStatus_t cusolverDnSsterfHost(int n, float* d, float* e, int* info);
extern "C" cusolverStatus_t cusolverDnDsterfHost(int n, double* d, double* e, int* info);
extern "C" cusolverStatus_t cusolverDnSsteqrHost(
const signed char* compz, int n, float* d, float* e, float* z, int ldz, float* work, int* info);
extern "C" cusolverStatus_t cusolverDnDsteqrHost(const signed char* compz,
int n,
double* d,
double* e,
double* z,
int ldz,
double* work,
int* info);
// Static host-side LAPACK facade used by the spectral solvers. Public members
// dispatch on T (float/double) to the private lapack_* overloads, which call
// either Fortran LAPACK symbols (sgeqrf_/dormqr_/sgeev_/...) or the
// cusolverDn*Host helpers declared above. Not instantiable (private ctor).
template <typename T>
class Lapack {
 private:
  Lapack();
  ~Lapack();

 public:
  // No-op unless USE_LAPACK is undefined (see definition below the class).
  static void check_lapack_enabled();

  // Host GEMM: C = alpha * op(A) * op(B) + beta * C, where the bools select
  // transposition ('T') versus no-op ('N') per operand.
  static void gemm(bool transa,
                   bool transb,
                   int m,
                   int n,
                   int k,
                   T alpha,
                   const T* A,
                   int lda,
                   const T* B,
                   int ldb,
                   T beta,
                   T* C,
                   int ldc);

  // special QR for lanczos
  static void sterf(int n, T* d, T* e);
  static void steqr(char compz, int n, T* d, T* e, T* z, int ldz, T* work);

  // QR
  // computes the QR factorization of a general matrix
  static void geqrf(int m, int n, T* a, int lda, T* tau, T* work, int* lwork);

  // Generates the real orthogonal matrix Q of the QR factorization formed by geqrf.
  // multiply C by implicit Q
  static void ormqr(bool right_side,
                    bool transq,
                    int m,
                    int n,
                    int k,
                    T* a,
                    int lda,
                    T* tau,
                    T* c,
                    int ldc,
                    T* work,
                    int* lwork);

  // Eigen decompositions of a general dim x dim matrix A (A is overwritten).
  static void geev(T* A, T* eigenvalues, int dim, int lda);
  static void geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr);
  static void geev(T* A,
                   T* eigenvalues_r,
                   T* eigenvalues_i,
                   T* eigenvectors_r,
                   T* eigenvectors_i,
                   int dim,
                   int lda,
                   int ldvr);

 private:
  // float overload; NOTE(review): takes `const char` while the double
  // overload below takes `const signed char` -- both accept 'N'/'T' literals.
  static void lapack_gemm(const char transa,
                          const char transb,
                          int m,
                          int n,
                          int k,
                          float alpha,
                          const float* a,
                          int lda,
                          const float* b,
                          int ldb,
                          float beta,
                          float* c,
                          int ldc)
  {
    cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cublas_transb = (transb == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cusolverDnSgemmHost(
      cublas_transa, cublas_transb, m, n, k, &alpha, (float*)a, lda, (float*)b, ldb, &beta, c, ldc);
  }

  static void lapack_gemm(const signed char transa,
                          const signed char transb,
                          int m,
                          int n,
                          int k,
                          double alpha,
                          const double* a,
                          int lda,
                          const double* b,
                          int ldb,
                          double beta,
                          double* c,
                          int ldc)
  {
    cublasOperation_t cublas_transa = (transa == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cublasOperation_t cublas_transb = (transb == 'N') ? CUBLAS_OP_N : CUBLAS_OP_T;
    cusolverDnDgemmHost(cublas_transa,
                        cublas_transb,
                        m,
                        n,
                        k,
                        &alpha,
                        (double*)a,
                        lda,
                        (double*)b,
                        ldb,
                        &beta,
                        c,
                        ldc);
  }

  static void lapack_sterf(int n, float* d, float* e, int* info)
  {
    cusolverDnSsterfHost(n, d, e, info);
  }

  static void lapack_sterf(int n, double* d, double* e, int* info)
  {
    cusolverDnDsterfHost(n, d, e, info);
  }

  static void lapack_steqr(
    const signed char compz, int n, float* d, float* e, float* z, int ldz, float* work, int* info)
  {
    cusolverDnSsteqrHost(&compz, n, d, e, z, ldz, work, info);
  }

  static void lapack_steqr(const signed char compz,
                           int n,
                           double* d,
                           double* e,
                           double* z,
                           int ldz,
                           double* work,
                           int* info)
  {
    cusolverDnDsteqrHost(&compz, n, d, e, z, ldz, work, info);
  }

  static void lapack_geqrf(
    int m, int n, float* a, int lda, float* tau, float* work, int* lwork, int* info)
  {
    sgeqrf_(&m, &n, a, &lda, tau, work, lwork, info);
  }

  static void lapack_geqrf(
    int m, int n, double* a, int lda, double* tau, double* work, int* lwork, int* info)
  {
    dgeqrf_(&m, &n, a, &lda, tau, work, lwork, info);
  }

  static void lapack_ormqr(char side,
                           char trans,
                           int m,
                           int n,
                           int k,
                           float* a,
                           int lda,
                           float* tau,
                           float* c,
                           int ldc,
                           float* work,
                           int* lwork,
                           int* info)
  {
    sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info);
  }

  static void lapack_ormqr(char side,
                           char trans,
                           int m,
                           int n,
                           int k,
                           double* a,
                           int lda,
                           double* tau,
                           double* c,
                           int ldc,
                           double* work,
                           int* lwork,
                           int* info)
  {
    dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, lwork, info);
  }

  static int lapack_geev_dispatch(char* jobvl,
                                  char* jobvr,
                                  int* n,
                                  double* a,
                                  int* lda,
                                  double* wr,
                                  double* wi,
                                  double* vl,
                                  int* ldvl,
                                  double* vr,
                                  int* ldvr,
                                  double* work,
                                  int* lwork,
                                  int* info)
  {
    return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
  }

  static int lapack_geev_dispatch(char* jobvl,
                                  char* jobvr,
                                  int* n,
                                  float* a,
                                  int* lda,
                                  float* wr,
                                  float* wi,
                                  float* vl,
                                  int* ldvl,
                                  float* vr,
                                  int* ldvr,
                                  float* work,
                                  int* lwork,
                                  int* info)
  {
    return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
  }

  // real eigenvalues
  // (imaginary parts are computed into the local WI buffer and discarded)
  static void lapack_geev(T* A, T* eigenvalues, int dim, int lda)
  {
    char job = 'N';
    std::vector<T> WI(dim);
    int ldv = 1;
    T* vl = 0;
    int work_size = 6 * dim;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&job,
                         &job,
                         &dim,
                         A,
                         &lda,
                         eigenvalues,
                         WI.data(),
                         vl,
                         &ldv,
                         vl,
                         &ldv,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }

  // real eigenpairs
  // (right eigenvectors only; left eigenvectors are not computed)
  static void lapack_geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr)
  {
    char jobvl = 'N';
    char jobvr = 'V';
    std::vector<T> WI(dim);
    int work_size = 6 * dim;
    T* vl = 0;
    int ldvl = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl,
                         &jobvr,
                         &dim,
                         A,
                         &lda,
                         eigenvalues,
                         WI.data(),
                         vl,
                         &ldvl,
                         eigenvectors,
                         &ldvr,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }

  // complex eigenpairs
  // NOTE(review): eigenvectors_i is never used in this body -- *geev packs
  // complex eigenvectors into the real vr array (eigenvectors_r); callers
  // would have to unpack it themselves. Confirm intended contract.
  static void lapack_geev(T* A,
                          T* eigenvalues_r,
                          T* eigenvalues_i,
                          T* eigenvectors_r,
                          T* eigenvectors_i,
                          int dim,
                          int lda,
                          int ldvr)
  {
    char jobvl = 'N';
    char jobvr = 'V';
    int work_size = 8 * dim;
    int ldvl = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl,
                         &jobvr,
                         &dim,
                         A,
                         &lda,
                         eigenvalues_r,
                         eigenvalues_i,
                         0,
                         &ldvl,
                         eigenvectors_r,
                         &ldvr,
                         work.data(),
                         &work_size,
                         &info);
    lapackCheckError(info);
  }
};
// Fails (via RAFT_FAIL) when built without LAPACK support; a no-op here since
// USE_LAPACK is unconditionally defined earlier in this header.
template <typename T>
void Lapack<T>::check_lapack_enabled()
{
#ifndef USE_LAPACK
  RAFT_FAIL("Error: LAPACK not enabled.");
#endif
}
// Host GEMM: C = alpha * op(A) * op(B) + beta * C; transa/transb select
// transposition of the respective operand. The enabled-check is left
// commented out, matching the nvgraph-era code this was ported from.
template <typename T>
void Lapack<T>::gemm(bool transa,
                     bool transb,
                     int m,
                     int n,
                     int k,
                     T alpha,
                     const T* A,
                     int lda,
                     const T* B,
                     int ldb,
                     T beta,
                     T* C,
                     int ldc)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  const char transA_char = transa ? 'T' : 'N';
  const char transB_char = transb ? 'T' : 'N';
  lapack_gemm(transA_char, transB_char, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
  // #endif
}
// Eigenvalues of a symmetric tridiagonal matrix (d = diagonal, e =
// off-diagonal); wraps LAPACK-style *sterf and throws on a nonzero info code.
template <typename T>
void Lapack<T>::sterf(int n, T* d, T* e)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  int info;
  lapack_sterf(n, d, e, &info);
  lapackCheckError(info);
  // #endif
}
// Eigenvalues (and optionally eigenvectors, selected by compz) of a symmetric
// tridiagonal matrix; wraps LAPACK-style *steqr and throws on nonzero info.
template <typename T>
void Lapack<T>::steqr(char compz, int n, T* d, T* e, T* z, int ldz, T* work)
{
  // check_lapack_enabled();
  // #ifdef NVGRAPH_USE_LAPACK
  int info;
  lapack_steqr(compz, n, d, e, z, ldz, work, &info);
  lapackCheckError(info);
  // #endif
}
// QR factorization of a general m x n matrix (in/out in `a`, reflectors in
// `tau`); pass lwork = -1 for the LAPACK workspace-size query convention.
template <typename T>
void Lapack<T>::geqrf(int m, int n, T* a, int lda, T* tau, T* work, int* lwork)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  int info;
  lapack_geqrf(m, n, a, lda, tau, work, lwork, &info);
  lapackCheckError(info);
#endif
}
// Multiply C by the implicit orthogonal Q from a prior geqrf:
// C := Q*C (or C*Q when right_side, or with Q^T when transq).
template <typename T>
void Lapack<T>::ormqr(bool right_side,
                      bool transq,
                      int m,
                      int n,
                      int k,
                      T* a,
                      int lda,
                      T* tau,
                      T* c,
                      int ldc,
                      T* work,
                      int* lwork)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  // Map the boolean flags onto LAPACK's side/trans character conventions.
  char side = right_side ? 'R' : 'L';
  char trans = transq ? 'T' : 'N';
  int info;
  lapack_ormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, &info);
  lapackCheckError(info);
#endif
}
// real eigenvalues
// Eigenvalues only of the general dim x dim matrix A (A is overwritten by the
// underlying *geev call; imaginary parts are discarded by the wrapper).
template <typename T>
void Lapack<T>::geev(T* A, T* eigenvalues, int dim, int lda)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues, dim, lda);
#endif
}
// real eigenpairs
// Eigenvalues plus right eigenvectors of the general dim x dim matrix A
// (A is overwritten by the underlying *geev call).
template <typename T>
void Lapack<T>::geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues, eigenvectors, dim, lda, ldvr);
#endif
}
// complex eigenpairs
// Real/imaginary eigenvalue parts plus eigenvectors. NOTE(review): the
// private implementation never writes eigenvectors_i (see the class body);
// confirm callers expect *geev's packed-real eigenvector layout.
template <typename T>
void Lapack<T>::geev(T* A,
                     T* eigenvalues_r,
                     T* eigenvalues_i,
                     T* eigenvectors_r,
                     T* eigenvectors_i,
                     int dim,
                     int lda,
                     int ldvr)
{
  check_lapack_enabled();
#ifdef USE_LAPACK
  lapack_geev(A, eigenvalues_r, eigenvalues_i, eigenvectors_r, eigenvectors_i, dim, lda, ldvr);
#endif
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/partition.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math.h>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <stdio.h>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <tuple>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/detail/spectral_util.cuh>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace raft {
namespace spectral {
namespace detail {
// =========================================================
// Spectral partitioner
// =========================================================
/// Compute spectral graph partition
/** Compute partition for a weighted undirected graph. This
* partition attempts to minimize the cost function:
* Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
*
* @param G Weighted graph in CSR format
* @param nClusters Number of partitions.
* @param nEigVecs Number of eigenvectors to compute.
* @param maxIter_lanczos Maximum number of Lanczos iterations.
* @param restartIter_lanczos Maximum size of Lanczos system before
* implicit restart.
* @param tol_lanczos Convergence tolerance for Lanczos method.
* @param maxIter_kmeans Maximum number of k-means iterations.
* @param tol_kmeans Convergence tolerance for k-means algorithm.
* @param clusters (Output, device memory, n entries) Partition
* assignments.
* @param iters_lanczos On exit, number of Lanczos iterations
* performed.
* @param iters_kmeans On exit, number of k-means iterations
* performed.
* @return statistics: number of eigensolver iterations, .
*/
// Spectral partitioning pipeline: build the graph Laplacian, compute its
// smallest eigenpairs, whiten/transpose the eigenvector matrix, then cluster
// the embedded vertices. Returns (eigensolver iterations, cluster-solver
// residual, cluster-solver iterations).
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> partition(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
  RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");

  auto stream = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);

  std::tuple<vertex_t, weight_t, vertex_t>
    stats; //{iters_eig_solver,residual_cluster,iters_cluster_solver} // # iters eigen solver,
           // cluster solver residual, # iters cluster solver

  // Number of graph vertices (= rows of the CSR adjacency matrix).
  vertex_t n = csr_m.nrows_;

  // -------------------------------------------------------
  // Spectral partitioner
  // -------------------------------------------------------

  // Compute eigenvectors of Laplacian

  // Initialize Laplacian
  /// sparse_matrix_t<vertex_t, weight_t> A{handle, graph};
  spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> L{handle, csr_m};

  auto eigen_config = eigen_solver.get_config();
  auto nEigVecs = eigen_config.n_eigVecs;

  // Compute smallest eigenvalues and eigenvectors
  std::get<0>(stats) = eigen_solver.solve_smallest_eigenvectors(handle, L, eigVals, eigVecs);

  // Whiten eigenvector matrix
  // (zero-mean/unit-variance columns, then transpose for the cluster solver)
  transform_eigen_matrix(handle, n, nEigVecs, eigVecs);

  // Find partition clustering
  // (each vertex becomes an nEigVecs-dimensional observation)
  auto pair_cluster = cluster_solver.solve(handle, n, nEigVecs, eigVecs, clusters);

  std::get<1>(stats) = pair_cluster.first;
  std::get<2>(stats) = pair_cluster.second;

  return stats;
}
// =========================================================
// Analysis of graph partition
// =========================================================
/// Compute cost function for partition
/** This function determines the edges cut by a partition and a cost
 * function:
 *   Cost = \sum_i (Edges cut by ith partition)/(Vertices in ith partition)
 * Graph is assumed to be weighted and undirected.
 *
 * @param handle raft handle for managing expensive resources
 * @param csr_m Weighted graph in CSR format
 * @param nClusters Number of partitions.
 * @param clusters (Input, device memory, n entries) Partition
 *    assignments.
 * @param edgeCut On exit, weight of edges cut by partition.
 * @param cost On exit, partition cost function.
 */
template <typename vertex_t, typename weight_t>
void analyzePartition(raft::resources const& handle,
                      spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                      vertex_t nClusters,
                      const vertex_t* __restrict__ clusters,
                      weight_t& edgeCut,
                      weight_t& cost)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  vertex_t n    = csr_m.nrows_;
  auto stream   = resource::get_cuda_stream(handle);
  auto cublas_h = resource::get_cublas_handle(handle);
  // Per-cluster scratch outputs filled by construct_indicator
  weight_t partEdgesCut, clustersize;
  // Device memory
  spectral::matrix::vector_t<weight_t> part_i(handle, n);
  spectral::matrix::vector_t<weight_t> Lx(handle, n);
  // Initialize cuBLAS
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));
  // Initialize Laplacian
  spectral::matrix::laplacian_matrix_t<vertex_t, weight_t> L{handle, csr_m};
  // Initialize output
  cost    = 0;
  edgeCut = 0;
  // Iterate through partitions
  for (vertex_t i = 0; i < nClusters; ++i) {
    // Construct indicator vector for ith partition
    if (!construct_indicator(handle, i, n, clustersize, partEdgesCut, clusters, part_i, Lx, L)) {
      WARNING("empty partition");
      continue;
    }
    // Record results
    cost += partEdgesCut / clustersize;
    // halve: each undirected cut edge contributes twice to the quadratic form
    edgeCut += partEdgesCut / 2;
  }
}
} // namespace detail
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/matrix_wrappers.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include <algorithm>
// =========================================================
// Useful macros
// =========================================================
// Get index of matrix entry
#define IDX(i, j, lda) ((i) + (j) * (lda))
namespace raft {
namespace spectral {
namespace matrix {
namespace detail {
using size_type = int; // for now; TODO: move it in appropriate header
// Apply diagonal matrix to vector: y += alpha * diag(D) * x.
// Grid-stride loop, so any launch configuration covers all n entries.
//
template <typename IndexType_, typename ValueType_>
RAFT_KERNEL diagmv(IndexType_ n,
                   ValueType_ alpha,
                   const ValueType_* __restrict__ D,
                   const ValueType_* __restrict__ x,
                   ValueType_* __restrict__ y)
{
  const IndexType_ stride = blockDim.x * gridDim.x;
  for (IndexType_ idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += stride) {
    y[idx] += alpha * D[idx] * x[idx];
  }
}
// Specifies which cuSPARSE algorithm is used for SpMV
// (translated to a cusparseSpMVAlg_t in sparse_matrix_t::translate_algorithm):
//
enum struct sparse_mv_alg_t : int {
  SPARSE_MV_UNDEFINED = -1,
  SPARSE_MV_ALG_DEFAULT,  // generic, for any sparse matrix
  SPARSE_MV_ALG1,         // typical for CSR
  SPARSE_MV_ALG2  // may provide better performance for irregular sparse matrices
};
// Vector "view"-like aggregate for linear algebra purposes.
// Non-owning: it never allocates or frees buffer_; the caller keeps the
// underlying memory alive.
//
template <typename value_type>
struct vector_view_t {
  value_type* buffer_;
  size_type size_;

  vector_view_t(value_type* buffer, size_type sz) : buffer_(buffer), size_(sz) {}

  // Fixed: previous code called other.raw()/other.size(), which do not exist
  // on this struct (only the public members do) and failed at instantiation.
  vector_view_t(vector_view_t&& other) : buffer_(other.buffer_), size_(other.size_) {}

  vector_view_t& operator=(vector_view_t&& other)
  {
    buffer_ = other.buffer_;
    size_   = other.size_;
    return *this;  // fixed: falling off the end of a value-returning function is UB
  }
};
// Owning device vector backed by an rmm::device_uvector, carrying the thrust
// execution policy of the handle it was created with.
template <typename value_type>
class vector_t {
 public:
  // Allocates sz uninitialized elements on the handle's CUDA stream.
  vector_t(resources const& raft_handle, size_type sz)
    : buffer_(sz, resource::get_cuda_stream(raft_handle)),
      thrust_policy(resource::get_thrust_policy(raft_handle))
  {
  }
  // NOTE(review): narrows buffer_.size() (size_t) to size_type (int) — assumed
  // safe for the sizes used here; confirm for very large vectors.
  size_type size(void) const { return buffer_.size(); }
  // Raw device pointer to the underlying buffer.
  value_type* raw(void) { return buffer_.data(); }
  value_type const* raw(void) const { return buffer_.data(); }
  // 1-norm: sum of absolute values, computed on the device via thrust::reduce.
  // (Partial sums are non-negative, so taking |.| of both reduce operands is
  // equivalent to summing element-wise absolute values.)
  value_type nrm1() const
  {
    return thrust::reduce(thrust_policy,
                          buffer_.data(),
                          buffer_.data() + buffer_.size(),
                          value_type{0},
                          [] __device__(auto left, auto right) {
                            auto abs_left = left > 0 ? left : -left;
                            auto abs_right = right > 0 ? right : -right;
                            return abs_left + abs_right;
                          });
  }
  // Fill every element with `value` on the device.
  void fill(value_type value)
  {
    thrust::fill_n(thrust_policy, buffer_.data(), buffer_.size(), value);
  }

 private:
  using thrust_exec_policy_t =
    thrust::detail::execute_with_allocator<rmm::mr::thrust_allocator<char>,
                                           thrust::cuda_cub::execute_on_stream_base>;
  rmm::device_uvector<value_type> buffer_;  // device storage (uninitialized on construction)
  const thrust_exec_policy_t thrust_policy;
};
// Non-owning view of a CSR sparse matrix together with the raft handle needed
// to run cuSPARSE operations on it. Serves as the base class for the
// Laplacian and modularity matrix wrappers below. The CSR arrays are NOT
// copied; the caller must keep them alive for the lifetime of this object.
template <typename index_type, typename value_type>
struct sparse_matrix_t {
  // General (possibly rectangular) CSR view: nrows x ncols with nnz entries.
  sparse_matrix_t(resources const& raft_handle,
                  index_type const* row_offsets,
                  index_type const* col_indices,
                  value_type const* values,
                  index_type const nrows,
                  index_type const ncols,
                  index_type const nnz)
    : handle_(raft_handle),
      row_offsets_(row_offsets),
      col_indices_(col_indices),
      values_(values),
      nrows_(nrows),
      ncols_(ncols),
      nnz_(nnz)
  {
  }
  // Square-matrix convenience overload: ncols == nrows.
  sparse_matrix_t(resources const& raft_handle,
                  index_type const* row_offsets,
                  index_type const* col_indices,
                  value_type const* values,
                  index_type const nrows,
                  index_type const nnz)
    : handle_(raft_handle),
      row_offsets_(row_offsets),
      col_indices_(col_indices),
      values_(values),
      nrows_(nrows),
      ncols_(nrows),
      nnz_(nnz)
  {
  }
  // Adapter from any graph CSR view exposing offsets/indices/edge_data and
  // number_of_vertices/number_of_edges (e.g. cugraph's view types).
  template <typename CSRView>
  sparse_matrix_t(resources const& raft_handle, CSRView const& csr_view)
    : handle_(raft_handle),
      row_offsets_(csr_view.offsets),
      col_indices_(csr_view.indices),
      values_(csr_view.edge_data),
      nrows_(csr_view.number_of_vertices),
      ncols_(csr_view.number_of_vertices),
      nnz_(csr_view.number_of_edges)
  {
  }
  virtual ~sparse_matrix_t(void) =
    default;  // virtual because used as base for following matrix types
  // y = alpha*A*x + beta*y
  //(Note: removed const-ness of x, because CUDA 11 SpMV
  // descriptor creation works with non-const, and const-casting
  // down is dangerous)
  //
  // @param alpha scalar applied to A*x
  // @param x input vector (device; size ncols_, or nrows_ if transpose)
  // @param beta scalar applied to the prior contents of y
  // @param y input/output vector (device; size nrows_, or ncols_ if transpose)
  // @param alg cuSPARSE SpMV algorithm selector
  // @param transpose apply A^T instead of A
  // @param symmetric only used by the legacy (pre-10.1) csrmv path
  virtual void mv(value_type alpha,
                  value_type* __restrict__ x,
                  value_type beta,
                  value_type* __restrict__ y,
                  sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
                  bool transpose      = false,
                  bool symmetric      = false) const
  {
    using namespace sparse;
    RAFT_EXPECTS(x != nullptr, "Null x buffer.");
    RAFT_EXPECTS(y != nullptr, "Null y buffer.");
    auto cusparse_h = resource::get_cusparse_handle(handle_);
    auto stream     = resource::get_cuda_stream(handle_);
    cusparseOperation_t trans = transpose ? CUSPARSE_OPERATION_TRANSPOSE :  // transpose
                                  CUSPARSE_OPERATION_NON_TRANSPOSE;         // non-transpose
// Generic SpMV path (cuSPARSE >= 10.1): descriptor-based cusparseSpMV.
#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
    auto size_x = transpose ? nrows_ : ncols_;
    auto size_y = transpose ? ncols_ : nrows_;
    cusparseSpMVAlg_t spmv_alg = translate_algorithm(alg);
    // create descriptors:
    //(below casts are necessary, because
    // cusparseCreateCsr(...) takes non-const
    // void*; the casts should be harmless)
    //
    cusparseSpMatDescr_t matA;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatecsr(&matA,
                                                              nrows_,
                                                              ncols_,
                                                              nnz_,
                                                              const_cast<index_type*>(row_offsets_),
                                                              const_cast<index_type*>(col_indices_),
                                                              const_cast<value_type*>(values_)));
    cusparseDnVecDescr_t vecX;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatednvec(&vecX, size_x, x));
    // SpMV operates on a temporary copy of y (see FIXME below).
    rmm::device_uvector<value_type> y_tmp(size_y, stream);
    raft::copy(y_tmp.data(), y, size_y, stream);
    cusparseDnVecDescr_t vecY;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatednvec(&vecY, size_y, y_tmp.data()));
    // get (scratch) external device buffer size:
    //
    size_t bufferSize;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsespmv_buffersize(
      cusparse_h, trans, &alpha, matA, vecX, &beta, vecY, spmv_alg, &bufferSize, stream));
    // allocate external buffer:
    //
    vector_t<value_type> external_buffer(handle_, bufferSize);
    // finally perform SpMV:
    //
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsespmv(
      cusparse_h, trans, &alpha, matA, vecX, &beta, vecY, spmv_alg, external_buffer.raw(), stream));
    // FIXME: This is a workaround for a cusparse issue being encountered in CUDA 12
    raft::copy(y, y_tmp.data(), size_y, stream);
    // free descriptors:
    //(TODO: maybe wrap them in a RAII struct?)
    //
    RAFT_CUSPARSE_TRY(cusparseDestroyDnVec(vecY));
    RAFT_CUSPARSE_TRY(cusparseDestroyDnVec(vecX));
    RAFT_CUSPARSE_TRY(cusparseDestroySpMat(matA));
#else
    // Legacy path (older CUDA): cusparsecsrmv with a classic matrix descriptor.
    RAFT_CUSPARSE_TRY(
      raft::sparse::detail::cusparsesetpointermode(cusparse_h, CUSPARSE_POINTER_MODE_HOST, stream));
    cusparseMatDescr_t descr = 0;
    RAFT_CUSPARSE_TRY(cusparseCreateMatDescr(&descr));
    if (symmetric) {
      RAFT_CUSPARSE_TRY(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC));
    } else {
      RAFT_CUSPARSE_TRY(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
    }
    RAFT_CUSPARSE_TRY(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsrmv(cusparse_h,
                                                          trans,
                                                          nrows_,
                                                          ncols_,
                                                          nnz_,
                                                          &alpha,
                                                          descr,
                                                          values_,
                                                          row_offsets_,
                                                          col_indices_,
                                                          x,
                                                          &beta,
                                                          y,
                                                          stream));
    RAFT_CUSPARSE_TRY(cusparseDestroyMatDescr(descr));
#endif
  }
  resources const& get_handle(void) const { return handle_; }
#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
  // Map this library's algorithm enum onto cuSPARSE's SpMV algorithm enum.
  cusparseSpMVAlg_t translate_algorithm(sparse_mv_alg_t alg) const
  {
    switch (alg) {
      case sparse_mv_alg_t::SPARSE_MV_ALG1: return CUSPARSE_SPMV_CSR_ALG1;
      case sparse_mv_alg_t::SPARSE_MV_ALG2: return CUSPARSE_SPMV_CSR_ALG2;
      default: return CUSPARSE_SPMV_ALG_DEFAULT;
    }
  }
#endif
  // private:  // maybe not, keep this ASAPBNS ("as simple as possible, but not simpler"); hence,
  // aggregate
  raft::resources const& handle_;
  index_type const* row_offsets_;
  index_type const* col_indices_;
  value_type const* values_;
  index_type const nrows_;
  index_type const ncols_;
  index_type const nnz_;
};
// Graph Laplacian L = D - A, stored implicitly: the base class holds the
// adjacency matrix A (CSR view) and diagonal_ holds the (weighted) degree
// vector d = A * 1, computed once at construction.
template <typename index_type, typename value_type>
struct laplacian_matrix_t : sparse_matrix_t<index_type, value_type> {
  laplacian_matrix_t(resources const& raft_handle,
                     index_type const* row_offsets,
                     index_type const* col_indices,
                     value_type const* values,
                     index_type const nrows,
                     index_type const nnz)
    : sparse_matrix_t<index_type, value_type>(
        raft_handle, row_offsets, col_indices, values, nrows, nnz),
      diagonal_(raft_handle, nrows)
  {
    // diagonal_ = A * ones, i.e. the per-vertex (weighted) degree / row sums
    vector_t<value_type> ones{raft_handle, nrows};
    ones.fill(1.0);
    sparse_matrix_t<index_type, value_type>::mv(1, ones.raw(), 0, diagonal_.raw());
  }
  laplacian_matrix_t(resources const& raft_handle,
                     sparse_matrix_t<index_type, value_type> const& csr_m)
    : sparse_matrix_t<index_type, value_type>(raft_handle,
                                              csr_m.row_offsets_,
                                              csr_m.col_indices_,
                                              csr_m.values_,
                                              csr_m.nrows_,
                                              csr_m.nnz_),
      diagonal_(raft_handle, csr_m.nrows_)
  {
    // diagonal_ = A * ones (row sums), as above
    vector_t<value_type> ones{raft_handle, csr_m.nrows_};
    ones.fill(1.0);
    sparse_matrix_t<index_type, value_type>::mv(1, ones.raw(), 0, diagonal_.raw());
  }
  // y = alpha*L*x + beta*y, computed as
  //   y := beta*y; y += alpha*diag(d)*x; y += (-alpha)*A*x
  //
  void mv(value_type alpha,
          value_type* __restrict__ x,
          value_type beta,
          value_type* __restrict__ y,
          sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
          bool transpose      = false,
          bool symmetric      = false) const override
  {
    constexpr int BLOCK_SIZE = 1024;
    auto n                   = sparse_matrix_t<index_type, value_type>::nrows_;
    auto handle   = sparse_matrix_t<index_type, value_type>::get_handle();
    auto cublas_h = resource::get_cublas_handle(handle);
    auto stream   = resource::get_cuda_stream(handle);
    // scales y by beta:
    //
    if (beta == 0) {
      // byte-wise zeroing is valid here since the target value is 0
      RAFT_CUDA_TRY(cudaMemsetAsync(y, 0, n * sizeof(value_type), stream));
    } else if (beta != 1) {
      // TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasscal(cublas_h, n, &beta, y, 1, stream));
    }
    // Apply diagonal matrix: y += alpha * diag(d) * x
    // (grid capped at 65535 blocks; diagmv uses a grid-stride loop)
    //
    dim3 gridDim{std::min<unsigned int>((n + BLOCK_SIZE - 1) / BLOCK_SIZE, 65535), 1, 1};
    dim3 blockDim{BLOCK_SIZE, 1, 1};
    diagmv<<<gridDim, blockDim, 0, stream>>>(n, alpha, diagonal_.raw(), x, y);
    RAFT_CHECK_CUDA(stream);
    // Apply adjacency matrix: y += (-alpha) * A * x
    //
    sparse_matrix_t<index_type, value_type>::mv(-alpha, x, 1, y, alg, transpose, symmetric);
  }
  // Degree vector d = A * 1 (device memory, nrows entries)
  vector_t<value_type> diagonal_;
};
// Modularity matrix B = A - d*d'/edge_sum, stored implicitly: reuses the
// Laplacian base for A and the degree vector d; edge_sum_ = ||d||_1 is the
// total (weighted) degree.
template <typename index_type, typename value_type>
struct modularity_matrix_t : laplacian_matrix_t<index_type, value_type> {
  modularity_matrix_t(resources const& raft_handle,
                      index_type const* row_offsets,
                      index_type const* col_indices,
                      value_type const* values,
                      index_type const nrows,
                      index_type const nnz)
    : laplacian_matrix_t<index_type, value_type>(
        raft_handle, row_offsets, col_indices, values, nrows, nnz)
  {
    edge_sum_ = laplacian_matrix_t<index_type, value_type>::diagonal_.nrm1();
  }
  modularity_matrix_t(resources const& raft_handle,
                      sparse_matrix_t<index_type, value_type> const& csr_m)
    : laplacian_matrix_t<index_type, value_type>(raft_handle, csr_m)
  {
    edge_sum_ = laplacian_matrix_t<index_type, value_type>::diagonal_.nrm1();
  }
  // y = alpha*B*x, computed as y := alpha*A*x - alpha*(d'x/edge_sum)*d
  // NOTE(review): the beta parameter is accepted for interface compatibility
  // but ignored — the first mv below overwrites y (beta=0). Callers appear to
  // rely on the overwrite semantics; confirm before changing.
  //
  void mv(value_type alpha,
          value_type* __restrict__ x,
          value_type beta,
          value_type* __restrict__ y,
          sparse_mv_alg_t alg = sparse_mv_alg_t::SPARSE_MV_ALG1,
          bool transpose      = false,
          bool symmetric      = false) const override
  {
    auto n        = sparse_matrix_t<index_type, value_type>::nrows_;
    auto handle   = sparse_matrix_t<index_type, value_type>::get_handle();
    auto cublas_h = resource::get_cublas_handle(handle);
    auto stream   = resource::get_cuda_stream(handle);
    // y = A*x
    //
    sparse_matrix_t<index_type, value_type>::mv(alpha, x, 0, y, alg, transpose, symmetric);
    value_type dot_res;
    // gamma = d'*x
    //
    // Cublas::dot(this->n, D.raw(), 1, x, 1, &dot_res);
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasdot(cublas_h,
                                      n,
                                      laplacian_matrix_t<index_type, value_type>::diagonal_.raw(),
                                      1,
                                      x,
                                      1,
                                      &dot_res,
                                      stream));
    // y = y -(gamma/edge_sum)*d
    //
    value_type gamma_ = -dot_res / edge_sum_;
    // TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(
      raft::linalg::detail::cublasaxpy(cublas_h,
                                       n,
                                       &gamma_,
                                       laplacian_matrix_t<index_type, value_type>::diagonal_.raw(),
                                       1,
                                       y,
                                       1,
                                       stream));
  }
  // Total weighted degree ||d||_1; normalizes the rank-one correction term.
  value_type edge_sum_;
};
} // namespace detail
} // namespace matrix
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spectral | rapidsai_public_repos/raft/cpp/include/raft/spectral/detail/modularity_maximization.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math.h>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <stdio.h>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <tuple>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/detail/spectral_util.cuh>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace raft {
namespace spectral {
namespace detail {
// =========================================================
// Spectral modularity_maximization
// =========================================================
/** Compute cluster assignments by (approximately) maximizing the modularity
 * of a weighted undirected graph, via the leading eigenvectors of the
 * modularity matrix followed by a clustering step.
 *
 * @param handle raft handle for managing expensive resources
 * @param csr_m Weighted graph in CSR format
 * @param eigen_solver Eigensolver implementation; its config supplies the
 *    number of eigenvectors to compute
 * @param cluster_solver Cluster solver implementation (e.g. k-means)
 * @param clusters (Output, device memory, n entries) Cluster
 *    assignments.
 * @param eigVals Output eigenvalue array pointer on device
 *    (at least n_eigVecs entries)
 * @param eigVecs Output eigenvector array pointer on device
 *    (at least n * n_eigVecs entries, column-major)
 * @return statistics tuple: (# eigensolver iterations,
 *    cluster-solver residual, # cluster-solver iterations).
 */
template <typename vertex_t, typename weight_t, typename EigenSolver, typename ClusterSolver>
std::tuple<vertex_t, weight_t, vertex_t> modularity_maximization(
  raft::resources const& handle,
  raft::spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
  EigenSolver const& eigen_solver,
  ClusterSolver const& cluster_solver,
  vertex_t* __restrict__ clusters,
  weight_t* eigVals,
  weight_t* eigVecs)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
  RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");
  auto stream = resource::get_cuda_stream(handle);
  // NOTE(review): cublas_h is not referenced below; presumably retained to
  // eagerly initialize the handle's cuBLAS resource — confirm.
  auto cublas_h = resource::get_cublas_handle(handle);
  std::tuple<vertex_t, weight_t, vertex_t>
    stats;  // # iters eigen solver, cluster solver residual, # iters cluster solver
  vertex_t n = csr_m.nrows_;
  // Compute eigenvectors of Modularity Matrix
  // Initialize Modularity Matrix
  raft::spectral::matrix::modularity_matrix_t<vertex_t, weight_t> B{handle, csr_m};
  auto eigen_config = eigen_solver.get_config();
  auto nEigVecs     = eigen_config.n_eigVecs;
  // Compute eigenvectors corresponding to largest eigenvalues
  std::get<0>(stats) = eigen_solver.solve_largest_eigenvectors(handle, B, eigVals, eigVecs);
  // Whiten eigenvector matrix
  transform_eigen_matrix(handle, n, nEigVecs, eigVecs);
  // notice that at this point the matrix has already been transposed, so we are scaling
  // columns
  scale_obs(nEigVecs, n, eigVecs);
  RAFT_CHECK_CUDA(stream);
  // Find partition clustering
  auto pair_cluster = cluster_solver.solve(handle, n, nEigVecs, eigVecs, clusters);
  std::get<1>(stats) = pair_cluster.first;
  std::get<2>(stats) = pair_cluster.second;
  return stats;
}
//===================================================
// Analysis of graph partition
// =========================================================
/// Compute modularity
/** This function determines the modularity based on a graph and cluster assignments
 * @param handle raft handle for managing expensive resources
 * @param csr_m Weighted graph in CSR format
 * @param nClusters Number of clusters.
 * @param clusters (Input, device memory, n entries) Cluster assignments.
 * @param modularity On exit, modularity
 */
template <typename vertex_t, typename weight_t>
void analyzeModularity(raft::resources const& handle,
                       raft::spectral::matrix::sparse_matrix_t<vertex_t, weight_t> const& csr_m,
                       vertex_t nClusters,
                       vertex_t const* __restrict__ clusters,
                       weight_t& modularity)
{
  RAFT_EXPECTS(clusters != nullptr, "Null clusters buffer.");
  vertex_t n = csr_m.nrows_;
  // Per-cluster scratch outputs filled by construct_indicator
  weight_t partModularity, clustersize;
  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);
  // Device memory
  raft::spectral::matrix::vector_t<weight_t> part_i(handle, n);
  raft::spectral::matrix::vector_t<weight_t> Bx(handle, n);
  // Initialize cuBLAS
  RAFT_CUBLAS_TRY(linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));
  // Initialize Modularity
  raft::spectral::matrix::modularity_matrix_t<vertex_t, weight_t> B{handle, csr_m};
  // Initialize output
  modularity = 0;
  // Iterate through partitions, accumulating each cluster's contribution
  for (vertex_t i = 0; i < nClusters; ++i) {
    if (!construct_indicator(handle, i, n, clustersize, partModularity, clusters, part_i, Bx, B)) {
      WARNING("empty partition");
      continue;
    }
    // Record results
    modularity += partModularity;
  }
  // Normalize by the total weighted degree ||d||_1
  modularity = modularity / B.diagonal_.nrm1();
}
} // namespace detail
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/lap/lap.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
* DISCLAIMER: this file is deprecated: use lap.cuh instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuh version instead.")
#include <raft/solver/linear_assignment.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/lap/lap.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
 * DISCLAIMER: this file is deprecated: use raft/solver/linear_assignment.cuh instead
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft/solver version instead.")
#include <raft/solver/linear_assignment.cuh>
// Backward-compatibility aliases: re-export the types that moved to
// raft::solver under their legacy names / legacy raft::lap namespace.
using raft::solver::VertexData;
using raft::solver::Vertices;
namespace raft::lap {
using raft::solver::LinearAssignmentProblem;
}
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/sparse/csr.hpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/sparse/detail/csr.cuh>
namespace raft {
namespace sparse {
constexpr int TPB_X = 256;
using WeakCCState = detail::WeakCCState;
/**
 * @brief Partial calculation of the weakly connected components in the
 * context of a batched algorithm: the labels are computed wrt the sub-graph
 * represented by the given CSR matrix of dimensions batch_size * N.
 * Note that this overwrites the labels array and it is the responsibility of
 * the caller to combine the results from different batches
 * (cf label/merge_labels.cuh)
 *
 * (Kernels are launched with the namespace-level constant TPB_X threads per
 * block.)
 *
 * @tparam Index_ the numeric type of non-floating point elements
 * @tparam Lambda the type of the filtering function (Index_)->bool
 * @param labels an array for the output labels
 * @param row_ind the compressed row index of the CSR array
 * @param row_ind_ptr the row index pointer of the CSR array
 * @param nnz the size of row_ind_ptr array
 * @param N number of vertices
 * @param start_vertex_id the starting vertex index for the current batch
 * @param batch_size number of vertices for current batch
 * @param state instance of inter-batch state management
 * @param stream the cuda stream to use
 * @param filter_op an optional filtering function to determine which points
 * should get considered for labeling. It gets global indexes (not batch-wide!)
 */
template <typename Index_, typename Lambda>
void weak_cc_batched(Index_* labels,
                     const Index_* row_ind,
                     const Index_* row_ind_ptr,
                     Index_ nnz,
                     Index_ N,
                     Index_ start_vertex_id,
                     Index_ batch_size,
                     WeakCCState* state,
                     cudaStream_t stream,
                     Lambda filter_op)
{
  detail::weak_cc_batched<Index_, TPB_X, Lambda>(
    labels, row_ind, row_ind_ptr, nnz, N, start_vertex_id, batch_size, state, stream, filter_op);
}
/**
 * @brief Partial calculation of the weakly connected components in the
 * context of a batched algorithm: the labels are computed wrt the sub-graph
 * represented by the given CSR matrix of dimensions batch_size * N.
 * Note that this overwrites the labels array and it is the responsibility of
 * the caller to combine the results from different batches
 * (cf label/merge_labels.cuh)
 *
 * Overload without a filter: every vertex is considered for labeling.
 * (Kernels are launched with the namespace-level constant TPB_X threads per
 * block.)
 *
 * @tparam Index_ the numeric type of non-floating point elements
 * @param labels an array for the output labels
 * @param row_ind the compressed row index of the CSR array
 * @param row_ind_ptr the row index pointer of the CSR array
 * @param nnz the size of row_ind_ptr array
 * @param N number of vertices
 * @param start_vertex_id the starting vertex index for the current batch
 * @param batch_size number of vertices for current batch
 * @param state instance of inter-batch state management
 * @param stream the cuda stream to use
 */
template <typename Index_>
void weak_cc_batched(Index_* labels,
                     const Index_* row_ind,
                     const Index_* row_ind_ptr,
                     Index_ nnz,
                     Index_ N,
                     Index_ start_vertex_id,
                     Index_ batch_size,
                     WeakCCState* state,
                     cudaStream_t stream)
{
  // delegate to the filtered overload with an accept-all device predicate
  weak_cc_batched(labels,
                  row_ind,
                  row_ind_ptr,
                  nnz,
                  N,
                  start_vertex_id,
                  batch_size,
                  state,
                  stream,
                  [] __device__(Index_ tid) { return true; });
}
/**
* @brief Compute weakly connected components. Note that the resulting labels
* may not be taken from a monotonically increasing set (eg. numbers may be
* skipped). The MLCommon::Label package contains a primitive `make_monotonic`,
* which will make a monotonically increasing set of labels.
*
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* @tparam Type the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @tparam Lambda the type of an optional filter function (int)->bool
* @param labels an array for the output labels
* @param row_ind the compressed row index of the CSR array
* @param row_ind_ptr the row index pointer of the CSR array
* @param nnz the size of row_ind_ptr array
* @param N number of vertices
* @param stream the cuda stream to use
* @param filter_op an optional filtering function to determine which points
* should get considered for labeling. It gets global indexes (not batch-wide!)
*/
template <typename Index_ = int, typename Lambda>
void weak_cc(Index_* labels,
const Index_* row_ind,
const Index_* row_ind_ptr,
Index_ nnz,
Index_ N,
cudaStream_t stream,
Lambda filter_op)
{
rmm::device_scalar<bool> m(stream);
WeakCCState state(m.data());
weak_cc_batched<Index_, TPB_X>(labels, row_ind, row_ind_ptr, nnz, N, 0, N, stream, filter_op);
}
/**
* @brief Compute weakly connected components. Note that the resulting labels
* may not be taken from a monotonically increasing set (eg. numbers may be
* skipped). The MLCommon::Label package contains a primitive `make_monotonic`,
* which will make a monotonically increasing set of labels.
*
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* @tparam Type the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @tparam Lambda the type of an optional filter function (int)->bool
* @param labels an array for the output labels
* @param row_ind the compressed row index of the CSR array
* @param row_ind_ptr the row index pointer of the CSR array
* @param nnz the size of row_ind_ptr array
* @param N number of vertices
* @param stream the cuda stream to use
*/
template <typename Index_>
void weak_cc(Index_* labels,
const Index_* row_ind,
const Index_* row_ind_ptr,
Index_ nnz,
Index_ N,
cudaStream_t stream)
{
rmm::device_scalar<bool> m(stream);
WeakCCState state(m.data());
weak_cc_batched<Index_, TPB_X>(
labels, row_ind, row_ind_ptr, nnz, N, 0, N, stream, [](Index_) { return true; });
}
}; // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/sparse/coo.hpp | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/sparse/detail/coo.cuh>
namespace raft {
namespace sparse {
/** @brief A Container object for sparse coordinate. There are two motivations
* behind using a container for COO arrays.
*
* The first motivation is that it simplifies code, rather than always having
* to pass three arrays as function arguments.
*
* The second is more subtle, but much more important. The size
* of the resulting COO from a sparse operation is often not known ahead of time,
* since it depends on the contents of the underlying graph. The COO object can
* allocate the underlying arrays lazily so that the object can be created by the
* user and passed as an output argument in a sparse primitive. The sparse primitive
* would have the responsibility for allocating and populating the output arrays,
* while the original caller still maintains ownership of the underlying memory.
*
* @tparam value_t: the type of the value array.
* @tparam value_idx: the type of index array
*
*/
// Public alias for the COO container implemented in detail::COO; see the
// class-level comment above for the ownership / lazy-allocation semantics.
template <typename value_t, typename value_idx = int>
using COO = detail::COO<value_t, value_idx>;
}; // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/reduce.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_REDUCE_H
#define __SPARSE_REDUCE_H
#pragma once
#include <raft/core/resources.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/op/detail/reduce.cuh>
namespace raft {
namespace sparse {
namespace op {
/**
* Computes a mask from a sorted COO matrix where 0's denote
* duplicate values and 1's denote new values. This mask can
* be useful for computing an exclusive scan to pre-build offsets
* for reducing duplicates, such as when symmetrizing
* or taking the min of each duplicated value.
*
* Note that this function always marks the first value as 0 so that
* a cumulative sum can be performed as a follow-on. However, even
* if the mask is used directly, any duplicates should always have a
* 1 when first encountered so it can be assumed that the first element
* is always a 1 otherwise.
*
* @tparam value_idx
* @param[out] mask output mask, size nnz
* @param[in] rows COO rows array, size nnz
* @param[in] cols COO cols array, size nnz
* @param[in] nnz number of nonzeros in input arrays
* @param[in] stream cuda ops will be ordered wrt this stream
*/
template <typename value_idx>
void compute_duplicates_mask(
  value_idx* mask, const value_idx* rows, const value_idx* cols, size_t nnz, cudaStream_t stream)
{
  // Thin forwarding wrapper. The detail kernel compares each entry with its
  // predecessor, so duplicates are only detected when they are adjacent —
  // (rows, cols) must be sorted, as stated in the doc comment above.
  detail::compute_duplicates_mask(mask, rows, cols, nnz, stream);
}
/**
* Performs a COO reduce of duplicate columns per row, taking the max weight
* for duplicate columns in each row. This function assumes the input COO
* has been sorted by both row and column but makes no assumption on
* the sorting of values.
* @tparam value_idx
* @tparam value_t
* @param[in] handle
* @param[out] out output COO, the nnz will be computed allocate() will be called in this function.
* @param[in] rows COO rows array, size nnz
* @param[in] cols COO cols array, size nnz
* @param[in] vals COO vals array, size nnz
* @param[in] nnz number of nonzeros in COO input arrays
* @param[in] m number of rows in COO input matrix
* @param[in] n number of columns in COO input matrix
*/
template <typename value_idx, typename value_t>
void max_duplicates(raft::resources const& handle,
                    raft::sparse::COO<value_t, value_idx>& out,
                    const value_idx* rows,
                    const value_idx* cols,
                    const value_t* vals,
                    size_t nnz,
                    size_t m,
                    size_t n)
{
  // Thin public wrapper: duplicate masking, the offset scan, allocation of
  // `out`, and the max-reduction all happen in the detail:: implementation.
  detail::max_duplicates(handle, out, rows, cols, vals, nnz, m, n);
}
}; // END namespace op
}; // END namespace sparse
}; // END namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/filter.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __FILTER_H
#define __FILTER_H
#pragma once
#include <raft/core/resources.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/op/detail/filter.cuh>
namespace raft {
namespace sparse {
namespace op {
/**
* @brief Removes the values matching a particular scalar from a COO formatted sparse matrix.
*
* @param rows: input array of rows (size n)
* @param cols: input array of cols (size n)
* @param vals: input array of vals (size n)
* @param nnz: size of current rows/cols/vals arrays
* @param crows: compressed array of rows
* @param ccols: compressed array of cols
* @param cvals: compressed array of vals
* @param cnnz: array of non-zero counts per row
* @param cur_cnnz array of counts per row
* @param scalar: scalar to remove from arrays
* @param n: number of rows in dense matrix
* @param stream: cuda stream to use
*/
template <typename T>
void coo_remove_scalar(const int* rows,
                       const int* cols,
                       const T* vals,
                       int nnz,
                       int* crows,
                       int* ccols,
                       T* cvals,
                       int* cnnz,
                       int* cur_cnnz,
                       T scalar,
                       int n,
                       cudaStream_t stream)
{
  // Forwards to the detail:: implementation with a fixed 128-thread block
  // size; the caller supplies the per-row counts (cnnz / cur_cnnz) and the
  // pre-sized compressed output arrays.
  detail::coo_remove_scalar<128, T>(
    rows, cols, vals, nnz, crows, ccols, cvals, cnnz, cur_cnnz, scalar, n, stream);
}
/**
* @brief Removes the values matching a particular scalar from a COO formatted sparse matrix.
*
* @param in: input COO matrix
* @param out: output COO matrix
* @param scalar: scalar to remove from arrays
* @param stream: cuda stream to use
*/
template <typename T>
void coo_remove_scalar(COO<T>* in, COO<T>* out, T scalar, cudaStream_t stream)
{
  // Convenience overload: the detail:: implementation computes the output
  // nnz itself and allocates `out` before compressing.
  detail::coo_remove_scalar<128, T>(in, out, scalar, stream);
}
/**
* @brief Removes zeros from a COO formatted sparse matrix.
*
* @param in: input COO matrix
* @param out: output COO matrix
* @param stream: cuda stream to use
*/
template <typename T>
void coo_remove_zeros(COO<T>* in, COO<T>* out, cudaStream_t stream)
{
  // Zero removal is just scalar removal with scalar == 0.
  coo_remove_scalar<T>(in, out, T(0.0), stream);
}
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/slice.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SLICE_H
#define __SLICE_H
#pragma once
#include <raft/core/resources.hpp>
#include <raft/sparse/op/detail/slice.cuh>
namespace raft {
namespace sparse {
namespace op {
/**
* Slice consecutive rows from a CSR array and populate newly sliced indptr array
* @tparam value_idx
* @param[in] start_row : beginning row to slice
* @param[in] stop_row : ending row to slice
* @param[in] indptr : indptr of input CSR to slice
* @param[out] indptr_out : output sliced indptr to populate
* @param[in] start_offset : beginning column offset of input indptr
* @param[in] stop_offset : ending column offset of input indptr
* @param[in] stream : cuda stream for ordering events
*/
template <typename value_idx>
void csr_row_slice_indptr(value_idx start_row,
                          value_idx stop_row,
                          const value_idx* indptr,
                          value_idx* indptr_out,
                          value_idx* start_offset,
                          value_idx* stop_offset,
                          cudaStream_t stream)
{
  // Note: start_offset / stop_offset are written to host memory; the
  // detail:: implementation synchronizes `stream` before returning so they
  // are safe to read immediately afterwards.
  detail::csr_row_slice_indptr(
    start_row, stop_row, indptr, indptr_out, start_offset, stop_offset, stream);
}
/**
* Slice rows from a CSR, populate column and data arrays
* @tparam value_idx : data type of CSR index arrays
* @tparam value_t : data type of CSR data array
* @param[in] start_offset : beginning column offset to slice
* @param[in] stop_offset : ending column offset to slice
* @param[in] indices : column indices array from input CSR
* @param[in] data : data array from input CSR
* @param[out] indices_out : output column indices array
* @param[out] data_out : output data array
* @param[in] stream : cuda stream for ordering events
*/
template <typename value_idx, typename value_t>
void csr_row_slice_populate(value_idx start_offset,
                            value_idx stop_offset,
                            const value_idx* indices,
                            const value_t* data,
                            value_idx* indices_out,
                            value_t* data_out,
                            cudaStream_t stream)
{
  // Copies the half-open range [start_offset, stop_offset) of both the
  // column-index and data arrays; outputs must hold at least
  // (stop_offset - start_offset) elements.
  detail::csr_row_slice_populate(
    start_offset, stop_offset, indices, data, indices_out, data_out, stream);
}
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/sort.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_SORT_H
#define __SPARSE_SORT_H
#pragma once
#include <raft/core/resources.hpp>
#include <raft/sparse/op/detail/sort.h>
namespace raft {
namespace sparse {
namespace op {
/**
* @brief Sorts the arrays that comprise the coo matrix
* by row and then by column.
*
* @param m number of rows in coo matrix
* @param n number of cols in coo matrix
* @param nnz number of non-zeros
* @param rows rows array from coo matrix
* @param cols cols array from coo matrix
* @param vals vals array from coo matrix
* @param stream: cuda stream to use
*/
template <typename T>
void coo_sort(int m, int n, int nnz, int* rows, int* cols, T* vals, cudaStream_t stream)
{
  // Delegates to the thrust-based detail:: implementation, which sorts
  // (rows, cols) lexicographically and permutes vals alongside.
  detail::coo_sort(m, n, nnz, rows, cols, vals, stream);
}
/**
 * @brief Sort the underlying COO arrays by row, then by column within each row
 * @tparam T: the type name of the underlying value array
 * @param in: COO to sort
 * @param stream: the cuda stream to use
 */
template <typename T>
void coo_sort(COO<T>* const in, cudaStream_t stream)
{
  // In-place sort of the container's rows/cols/vals arrays.
  coo_sort<T>(in->n_rows, in->n_cols, in->nnz, in->rows(), in->cols(), in->vals(), stream);
}
/**
* Sorts a COO by its weight
* @tparam value_idx
* @tparam value_t
* @param[inout] rows source edges
* @param[inout] cols dest edges
* @param[inout] data edge weights
* @param[in] nnz number of edges in edge list
* @param[in] stream cuda stream for which to order cuda operations
*/
template <typename value_idx, typename value_t>
void coo_sort_by_weight(
  value_idx* rows, value_idx* cols, value_t* data, value_idx nnz, cudaStream_t stream)
{
  // Sorts all three arrays in place, keyed on `data`; see the detail::
  // implementation for the exact ordering.
  detail::coo_sort_by_weight(rows, cols, data, nnz, stream);
}
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/row_op.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_ROW_OP_H
#define __SPARSE_ROW_OP_H
#pragma once
#include <raft/core/resources.hpp>
#include <raft/sparse/op/detail/row_op.cuh>
namespace raft {
namespace sparse {
namespace op {
/**
 * @brief Perform a custom row operation on a CSR matrix in batches.
 * @tparam T numerical type of row_ind array
 * @tparam TPB_X number of threads per block to use for underlying kernel
 * @tparam Lambda type of custom operation function
 * @param row_ind the CSR row_ind array to perform parallel operations over
 * @param n_rows total number vertices in graph
 * @param nnz number of non-zeros
 * @param op custom row operation functor invoked as op(row, start_idx, stop_idx),
 *           where [start_idx, stop_idx) delimits the row's entries
 * @param stream cuda stream to use
 */
template <typename Index_, typename Lambda = auto(Index_, Index_, Index_)->void>
void csr_row_op(const Index_* row_ind, Index_ n_rows, Index_ nnz, Lambda op, cudaStream_t stream)
{
  // One thread per row with a 128-thread block size.
  detail::csr_row_op<Index_, 128, Lambda>(row_ind, n_rows, nnz, op, stream);
}
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/op | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/detail/reduce.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/op/sort.cuh>
#include <raft/util/device_atomics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <rmm/device_uvector.hpp>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace op {
namespace detail {
// Marks each COO entry with 1 if its (row, col) differs from its predecessor
// (a "new" coordinate) and 0 if it matches (a duplicate). Entry 0 is always
// marked 0 so a follow-on cumulative sum produces 0-based output indices.
// One thread per nonzero; assumes entries are sorted so duplicates are
// adjacent.
template <typename value_idx>
RAFT_KERNEL compute_duplicates_diffs_kernel(const value_idx* rows,
                                            const value_idx* cols,
                                            value_idx* diff,
                                            size_t nnz)
{
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= nnz) return;
  value_idx d = 1;
  if (tid == 0 || (rows[tid - 1] == rows[tid] && cols[tid - 1] == cols[tid])) d = 0;
  diff[tid] = d;
}
// Scatters each source entry to its deduplicated slot index[tid], keeping the
// maximum value among duplicates via atomicMax (floating-point overloads come
// from raft/util/device_atomics.cuh, included by this file). Duplicates share
// the same slot and, by definition, the same (row, col), so the unguarded
// row/col writes race only with identical values.
template <typename value_idx, typename value_t>
RAFT_KERNEL max_duplicates_kernel(const value_idx* src_rows,
                                  const value_idx* src_cols,
                                  const value_t* src_vals,
                                  const value_idx* index,
                                  value_idx* out_rows,
                                  value_idx* out_cols,
                                  value_t* out_vals,
                                  size_t nnz)
{
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < nnz) {
    value_idx idx = index[tid];
    atomicMax(&out_vals[idx], src_vals[tid]);
    out_rows[idx] = src_rows[tid];
    out_cols[idx] = src_cols[tid];
  }
}
/**
* Computes a mask from a sorted COO matrix where 0's denote
* duplicate values and 1's denote new values. This mask can
* be useful for computing an exclusive scan to pre-build offsets
* for reducing duplicates, such as when symmetrizing
* or taking the min of each duplicated value.
*
* Note that this function always marks the first value as 0 so that
* a cumulative sum can be performed as a follow-on. However, even
* if the mask is used directly, any duplicates should always have a
* 1 when first encountered so it can be assumed that the first element
* is always a 1 otherwise.
*
* @tparam value_idx
* @param[out] mask output mask, size nnz
* @param[in] rows COO rows array, size nnz
* @param[in] cols COO cols array, size nnz
* @param[in] nnz number of nonzeros in input arrays
* @param[in] stream cuda ops will be ordered wrt this stream
*/
template <typename value_idx>
void compute_duplicates_mask(
  value_idx* mask, const value_idx* rows, const value_idx* cols, size_t nnz, cudaStream_t stream)
{
  // An empty matrix has nothing to mask. This also guards against launching
  // the kernel below with a zero-sized grid (raft::ceildiv(0, 256) == 0
  // blocks would yield cudaErrorInvalidConfiguration).
  if (nnz == 0) { return; }
  RAFT_CUDA_TRY(cudaMemsetAsync(mask, 0, nnz * sizeof(value_idx), stream));
  // One thread per nonzero; each compares its entry against the predecessor.
  compute_duplicates_diffs_kernel<<<raft::ceildiv(nnz, (size_t)256), 256, 0, stream>>>(
    rows, cols, mask, nnz);
  // Surface launch-configuration errors immediately, consistent with the
  // other sparse-op kernel launches in this library.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* Performs a COO reduce of duplicate columns per row, taking the max weight
* for duplicate columns in each row. This function assumes the input COO
* has been sorted by both row and column but makes no assumption on
* the sorting of values.
* @tparam value_idx
* @tparam value_t
* @param[out] out output COO, the nnz will be computed allocate() will be called in this function.
* @param[in] rows COO rows array, size nnz
* @param[in] cols COO cols array, size nnz
* @param[in] vals COO vals array, size nnz
* @param[in] nnz number of nonzeros in COO input arrays
* @param[in] m number of rows in COO input matrix
* @param[in] n number of columns in COO input matrix
* @param[in] stream cuda ops will be ordered wrt this stream
*/
template <typename value_idx, typename value_t>
void max_duplicates(raft::resources const& handle,
                    raft::sparse::COO<value_t, value_idx>& out,
                    const value_idx* rows,
                    const value_idx* cols,
                    const value_t* vals,
                    size_t nnz,
                    size_t m,
                    size_t n)
{
  auto stream = resource::get_cuda_stream(handle);
  auto thrust_policy = resource::get_thrust_policy(handle);
  // compute diffs & take exclusive scan. `diff` holds nnz+1 entries: the
  // first nnz carry the 0/1 "new coordinate" mask; after the exclusive scan,
  // diff[i+1] is the output slot for input entry i and the last element is
  // the total count of 1's.
  rmm::device_uvector<value_idx> diff(nnz + 1, stream);
  compute_duplicates_mask(diff.data(), rows, cols, nnz, stream);
  thrust::exclusive_scan(thrust_policy, diff.data(), diff.data() + diff.size(), diff.data());
  // compute final size: the scan total counts every "new" marker except the
  // first entry (which is always masked 0), hence the +1 below.
  value_idx size = 0;
  raft::update_host(&size, diff.data() + (diff.size() - 1), 1, stream);
  resource::sync_stream(handle, stream);
  size++;
  out.allocate(size, m, n, true, stream);
  // perform reduce: scatter each entry to its slot (diff.data() + 1 so that
  // entry i reads diff[i+1]), keeping the max value among duplicates.
  max_duplicates_kernel<<<raft::ceildiv(nnz, (size_t)256), 256, 0, stream>>>(
    rows, cols, vals, diff.data() + 1, out.rows(), out.cols(), out.vals(), nnz);
}
}; // END namespace detail
}; // END namespace op
}; // END namespace sparse
}; // END namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/op | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/detail/filter.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cstdio>
#include <iostream>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/linalg/degree.cuh>
namespace raft {
namespace sparse {
namespace op {
namespace detail {
// One thread per row: copies this row's entries whose value != scalar into
// the compressed output arrays. ex_scan[row] gives the row's write offset in
// the compressed output; cur_ex_scan delimits the row's read range in the
// uncompressed input.
template <int TPB_X, typename T>
RAFT_KERNEL coo_remove_scalar_kernel(const int* rows,
                                     const int* cols,
                                     const T* vals,
                                     int nnz,
                                     int* crows,
                                     int* ccols,
                                     T* cvals,
                                     int* ex_scan,
                                     int* cur_ex_scan,
                                     int m,
                                     T scalar)
{
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  if (row < m) {
    int start = cur_ex_scan[row];
    int stop = get_stop_idx(row, m, nnz, cur_ex_scan);
    int cur_out_idx = ex_scan[row];
    for (int idx = start; idx < stop; idx++) {
      if (vals[idx] != scalar) {
        crows[cur_out_idx] = rows[idx];
        ccols[cur_out_idx] = cols[idx];
        cvals[cur_out_idx] = vals[idx];
        ++cur_out_idx;
      }
    }
  }
}
/**
 * @brief Removes the values matching a particular scalar from a COO formatted sparse matrix.
 *
 * Per-row read offsets come from an exclusive scan of `cur_cnnz` (total
 * counts) and per-row write offsets from an exclusive scan of `cnnz`
 * (non-scalar counts).
 *
 * @param rows: input array of rows (size n)
 * @param cols: input array of cols (size n)
 * @param vals: input array of vals (size n)
 * @param nnz: size of current rows/cols/vals arrays
 * @param crows: compressed array of rows
 * @param ccols: compressed array of cols
 * @param cvals: compressed array of vals
 * @param cnnz: array of non-zero counts per row
 * @param cur_cnnz array of counts per row
 * @param scalar: scalar to remove from arrays
 * @param n: number of rows in dense matrix
 * @param stream: cuda stream to use
 */
template <int TPB_X, typename T>
void coo_remove_scalar(const int* rows,
                       const int* cols,
                       const T* vals,
                       int nnz,
                       int* crows,
                       int* ccols,
                       T* cvals,
                       int* cnnz,
                       int* cur_cnnz,
                       T scalar,
                       int n,
                       cudaStream_t stream)
{
  rmm::device_uvector<int> ex_scan(n, stream);
  rmm::device_uvector<int> cur_ex_scan(n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(ex_scan.data(), 0, n * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(cur_ex_scan.data(), 0, n * sizeof(int), stream));
  // Output write offsets per row: exclusive scan of the non-scalar counts.
  thrust::device_ptr<int> dev_cnnz = thrust::device_pointer_cast(cnnz);
  thrust::device_ptr<int> dev_ex_scan = thrust::device_pointer_cast(ex_scan.data());
  thrust::exclusive_scan(rmm::exec_policy(stream), dev_cnnz, dev_cnnz + n, dev_ex_scan);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // Input read offsets per row: exclusive scan of the total counts.
  thrust::device_ptr<int> dev_cur_cnnz = thrust::device_pointer_cast(cur_cnnz);
  thrust::device_ptr<int> dev_cur_ex_scan = thrust::device_pointer_cast(cur_ex_scan.data());
  thrust::exclusive_scan(rmm::exec_policy(stream), dev_cur_cnnz, dev_cur_cnnz + n, dev_cur_ex_scan);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  dim3 grid(raft::ceildiv(n, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);
  coo_remove_scalar_kernel<TPB_X><<<grid, blk, 0, stream>>>(rows,
                                                            cols,
                                                            vals,
                                                            nnz,
                                                            crows,
                                                            ccols,
                                                            cvals,
                                                            dev_ex_scan.get(),
                                                            dev_cur_ex_scan.get(),
                                                            n,
                                                            scalar);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Removes the values matching a particular scalar from a COO formatted sparse matrix.
*
* @param in: input COO matrix
* @param out: output COO matrix
* @param scalar: scalar to remove from arrays
* @param stream: cuda stream to use
*/
template <int TPB_X, typename T>
void coo_remove_scalar(COO<T>* in, COO<T>* out, T scalar, cudaStream_t stream)
{
  rmm::device_uvector<int> row_count_nz(in->n_rows, stream);
  rmm::device_uvector<int> row_count(in->n_rows, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(row_count_nz.data(), 0, in->n_rows * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(row_count.data(), 0, in->n_rows * sizeof(int), stream));
  // Per-row totals, and per-row counts of entries whose value != scalar.
  linalg::coo_degree(in->rows(), in->nnz, row_count.data(), stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  linalg::coo_degree_scalar(in->rows(), in->vals(), in->nnz, scalar, row_count_nz.data(), stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // Output nnz is the sum of the non-scalar counts (blocking host reduce).
  thrust::device_ptr<int> d_row_count_nz = thrust::device_pointer_cast(row_count_nz.data());
  int out_nnz =
    thrust::reduce(rmm::exec_policy(stream), d_row_count_nz, d_row_count_nz + in->n_rows);
  out->allocate(out_nnz, in->n_rows, in->n_cols, false, stream);
  // Compress into the freshly-allocated output using the per-row counts.
  coo_remove_scalar<TPB_X, T>(in->rows(),
                              in->cols(),
                              in->vals(),
                              in->nnz,
                              out->rows(),
                              out->cols(),
                              out->vals(),
                              row_count_nz.data(),
                              row_count.data(),
                              scalar,
                              in->n_rows,
                              stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Removes zeros from a COO formatted sparse matrix.
*
* @param in: input COO matrix
* @param out: output COO matrix
* @param stream: cuda stream to use
*/
template <int TPB_X, typename T>
void coo_remove_zeros(COO<T>* in, COO<T>* out, cudaStream_t stream)
{
  // Zero removal is scalar removal with scalar == 0.
  coo_remove_scalar<TPB_X, T>(in, out, T(0.0), stream);
}
}; // namespace detail
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/op | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/detail/sort.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/sparse/detail/utils.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <cusparse_v2.h>
#include <cuda_runtime.h>
#include <algorithm>
namespace raft {
namespace sparse {
namespace op {
namespace detail {
// Lexicographic less-than on (row, col) tuples, used to sort COO entries.
struct TupleComp {
  template <typename one, typename two>
  __host__ __device__
  bool
  operator()(const one& t1, const two& t2)
  {
    // sort first by the row key (tuple element 0),
    if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true;
    if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false;
    // then by the column key (tuple element 1), in ascending order
    return thrust::get<1>(t1) < thrust::get<1>(t2);
  }
};
/**
* @brief Sorts the arrays that comprise the coo matrix
* by row and then by column.
*
* @param m number of rows in coo matrix
* @param n number of cols in coo matrix
* @param nnz number of non-zeros
* @param rows rows array from coo matrix
* @param cols cols array from coo matrix
* @param vals vals array from coo matrix
* @param stream: cuda stream to use
*/
template <typename T>
void coo_sort(int m, int n, int nnz, int* rows, int* cols, T* vals, cudaStream_t stream)
{
  auto coo_indices = thrust::make_zip_iterator(thrust::make_tuple(rows, cols));
  // Lexicographic (row, col) sort via TupleComp; vals is permuted alongside
  // the keys by sort_by_key.
  thrust::sort_by_key(rmm::exec_policy(stream), coo_indices, coo_indices + nnz, vals, TupleComp());
}
/**
 * @brief Sort the underlying COO arrays by row, then by column within each row
 * @tparam T: the type name of the underlying value array
 * @param in: COO to sort
 * @param stream: the cuda stream to use
 */
template <typename T>
void coo_sort(COO<T>* const in, cudaStream_t stream)
{
  // In-place sort of the container's rows/cols/vals arrays.
  coo_sort<T>(in->n_rows, in->n_cols, in->nnz, in->rows(), in->cols(), in->vals(), stream);
}
/**
* Sorts a COO by its weight
* @tparam value_idx
* @tparam value_t
* @param[inout] rows source edges
* @param[inout] cols dest edges
* @param[inout] data edge weights
* @param[in] nnz number of edges in edge list
* @param[in] stream cuda stream for which to order cuda operations
*/
template <typename value_idx, typename value_t>
void coo_sort_by_weight(
  value_idx* rows, value_idx* cols, value_t* data, value_idx nnz, cudaStream_t stream)
{
  // Ascending sort keyed on the edge weights (thrust's default comparator);
  // the zipped (row, col) pairs are permuted alongside the keys.
  thrust::device_ptr<value_t> t_data = thrust::device_pointer_cast(data);
  auto first = thrust::make_zip_iterator(thrust::make_tuple(rows, cols));
  thrust::sort_by_key(rmm::exec_policy(stream), t_data, t_data + nnz, first);
}
}; // namespace detail
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/op | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/detail/slice.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace op {
namespace detail {
/**
* Slice consecutive rows from a CSR array and populate newly sliced indptr array
* @tparam value_idx
* @param[in] start_row : beginning row to slice
* @param[in] stop_row : ending row to slice
* @param[in] indptr : indptr of input CSR to slice
* @param[out] indptr_out : output sliced indptr to populate
* @param[in] start_offset : beginning column offset of input indptr
* @param[in] stop_offset : ending column offset of input indptr
* @param[in] stream : cuda stream for ordering events
*/
template <typename value_idx>
void csr_row_slice_indptr(value_idx start_row,
                          value_idx stop_row,
                          const value_idx* indptr,
                          value_idx* indptr_out,
                          value_idx* start_offset,
                          value_idx* stop_offset,
                          cudaStream_t stream)
{
  // Fetch the column offsets bounding the slice into host memory; the sync
  // below is required before *start_offset can be read on the host.
  raft::update_host(start_offset, indptr + start_row, 1, stream);
  raft::update_host(stop_offset, indptr + stop_row + 1, 1, stream);
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  value_idx s_offset = *start_offset;
  // 0-based indexing so we need to add 1 to stop row. Because we want n_rows+1,
  // we add another 1 to stop row.
  raft::copy_async(indptr_out, indptr + start_row, (stop_row + 2) - start_row, stream);
  // Rebase the sliced indptr so the first sliced row starts at offset 0.
  raft::linalg::unaryOp<value_idx>(indptr_out,
                                   indptr_out,
                                   (stop_row + 2) - start_row,
                                   raft::sub_const_op<value_idx>(s_offset),
                                   stream);
}
/**
 * Slice rows from a CSR, populate column and data arrays
 * @tparam value_idx : data type of CSR index arrays
 * @tparam value_t : data type of CSR data array
 * @param[in] start_offset : beginning column offset to slice
 * @param[in] stop_offset : ending column offset to slice
 * @param[in] indices : column indices array from input CSR
 * @param[in] data : data array from input CSR
 * @param[out] indices_out : output column indices array, at least
 *             (stop_offset - start_offset) elements
 * @param[out] data_out : output data array, at least
 *             (stop_offset - start_offset) elements
 * @param[in] stream : cuda stream for ordering events
 */
template <typename value_idx, typename value_t>
void csr_row_slice_populate(value_idx start_offset,
                            value_idx stop_offset,
                            const value_idx* indices,
                            const value_t* data,
                            value_idx* indices_out,
                            value_t* data_out,
                            cudaStream_t stream)
{
  // Stream-ordered copies of the half-open range [start_offset, stop_offset).
  raft::copy(indices_out, indices + start_offset, stop_offset - start_offset, stream);
  raft::copy(data_out, data + start_offset, stop_offset - start_offset, stream);
}
}; // namespace detail
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/op | rapidsai_public_repos/raft/cpp/include/raft/sparse/op/detail/row_op.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace op {
namespace detail {
/**
 * Kernel applying a user functor once per CSR row. One thread handles one
 * row; expects a 1-D launch with blockDim.x == TPB_X.
 */
template <typename T, int TPB_X = 256, typename Lambda = auto(T, T, T)->void>
RAFT_KERNEL csr_row_op_kernel(const T* row_ind, T n_rows, T nnz, Lambda op)
{
  T tid = blockIdx.x * TPB_X + threadIdx.x;
  if (tid >= n_rows) return;  // guard the grid tail

  T begin = row_ind[tid];
  // The last row's extent is bounded by nnz rather than the next offset.
  T end = (tid == n_rows - 1) ? nnz : row_ind[tid + 1];
  op(tid, begin, end);
}
/**
 * @brief Perform a custom row operation on a CSR matrix.
 * @tparam Index_ numerical type of the row_ind array
 * @tparam TPB_X number of threads per block for the underlying kernel
 * @tparam Lambda type of the custom operation functor
 * @param row_ind the CSR row_ind array to perform parallel operations over
 * @param n_rows total number of vertices in the graph (rows in the matrix)
 * @param nnz number of non-zeros
 * @param op custom row operation functor; invoked as op(row, start_idx, stop_idx)
 *        where [start_idx, stop_idx) bounds the row's entries
 * @param stream cuda stream to use
 */
template <typename Index_, int TPB_X = 256, typename Lambda = auto(Index_, Index_, Index_)->void>
void csr_row_op(const Index_* row_ind, Index_ n_rows, Index_ nnz, Lambda op, cudaStream_t stream)
{
  // One thread per row, TPB_X threads per block, ceil-div for the grid tail
  dim3 blocks(raft::ceildiv(n_rows, Index_(TPB_X)));
  dim3 threads(TPB_X);
  csr_row_op_kernel<Index_, TPB_X><<<blocks, threads, 0, stream>>>(row_ind, n_rows, nnz, op);
  // Surface launch-configuration errors without clearing the sticky error state
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // namespace detail
}; // namespace op
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/lanczos.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __LANCZOS_H
#define __LANCZOS_H
#pragma once
#include <raft/sparse/solver/detail/lanczos.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace raft::sparse::solver {
// =========================================================
// Eigensolver
// =========================================================
/**
 * @brief Compute smallest eigenvectors of symmetric matrix
 * Computes eigenvalues and eigenvectors that are least
 * positive. If matrix is positive definite or positive
 * semidefinite, the computed eigenvalues are smallest in
 * magnitude.
 * The largest eigenvalue is estimated by performing several
 * Lanczos iterations. An implicitly restarted Lanczos method is
 * then applied to A+s*I, where s is the negation of the largest
 * eigenvalue.
 * This is a thin public wrapper that forwards all arguments to
 * detail::computeSmallestEigenvectors.
 * @tparam index_type_t the type of data used for indexing.
 * @tparam value_type_t the type of data used for weights, distances.
 * @param handle the raft handle.
 * @param A Matrix.
 * @param nEigVecs Number of eigenvectors to compute.
 * @param maxIter Maximum number of Lanczos steps. Does not include
 * Lanczos steps used to estimate largest eigenvalue.
 * @param restartIter Maximum size of Lanczos system before
 * performing an implicit restart. Should be at least 4.
 * @param tol Convergence tolerance. Lanczos iteration will
 * terminate when the residual norm is less than tol*theta, where
 * theta is an estimate for the smallest unwanted eigenvalue
 * (i.e. the (nEigVecs+1)th smallest eigenvalue).
 * @param reorthogonalize Whether to reorthogonalize Lanczos
 * vectors.
 * @param iter On exit, set to the total number of Lanczos
 * iterations performed. Does not include Lanczos steps used to
 * estimate largest eigenvalue.
 * @param eigVals_dev (Output, device memory, nEigVecs entries)
 * Smallest eigenvalues of matrix.
 * @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 * Eigenvectors corresponding to smallest eigenvalues of
 * matrix. Vectors are stored as columns of a column-major matrix
 * with dimensions n x nEigVecs.
 * @param seed random seed.
 * @return error flag.
 */
template <typename index_type_t, typename value_type_t>
int computeSmallestEigenvectors(
  raft::resources const& handle,
  raft::spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 1234567)
{
  return detail::computeSmallestEigenvectors(handle,
                                             A,
                                             nEigVecs,
                                             maxIter,
                                             restartIter,
                                             tol,
                                             reorthogonalize,
                                             iter,
                                             eigVals_dev,
                                             eigVecs_dev,
                                             seed);
}
/**
 * @brief Compute largest eigenvectors of symmetric matrix
 * Computes eigenvalues and eigenvectors that are most
 * positive. If matrix is positive definite or positive
 * semidefinite, the computed eigenvalues are largest in
 * magnitude.
 * The largest eigenvalue is estimated by performing several
 * Lanczos iterations, after which an implicitly restarted
 * Lanczos method is applied.
 * This is a thin public wrapper that forwards all arguments to
 * detail::computeLargestEigenvectors.
 * @tparam index_type_t the type of data used for indexing.
 * @tparam value_type_t the type of data used for weights, distances.
 * @param handle the raft handle.
 * @param A Matrix.
 * @param nEigVecs Number of eigenvectors to compute.
 * @param maxIter Maximum number of Lanczos steps. Does not include
 * Lanczos steps used to estimate largest eigenvalue.
 * @param restartIter Maximum size of Lanczos system before
 * performing an implicit restart. Should be at least 4.
 * @param tol Convergence tolerance. Lanczos iteration will
 * terminate when the residual norm is less than tol*theta, where
 * theta is an estimate for the largest unwanted eigenvalue
 * (i.e. the (nEigVecs+1)th largest eigenvalue).
 * @param reorthogonalize Whether to reorthogonalize Lanczos
 * vectors.
 * @param iter On exit, set to the total number of Lanczos
 * iterations performed. Does not include Lanczos steps used to
 * estimate largest eigenvalue.
 * @param eigVals_dev (Output, device memory, nEigVecs entries)
 * Largest eigenvalues of matrix.
 * @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 * Eigenvectors corresponding to largest eigenvalues of
 * matrix. Vectors are stored as columns of a column-major matrix
 * with dimensions n x nEigVecs.
 * @param seed random seed.
 * @return error flag.
 */
template <typename index_type_t, typename value_type_t>
int computeLargestEigenvectors(
  raft::resources const& handle,
  raft::spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 123456)
{
  return detail::computeLargestEigenvectors(handle,
                                            A,
                                            nEigVecs,
                                            maxIter,
                                            restartIter,
                                            tol,
                                            reorthogonalize,
                                            iter,
                                            eigVals_dev,
                                            eigVecs_dev,
                                            seed);
}
} // namespace raft::sparse::solver
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/mst.cuh |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/sparse/solver/mst_solver.cuh>
namespace raft::sparse::solver {
/**
* Compute the minimum spanning tree (MST) or minimum spanning forest (MSF) depending on
* the connected components of the given graph.
*
* @tparam vertex_t integral type for precision of vertex indexing
* @tparam edge_t integral type for precision of edge indexing
* @tparam weight_t type of weights array
* @tparam alteration_t type to use for random alteration
*
* @param handle
* @param offsets csr inptr array of row offsets (size v+1)
* @param indices csr array of column indices (size e)
* @param weights csr array of weights (size e)
* @param v number of vertices in graph
* @param e number of edges in graph
* @param color array to store resulting colors for MSF
* @param stream cuda stream for ordering operations
* @param symmetrize_output should the resulting output edge list should be symmetrized?
* @param initialize_colors should the colors array be initialized inside the MST?
* @param iterations maximum number of iterations to perform
* @return a list of edges containing the mst (or a subset of the edges guaranteed to be in the mst
* when an msf is encountered)
*/
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t = weight_t>
Graph_COO<vertex_t, edge_t, weight_t> mst(raft::resources const& handle,
                                          edge_t const* offsets,
                                          vertex_t const* indices,
                                          weight_t const* weights,
                                          vertex_t const v,
                                          edge_t const e,
                                          vertex_t* color,
                                          cudaStream_t stream,
                                          bool symmetrize_output = true,
                                          bool initialize_colors = true,
                                          int iterations          = 0)
{
  // Build the Boruvka-style solver over the CSR inputs and run it to
  // completion; the solver returns the MST/MSF edge list as a Graph_COO.
  MST_solver<vertex_t, edge_t, weight_t, alteration_t> solver(handle,
                                                              offsets,
                                                              indices,
                                                              weights,
                                                              v,
                                                              e,
                                                              color,
                                                              stream,
                                                              symmetrize_output,
                                                              initialize_colors,
                                                              iterations);
  return solver.solve();
}
} // end namespace raft::sparse::solver
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/mst_solver.cuh |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resources.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::sparse::solver {
// Edge list (COO) container for MST/MSF output, backed by device memory.
template <typename vertex_t, typename edge_t, typename weight_t>
struct Graph_COO {
  // Source vertex of each edge
  rmm::device_uvector<vertex_t> src;
  // Destination vertex of each edge
  rmm::device_uvector<vertex_t> dst;
  // Weight of each edge
  rmm::device_uvector<weight_t> weights;
  // Number of valid edges stored in the buffers above
  edge_t n_edges;
  // Allocates (uninitialized) buffers for `size` edges on `stream`.
  // Note: n_edges is not set here; the producer fills it in.
  Graph_COO(vertex_t size, cudaStream_t stream)
    : src(size, stream), dst(size, stream), weights(size, stream)
  {
  }
};
// Solver computing a minimum spanning tree / forest over a CSR graph.
// Declaration only; the implementation lives in detail/mst_solver_inl.cuh
// (included at the bottom of this header).
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
class MST_solver {
 public:
  // Captures (does not copy) the CSR arrays and configuration; see the
  // documentation of raft::sparse::solver::mst for parameter semantics.
  MST_solver(raft::resources const& handle_,
             const edge_t* offsets_,
             const vertex_t* indices_,
             const weight_t* weights_,
             const vertex_t v_,
             const edge_t e_,
             vertex_t* color_,
             cudaStream_t stream_,
             bool symmetrize_output_,
             bool initialize_colors_,
             int iterations_);
  // Runs the solver and returns the resulting MST/MSF edge list.
  Graph_COO<vertex_t, edge_t, weight_t> solve();
  ~MST_solver() {}
 private:
  raft::resources const& handle;
  cudaStream_t stream;
  bool symmetrize_output, initialize_colors;
  int iterations;
  // CSR (non-owning views of caller-provided arrays)
  const edge_t* offsets;
  const vertex_t* indices;
  const weight_t* weights;
  const vertex_t v;
  const edge_t e;
  // Launch-configuration limits queried from the device
  vertex_t max_blocks;
  vertex_t max_threads;
  vertex_t sm_count;
  vertex_t* color_index; // represent each supervertex as a color
  rmm::device_uvector<alteration_t> min_edge_color; // minimum incident edge weight per color
  rmm::device_uvector<edge_t> new_mst_edge; // new minimum edge per vertex
  rmm::device_uvector<alteration_t> altered_weights; // weights to be used for mst
  rmm::device_scalar<edge_t> mst_edge_count; // total number of edges added after every iteration
  rmm::device_scalar<edge_t>
    prev_mst_edge_count; // total number of edges up to the previous iteration
  rmm::device_uvector<bool> mst_edge; // mst output - true if the edge belongs in mst
  rmm::device_uvector<vertex_t> next_color; // next iteration color
  rmm::device_uvector<vertex_t> color; // index of color that vertex points to
  // new src-dst pairs found per iteration
  rmm::device_uvector<vertex_t> temp_src;
  rmm::device_uvector<vertex_t> temp_dst;
  rmm::device_uvector<weight_t> temp_weights;
  // Internal phases of one Boruvka-style iteration
  void label_prop(vertex_t* mst_src, vertex_t* mst_dst);
  void min_edge_per_vertex();
  void min_edge_per_supervertex();
  void check_termination();
  void alteration();
  alteration_t alteration_max();
  void append_src_dst_pair(vertex_t* mst_src, vertex_t* mst_dst, weight_t* mst_weights);
};
} // namespace raft::sparse::solver
#include <raft/sparse/solver/detail/mst_solver_inl.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/solver | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/detail/lanczos.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// for cmath:
#define _USE_MATH_DEFINES
#include <cmath>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>
#include <cuda.h>
#include <curand.h>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/detail/lapack.hpp>
#include <raft/spectral/detail/warn_dbg.hpp>
#include <raft/spectral/matrix_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::sparse::solver::detail {
// Type-overloaded wrapper around cuRAND normal generation so templated code
// can call curandGenerateNormalX uniformly for float and double.
// NOTE(review): cuRAND documents that n must be even for pseudorandom
// generators of normal distributions — callers should ensure this.
inline curandStatus_t curandGenerateNormalX(
  curandGenerator_t generator, float* outputPtr, size_t n, float mean, float stddev)
{
  return curandGenerateNormal(generator, outputPtr, n, mean, stddev);
}
// Double-precision overload: dispatches to curandGenerateNormalDouble.
inline curandStatus_t curandGenerateNormalX(
  curandGenerator_t generator, double* outputPtr, size_t n, double mean, double stddev)
{
  return curandGenerateNormalDouble(generator, outputPtr, n, mean, stddev);
}
// =========================================================
// Helper functions
// =========================================================
/**
* @brief Perform Lanczos iteration
* Lanczos iteration is performed on a shifted matrix A+shift*I.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param A Matrix.
* @param iter Pointer to current Lanczos iteration. On exit, the
* variable is set equal to the final Lanczos iteration.
* @param maxIter Maximum Lanczos iteration. This function will
* perform a maximum of maxIter-*iter iterations.
* @param shift Matrix shift.
* @param tol Convergence tolerance. Lanczos iteration will
* terminate when the residual norm (i.e. entry in beta_host) is
* less than tol.
* @param reorthogonalize Whether to reorthogonalize Lanczos
* vectors.
* @param alpha_host (Output, host memory, maxIter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Output, host memory, maxIter entries)
* Off-diagonal entries of Lanczos system.
* @param lanczosVecs_dev (Input/output, device memory,
* n*(maxIter+1) entries) Lanczos vectors. Vectors are stored as
* columns of a column-major matrix with dimensions
* n x (maxIter+1).
* @param work_dev (Output, device memory, maxIter entries)
* Workspace. Not needed if full reorthogonalization is disabled.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
int performLanczosIteration(raft::resources const& handle,
                            spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
                            index_type_t* iter,
                            index_type_t maxIter,
                            value_type_t shift,
                            value_type_t tol,
                            bool reorthogonalize,
                            value_type_t* __restrict__ alpha_host,
                            value_type_t* __restrict__ beta_host,
                            value_type_t* __restrict__ lanczosVecs_dev,
                            value_type_t* __restrict__ work_dev)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------
  // Useful variables
  constexpr value_type_t one    = 1;
  constexpr value_type_t negOne = -1;
  constexpr value_type_t zero   = 0;
  value_type_t alpha;
  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);
  RAFT_EXPECTS(A != nullptr, "Null matrix pointer.");
  index_type_t n = A->nrows_;
  // -------------------------------------------------------
  // Compute second Lanczos vector
  // -------------------------------------------------------
  if (*iter <= 0) {
    *iter = 1;
    // Apply matrix
    // Seed v1 with v0 first so the mv call can fold in the shift term
    // (NOTE(review): presumably mv(a, x, b, y) computes y = a*A*x + b*y —
    // confirm against sparse_matrix_t::mv).
    if (shift != 0)
      RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + n,
                                    lanczosVecs_dev,
                                    n * sizeof(value_type_t),
                                    cudaMemcpyDeviceToDevice,
                                    stream));
    A->mv(1, lanczosVecs_dev, shift, lanczosVecs_dev + n);
    // Orthogonalize Lanczos vector
    // alpha_0 = <v0, v1>; subtract the projection of v1 onto v0
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasdot(
      cublas_h, n, lanczosVecs_dev, 1, lanczosVecs_dev + IDX(0, 1, n), 1, alpha_host, stream));
    alpha = -alpha_host[0];
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasaxpy(
      cublas_h, n, &alpha, lanczosVecs_dev, 1, lanczosVecs_dev + IDX(0, 1, n), 1, stream));
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasnrm2(
      cublas_h, n, lanczosVecs_dev + IDX(0, 1, n), 1, beta_host, stream));
    // Check if Lanczos has converged
    if (beta_host[0] <= tol) return 0;
    // Normalize Lanczos vector
    alpha = 1 / beta_host[0];
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasscal(
      cublas_h, n, &alpha, lanczosVecs_dev + IDX(0, 1, n), 1, stream));
  }
  // -------------------------------------------------------
  // Compute remaining Lanczos vectors
  // -------------------------------------------------------
  while (*iter < maxIter) {
    ++(*iter);
    // Apply matrix
    if (shift != 0)
      RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + (*iter) * n,
                                    lanczosVecs_dev + (*iter - 1) * n,
                                    n * sizeof(value_type_t),
                                    cudaMemcpyDeviceToDevice,
                                    stream));
    A->mv(1, lanczosVecs_dev + IDX(0, *iter - 1, n), shift, lanczosVecs_dev + IDX(0, *iter, n));
    // Full reorthogonalization
    // "Twice is enough" algorithm per Kahan and Parlett
    if (reorthogonalize) {
      // First pass: work = V' * v_new (projections onto all previous vectors),
      // then v_new -= V * work.
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(cublas_h,
                                                       CUBLAS_OP_T,
                                                       n,
                                                       *iter,
                                                       &one,
                                                       lanczosVecs_dev,
                                                       n,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       &zero,
                                                       work_dev,
                                                       1,
                                                       stream));
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(cublas_h,
                                                       CUBLAS_OP_N,
                                                       n,
                                                       *iter,
                                                       &negOne,
                                                       lanczosVecs_dev,
                                                       n,
                                                       work_dev,
                                                       1,
                                                       &one,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       stream));
      // The diagonal entry alpha_{iter-1} is the projection onto the previous
      // Lanczos vector, computed in the first gemv pass above.
      RAFT_CUDA_TRY(cudaMemcpyAsync(alpha_host + (*iter - 1),
                                    work_dev + (*iter - 1),
                                    sizeof(value_type_t),
                                    cudaMemcpyDeviceToHost,
                                    stream));
      // Second pass of the same projection/subtraction ("twice is enough")
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(cublas_h,
                                                       CUBLAS_OP_T,
                                                       n,
                                                       *iter,
                                                       &one,
                                                       lanczosVecs_dev,
                                                       n,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       &zero,
                                                       work_dev,
                                                       1,
                                                       stream));
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(cublas_h,
                                                       CUBLAS_OP_N,
                                                       n,
                                                       *iter,
                                                       &negOne,
                                                       lanczosVecs_dev,
                                                       n,
                                                       work_dev,
                                                       1,
                                                       &one,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       stream));
    }
    // Orthogonalization with 3-term recurrence relation
    else {
      // Subtract alpha_{iter-1} * v_{iter-1} and beta_{iter-2} * v_{iter-2}
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasdot(cublas_h,
                                                      n,
                                                      lanczosVecs_dev + IDX(0, *iter - 1, n),
                                                      1,
                                                      lanczosVecs_dev + IDX(0, *iter, n),
                                                      1,
                                                      alpha_host + (*iter - 1),
                                                      stream));
      auto alpha = -alpha_host[*iter - 1];
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasaxpy(cublas_h,
                                                       n,
                                                       &alpha,
                                                       lanczosVecs_dev + IDX(0, *iter - 1, n),
                                                       1,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       stream));
      alpha = -beta_host[*iter - 2];
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasaxpy(cublas_h,
                                                       n,
                                                       &alpha,
                                                       lanczosVecs_dev + IDX(0, *iter - 2, n),
                                                       1,
                                                       lanczosVecs_dev + IDX(0, *iter, n),
                                                       1,
                                                       stream));
    }
    // Compute residual
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasnrm2(
      cublas_h, n, lanczosVecs_dev + IDX(0, *iter, n), 1, beta_host + *iter - 1, stream));
    // Check if Lanczos has converged
    if (beta_host[*iter - 1] <= tol) break;
    // Normalize Lanczos vector
    alpha = 1 / beta_host[*iter - 1];
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasscal(
      cublas_h, n, &alpha, lanczosVecs_dev + IDX(0, *iter, n), 1, stream));
  }
  resource::sync_stream(handle, stream);
  return 0;
}
/**
* @brief Find Householder transform for 3-dimensional system
* Given an input vector v=[x,y,z]', this function finds a
* Householder transform P such that P*v is a multiple of
* e_1=[1,0,0]'. The input vector v is overwritten with the
* Householder vector such that P=I-2*v*v'.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param v (Input/output, host memory, 3 entries) Input
* 3-dimensional vector. On exit, the vector is set to the
* Householder vector.
* @param Pv (Output, host memory, 1 entry) First entry of P*v
* (here v is the input vector). Either equal to ||v||_2 or
* -||v||_2.
* @param P (Output, host memory, 9 entries) Householder transform
* matrix. Matrix dimensions are 3 x 3.
*/
template <typename index_type_t, typename value_type_t>
static void findHouseholder3(value_type_t* v, value_type_t* Pv, value_type_t* P)
{
  // ||v||_2 of the input vector
  *Pv = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  // Reflect toward whichever of +/-e_1 avoids catastrophic cancellation
  if (v[0] >= 0) *Pv = -(*Pv);
  v[0] -= *Pv;
  // Normalize the Householder vector (or zero it when degenerate)
  value_type_t vnorm = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  if (vnorm != 0) {
    v[0] /= vnorm;
    v[1] /= vnorm;
    v[2] /= vnorm;
  } else {
    v[0] = 0;
    v[1] = 0;
    v[2] = 0;
  }
  // Build P = I - 2*v*v' (column-major 3x3)
  index_type_t row, col;
  for (col = 0; col < 3; ++col)
    for (row = 0; row < 3; ++row)
      P[IDX(row, col, 3)] = -2 * v[row] * v[col];
  for (row = 0; row < 3; ++row)
    P[IDX(row, row, 3)] += 1;
}
/**
* @brief Apply 3-dimensional Householder transform to 4 x 4 matrix
* The Householder transform is pre-applied to the top three rows
* of the matrix and post-applied to the left three columns. The
* 4 x 4 matrix is intended to contain the bulge that is produced
* in the Francis QR algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param v (Input, host memory, 3 entries) Householder vector.
* @param A (Input/output, host memory, 16 entries) 4 x 4 matrix.
*/
template <typename index_type_t, typename value_type_t>
static void applyHouseholder3(const value_type_t* v, value_type_t* A)
{
  index_type_t row, col;
  // Pre-apply: for each column, subtract twice its projection onto v
  // from the top three rows (i.e. left-multiply by I - 2*v*v').
  for (col = 0; col < 4; ++col) {
    value_type_t proj = 0;
    for (row = 0; row < 3; ++row)
      proj += v[row] * A[IDX(row, col, 4)];
    for (row = 0; row < 3; ++row)
      A[IDX(row, col, 4)] -= 2 * v[row] * proj;
  }
  // Post-apply: same reflection on the left three columns of each row
  // (i.e. right-multiply by I - 2*v*v').
  for (row = 0; row < 4; ++row) {
    value_type_t proj = 0;
    for (col = 0; col < 3; ++col)
      proj += A[IDX(row, col, 4)] * v[col];
    for (col = 0; col < 3; ++col)
      A[IDX(row, col, 4)] -= 2 * proj * v[col];
  }
}
/**
* @brief Perform one step of Francis QR algorithm
* Equivalent to two steps of the classical QR algorithm on a
* tridiagonal matrix.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param n Matrix dimension.
* @param shift1 QR algorithm shift.
* @param shift2 QR algorithm shift.
* @param alpha (Input/output, host memory, n entries) Diagonal
* entries of tridiagonal matrix.
* @param beta (Input/output, host memory, n-1 entries)
* Off-diagonal entries of tridiagonal matrix.
* @param V (Input/output, host memory, n*n entries) Orthonormal
* transforms from previous steps of QR algorithm. Matrix
* dimensions are n x n. On exit, the orthonormal transform from
* this Francis QR step is post-applied to the matrix.
* @param work (Output, host memory, 3*n entries) Workspace.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int francisQRIteration(index_type_t n,
                              value_type_t shift1,
                              value_type_t shift2,
                              value_type_t* alpha,
                              value_type_t* beta,
                              value_type_t* V,
                              value_type_t* work)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------
  // Temporary storage of 4x4 bulge and Householder vector
  value_type_t bulge[16];
  // Householder vector
  value_type_t householder[3];
  // Householder matrix
  value_type_t householderMatrix[3 * 3];
  // Shifts are roots of the polynomial p(x)=x^2+b*x+c
  value_type_t b = -shift1 - shift2;
  value_type_t c = shift1 * shift2;
  // Loop indices
  index_type_t i, j, pos;
  // Temporary variable
  value_type_t temp;
  // -------------------------------------------------------
  // Implementation
  // -------------------------------------------------------
  // Compute initial Householder transform
  // First column of p(T) = (T - shift1*I)(T - shift2*I) for tridiagonal T;
  // only the top three entries are nonzero.
  householder[0] = alpha[0] * alpha[0] + beta[0] * beta[0] + b * alpha[0] + c;
  householder[1] = beta[0] * (alpha[0] + alpha[1] + b);
  householder[2] = beta[0] * beta[1];
  findHouseholder3<index_type_t, value_type_t>(householder, &temp, householderMatrix);
  // Apply initial Householder transform to create bulge
  // Load the leading 4x4 window of the tridiagonal matrix into `bulge`
  memset(bulge, 0, 16 * sizeof(value_type_t));
  for (i = 0; i < 4; ++i)
    bulge[IDX(i, i, 4)] = alpha[i];
  for (i = 0; i < 3; ++i) {
    bulge[IDX(i + 1, i, 4)] = beta[i];
    bulge[IDX(i, i + 1, 4)] = beta[i];
  }
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  // Accumulate the transform into V (post-multiply its first 3 columns)
  Lapack<value_type_t>::gemm(false, false, n, 3, 3, 1, V, n, householderMatrix, 3, 0, work, n);
  memcpy(V, work, 3 * n * sizeof(value_type_t));
  // Chase bulge to bottom-right of matrix with Householder transforms
  for (pos = 0; pos < n - 4; ++pos) {
    // Move to next position
    // The settled diagonal entry leaves the window; the window slides down
    // one row/column along the diagonal.
    alpha[pos]     = bulge[IDX(0, 0, 4)];
    householder[0] = bulge[IDX(1, 0, 4)];
    householder[1] = bulge[IDX(2, 0, 4)];
    householder[2] = bulge[IDX(3, 0, 4)];
    for (j = 0; j < 3; ++j)
      for (i = 0; i < 3; ++i)
        bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
    bulge[IDX(3, 0, 4)] = 0;
    bulge[IDX(3, 1, 4)] = 0;
    bulge[IDX(3, 2, 4)] = beta[pos + 3];
    bulge[IDX(0, 3, 4)] = 0;
    bulge[IDX(1, 3, 4)] = 0;
    bulge[IDX(2, 3, 4)] = beta[pos + 3];
    bulge[IDX(3, 3, 4)] = alpha[pos + 4];
    // Apply Householder transform
    findHouseholder3<index_type_t, value_type_t>(householder, beta + pos, householderMatrix);
    applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
    Lapack<value_type_t>::gemm(
      false, false, n, 3, 3, 1, V + IDX(0, pos + 1, n), n, householderMatrix, 3, 0, work, n);
    memcpy(V + IDX(0, pos + 1, n), work, 3 * n * sizeof(value_type_t));
  }
  // Apply penultimate Householder transform
  // Values in the last row and column are zero
  alpha[n - 4]   = bulge[IDX(0, 0, 4)];
  householder[0] = bulge[IDX(1, 0, 4)];
  householder[1] = bulge[IDX(2, 0, 4)];
  householder[2] = bulge[IDX(3, 0, 4)];
  for (j = 0; j < 3; ++j)
    for (i = 0; i < 3; ++i)
      bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
  bulge[IDX(3, 0, 4)] = 0;
  bulge[IDX(3, 1, 4)] = 0;
  bulge[IDX(3, 2, 4)] = 0;
  bulge[IDX(0, 3, 4)] = 0;
  bulge[IDX(1, 3, 4)] = 0;
  bulge[IDX(2, 3, 4)] = 0;
  bulge[IDX(3, 3, 4)] = 0;
  findHouseholder3<index_type_t, value_type_t>(householder, beta + n - 4, householderMatrix);
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  Lapack<value_type_t>::gemm(
    false, false, n, 3, 3, 1, V + IDX(0, n - 3, n), n, householderMatrix, 3, 0, work, n);
  memcpy(V + IDX(0, n - 3, n), work, 3 * n * sizeof(value_type_t));
  // Apply final Householder transform
  // Values in the last two rows and columns are zero
  alpha[n - 3]   = bulge[IDX(0, 0, 4)];
  householder[0] = bulge[IDX(1, 0, 4)];
  householder[1] = bulge[IDX(2, 0, 4)];
  householder[2] = 0;
  for (j = 0; j < 3; ++j)
    for (i = 0; i < 3; ++i)
      bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
  findHouseholder3<index_type_t, value_type_t>(householder, beta + n - 3, householderMatrix);
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  Lapack<value_type_t>::gemm(
    false, false, n, 2, 2, 1, V + IDX(0, n - 2, n), n, householderMatrix, 3, 0, work, n);
  memcpy(V + IDX(0, n - 2, n), work, 2 * n * sizeof(value_type_t));
  // Bulge has been eliminated
  alpha[n - 2] = bulge[IDX(0, 0, 4)];
  alpha[n - 1] = bulge[IDX(1, 1, 4)];
  beta[n - 2]  = bulge[IDX(1, 0, 4)];
  return 0;
}
/**
* @brief Perform implicit restart of Lanczos algorithm
* Shifts are Chebyshev nodes of unwanted region of matrix spectrum.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Matrix dimension.
* @param iter Current Lanczos iteration.
* @param iter_new Lanczos iteration after restart.
* @param shiftUpper Pointer (host memory) to upper bound for unwanted
* region. Value is ignored if less than *shiftLower. If a
* stronger upper bound has been found, the value is updated on
* exit.
* @param shiftLower Pointer (host memory) to lower bound for unwanted
* region. Value is ignored if greater than *shiftUpper. If a
* stronger lower bound has been found, the value is updated on
* exit.
* @param alpha_host (Input/output, host memory, iter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Input/output, host memory, iter entries)
* Off-diagonal entries of Lanczos system.
* @param V_host (Output, host memory, iter*iter entries)
* Orthonormal transform used to obtain restarted system. Matrix
* dimensions are iter x iter.
* @param work_host (Output, host memory, 4*iter entries)
* Workspace.
* @param lanczosVecs_dev (Input/output, device memory, n*(iter+1)
* entries) Lanczos vectors. Vectors are stored as columns of a
* column-major matrix with dimensions n x (iter+1).
* @param work_dev (Output, device memory, (n+iter)*iter entries)
* Workspace.
* @param smallest_eig specifies whether smallest (true) or largest
* (false) eigenvalues are to be calculated.
* @return error flag.
*/
template <typename index_type_t, typename value_type_t>
static int lanczosRestart(raft::resources const& handle,
                          index_type_t n,
                          index_type_t iter,
                          index_type_t iter_new,
                          value_type_t* shiftUpper,
                          value_type_t* shiftLower,
                          value_type_t* __restrict__ alpha_host,
                          value_type_t* __restrict__ beta_host,
                          value_type_t* __restrict__ V_host,
                          value_type_t* __restrict__ work_host,
                          value_type_t* __restrict__ lanczosVecs_dev,
                          value_type_t* __restrict__ work_dev,
                          bool smallest_eig)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------

  // Useful constants
  constexpr value_type_t zero = 0;
  constexpr value_type_t one  = 1;

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // Loop index
  index_type_t i;

  // Number of implicit restart steps
  // Assumed to be even since each call to Francis algorithm is
  // equivalent to two calls of QR algorithm
  index_type_t restartSteps = iter - iter_new;

  // Ritz values from Lanczos method (stored in the tail of work_host,
  // past the three iter-sized scratch slots used elsewhere)
  value_type_t* ritzVals_host = work_host + 3 * iter;
  // Shifts for implicit restart
  value_type_t* shifts_host;

  // Orthonormal matrix for similarity transform (device copy of V_host,
  // placed after the n*iter scratch area of work_dev)
  value_type_t* V_dev = work_dev + n * iter;

  // -------------------------------------------------------
  // Implementation
  // -------------------------------------------------------

  // Compute Ritz values (eigenvalues of the iter x iter tridiagonal
  // Lanczos system). sterf overwrites its inputs, so work on copies:
  // diagonal -> ritzVals_host, off-diagonal -> work_host.
  memcpy(ritzVals_host, alpha_host, iter * sizeof(value_type_t));
  memcpy(work_host, beta_host, (iter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::sterf(iter, ritzVals_host, work_host);

  // Debug: Print largest eigenvalues
  // for (int i = iter-iter_new; i < iter; ++i)
  //  std::cout <<*(ritzVals_host+i)<< " ";
  // std::cout <<std::endl;

  // Initialize similarity transform with identity matrix
  memset(V_host, 0, iter * iter * sizeof(value_type_t));
  for (i = 0; i < iter; ++i)
    V_host[IDX(i, i, iter)] = 1;

  // Determine interval of unwanted Ritz values to suppress. The caller
  // initializes shiftLower > shiftUpper as a sentinel meaning "interval
  // not yet set"; thereafter the interval only grows across restarts.
  if (smallest_eig) {
    if (*shiftLower > *shiftUpper) {
      // First restart: take the upper end of the spectrum directly
      *shiftUpper = ritzVals_host[iter - 1];
      *shiftLower = ritzVals_host[iter_new];
    } else {
      *shiftUpper = std::max(*shiftUpper, ritzVals_host[iter - 1]);
      *shiftLower = std::min(*shiftLower, ritzVals_host[iter_new]);
    }
  } else {
    if (*shiftLower > *shiftUpper) {
      // Suppress the lower end of the spectrum when seeking largest eigs
      *shiftUpper = ritzVals_host[iter - iter_new - 1];
      *shiftLower = ritzVals_host[0];
    } else {
      *shiftUpper = std::max(*shiftUpper, ritzVals_host[iter - iter_new - 1]);
      *shiftLower = std::min(*shiftLower, ritzVals_host[0]);
    }
  }

  // Calculate Chebyshev nodes on [shiftLower, shiftUpper] as shifts.
  // The Ritz-value buffer is no longer needed, so reuse it.
  shifts_host = ritzVals_host;
  for (i = 0; i < restartSteps; ++i) {
    shifts_host[i] = cos((i + 0.5) * static_cast<value_type_t>(M_PI) / restartSteps);
    shifts_host[i] *= 0.5 * ((*shiftUpper) - (*shiftLower));
    shifts_host[i] += 0.5 * ((*shiftUpper) + (*shiftLower));
  }

  // Apply Francis QR algorithm to implicitly restart Lanczos.
  // Each Francis step consumes two shifts, hence the stride of 2.
  for (i = 0; i < restartSteps; i += 2)
    if (francisQRIteration(
          iter, shifts_host[i], shifts_host[i + 1], alpha_host, beta_host, V_host, work_host))
      WARNING("error in implicitly shifted QR algorithm");

  // Obtain new residual: first upload the accumulated transform V
  RAFT_CUDA_TRY(cudaMemcpyAsync(
    V_dev, V_host, iter * iter * sizeof(value_type_t), cudaMemcpyHostToDevice, stream));

  // Residual update combines the old residual and the transformed basis
  // (coefficients taken from the last row of V)
  beta_host[iter - 1] = beta_host[iter - 1] * V_host[IDX(iter - 1, iter_new - 1, iter)];
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemv(cublas_h,
                                                   CUBLAS_OP_N,
                                                   n,
                                                   iter,
                                                   beta_host + iter_new - 1,
                                                   lanczosVecs_dev,
                                                   n,
                                                   V_dev + IDX(0, iter_new, iter),
                                                   1,
                                                   beta_host + iter - 1,
                                                   lanczosVecs_dev + IDX(0, iter, n),
                                                   1,
                                                   stream));

  // Obtain new Lanczos vectors: work = lanczosVecs * V(:, 0:iter_new)
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_h,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_OP_N,
                                                   n,
                                                   iter_new,
                                                   iter,
                                                   &one,
                                                   lanczosVecs_dev,
                                                   n,
                                                   V_dev,
                                                   iter,
                                                   &zero,
                                                   work_dev,
                                                   n,
                                                   stream));

  RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev,
                                work_dev,
                                n * iter_new * sizeof(value_type_t),
                                cudaMemcpyDeviceToDevice,
                                stream));

  // Normalize residual to obtain new Lanczos vector: move the residual
  // column into slot iter_new, then scale it to unit norm
  RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + IDX(0, iter_new, n),
                                lanczosVecs_dev + IDX(0, iter, n),
                                n * sizeof(value_type_t),
                                cudaMemcpyDeviceToDevice,
                                stream));

  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasnrm2(
    cublas_h, n, lanczosVecs_dev + IDX(0, iter_new, n), 1, beta_host + iter_new - 1, stream));

  auto h_beta = 1 / beta_host[iter_new - 1];
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasscal(
    cublas_h, n, &h_beta, lanczosVecs_dev + IDX(0, iter_new, n), 1, stream));

  return 0;
}
/**
 * @brief Compute smallest eigenvectors of symmetric matrix
 *    Computes eigenvalues and eigenvectors that are least
 *    positive. If matrix is positive definite or positive
 *    semidefinite, the computed eigenvalues are smallest in
 *    magnitude.
 *    The largest eigenvalue is estimated by performing several
 *    Lanczos iterations. An implicitly restarted Lanczos method is
 *    then applied to A+s*I, where s is negative the largest
 *    eigenvalue.
 *  @tparam index_type_t the type of data used for indexing.
 *  @tparam value_type_t the type of data used for weights, distances.
 *  @param handle the raft handle.
 *  @param A Matrix.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter Maximum number of Lanczos steps. Does not include
 *    Lanczos steps used to estimate largest eigenvalue.
 *  @param restartIter Maximum size of Lanczos system before
 *    performing an implicit restart. Should be at least 4.
 *  @param tol Convergence tolerance. Lanczos iteration will
 *    terminate when the residual norm is less than tol*theta, where
 *    theta is an estimate for the smallest unwanted eigenvalue
 *    (i.e. the (nEigVecs+1)th smallest eigenvalue).
 *  @param reorthogonalize Whether to reorthogonalize Lanczos
 *    vectors.
 *  @param effIter On exit, pointer to final size of Lanczos system.
 *  @param totalIter On exit, pointer to total number of Lanczos
 *    iterations performed. Does not include Lanczos steps used to
 *    estimate largest eigenvalue.
 *  @param shift On exit, pointer to matrix shift (estimate for
 *    largest eigenvalue).
 *  @param alpha_host (Output, host memory, restartIter entries)
 *    Diagonal entries of Lanczos system.
 *  @param beta_host (Output, host memory, restartIter entries)
 *    Off-diagonal entries of Lanczos system.
 *  @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
 *    entries) Lanczos vectors. Vectors are stored as columns of a
 *    column-major matrix with dimensions n x (restartIter+1).
 *  @param work_dev (Output, device memory,
 *    (n+restartIter)*restartIter entries) Workspace.
 *  @param eigVals_dev (Output, device memory, nEigVecs entries)
 *    Smallest eigenvalues of matrix.
 *  @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 *    Eigenvectors corresponding to smallest eigenvalues of
 *    matrix. Vectors are stored as columns of a column-major matrix
 *    with dimensions n x nEigVecs.
 *  @param seed random seed.
 *  @return error flag.
 */
template <typename index_type_t, typename value_type_t>
int computeSmallestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t* effIter,
  index_type_t* totalIter,
  value_type_t* shift,
  value_type_t* __restrict__ alpha_host,
  value_type_t* __restrict__ beta_host,
  value_type_t* __restrict__ lanczosVecs_dev,
  value_type_t* __restrict__ work_dev,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed)
{
  // Useful constants
  constexpr value_type_t one  = 1;
  constexpr value_type_t zero = 0;

  // Matrix dimension
  index_type_t n = A->nrows_;

  // Interval [shiftLower, shiftUpper] of eigenvalues suppressed during
  // implicit restarts (managed by lanczosRestart)
  value_type_t shiftUpper;
  value_type_t shiftLower;

  // Lanczos iteration counters
  index_type_t maxIter_curr = restartIter;  // Maximum size of Lanczos system

  // Status flags
  int status;

  // Loop index
  index_type_t i;

  // Host memory
  value_type_t* Z_host;     // Eigenvectors in Lanczos basis
  value_type_t* work_host;  // Workspace

  // -------------------------------------------------------
  // Check that parameters are valid
  // -------------------------------------------------------
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // -------------------------------------------------------
  // Variable initialization
  // -------------------------------------------------------

  // Total number of Lanczos iterations
  *totalIter = 0;

  // Allocate host memory
  std::vector<value_type_t> Z_host_v(restartIter * restartIter);
  std::vector<value_type_t> work_host_v(4 * restartIter);

  Z_host    = Z_host_v.data();
  work_host = work_host_v.data();

  // Initialize cuBLAS: scalar arguments (alpha/beta) are host pointers
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // -------------------------------------------------------
  // Compute largest eigenvalue to determine shift
  // -------------------------------------------------------

  // Random number generator
  // NOTE(review): curand status codes are not checked here -- a generator
  // failure would go unnoticed until downstream results look wrong.
  curandGenerator_t randGen;
  // Initialize random number generator
  curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
  curandSetPseudoRandomGeneratorSeed(randGen, seed);

  // Initialize initial Lanczos vector
  // (n + n % 2 rounds the count up to even, as required by the normal
  // distribution generator)
  curandGenerateNormalX(randGen, lanczosVecs_dev, n + n % 2, zero, one);
  value_type_t normQ1;
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasnrm2(cublas_h, n, lanczosVecs_dev, 1, &normQ1, stream));

  auto h_val = 1 / normQ1;
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasscal(cublas_h, n, &h_val, lanczosVecs_dev, 1, stream));

  // Obtain tridiagonal matrix with Lanczos
  *effIter = 0;
  *shift   = 0;
  status   = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 0.0,
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
  if (status) WARNING("error in Lanczos iteration");

  // Determine largest eigenvalue; sterf leaves eigenvalues in ascending
  // order, so the last entry is the largest. Shifting by its negative
  // makes the target (smallest) eigenvalues the largest in magnitude.
  Lapack<value_type_t>::sterf(*effIter, alpha_host, beta_host);
  *shift = -alpha_host[*effIter - 1];

  // -------------------------------------------------------
  // Compute eigenvectors of shifted matrix
  // -------------------------------------------------------

  // Obtain tridiagonal matrix with Lanczos (restarted from scratch on
  // the shifted matrix A + shift*I)
  *effIter = 0;
  status   = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 0,
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
  if (status) WARNING("error in Lanczos iteration");
  *totalIter += *effIter;

  // Apply Lanczos method until convergence
  // (shiftLower > shiftUpper is the sentinel telling lanczosRestart the
  // suppression interval is not yet initialized)
  shiftLower = 1;
  shiftUpper = -1;
  while (*totalIter < maxIter && beta_host[*effIter - 1] > tol * shiftLower) {
    // Determine number of restart steps
    // Number of steps must be even due to Francis algorithm
    index_type_t iter_new = nEigVecs + 1;
    if (restartIter - (maxIter - *totalIter) > nEigVecs + 1)
      iter_new = restartIter - (maxIter - *totalIter);
    if ((restartIter - iter_new) % 2) iter_new -= 1;
    if (iter_new == *effIter) break;

    // Implicit restart of Lanczos method
    status = lanczosRestart<index_type_t, value_type_t>(handle,
                                                        n,
                                                        *effIter,
                                                        iter_new,
                                                        &shiftUpper,
                                                        &shiftLower,
                                                        alpha_host,
                                                        beta_host,
                                                        Z_host,
                                                        work_host,
                                                        lanczosVecs_dev,
                                                        work_dev,
                                                        true);
    if (status) WARNING("error in Lanczos implicit restart");
    *effIter = iter_new;

    // Check for convergence
    if (beta_host[*effIter - 1] <= tol * fabs(shiftLower)) break;

    // Proceed with Lanczos method
    status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 tol * fabs(shiftLower),
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
    if (status) WARNING("error in Lanczos iteration");
    *totalIter += *effIter - iter_new;
  }

  // Warning if Lanczos has failed to converge
  if (beta_host[*effIter - 1] > tol * fabs(shiftLower)) {
    WARNING("implicitly restarted Lanczos failed to converge");
  }

  // Solve tridiagonal system. Workspace layout in work_host:
  //   [2*effIter, 3*effIter) = diagonal (eigenvalues on exit)
  //   [3*effIter, 4*effIter) = off-diagonal
  // Z_host receives the eigenvectors in the Lanczos basis.
  memcpy(work_host + 2 * (*effIter), alpha_host, (*effIter) * sizeof(value_type_t));
  memcpy(work_host + 3 * (*effIter), beta_host, (*effIter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::steqr('I',
                              *effIter,
                              work_host + 2 * (*effIter),
                              work_host + 3 * (*effIter),
                              Z_host,
                              *effIter,
                              work_host);

  // Obtain desired eigenvalues by applying shift
  // (undoes the A + shift*I transformation)
  for (i = 0; i < *effIter; ++i)
    work_host[i + 2 * (*effIter)] -= *shift;
  for (i = *effIter; i < nEigVecs; ++i)
    work_host[i + 2 * (*effIter)] = 0;

  // Copy results to device memory
  RAFT_CUDA_TRY(cudaMemcpyAsync(eigVals_dev,
                                work_host + 2 * (*effIter),
                                nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));

  RAFT_CUDA_TRY(cudaMemcpyAsync(work_dev,
                                Z_host,
                                (*effIter) * nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));
  RAFT_CHECK_CUDA(stream);

  // Convert eigenvectors from Lanczos basis to standard basis:
  // eigVecs = lanczosVecs (n x effIter) * Z (effIter x nEigVecs)
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_h,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_OP_N,
                                                   n,
                                                   nEigVecs,
                                                   *effIter,
                                                   &one,
                                                   lanczosVecs_dev,
                                                   n,
                                                   work_dev,
                                                   *effIter,
                                                   &zero,
                                                   eigVecs_dev,
                                                   n,
                                                   stream));

  // Clean up and exit
  curandDestroyGenerator(randGen);
  return 0;
}
/**
 * @brief Convenience overload of computeSmallestEigenvectors that owns its
 * own workspace. Validates parameters, allocates host buffers for the
 * tridiagonal Lanczos system plus device buffers for the Lanczos vectors
 * and scratch space, then delegates to the low-level pointer overload.
 * @param iter On exit, total number of Lanczos iterations performed.
 * @return error flag from the underlying solver.
 */
template <typename index_type_t, typename value_type_t>
int computeSmallestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 1234567)
{
  // Problem size and parameter sanity checks (mirrors the pointer overload)
  index_type_t n = A.nrows_;
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  // Host-side storage for the tridiagonal Lanczos system
  std::vector<value_type_t> diag(restartIter);
  std::vector<value_type_t> offdiag(restartIter);

  // Device-side Lanczos vectors and scratch workspace
  spectral::matrix::vector_t<value_type_t> lanczos_vecs(handle, n * (restartIter + 1));
  spectral::matrix::vector_t<value_type_t> scratch(handle, (n + restartIter) * restartIter);

  // Delegate to the low-level implementation; the final system size and
  // the computed matrix shift are not surfaced by this overload.
  index_type_t final_size{};
  value_type_t matrix_shift{};
  return computeSmallestEigenvectors(handle,
                                     &A,
                                     nEigVecs,
                                     maxIter,
                                     restartIter,
                                     tol,
                                     reorthogonalize,
                                     &final_size,
                                     &iter,
                                     &matrix_shift,
                                     diag.data(),
                                     offdiag.data(),
                                     lanczos_vecs.raw(),
                                     scratch.raw(),
                                     eigVals_dev,
                                     eigVecs_dev,
                                     seed);
}
/**
 * @brief Compute largest eigenvectors of symmetric matrix
 *    Computes eigenvalues and eigenvectors that are most
 *    positive. If matrix is positive definite or positive
 *    semidefinite, the computed eigenvalues are largest in
 *    magnitude.
 *    The largest eigenvalue is estimated by performing several
 *    Lanczos iterations. An implicitly restarted Lanczos method is
 *    then applied.
 *  @tparam index_type_t the type of data used for indexing.
 *  @tparam value_type_t the type of data used for weights, distances.
 *  @param handle the raft handle.
 *  @param A Matrix.
 *  @param nEigVecs Number of eigenvectors to compute.
 *  @param maxIter Maximum number of Lanczos steps.
 *  @param restartIter Maximum size of Lanczos system before
 *    performing an implicit restart. Should be at least 4.
 *  @param tol Convergence tolerance. Lanczos iteration will
 *    terminate when the residual norm is less than tol*theta, where
 *    theta is an estimate for the largest unwanted eigenvalue
 *    (i.e. the (nEigVecs+1)th largest eigenvalue).
 *  @param reorthogonalize Whether to reorthogonalize Lanczos
 *    vectors.
 *  @param effIter On exit, pointer to final size of Lanczos system.
 *  @param totalIter On exit, pointer to total number of Lanczos
 *    iterations performed.
 *  @param alpha_host (Output, host memory, restartIter entries)
 *    Diagonal entries of Lanczos system.
 *  @param beta_host (Output, host memory, restartIter entries)
 *    Off-diagonal entries of Lanczos system.
 *  @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
 *    entries) Lanczos vectors. Vectors are stored as columns of a
 *    column-major matrix with dimensions n x (restartIter+1).
 *  @param work_dev (Output, device memory,
 *    (n+restartIter)*restartIter entries) Workspace.
 *  @param eigVals_dev (Output, device memory, nEigVecs entries)
 *    Largest eigenvalues of matrix.
 *  @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
 *    Eigenvectors corresponding to largest eigenvalues of
 *    matrix. Vectors are stored as columns of a column-major matrix
 *    with dimensions n x nEigVecs.
 *  @param seed random seed.
 *  @return error flag.
 */
template <typename index_type_t, typename value_type_t>
int computeLargestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t* effIter,
  index_type_t* totalIter,
  value_type_t* __restrict__ alpha_host,
  value_type_t* __restrict__ beta_host,
  value_type_t* __restrict__ lanczosVecs_dev,
  value_type_t* __restrict__ work_dev,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed)
{
  // Useful constants
  constexpr value_type_t one  = 1;
  constexpr value_type_t zero = 0;

  // Matrix dimension
  index_type_t n = A->nrows_;

  // Lanczos iteration counters
  index_type_t maxIter_curr = restartIter;  // Maximum size of Lanczos system

  // Status flags
  int status;

  // Loop index
  index_type_t i;

  // Host memory
  value_type_t* Z_host;     // Eigenvectors in Lanczos basis
  value_type_t* work_host;  // Workspace

  // -------------------------------------------------------
  // Check that LAPACK is enabled
  // -------------------------------------------------------
  // Lapack<value_type_t>::check_lapack_enabled();

  // -------------------------------------------------------
  // Check that parameters are valid
  // -------------------------------------------------------
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // -------------------------------------------------------
  // Variable initialization
  // -------------------------------------------------------

  // Total number of Lanczos iterations
  *totalIter = 0;

  // Allocate host memory
  std::vector<value_type_t> Z_host_v(restartIter * restartIter);
  std::vector<value_type_t> work_host_v(4 * restartIter);

  Z_host    = Z_host_v.data();
  work_host = work_host_v.data();

  // Initialize cuBLAS: scalar arguments (alpha/beta) are host pointers
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // -------------------------------------------------------
  // Compute largest eigenvalue
  // -------------------------------------------------------

  // Random number generator
  // NOTE(review): curand status codes are not checked here -- a generator
  // failure would go unnoticed until downstream results look wrong.
  curandGenerator_t randGen;
  // Initialize random number generator
  curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
  curandSetPseudoRandomGeneratorSeed(randGen, seed);

  // Initialize initial Lanczos vector
  // (n + n % 2 rounds the count up to even, as required by the normal
  // distribution generator)
  curandGenerateNormalX(randGen, lanczosVecs_dev, n + n % 2, zero, one);
  value_type_t normQ1;
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasnrm2(cublas_h, n, lanczosVecs_dev, 1, &normQ1, stream));

  auto h_val = 1 / normQ1;
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasscal(cublas_h, n, &h_val, lanczosVecs_dev, 1, stream));

  // Obtain tridiagonal matrix with Lanczos (no spectral shift is needed
  // when seeking the largest eigenvalues)
  *effIter                = 0;
  value_type_t shift_val  = 0.0;
  value_type_t* shift     = &shift_val;
  status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                               A,
                                                               effIter,
                                                               maxIter_curr,
                                                               *shift,
                                                               0,
                                                               reorthogonalize,
                                                               alpha_host,
                                                               beta_host,
                                                               lanczosVecs_dev,
                                                               work_dev);
  if (status) WARNING("error in Lanczos iteration");
  *totalIter += *effIter;

  // Apply Lanczos method until convergence
  // (shiftLower > shiftUpper is the sentinel telling lanczosRestart the
  // suppression interval is not yet initialized)
  value_type_t shiftLower = 1;
  value_type_t shiftUpper = -1;
  while (*totalIter < maxIter && beta_host[*effIter - 1] > tol * shiftLower) {
    // Determine number of restart steps
    // Number of steps must be even due to Francis algorithm
    index_type_t iter_new = nEigVecs + 1;
    if (restartIter - (maxIter - *totalIter) > nEigVecs + 1)
      iter_new = restartIter - (maxIter - *totalIter);
    if ((restartIter - iter_new) % 2) iter_new -= 1;
    if (iter_new == *effIter) break;

    // Implicit restart of Lanczos method (false => suppress the lower,
    // unwanted end of the spectrum)
    status = lanczosRestart<index_type_t, value_type_t>(handle,
                                                        n,
                                                        *effIter,
                                                        iter_new,
                                                        &shiftUpper,
                                                        &shiftLower,
                                                        alpha_host,
                                                        beta_host,
                                                        Z_host,
                                                        work_host,
                                                        lanczosVecs_dev,
                                                        work_dev,
                                                        false);
    if (status) WARNING("error in Lanczos implicit restart");
    *effIter = iter_new;

    // Check for convergence
    if (beta_host[*effIter - 1] <= tol * fabs(shiftLower)) break;

    // Proceed with Lanczos method
    status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 tol * fabs(shiftLower),
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
    if (status) WARNING("error in Lanczos iteration");
    *totalIter += *effIter - iter_new;
  }

  // Warning if Lanczos has failed to converge
  if (beta_host[*effIter - 1] > tol * fabs(shiftLower)) {
    WARNING("implicitly restarted Lanczos failed to converge");
  }

  // Clear the eigenvector matrix before steqr fills it
  for (int i = 0; i < restartIter; ++i) {
    for (int j = 0; j < restartIter; ++j)
      Z_host[i * restartIter + j] = 0;
  }

  // Solve tridiagonal system. Workspace layout in work_host:
  //   [2*effIter, 3*effIter) = diagonal (eigenvalues on exit)
  //   [3*effIter, 4*effIter) = off-diagonal
  // Z_host receives the eigenvectors in the Lanczos basis.
  memcpy(work_host + 2 * (*effIter), alpha_host, (*effIter) * sizeof(value_type_t));
  memcpy(work_host + 3 * (*effIter), beta_host, (*effIter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::steqr('I',
                              *effIter,
                              work_host + 2 * (*effIter),
                              work_host + 3 * (*effIter),
                              Z_host,
                              *effIter,
                              work_host);

  // note: We need to pick the top nEigVecs eigenvalues
  // but effIter can be larger than nEigVecs
  // hence we add an offset for that case, because we want to access the top
  // nEigVecs eigenpairs in the matrix of size effIter. Remember the array is
  // sorted ascending, so no offset is needed in the smallest-eigenvalues
  // case because the first entries are the smallest ones.
  index_type_t top_eigenparis_idx_offset = *effIter - nEigVecs;

  // Debug : print nEigVecs largest eigenvalues
  // for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
  //  std::cout <<*(work_host+(2*(*effIter)+i))<< " ";
  // std::cout <<std::endl;

  // Debug : print nEigVecs largest eigenvectors
  // for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
  //{
  //  for (int j = 0; j < *effIter; ++j)
  //    std::cout <<Z_host[i*(*effIter)+j]<< " ";
  //  std::cout <<std::endl;
  //}

  // Obtain desired eigenvalues by applying shift
  for (i = 0; i < *effIter; ++i)
    work_host[i + 2 * (*effIter)] -= *shift;

  // Zero out the unwanted (smallest) eigenvalues below the offset
  for (i = 0; i < top_eigenparis_idx_offset; ++i)
    work_host[i + 2 * (*effIter)] = 0;

  // Copy results to device memory
  // skip smallest eigenvalue if needed
  RAFT_CUDA_TRY(cudaMemcpyAsync(eigVals_dev,
                                work_host + 2 * (*effIter) + top_eigenparis_idx_offset,
                                nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));

  // skip smallest eigenvector if needed (columns are effIter-long, so the
  // offset is scaled by the leading dimension)
  RAFT_CUDA_TRY(cudaMemcpyAsync(work_dev,
                                Z_host + (top_eigenparis_idx_offset * (*effIter)),
                                (*effIter) * nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));
  RAFT_CHECK_CUDA(stream);

  // Convert eigenvectors from Lanczos basis to standard basis:
  // eigVecs = lanczosVecs (n x effIter) * Z (effIter x nEigVecs)
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_h,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_OP_N,
                                                   n,
                                                   nEigVecs,
                                                   *effIter,
                                                   &one,
                                                   lanczosVecs_dev,
                                                   n,
                                                   work_dev,
                                                   *effIter,
                                                   &zero,
                                                   eigVecs_dev,
                                                   n,
                                                   stream));

  // Clean up and exit
  curandDestroyGenerator(randGen);
  return 0;
}
/**
 * @brief Convenience overload of computeLargestEigenvectors that owns its
 * own workspace. Validates parameters, allocates host buffers for the
 * tridiagonal Lanczos system plus device buffers for the Lanczos vectors
 * and scratch space, then delegates to the low-level pointer overload.
 * @param iter On exit, total number of Lanczos iterations performed.
 * @return error flag from the underlying solver.
 */
template <typename index_type_t, typename value_type_t>
int computeLargestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 123456)
{
  // Problem size and parameter sanity checks (mirrors the pointer overload)
  index_type_t n = A.nrows_;
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  // Host-side storage for the tridiagonal Lanczos system
  std::vector<value_type_t> diag(restartIter);
  std::vector<value_type_t> offdiag(restartIter);

  // Device-side Lanczos vectors and scratch workspace
  spectral::matrix::vector_t<value_type_t> lanczos_vecs(handle, n * (restartIter + 1));
  spectral::matrix::vector_t<value_type_t> scratch(handle, (n + restartIter) * restartIter);

  // Delegate to the low-level implementation; the final system size is
  // not surfaced by this overload.
  index_type_t final_size{};
  return computeLargestEigenvectors(handle,
                                    &A,
                                    nEigVecs,
                                    maxIter,
                                    restartIter,
                                    tol,
                                    reorthogonalize,
                                    &final_size,
                                    &iter,
                                    diag.data(),
                                    offdiag.data(),
                                    lanczos_vecs.raw(),
                                    scratch.raw(),
                                    eigVals_dev,
                                    eigVecs_dev,
                                    seed);
}
} // namespace raft::sparse::solver::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/solver | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/detail/mst_kernels.cuh |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/sparse/solver/detail/mst_utils.cuh>
#include <limits>
#include <raft/util/device_atomics.cuh>
namespace raft::sparse::solver::detail {
// For every vertex (one warp per vertex / CSR row), find the lightest
// non-MST edge that crosses into a different supervertex (color), record
// its edge index in new_mst_edge, and atomically fold its weight into the
// per-color minimum min_edge_color.
// NOTE(review): the shared arrays are sized and indexed for a single warp
// (lane_id in [0, 32)), so this kernel appears to assume blockDim.x == 32
// (one warp per block) -- confirm against the launch configuration.
// NOTE(review): color_index[warp_id] is read before the `warp_id < v`
// bounds check; verify that the grid sizing guarantees warp_id < v, or
// that the buffer is padded.
template <typename vertex_t, typename edge_t, typename alteration_t>
RAFT_KERNEL kernel_min_edge_per_vertex(const edge_t* offsets,
                                       const vertex_t* indices,
                                       const alteration_t* weights,
                                       const vertex_t* color,
                                       const vertex_t* color_index,
                                       edge_t* new_mst_edge,
                                       const bool* mst_edge,
                                       alteration_t* min_edge_color,
                                       const vertex_t v)
{
  edge_t tid       = threadIdx.x + blockIdx.x * blockDim.x;
  unsigned warp_id = tid / 32;  // global warp id == CSR row handled by this warp
  unsigned lane_id = tid % 32;

  __shared__ edge_t min_edge_index[32];
  __shared__ alteration_t min_edge_weight[32];
  __shared__ vertex_t min_color[32];

  // Sentinel "no candidate found" values for this lane's running minimum
  min_edge_index[lane_id]  = std::numeric_limits<edge_t>::max();
  min_edge_weight[lane_id] = std::numeric_limits<alteration_t>::max();
  min_color[lane_id]       = std::numeric_limits<vertex_t>::max();
  __syncthreads();

  vertex_t self_color_idx = color_index[warp_id];
  vertex_t self_color     = color[self_color_idx];

  // find the minimum edge associated per row
  // each thread in warp holds the minimum edge for
  // only the edges that thread scanned
  if (warp_id < v) {
    // one row is associated with one warp
    edge_t row_start = offsets[warp_id];
    edge_t row_end   = offsets[warp_id + 1];

    // assuming one warp per row
    // find min for each thread in warp (stride-32 scan of the row)
    for (edge_t e = row_start + lane_id; e < row_end; e += 32) {
      alteration_t curr_edge_weight = weights[e];
      vertex_t successor_color_idx  = color_index[indices[e]];
      vertex_t successor_color      = color[successor_color_idx];

      // Candidate edges must not already be in the MST and must leave
      // this vertex's supervertex
      if (!mst_edge[e] && self_color != successor_color) {
        if (curr_edge_weight < min_edge_weight[lane_id]) {
          min_color[lane_id]       = successor_color;
          min_edge_weight[lane_id] = curr_edge_weight;
          min_edge_index[lane_id]  = e;
        }
      }
    }
  }
  __syncthreads();

  // reduce across threads in warp
  // each thread in warp holds min edge scanned by itself
  // reduce across all those warps
  // (shared-memory tree reduction; the barrier sits outside the divergent
  // branch so all threads reach it)
  for (int offset = 16; offset > 0; offset >>= 1) {
    if (lane_id < offset) {
      if (min_edge_weight[lane_id] > min_edge_weight[lane_id + offset]) {
        min_color[lane_id]       = min_color[lane_id + offset];
        min_edge_weight[lane_id] = min_edge_weight[lane_id + offset];
        min_edge_index[lane_id]  = min_edge_index[lane_id + offset];
      }
    }
    __syncthreads();
  }

  // min edge may now be found in first thread
  if (lane_id == 0) {
    if (min_edge_weight[0] != std::numeric_limits<alteration_t>::max()) {
      new_mst_edge[warp_id] = min_edge_index[0];

      // atomically set min edge per color
      // takes care of super vertex case
      atomicMin(&min_edge_color[self_color], min_edge_weight[0]);
    }
  }
}
// One thread per vertex: commit a vertex's candidate edge to the MST when
// its (altered) weight equals the minimum recorded for the vertex's
// supervertex color. In the non-symmetrized case a deterministic
// tie-break (keep only the lower-colored endpoint's copy) prevents the
// same undirected edge from being emitted twice.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
RAFT_KERNEL min_edge_per_supervertex(const vertex_t* color,
                                     const vertex_t* color_index,
                                     edge_t* new_mst_edge,
                                     bool* mst_edge,
                                     const vertex_t* indices,
                                     const weight_t* weights,
                                     const alteration_t* altered_weights,
                                     vertex_t* temp_src,
                                     vertex_t* temp_dst,
                                     weight_t* temp_weights,
                                     const alteration_t* min_edge_color,
                                     const vertex_t v,
                                     bool symmetrize_output)
{
  auto tid = get_1D_idx<vertex_t>();
  if (tid < v) {
    vertex_t vertex_color_idx = color_index[tid];
    vertex_t vertex_color     = color[vertex_color_idx];
    edge_t edge_idx           = new_mst_edge[tid];

    // check if valid outgoing edge was found
    // find minimum edge is same as minimum edge of whole supervertex
    // if yes, that is part of mst
    if (edge_idx != std::numeric_limits<edge_t>::max()) {
      // Comparison on altered weights is exact: the alteration step made
      // weights unique, so equality identifies the supervertex's min edge
      alteration_t vertex_weight = altered_weights[edge_idx];

      bool add_edge = false;
      if (min_edge_color[vertex_color] == vertex_weight) {
        add_edge = true;

        auto dst = indices[edge_idx];
        if (!symmetrize_output) {
          auto dst_edge_idx = new_mst_edge[dst];
          auto dst_color    = color[color_index[dst]];

          // vertices added each other
          // only if destination has found an edge
          // the edge points back to source
          // the edge is minimum edge found for dst color
          if (dst_edge_idx != std::numeric_limits<edge_t>::max() && indices[dst_edge_idx] == tid &&
              min_edge_color[dst_color] == altered_weights[dst_edge_idx]) {
            // Both endpoints picked this edge: keep only the copy owned
            // by the lower-colored endpoint
            if (vertex_color > dst_color) { add_edge = false; }
          }
        }

        if (add_edge) {
          temp_src[tid]      = tid;
          temp_dst[tid]      = dst;
          temp_weights[tid]  = weights[edge_idx];  // original (unaltered) weight
          mst_edge[edge_idx] = true;
        }
      }

      // Invalidate the candidate so later passes don't re-add it
      if (!add_edge) { new_mst_edge[tid] = std::numeric_limits<edge_t>::max(); }
    }
  }
}
// One thread per vertex: when symmetric output is requested (or the
// neighbor picked no edge), emit the reverse direction of the vertex's
// committed edge. Reverse edges are written to the second half of the
// temp arrays (offset by v) so they never collide with forward edges.
template <typename vertex_t, typename edge_t, typename weight_t>
RAFT_KERNEL add_reverse_edge(const edge_t* new_mst_edge,
                             const vertex_t* indices,
                             const weight_t* weights,
                             vertex_t* temp_src,
                             vertex_t* temp_dst,
                             weight_t* temp_weights,
                             const vertex_t v,
                             bool symmetrize_output)
{
  auto tid = get_1D_idx<vertex_t>();

  if (tid < v) {
    bool reverse_needed = false;

    edge_t edge_idx = new_mst_edge[tid];
    if (edge_idx != std::numeric_limits<edge_t>::max()) {
      vertex_t neighbor_vertex  = indices[edge_idx];
      edge_t neighbor_edge_idx  = new_mst_edge[neighbor_vertex];

      // if neighbor picked no vertex then reverse edge is
      // definitely needed
      if (neighbor_edge_idx == std::numeric_limits<edge_t>::max()) {
        reverse_needed = true;
      } else {
        // check what vertex the neighbor vertex picked
        if (symmetrize_output) {
          vertex_t neighbor_vertex_neighbor = indices[neighbor_edge_idx];

          // if vertices did not pick each other
          // add a reverse edge
          // (when they did pick each other, both directions are already
          // covered by the two forward entries)
          if (tid != neighbor_vertex_neighbor) { reverse_needed = true; }
        }
      }

      // if reverse was needed, add the edge
      if (reverse_needed) {
        // it is assumed the each vertex only picks one valid min edge
        // per cycle
        // hence, we store at index tid + v for the reverse edge scenario
        temp_src[tid + v]     = neighbor_vertex;
        temp_dst[tid + v]     = tid;
        temp_weights[tid + v] = weights[edge_idx];
      }
    }
  }
}
// executes for newly added mst edges and updates the colors of both vertices to the lower color
// (proposals are written into next_color via atomicMin; update_colors
// later applies them to the color array)
template <typename vertex_t, typename edge_t>
RAFT_KERNEL min_pair_colors(const vertex_t v,
                            const vertex_t* indices,
                            const edge_t* new_mst_edge,
                            const vertex_t* color,
                            const vertex_t* color_index,
                            vertex_t* next_color)
{
  auto i = get_1D_idx<vertex_t>();

  if (i < v) {
    edge_t edge_idx = new_mst_edge[i];

    // Only vertices that committed an edge this round participate
    if (edge_idx != std::numeric_limits<edge_t>::max()) {
      vertex_t neighbor_vertex = indices[edge_idx];
      // vertex_t self_color = color[i];
      vertex_t self_color_idx = color_index[i];
      vertex_t self_color     = color[self_color_idx];

      vertex_t neighbor_color_idx  = color_index[neighbor_vertex];
      vertex_t neighbor_super_color = color[neighbor_color_idx];

      // update my own color as source of edge
      // update neighbour color index directly
      // this will ensure v1 updates supervertex color
      // while v2 will update the color of its supervertex
      // thus, allowing the colors to progress towards 0
      // (the asymmetry -- supervertex color one way, vertex color the
      // other -- is intentional for convergence)
      atomicMin(&next_color[self_color_idx], neighbor_super_color);
      atomicMin(&next_color[neighbor_color_idx], self_color);
    }
  }
}
// For each vertex, adopt the color proposed for its supervertex in
// min_pair_colors when that proposal is smaller than the current color.
// Clearing `done` signals the host loop that another pass is required.
template <typename vertex_t>
RAFT_KERNEL update_colors(const vertex_t v,
                          vertex_t* color,
                          const vertex_t* color_index,
                          const vertex_t* next_color,
                          bool* done)
{
  auto idx = get_1D_idx<vertex_t>();
  if (idx >= v) { return; }

  // Proposal recorded against this vertex's supervertex slot
  vertex_t proposed = next_color[color_index[idx]];

  // Colors only ever decrease; any change forces another iteration
  if (color[idx] > proposed) {
    color[idx] = proposed;
    *done      = false;
  }
}
// point vertices to their final color index
// (chases color/color_index chains until reaching a fixed point, i.e. the
// root supervertex, then rewrites this vertex's index to point straight
// at it -- similar to path compression in union-find)
template <typename vertex_t>
RAFT_KERNEL final_color_indices(const vertex_t v, const vertex_t* color, vertex_t* color_index)
{
  auto i = get_1D_idx<vertex_t>();

  if (i < v) {
    vertex_t self_color_idx = color_index[i];
    vertex_t self_color     = color[self_color_idx];

    // if self color is not equal to self color index,
    // it means self is not supervertex
    // in which case, iterate until we can find
    // parent supervertex
    while (self_color_idx != self_color) {
      self_color_idx = color_index[self_color];
      self_color     = color[self_color_idx];
    }

    // point to new supervertex
    color_index[i] = self_color_idx;
  }
}
// Perturb edge weights so every undirected edge weight becomes unique
// while preserving symmetry (W(u,v) == W(v,u)): each weight receives an
// offset of max * (r[u] + r[v]), which is identical for both directions
// of the same edge. One thread handles one CSR row.
// Consider using the curand device API instead of a precomputed
// random_values array.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
RAFT_KERNEL alteration_kernel(const vertex_t v,
                              const edge_t e,
                              const edge_t* offsets,
                              const vertex_t* indices,
                              const weight_t* weights,
                              alteration_t max,
                              alteration_t* random_values,
                              alteration_t* altered_weights)
{
  auto src = get_1D_idx<vertex_t>();
  if (src >= v) { return; }

  // Walk this vertex's adjacency list and offset every incident weight
  for (auto nz = offsets[src]; nz < offsets[src + 1]; nz++) {
    auto dst           = indices[nz];
    altered_weights[nz] = weights[nz] + max * (random_values[src] + random_values[dst]);
  }
}
// Counts, per block, how many slots of mst_src hold a real edge (i.e. are not
// the max() sentinel) and accumulates the totals into mst_edge_count.
template <typename vertex_t, typename edge_t>
RAFT_KERNEL kernel_count_new_mst_edges(const vertex_t* mst_src,
                                       edge_t* mst_edge_count,
                                       const vertex_t v)
{
  auto tid = get_1D_idx<vertex_t>();
  // count number of new mst edges added
  // NOTE: the predicate folds the bounds check in so that every thread of the
  // block (including tail threads with tid >= v) reaches __syncthreads_count.
  bool predicate = tid < v && (mst_src[tid] != std::numeric_limits<vertex_t>::max());
  vertex_t block_count = __syncthreads_count(predicate);
  // one atomic per block instead of one per thread
  if (threadIdx.x == 0 && block_count > 0) { atomicAdd(mst_edge_count, block_count); }
}
} // namespace raft::sparse::solver::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/solver | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/detail/mst_solver_inl.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <curand.h>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/sparse/solver/detail/mst_kernels.cuh>
#include <raft/sparse/solver/detail/mst_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <thrust/unique.h>
#include <iostream>
namespace raft::sparse::solver {
// curand generator uniform
// Type-overloaded shim: lets templated callers generate uniform randoms into
// a float buffer without spelling the precision-specific cuRAND call.
inline curandStatus_t curand_generate_uniformX(curandGenerator_t generator,
                                               float* outputPtr,
                                               size_t n)
{
  return curandGenerateUniform(generator, outputPtr, n);
}
// Double-precision counterpart of the shim above.
inline curandStatus_t curand_generate_uniformX(curandGenerator_t generator,
                                               double* outputPtr,
                                               size_t n)
{
  return curandGenerateUniformDouble(generator, outputPtr, n);
}
// Constructor: stores the CSR view (offsets/indices/weights are not owned),
// allocates per-vertex and per-edge scratch buffers, caches device limits,
// and initializes the coloring (each vertex starts as its own color unless
// the caller supplies colors via color_ with initialize_colors_ == false).
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
MST_solver<vertex_t, edge_t, weight_t, alteration_t>::MST_solver(raft::resources const& handle_,
                                                                 const edge_t* offsets_,
                                                                 const vertex_t* indices_,
                                                                 const weight_t* weights_,
                                                                 const vertex_t v_,
                                                                 const edge_t e_,
                                                                 vertex_t* color_,
                                                                 cudaStream_t stream_,
                                                                 bool symmetrize_output_,
                                                                 bool initialize_colors_,
                                                                 int iterations_)
  : handle(handle_),
    offsets(offsets_),
    indices(indices_),
    weights(weights_),
    altered_weights(e_, stream_),
    v(v_),
    e(e_),
    color_index(color_),
    color(v_, stream_),
    next_color(v_, stream_),
    min_edge_color(v_, stream_),
    new_mst_edge(v_, stream_),
    mst_edge(e_, stream_),
    // temp buffers hold up to 2*v candidate edges per iteration
    // (both directions when the output is symmetrized)
    temp_src(2 * v_, stream_),
    temp_dst(2 * v_, stream_),
    temp_weights(2 * v_, stream_),
    mst_edge_count(1, stream_),
    prev_mst_edge_count(1, stream_),
    stream(stream_),
    symmetrize_output(symmetrize_output_),
    initialize_colors(initialize_colors_),
    iterations(iterations_)
{
  // cache device launch limits used when sizing grids
  max_blocks = resource::get_device_properties(handle_).maxGridSize[0];
  max_threads = resource::get_device_properties(handle_).maxThreadsPerBlock;
  sm_count = resource::get_device_properties(handle_).multiProcessorCount;
  mst_edge_count.set_value_to_zero_async(stream);
  prev_mst_edge_count.set_value_to_zero_async(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(mst_edge.data(), 0, mst_edge.size() * sizeof(bool), stream));
  // Initially, color holds the vertex id as color
  auto policy = resource::get_thrust_policy(handle);
  if (initialize_colors_) {
    thrust::sequence(policy, color.begin(), color.end(), 0);
    thrust::sequence(policy, color_index, color_index + v, 0);
  } else {
    // caller-provided colors: mirror them into the internal color buffer
    raft::copy(color.data(), color_index, v, stream);
  }
  thrust::sequence(policy, next_color.begin(), next_color.end(), 0);
}
// Boruvka main loop: repeatedly pick each supervertex's minimum outgoing edge,
// append the new edges, and merge components via label propagation, until no
// new edge is found. Returns the MST/MSF as a COO graph sized to the actual
// number of edges found.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
Graph_COO<vertex_t, edge_t, weight_t> MST_solver<vertex_t, edge_t, weight_t, alteration_t>::solve()
{
  RAFT_EXPECTS(v > 0, "0 vertices");
  RAFT_EXPECTS(e > 0, "0 edges");
  RAFT_EXPECTS(offsets != nullptr, "Null offsets.");
  RAFT_EXPECTS(indices != nullptr, "Null indices.");
  RAFT_EXPECTS(weights != nullptr, "Null weights.");
  // Alterating the weights
  // this is done by identifying the lowest cost edge weight gap that is not 0, call this theta.
  // For each edge, add noise that is less than theta. That is, generate a random number in the
  // range [0.0, theta) and add it to each edge weight.
  alteration();
  // worst-case capacity: a tree has v-1 edges, doubled when symmetrized
  auto max_mst_edges = symmetrize_output ? 2 * v - 2 : v - 1;
  Graph_COO<vertex_t, edge_t, weight_t> mst_result(max_mst_edges, stream);
  // Boruvka original formulation says "while more than 1 supervertex remains"
  // Here we adjust it to support disconnected components (spanning forest)
  // track completion with mst_edge_found status and v as upper bound
  auto mst_iterations = iterations > 0 ? iterations : v;
  for (auto i = 0; i < mst_iterations; i++) {
    // Finds the minimum edge from each vertex to the lowest color
    // by working at each vertex of the supervertex
    min_edge_per_vertex();
    // Finds the minimum edge from each supervertex to the lowest color
    min_edge_per_supervertex();
    // check if msf/mst done, count new edges added
    check_termination();
    auto curr_mst_edge_count = mst_edge_count.value(stream);
    RAFT_EXPECTS(curr_mst_edge_count <= max_mst_edges,
                 "Number of edges found by MST is invalid. This may be due to "
                 "loss in precision. Try increasing precision of weights.");
    if (curr_mst_edge_count == prev_mst_edge_count.value(stream)) {
      // exit here when reaching steady state
      break;
    }
    // append the newly found MST edges to the final output
    append_src_dst_pair(mst_result.src.data(), mst_result.dst.data(), mst_result.weights.data());
    // updates colors of vertices by propagating the lower color to the higher
    label_prop(mst_result.src.data(), mst_result.dst.data());
    // copy this iteration's results and store
    prev_mst_edge_count.set_value_async(curr_mst_edge_count, stream);
  }
  // result packaging
  // shrink the output buffers from worst-case capacity to the edges found
  mst_result.n_edges = mst_edge_count.value(stream);
  mst_result.src.resize(mst_result.n_edges, stream);
  mst_result.dst.resize(mst_result.n_edges, stream);
  mst_result.weights.resize(mst_result.n_edges, stream);
  return mst_result;
}
// Absolute gap between the magnitudes of two adjacent weights: ||y| - |x||
template <typename weight_t>
struct alteration_functor {
  __host__ __device__ weight_t operator()(const thrust::tuple<weight_t, weight_t>& t)
  {
    weight_t a = thrust::get<0>(t);
    weight_t b = thrust::get<1>(t);
    // take magnitudes
    if (a < 0) { a = -a; }
    if (b < 0) { b = -b; }
    // ordered difference so the result is non-negative
    return a < b ? b - a : a - b;
  }
};
// Compute the upper bound for the alteration:
// half of the smallest gap between adjacent distinct weight values, so adding
// noise below this bound cannot reorder any two distinct weights.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
alteration_t MST_solver<vertex_t, edge_t, weight_t, alteration_t>::alteration_max()
{
  auto policy = resource::get_thrust_policy(handle);
  rmm::device_uvector<weight_t> tmp(e, stream);
  thrust::device_ptr<const weight_t> weights_ptr(weights);
  thrust::copy(policy, weights_ptr, weights_ptr + e, tmp.begin());
  // sort tmp weights
  thrust::sort(policy, tmp.begin(), tmp.end());
  // remove duplicates (thrust::unique compacts distinct values to the front)
  auto new_end = thrust::unique(policy, tmp.begin(), tmp.end());
  // min(a[i+1]-a[i])/2
  auto begin = thrust::make_zip_iterator(thrust::make_tuple(tmp.begin(), tmp.begin() + 1));
  auto end = thrust::make_zip_iterator(thrust::make_tuple(new_end - 1, new_end));
  // init = first adjacent-unique gap (elements 0 and 1 after compaction).
  // NOTE(review): assumes at least two distinct weights exist; if all weights
  // are equal, element(1) holds stale (equal) data and the result is 0 —
  // confirm whether that degenerate case needs explicit handling.
  auto init = tmp.element(1, stream) - tmp.element(0, stream);
  auto max = thrust::transform_reduce(
    policy, begin, end, alteration_functor<weight_t>(), init, thrust::minimum<weight_t>());
  return max / static_cast<alteration_t>(2);
}
// Compute the alteration to make all undirected edge weight unique
// Preserves weights order
// Fills `altered_weights` with weights + max * (r[src] + r[dst]) using a pool
// of per-vertex uniform random values.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::alteration()
{
  auto nthreads = std::min(v, max_threads);
  auto nblocks = std::min((v + nthreads - 1) / nthreads, max_blocks);
  // maximum alteration that does not change relative weights order
  alteration_t max = alteration_max();
  // pool of rand values (one per vertex)
  rmm::device_uvector<alteration_t> rand_values(v, stream);
  // Random number generator (fixed seed keeps runs reproducible)
  curandGenerator_t randGen;
  // FIX: creation/seeding statuses were previously ignored, unlike the
  // generate/destroy calls below; check them all consistently.
  auto curand_status = curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT);
  RAFT_EXPECTS(curand_status == CURAND_STATUS_SUCCESS, "MST: CURAND init failed");
  curand_status = curandSetPseudoRandomGeneratorSeed(randGen, 1234567);
  RAFT_EXPECTS(curand_status == CURAND_STATUS_SUCCESS, "MST: CURAND seeding failed");
  // Initialize rand values
  curand_status = curand_generate_uniformX(randGen, rand_values.data(), v);
  RAFT_EXPECTS(curand_status == CURAND_STATUS_SUCCESS, "MST: CURAND failed");
  curand_status = curandDestroyGenerator(randGen);
  RAFT_EXPECTS(curand_status == CURAND_STATUS_SUCCESS, "MST: CURAND cleanup failed");
  // Alterate the weights, make all undirected edge weight unique while keeping Wuv == Wvu
  detail::alteration_kernel<<<nblocks, nthreads, 0, stream>>>(
    v, e, offsets, indices, weights, max, rand_values.data(), altered_weights.data());
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// updates colors of vertices by propagating the lower color to the higher,
// iterating until a device-side convergence flag stays set; finishes with a
// path-compression pass so every vertex points at its final supervertex.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::label_prop(vertex_t* mst_src,
                                                                      vertex_t* mst_dst)
{
  // NOTE(review): mst_src/mst_dst are currently unused here; kept to preserve
  // the existing call signature.
  auto min_pair_nthreads = std::min(v, (vertex_t)max_threads);
  auto min_pair_nblocks =
    std::min((v + min_pair_nthreads - 1) / min_pair_nthreads, (vertex_t)max_blocks);
  edge_t* new_mst_edge_ptr = new_mst_edge.data();
  vertex_t* color_ptr = color.data();
  vertex_t* next_color_ptr = next_color.data();
  // device-resident convergence flag; update_colors clears it on any change
  rmm::device_scalar<bool> done(stream);
  done.set_value_to_zero_async(stream);
  bool* done_ptr = done.data();
  const bool true_val = true;
  while (!done.value(stream)) {
    done.set_value_async(true_val, stream);
    // propose smaller colors across each newly added MST edge
    detail::min_pair_colors<<<min_pair_nblocks, min_pair_nthreads, 0, stream>>>(
      v, indices, new_mst_edge_ptr, color_ptr, color_index, next_color_ptr);
    // adopt proposals; resets `done` to false when anything changed
    detail::update_colors<<<min_pair_nblocks, min_pair_nthreads, 0, stream>>>(
      v, color_ptr, color_index, next_color_ptr, done_ptr);
  }
  detail::final_color_indices<<<min_pair_nblocks, min_pair_nthreads, 0, stream>>>(
    v, color_ptr, color_index);
}
// Finds the minimum edge from each vertex to the lowest color
// (one 32-thread block scans each vertex's adjacency list).
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::min_edge_per_vertex()
{
  auto policy = resource::get_thrust_policy(handle);
  // per-color minimum altered weight, reset to the +max sentinel
  thrust::fill(
    policy, min_edge_color.begin(), min_edge_color.end(), std::numeric_limits<alteration_t>::max());
  // per-vertex chosen edge index, reset to the "no edge" sentinel.
  // BUGFIX: new_mst_edge holds edge_t indices and downstream kernels compare
  // against numeric_limits<edge_t>::max(); the previous fill used
  // numeric_limits<weight_t>::max(), relying on an out-of-range
  // floating-point-to-integer conversion (undefined behavior).
  thrust::fill(
    policy, new_mst_edge.begin(), new_mst_edge.end(), std::numeric_limits<edge_t>::max());
  int n_threads = 32;  // one 32-thread block per vertex
  vertex_t* color_ptr = color.data();
  edge_t* new_mst_edge_ptr = new_mst_edge.data();
  bool* mst_edge_ptr = mst_edge.data();
  alteration_t* min_edge_color_ptr = min_edge_color.data();
  alteration_t* altered_weights_ptr = altered_weights.data();
  detail::kernel_min_edge_per_vertex<<<v, n_threads, 0, stream>>>(offsets,
                                                                  indices,
                                                                  altered_weights_ptr,
                                                                  color_ptr,
                                                                  color_index,
                                                                  new_mst_edge_ptr,
                                                                  mst_edge_ptr,
                                                                  min_edge_color_ptr,
                                                                  v);
}
// Finds the minimum edge from each supervertex to the lowest color
// and records the winning (src, dst, weight) triples into the temp buffers
// (slots left at the max() sentinel mean "no edge found").
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::min_edge_per_supervertex()
{
  auto nthreads = std::min(v, max_threads);
  auto nblocks = std::min((v + nthreads - 1) / nthreads, max_blocks);
  auto policy = resource::get_thrust_policy(handle);
  // reset candidate sources to the sentinel so untouched slots are skipped later
  thrust::fill(policy, temp_src.begin(), temp_src.end(), std::numeric_limits<vertex_t>::max());
  vertex_t* color_ptr = color.data();
  edge_t* new_mst_edge_ptr = new_mst_edge.data();
  bool* mst_edge_ptr = mst_edge.data();
  alteration_t* min_edge_color_ptr = min_edge_color.data();
  alteration_t* altered_weights_ptr = altered_weights.data();
  vertex_t* temp_src_ptr = temp_src.data();
  vertex_t* temp_dst_ptr = temp_dst.data();
  weight_t* temp_weights_ptr = temp_weights.data();
  detail::min_edge_per_supervertex<<<nblocks, nthreads, 0, stream>>>(color_ptr,
                                                                     color_index,
                                                                     new_mst_edge_ptr,
                                                                     mst_edge_ptr,
                                                                     indices,
                                                                     weights,
                                                                     altered_weights_ptr,
                                                                     temp_src_ptr,
                                                                     temp_dst_ptr,
                                                                     temp_weights_ptr,
                                                                     min_edge_color_ptr,
                                                                     v,
                                                                     symmetrize_output);
  // the above kernel only adds directed mst edges in the case where
  // a pair of vertices don't pick the same min edge between them
  // so, now we add the reverse edge to make it undirected
  if (symmetrize_output) {
    detail::add_reverse_edge<<<nblocks, nthreads, 0, stream>>>(new_mst_edge_ptr,
                                                               indices,
                                                               weights,
                                                               temp_src_ptr,
                                                               temp_dst_ptr,
                                                               temp_weights_ptr,
                                                               v,
                                                               symmetrize_output);
  }
}
// Count the edges discovered this iteration: scan all 2*v temp_src slots and
// tally those not equal to the max() sentinel into mst_edge_count.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::check_termination()
{
  const vertex_t work_items = 2 * v;  // temp buffers hold up to 2*v candidates
  vertex_t threads = std::min(work_items, (vertex_t)max_threads);
  vertex_t blocks = std::min((work_items + threads - 1) / threads, (vertex_t)max_blocks);
  detail::kernel_count_new_mst_edges<<<blocks, threads, 0, stream>>>(
    temp_src.data(), mst_edge_count.data(), work_items);
}
// Predicate for copy_if: a temp slot holds a real edge iff its src component
// is not the max() sentinel.
template <typename vertex_t, typename weight_t>
struct new_edges_functor {
  __host__ __device__ bool operator()(const thrust::tuple<vertex_t, vertex_t, weight_t>& t)
  {
    // the comparison already yields bool; the old "? true : false" was redundant
    return thrust::get<0>(t) != std::numeric_limits<vertex_t>::max();
  }
};
// Appends this iteration's newly found edges (temp buffers) to the final
// output arrays, starting after the edges accumulated in prior iterations.
template <typename vertex_t, typename edge_t, typename weight_t, typename alteration_t>
void MST_solver<vertex_t, edge_t, weight_t, alteration_t>::append_src_dst_pair(
  vertex_t* mst_src, vertex_t* mst_dst, weight_t* mst_weights)
{
  auto policy = resource::get_thrust_policy(handle);
  edge_t curr_mst_edge_count = prev_mst_edge_count.value(stream);
  // iterator to end of mst edges added to final output in previous iteration
  auto src_dst_zip_end =
    thrust::make_zip_iterator(thrust::make_tuple(mst_src + curr_mst_edge_count,
                                                 mst_dst + curr_mst_edge_count,
                                                 mst_weights + curr_mst_edge_count));
  // iterator to new mst edges found
  auto temp_src_dst_zip_begin = thrust::make_zip_iterator(
    thrust::make_tuple(temp_src.begin(), temp_dst.begin(), temp_weights.begin()));
  auto temp_src_dst_zip_end = thrust::make_zip_iterator(
    thrust::make_tuple(temp_src.end(), temp_dst.end(), temp_weights.end()));
  // copy new mst edges to final output (sentinel slots filtered out;
  // capacity was bounds-checked against max_mst_edges in solve())
  thrust::copy_if(policy,
                  temp_src_dst_zip_begin,
                  temp_src_dst_zip_end,
                  src_dst_zip_end,
                  new_edges_functor<vertex_t, weight_t>());
}
} // namespace raft::sparse::solver
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/solver | rapidsai_public_repos/raft/cpp/include/raft/sparse/solver/detail/mst_utils.cuh |
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <rmm/device_uvector.hpp>
namespace raft::sparse::solver::detail {
// Flat 1-D global thread index.
// NOTE(review): blockIdx.x * blockDim.x is evaluated in unsigned int before
// converting to idx_t, so grids approaching 2^32 threads could overflow —
// confirm launch sizes stay below that.
template <typename idx_t>
__device__ idx_t get_1D_idx()
{
  return blockIdx.x * blockDim.x + threadIdx.x;
}
} // namespace raft::sparse::solver::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/hierarchy/common.h | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use raft/cluster/single_linkage_types.hpp instead.")
#include <raft/cluster/single_linkage_types.hpp>
namespace raft::hierarchy {
// Backward-compatibility aliases: forward the old raft::hierarchy names to
// their new home in raft::cluster.
using raft::cluster::linkage_output;
using raft::cluster::linkage_output_int;
using raft::cluster::linkage_output_int64;
using raft::cluster::LinkageDistance;
}  // namespace raft::hierarchy
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/hierarchy/single_linkage.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft/cluster version instead.")
#include <raft/cluster/single_linkage.cuh>
#include <raft/sparse/hierarchy/common.h>
namespace raft::hierarchy {
// Backward-compatibility alias for the relocated raft::cluster::single_linkage.
using raft::cluster::single_linkage;
}  // namespace raft::hierarchy
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/detail/csr.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace detail {
//@TODO: Pull this out into a separate file
// Inter-batch state for the weakly-connected-components computation: wraps a
// device-side "did any label change this iteration?" flag (not owned).
struct WeakCCState {
 public:
  bool* m;  // device pointer to the convergence flag, written by the kernels
  WeakCCState(bool* m) : m(m) {}
};
// One label-propagation sweep: each thread handles one row (vertex) of the
// batch, pulls the minimum label among itself and its neighbors, and pushes
// its own label to neighbors via atomicMin. Sets *m whenever a label of a
// filter-allowed vertex changed, requesting another sweep.
template <typename Index_, int TPB_X = 256, typename Lambda>
RAFT_KERNEL weak_cc_label_device(Index_* __restrict__ labels,
                                 const Index_* __restrict__ row_ind,
                                 const Index_* __restrict__ row_ind_ptr,
                                 Index_ nnz,
                                 bool* __restrict__ m,
                                 Index_ start_vertex_id,
                                 Index_ batch_size,
                                 Index_ N,
                                 Lambda filter_op)
{
  Index_ tid = threadIdx.x + blockIdx.x * TPB_X;
  Index_ global_id = tid + start_vertex_id;
  if (tid < batch_size && global_id < N) {
    Index_ start = __ldg(row_ind + tid);
    Index_ ci, cj;
    bool ci_mod = false;
    ci = labels[global_id];
    bool ci_allow_prop = filter_op(global_id);
    Index_ end = get_stop_idx(tid, batch_size, nnz, row_ind);
    /// TODO: add one element to row_ind and avoid get_stop_idx
    for (Index_ j = start; j < end; j++) {
      Index_ j_ind = __ldg(row_ind_ptr + j);
      cj = labels[j_ind];
      bool cj_allow_prop = filter_op(j_ind);
      if (ci < cj && ci_allow_prop) {
        // push our smaller label to the neighbor (integer atomicMin only,
        // hence the width-dispatched casts)
        if (sizeof(Index_) == 4)
          atomicMin((int*)(labels + j_ind), ci);
        else if (sizeof(Index_) == 8)
          atomicMin((long long int*)(labels + j_ind), ci);
        if (cj_allow_prop) *m = true;
      } else if (ci > cj && cj_allow_prop) {
        // pull the neighbor's smaller label; written back once after the loop
        ci = cj;
        ci_mod = true;
      }
    }
    if (ci_mod) {
      if (sizeof(Index_) == 4)
        atomicMin((int*)(labels + global_id), ci);
      else if (sizeof(Index_) == 8)
        atomicMin((long long int*)(labels + global_id), ci);
      if (ci_allow_prop) *m = true;
    }
  }
}
// Initializes each vertex's label to its own id + 1 when it passes filter_op,
// or to the MAX_LABEL sentinel (excluded from labeling) otherwise.
template <typename Index_, int TPB_X = 256, typename Lambda>
RAFT_KERNEL weak_cc_init_all_kernel(Index_* labels, Index_ N, Index_ MAX_LABEL, Lambda filter_op)
{
  Index_ tid = threadIdx.x + blockIdx.x * TPB_X;
  if (tid < N) {
    if (filter_op(tid))
      labels[tid] = tid + 1;
    else
      labels[tid] = MAX_LABEL;
  }
}  // weak_cc_init_all_kernel
/**
* @brief Partial calculation of the weakly connected components in the
* context of a batched algorithm: the labels are computed wrt the sub-graph
* represented by the given CSR matrix of dimensions batch_size * N.
* Note that this overwrites the labels array and it is the responsibility of
* the caller to combine the results from different batches
* (cf label/merge_labels.cuh)
*
* @tparam Index_ the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @param labels an array for the output labels
* @param row_ind the compressed row index of the CSR array
* @param row_ind_ptr the row index pointer of the CSR array
* @param nnz the size of row_ind_ptr array
* @param N number of vertices
* @param start_vertex_id the starting vertex index for the current batch
* @param batch_size number of vertices for current batch
* @param state instance of inter-batch state management
* @param stream the cuda stream to use
* @param filter_op an optional filtering function to determine which points
* should get considered for labeling. It gets global indexes (not batch-wide!)
*/
// (See the doc comment above.) Initializes labels for all N vertices, then
// runs label-propagation sweeps over the current batch until the device-side
// change flag stays false.
template <typename Index_, int TPB_X = 256, typename Lambda = auto(Index_)->bool>
void weak_cc_batched(Index_* labels,
                     const Index_* row_ind,
                     const Index_* row_ind_ptr,
                     Index_ nnz,
                     Index_ N,
                     Index_ start_vertex_id,
                     Index_ batch_size,
                     WeakCCState* state,
                     cudaStream_t stream,
                     Lambda filter_op)
{
  ASSERT(sizeof(Index_) == 4 || sizeof(Index_) == 8, "Index_ should be 4 or 8 bytes");
  bool host_m;
  Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
  weak_cc_init_all_kernel<Index_, TPB_X>
    <<<raft::ceildiv(N, Index_(TPB_X)), TPB_X, 0, stream>>>(labels, N, MAX_LABEL, filter_op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // removed the unused n_iters counter that was incremented but never read
  do {
    // clear the change flag, run one sweep, then read the flag back
    RAFT_CUDA_TRY(cudaMemsetAsync(state->m, false, sizeof(bool), stream));
    weak_cc_label_device<Index_, TPB_X>
      <<<raft::ceildiv(batch_size, Index_(TPB_X)), TPB_X, 0, stream>>>(
        labels, row_ind, row_ind_ptr, nnz, state->m, start_vertex_id, batch_size, N, filter_op);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    //** Updating m *
    raft::update_host(&host_m, state->m, 1, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  } while (host_m);
}
}; // namespace detail
}; // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/detail/utils.h | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
namespace sparse {
/**
 * Quantizes ncols to a valid blockdim, which is
 * a multiple of 32.
 *
 * @param[in] ncols number of blocks to quantize
 */
template <typename value_idx>
inline int block_dim(value_idx ncols)
{
  // Smallest supported block size that covers ncols, capped at the
  // 1024-thread CUDA block limit.
  constexpr int candidates[] = {32, 64, 128, 256, 512};
  for (int candidate : candidates) {
    if (ncols <= candidate) { return candidate; }
  }
  return 1024;
}
// add similar semantics for __match_any_sync pre-volta (SM_70)
#if __CUDA_ARCH__ < 700
/**
 * Returns a warp-level mask with 1's for all the threads
 * in the current warp that have the same key.
 *
 * Iteratively claims peer groups: the lowest unclaimed lane broadcasts its
 * key, lanes with a matching key form a group and leave the pool, and each
 * lane exits once its own group is formed.
 *
 * @tparam G type of the key being matched (must be shuffle-able)
 * @param init_mask mask of lanes participating in the call
 * @param key per-lane value to group by
 * @return bitmask of lanes in this lane's peer group
 */
template <typename G>
__device__ __inline__ unsigned int __match_any_sync(unsigned int init_mask, G key)
{
  unsigned int mask = __ballot_sync(init_mask, true);
  unsigned int peer_group = 0;
  bool is_peer;
  do {
    // fetch key of first unclaimed lane and compare with this key
    is_peer = (key == __shfl_sync(mask, key, __ffs(mask) - 1));
    // determine which lanes had a match
    peer_group = __ballot_sync(mask, is_peer);
    // remove lanes with matching keys from the pool
    mask = mask ^ peer_group;
    // quit if we had a match
  } while (!is_peer);
  return peer_group;
}
#endif
// Lane id of the lowest set bit in `peer_group`, i.e. the first lane of the
// peer set (__ffs is 1-based, hence the -1; yields ~0u when peer_group == 0).
__device__ __inline__ unsigned int get_lowest_peer(unsigned int peer_group)
{
  return __ffs(peer_group) - 1;
}
// One block per row: the block's threads cooperatively write 0..ncols-1 into
// that row of the row-major `indices` matrix.
template <typename value_idx>
RAFT_KERNEL iota_fill_block_kernel(value_idx* indices, value_idx ncols)
{
  // Hoisted loop invariant: the row's base offset, computed in 64 bits (as in
  // the original) so large matrices don't overflow the flat index.
  const uint64_t row_offset = static_cast<uint64_t>(blockIdx.x) * static_cast<uint64_t>(ncols);
  // Loop index widened from int to value_idx so 64-bit column counts work.
  for (value_idx i = threadIdx.x; i < ncols; i += blockDim.x) {
    indices[row_offset + i] = i;
  }
}
// Launches one block per row to fill each row of the nrows x ncols row-major
// `indices` matrix with 0..ncols-1.
template <typename value_idx>
void iota_fill(value_idx* indices, value_idx nrows, value_idx ncols, cudaStream_t stream)
{
  int blockdim = block_dim(ncols);  // multiple-of-32 block size covering ncols
  iota_fill_block_kernel<<<nrows, blockdim, 0, stream>>>(indices, ncols);
}
/**
 * @brief Exclusive end offset of CSR row `row`: ind[row + 1] for interior
 * rows, or nnz for the last row (the indptr array here lacks the final entry).
 *
 * FIX: the return type was `int`, silently truncating for 64-bit T; returning
 * T is backward compatible since callers assign the result to T.
 */
template <typename T>
__device__ T get_stop_idx(T row, T m, T nnz, const T* ind)
{
  return (row < (m - 1)) ? ind[row + 1] : nnz;
}
}; // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/detail/cusparse_macros.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#include <raft/core/cusparse_macros.hpp> | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/detail/coo.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#pragma once
namespace raft {
namespace sparse {
namespace detail {
/** @brief A Container object for sparse coordinate. There are two motivations
* behind using a container for COO arrays.
*
* The first motivation is that it simplifies code, rather than always having
* to pass three arrays as function arguments.
*
* The second is more subtle, but much more important. The size
* of the resulting COO from a sparse operation is often not known ahead of time,
* since it depends on the contents of the underlying graph. The COO object can
* allocate the underlying arrays lazily so that the object can be created by the
* user and passed as an output argument in a sparse primitive. The sparse primitive
* would have the responsibility for allocating and populating the output arrays,
* while the original caller still maintains ownership of the underlying memory.
*
* @tparam T: the type of the value array.
* @tparam Index_Type: the type of index array
*
*/
template <typename T, typename Index_Type = int>
class COO {
protected:
rmm::device_uvector<Index_Type> rows_arr;
rmm::device_uvector<Index_Type> cols_arr;
rmm::device_uvector<T> vals_arr;
public:
Index_Type nnz;
Index_Type n_rows;
Index_Type n_cols;
  /**
   * @param stream: CUDA stream to use
   *
   * Creates an empty COO; arrays can be allocated later via allocate().
   */
  COO(cudaStream_t stream)
    : rows_arr(0, stream), cols_arr(0, stream), vals_arr(0, stream), nnz(0), n_rows(0), n_cols(0)
  {
  }
  /**
   * @param rows: coo rows array
   * @param cols: coo cols array
   * @param vals: coo vals array
   * @param nnz: size of the rows/cols/vals arrays
   * @param n_rows: number of rows in the dense matrix
   * @param n_cols: number of cols in the dense matrix
   *
   * NOTE(review): the members are initialized directly from the lvalue
   * references, i.e. by copy-constructing the device_uvectors — confirm the
   * rmm version in use supports that construction (this templated ctor only
   * compiles when instantiated).
   */
  COO(rmm::device_uvector<Index_Type>& rows,
      rmm::device_uvector<Index_Type>& cols,
      rmm::device_uvector<T>& vals,
      Index_Type nnz,
      Index_Type n_rows = 0,
      Index_Type n_cols = 0)
    : rows_arr(rows), cols_arr(cols), vals_arr(vals), nnz(nnz), n_rows(n_rows), n_cols(n_cols)
  {
  }
  /**
   * @param stream: CUDA stream to use
   * @param nnz: size of the rows/cols/vals arrays
   * @param n_rows: number of rows in the dense matrix
   * @param n_cols: number of cols in the dense matrix
   * @param init: initialize arrays with zeros
   */
  COO(cudaStream_t stream,
      Index_Type nnz,
      Index_Type n_rows = 0,
      Index_Type n_cols = 0,
      bool init = true)
    : rows_arr(nnz, stream),
      cols_arr(nnz, stream),
      vals_arr(nnz, stream),
      nnz(nnz),
      n_rows(n_rows),
      n_cols(n_cols)
  {
    // optionally zero the freshly allocated device buffers
    if (init) init_arrays(stream);
  }
  // Zero-fill all three device arrays asynchronously on `stream`.
  void init_arrays(cudaStream_t stream)
  {
    RAFT_CUDA_TRY(
      cudaMemsetAsync(this->rows_arr.data(), 0, this->nnz * sizeof(Index_Type), stream));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(this->cols_arr.data(), 0, this->nnz * sizeof(Index_Type), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(this->vals_arr.data(), 0, this->nnz * sizeof(T), stream));
  }
  // device_uvector members release their memory automatically
  ~COO() {}
  /**
   * @brief Sizes must be non-negative: returns false if nnz, n_rows or
   * n_cols is negative, true otherwise (zero sizes are accepted).
   */
  bool validate_size() const
  {
    if (this->nnz < 0 || n_rows < 0 || n_cols < 0) return false;
    return true;
  }
  /**
   * @brief If the underlying arrays have not been set
   * (any of them has size 0), return false. Otherwise true.
   */
  bool validate_mem() const
  {
    if (this->rows_arr.size() == 0 || this->cols_arr.size() == 0 || this->vals_arr.size() == 0) {
      return false;
    }
    return true;
  }
  /**
   * @brief Returns the (device) rows array
   */
  Index_Type* rows() { return this->rows_arr.data(); }
  /**
   * @brief Returns the (device) cols array
   */
  Index_Type* cols() { return this->cols_arr.data(); }
  /**
   * @brief Returns the (device) vals array
   */
  T* vals() { return this->vals_arr.data(); }
  /**
   * @brief Send human-readable state information to output stream
   *
   * Creates a temporary non-blocking CUDA stream to copy the device arrays to
   * the host for printing, and destroys it afterwards.
   * NOTE(review): RAFT_CUDA_TRY throws on failure, so an exception thrown by
   * raft::arr2Str would leak the temporary stream — acceptable for a
   * debug-only printer, but worth confirming.
   */
  friend std::ostream& operator<<(std::ostream& out, const COO<T, Index_Type>& c)
  {
    if (c.validate_size() && c.validate_mem()) {
      cudaStream_t stream;
      RAFT_CUDA_TRY(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
      out << raft::arr2Str(c.rows_arr.data(), c.nnz, "rows", stream) << std::endl;
      out << raft::arr2Str(c.cols_arr.data(), c.nnz, "cols", stream) << std::endl;
      out << raft::arr2Str(c.vals_arr.data(), c.nnz, "vals", stream) << std::endl;
      out << "nnz=" << c.nnz << std::endl;
      out << "n_rows=" << c.n_rows << std::endl;
      out << "n_cols=" << c.n_cols << std::endl;
      RAFT_CUDA_TRY(cudaStreamDestroy(stream));
    } else {
      out << "Cannot print COO object: Uninitialized or invalid." << std::endl;
    }
    return out;
  }
  /**
   * @brief Set the number of rows and cols
   * @param n_rows: number of rows in the dense matrix
   * @param n_cols: number of columns in the dense matrix
   */
  // NOTE(review): parameters are `int` while the members are Index_Type, so a
  // conversion may occur for non-int Index_Type — confirm intended.
  void setSize(int n_rows, int n_cols)
  {
    this->n_rows = n_rows;
    this->n_cols = n_cols;
  }
/**
* @brief Set the number of rows and cols for a square dense matrix
* @param n: number of rows and cols
*/
void setSize(int n)
{
this->n_rows = n;
this->n_cols = n;
}
  /**
   * @brief Allocate the underlying arrays (row/col counts left at 0)
   * @param nnz: size of underlying row/col/val arrays
   * @param init: should values be initialized to 0?
   * @param stream: CUDA stream to use
   */
  void allocate(int nnz, bool init, cudaStream_t stream) { this->allocate(nnz, 0, init, stream); }
  /**
   * @brief Allocate the underlying arrays for a square dense matrix
   * @param nnz: size of the underlying row/col/val arrays
   * @param size: the number of rows/cols in a square dense matrix
   * @param init: should values be initialized to 0?
   * @param stream: CUDA stream to use
   */
  void allocate(int nnz, int size, bool init, cudaStream_t stream)
  {
    // Delegates to the general overload with n_rows == n_cols == size.
    this->allocate(nnz, size, size, init, stream);
  }
  /**
   * @brief Allocate (or reallocate) the underlying arrays and set the sizes
   * @param nnz: size of the underlying row/col/val arrays
   * @param n_rows: number of rows in the dense matrix
   * @param n_cols: number of columns in the dense matrix
   * @param init: should values be initialized to 0?
   * @param stream: stream to use for init
   */
  void allocate(int nnz, int n_rows, int n_cols, bool init, cudaStream_t stream)
  {
    this->n_rows = n_rows;
    this->n_cols = n_cols;
    this->nnz    = nnz;
    // resize() preserves any existing prefix of the data; the optional
    // init_arrays() below overwrites everything with zeros.
    this->rows_arr.resize(this->nnz, stream);
    this->cols_arr.resize(this->nnz, stream);
    this->vals_arr.resize(this->nnz, stream);
    if (init) init_arrays(stream);
  }
};
}; // namespace detail
}; // namespace sparse
}; // namespace raft
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse.h>
#include <raft/core/cusparse_macros.hpp>
#include <raft/core/error.hpp>
#include <raft/linalg/transpose.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace sparse {
namespace detail {
/**
* @defgroup gather cusparse gather methods
* @{
*/
// Thin wrapper around the generic-API cusparseGather; binds the handle to
// @p stream before the call.
inline cusparseStatus_t cusparsegather(cusparseHandle_t handle,
                                       cusparseDnVecDescr_t vecY,
                                       cusparseSpVecDescr_t vecX,
                                       cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseGather(handle, vecY, vecX);
}
/**
 * @brief Gather @p nnz elements of the dense array @p vals at the positions
 * listed in @p d_P into @p vals_sorted (generic-API replacement for the
 * legacy cusparse<t>gthr). Temporary vector descriptors are created and
 * destroyed around the single cusparseGather call.
 */
template <
  typename T,
  typename std::enable_if_t<std::is_same_v<T, float> || std::is_same_v<T, double>>* = nullptr>
cusparseStatus_t cusparsegthr(
  cusparseHandle_t handle, int nnz, const T* vals, T* vals_sorted, int* d_P, cudaStream_t stream)
{
  // The enable_if above restricts T to float/double, so a ternary suffices to
  // pick the matching cuSPARSE value type.
  auto constexpr value_type = std::is_same_v<T, float> ? CUDA_R_32F : CUDA_R_64F;
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  cusparseDnVecDescr_t dense_descr{};
  cusparseSpVecDescr_t sparse_descr{};
  CUSPARSE_CHECK(
    cusparseCreateDnVec(&dense_descr, nnz, static_cast<void*>(const_cast<T*>(vals)), value_type));
  CUSPARSE_CHECK(cusparseCreateSpVec(&sparse_descr,
                                     nnz,
                                     nnz,
                                     static_cast<void*>(d_P),
                                     static_cast<void*>(vals_sorted),
                                     CUSPARSE_INDEX_32I,
                                     CUSPARSE_INDEX_BASE_ZERO,
                                     value_type));
  cusparseStatus_t status = cusparseGather(handle, dense_descr, sparse_descr);
  CUSPARSE_CHECK(cusparseDestroyDnVec(dense_descr));
  CUSPARSE_CHECK(cusparseDestroySpVec(sparse_descr));
  return status;
}
/** @} */
/**
* @defgroup coo2csr cusparse COO to CSR converter methods
* @{
*/
// Convert COO row indices into a CSR row-pointer array via cusparseXcoo2csr
// (zero-based indexing), after binding the handle to @p stream.
template <typename T>
void cusparsecoo2csr(
  cusparseHandle_t handle, const T* cooRowInd, int nnz, int m, T* csrRowPtr, cudaStream_t stream);
template <>
inline void cusparsecoo2csr(cusparseHandle_t handle,
                            const int* cooRowInd,
                            int nnz,
                            int m,
                            int* csrRowPtr,
                            cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  CUSPARSE_CHECK(cusparseXcoo2csr(handle, cooRowInd, nnz, m, csrRowPtr, CUSPARSE_INDEX_BASE_ZERO));
}
/** @} */
/**
* @defgroup coosort cusparse coo sort methods
* @{
*/
// Query the workspace size (in bytes) needed by cusparseXcoosortByRow.
template <typename T>
size_t cusparsecoosort_bufferSizeExt(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int nnz,
  const T* cooRows,
  const T* cooCols,
  cudaStream_t stream);
template <>
inline size_t cusparsecoosort_bufferSizeExt(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int nnz,
  const int* cooRows,
  const int* cooCols,
  cudaStream_t stream)
{
  size_t val;
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, m, n, nnz, cooRows, cooCols, &val));
  return val;
}
// In-place sort of a COO matrix by row index; P is the permutation applied
// (pre-initialized by the caller), pBuffer the workspace sized by
// cusparsecoosort_bufferSizeExt above.
template <typename T>
void cusparsecoosortByRow(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int nnz,
  T* cooRows,
  T* cooCols,
  T* P,
  void* pBuffer,
  cudaStream_t stream);
template <>
inline void cusparsecoosortByRow(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int nnz,
  int* cooRows,
  int* cooCols,
  int* P,
  void* pBuffer,
  cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  CUSPARSE_CHECK(cusparseXcoosortByRow(handle, m, n, nnz, cooRows, cooCols, P, pBuffer));
}
/** @} */
#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
/**
* @defgroup cusparse Create CSR operations
* @{
*/
/**
 * @brief Create a cuSPARSE generic-API CSR matrix descriptor; each
 * specialization selects the cuSPARSE index width (32-/64-bit) and value type
 * (float/double) matching the C++ pointer types, always zero-based indexing.
 */
template <typename ValueT, typename IndptrType, typename IndicesType>
cusparseStatus_t cusparsecreatecsr(cusparseSpMatDescr_t* spMatDescr,
                                   int64_t rows,
                                   int64_t cols,
                                   int64_t nnz,
                                   IndptrType* csrRowOffsets,
                                   IndicesType* csrColInd,
                                   ValueT* csrValues);
// 32-bit indices, single precision
template <>
inline cusparseStatus_t cusparsecreatecsr(cusparseSpMatDescr_t* spMatDescr,
                                          int64_t rows,
                                          int64_t cols,
                                          int64_t nnz,
                                          int32_t* csrRowOffsets,
                                          int32_t* csrColInd,
                                          float* csrValues)
{
  return cusparseCreateCsr(spMatDescr,
                           rows,
                           cols,
                           nnz,
                           csrRowOffsets,
                           csrColInd,
                           csrValues,
                           CUSPARSE_INDEX_32I,
                           CUSPARSE_INDEX_32I,
                           CUSPARSE_INDEX_BASE_ZERO,
                           CUDA_R_32F);
}
// 32-bit indices, double precision
template <>
inline cusparseStatus_t cusparsecreatecsr(cusparseSpMatDescr_t* spMatDescr,
                                          int64_t rows,
                                          int64_t cols,
                                          int64_t nnz,
                                          int32_t* csrRowOffsets,
                                          int32_t* csrColInd,
                                          double* csrValues)
{
  return cusparseCreateCsr(spMatDescr,
                           rows,
                           cols,
                           nnz,
                           csrRowOffsets,
                           csrColInd,
                           csrValues,
                           CUSPARSE_INDEX_32I,
                           CUSPARSE_INDEX_32I,
                           CUSPARSE_INDEX_BASE_ZERO,
                           CUDA_R_64F);
}
// 64-bit indices, single precision
template <>
inline cusparseStatus_t cusparsecreatecsr(cusparseSpMatDescr_t* spMatDescr,
                                          int64_t rows,
                                          int64_t cols,
                                          int64_t nnz,
                                          int64_t* csrRowOffsets,
                                          int64_t* csrColInd,
                                          float* csrValues)
{
  return cusparseCreateCsr(spMatDescr,
                           rows,
                           cols,
                           nnz,
                           csrRowOffsets,
                           csrColInd,
                           csrValues,
                           CUSPARSE_INDEX_64I,
                           CUSPARSE_INDEX_64I,
                           CUSPARSE_INDEX_BASE_ZERO,
                           CUDA_R_32F);
}
// 64-bit indices, double precision
template <>
inline cusparseStatus_t cusparsecreatecsr(cusparseSpMatDescr_t* spMatDescr,
                                          int64_t rows,
                                          int64_t cols,
                                          int64_t nnz,
                                          int64_t* csrRowOffsets,
                                          int64_t* csrColInd,
                                          double* csrValues)
{
  return cusparseCreateCsr(spMatDescr,
                           rows,
                           cols,
                           nnz,
                           csrRowOffsets,
                           csrColInd,
                           csrValues,
                           CUSPARSE_INDEX_64I,
                           CUSPARSE_INDEX_64I,
                           CUSPARSE_INDEX_BASE_ZERO,
                           CUDA_R_64F);
}
/** @} */
/**
* @defgroup cusparse CreateDnVec operations
* @{
*/
// Create a dense-vector descriptor with the CUDA data type matching T.
template <typename T>
cusparseStatus_t cusparsecreatednvec(cusparseDnVecDescr_t* dnVecDescr, int64_t size, T* values);
template <>
inline cusparseStatus_t cusparsecreatednvec(cusparseDnVecDescr_t* dnVecDescr,
                                            int64_t size,
                                            float* values)
{
  return cusparseCreateDnVec(dnVecDescr, size, values, CUDA_R_32F);
}
template <>
inline cusparseStatus_t cusparsecreatednvec(cusparseDnVecDescr_t* dnVecDescr,
                                            int64_t size,
                                            double* values)
{
  return cusparseCreateDnVec(dnVecDescr, size, values, CUDA_R_64F);
}
/** @} */
/**
* @defgroup cusparse CreateDnMat operations
* @{
*/
// Create a dense-matrix descriptor with the CUDA data type matching T;
// the caller chooses row- vs column-major via @p order.
template <typename T>
cusparseStatus_t cusparsecreatednmat(cusparseDnMatDescr_t* dnMatDescr,
                                     int64_t rows,
                                     int64_t cols,
                                     int64_t ld,
                                     T* values,
                                     cusparseOrder_t order);
template <>
inline cusparseStatus_t cusparsecreatednmat(cusparseDnMatDescr_t* dnMatDescr,
                                            int64_t rows,
                                            int64_t cols,
                                            int64_t ld,
                                            float* values,
                                            cusparseOrder_t order)
{
  return cusparseCreateDnMat(dnMatDescr, rows, cols, ld, values, CUDA_R_32F, order);
}
template <>
inline cusparseStatus_t cusparsecreatednmat(cusparseDnMatDescr_t* dnMatDescr,
                                            int64_t rows,
                                            int64_t cols,
                                            int64_t ld,
                                            double* values,
                                            cusparseOrder_t order)
{
  return cusparseCreateDnMat(dnMatDescr, rows, cols, ld, values, CUDA_R_64F, order);
}
/** @} */
/**
* @defgroup Csrmv cusparse SpMV operations
* @{
*/
// Generic-API SpMV wrappers: the *_buffersize variant queries the workspace
// size for a subsequent cusparsespmv call with identical arguments; each
// specialization selects the matching compute type and binds the handle to
// @p stream.
template <typename T>
cusparseStatus_t cusparsespmv_buffersize(cusparseHandle_t handle,
                                         cusparseOperation_t opA,
                                         const T* alpha,
                                         const cusparseSpMatDescr_t matA,
                                         const cusparseDnVecDescr_t vecX,
                                         const T* beta,
                                         const cusparseDnVecDescr_t vecY,
                                         cusparseSpMVAlg_t alg,
                                         size_t* bufferSize,
                                         cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsespmv_buffersize(cusparseHandle_t handle,
                                                cusparseOperation_t opA,
                                                const float* alpha,
                                                const cusparseSpMatDescr_t matA,
                                                const cusparseDnVecDescr_t vecX,
                                                const float* beta,
                                                const cusparseDnVecDescr_t vecY,
                                                cusparseSpMVAlg_t alg,
                                                size_t* bufferSize,
                                                cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMV_bufferSize(
    handle, opA, alpha, matA, vecX, beta, vecY, CUDA_R_32F, alg, bufferSize);
}
template <>
inline cusparseStatus_t cusparsespmv_buffersize(cusparseHandle_t handle,
                                                cusparseOperation_t opA,
                                                const double* alpha,
                                                const cusparseSpMatDescr_t matA,
                                                const cusparseDnVecDescr_t vecX,
                                                const double* beta,
                                                const cusparseDnVecDescr_t vecY,
                                                cusparseSpMVAlg_t alg,
                                                size_t* bufferSize,
                                                cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMV_bufferSize(
    handle, opA, alpha, matA, vecX, beta, vecY, CUDA_R_64F, alg, bufferSize);
}
// y = alpha * op(A) * x + beta * y using the workspace sized above.
template <typename T>
cusparseStatus_t cusparsespmv(cusparseHandle_t handle,
                              cusparseOperation_t opA,
                              const T* alpha,
                              const cusparseSpMatDescr_t matA,
                              const cusparseDnVecDescr_t vecX,
                              const T* beta,
                              const cusparseDnVecDescr_t vecY,
                              cusparseSpMVAlg_t alg,
                              T* externalBuffer,
                              cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsespmv(cusparseHandle_t handle,
                                     cusparseOperation_t opA,
                                     const float* alpha,
                                     const cusparseSpMatDescr_t matA,
                                     const cusparseDnVecDescr_t vecX,
                                     const float* beta,
                                     const cusparseDnVecDescr_t vecY,
                                     cusparseSpMVAlg_t alg,
                                     float* externalBuffer,
                                     cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMV(handle, opA, alpha, matA, vecX, beta, vecY, CUDA_R_32F, alg, externalBuffer);
}
template <>
inline cusparseStatus_t cusparsespmv(cusparseHandle_t handle,
                                     cusparseOperation_t opA,
                                     const double* alpha,
                                     const cusparseSpMatDescr_t matA,
                                     const cusparseDnVecDescr_t vecX,
                                     const double* beta,
                                     const cusparseDnVecDescr_t vecY,
                                     cusparseSpMVAlg_t alg,
                                     double* externalBuffer,
                                     cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMV(handle, opA, alpha, matA, vecX, beta, vecY, CUDA_R_64F, alg, externalBuffer);
}
/** @} */
#else
/**
* @defgroup Csrmv cusparse csrmv operations
* @{
*/
// Legacy csrmv wrappers (pre-generic-API toolkits only; see the #if above):
// y = alpha * op(A) * x + beta * y for a CSR matrix A.
template <typename T>
cusparseStatus_t cusparsecsrmv(  // NOLINT
  cusparseHandle_t handle,
  cusparseOperation_t trans,
  int m,
  int n,
  int nnz,
  const T* alpha,
  const cusparseMatDescr_t descr,
  const T* csrVal,
  const int* csrRowPtr,
  const int* csrColInd,
  const T* x,
  const T* beta,
  T* y,
  cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsecsrmv(cusparseHandle_t handle,
                                      cusparseOperation_t trans,
                                      int m,
                                      int n,
                                      int nnz,
                                      const float* alpha,
                                      const cusparseMatDescr_t descr,
                                      const float* csrVal,
                                      const int* csrRowPtr,
                                      const int* csrColInd,
                                      const float* x,
                                      const float* beta,
                                      float* y,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseScsrmv(
    handle, trans, m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y);
}
template <>
inline cusparseStatus_t cusparsecsrmv(cusparseHandle_t handle,
                                      cusparseOperation_t trans,
                                      int m,
                                      int n,
                                      int nnz,
                                      const double* alpha,
                                      const cusparseMatDescr_t descr,
                                      const double* csrVal,
                                      const int* csrRowPtr,
                                      const int* csrColInd,
                                      const double* x,
                                      const double* beta,
                                      double* y,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseDcsrmv(
    handle, trans, m, n, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, beta, y);
}
/** @} */
#endif
#if not defined CUDA_ENFORCE_LOWER and CUDA_VER_10_1_UP
/**
* @defgroup Csrmm cusparse csrmm operations
* @{
*/
// Generic-API SpMM wrappers: C = alpha * op(A) * op(B) + beta * C with sparse
// A and dense B/C. The *_bufferSize variant queries the workspace for a
// subsequent cusparsespmm call with identical arguments.
template <typename T>
cusparseStatus_t cusparsespmm_bufferSize(cusparseHandle_t handle,
                                         cusparseOperation_t opA,
                                         cusparseOperation_t opB,
                                         const T* alpha,
                                         const cusparseSpMatDescr_t matA,
                                         const cusparseDnMatDescr_t matB,
                                         const T* beta,
                                         cusparseDnMatDescr_t matC,
                                         cusparseSpMMAlg_t alg,
                                         size_t* bufferSize,
                                         cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsespmm_bufferSize(cusparseHandle_t handle,
                                                cusparseOperation_t opA,
                                                cusparseOperation_t opB,
                                                const float* alpha,
                                                const cusparseSpMatDescr_t matA,
                                                const cusparseDnMatDescr_t matB,
                                                const float* beta,
                                                cusparseDnMatDescr_t matC,
                                                cusparseSpMMAlg_t alg,
                                                size_t* bufferSize,
                                                cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMM_bufferSize(
    handle, opA, opB, alpha, matA, matB, beta, matC, CUDA_R_32F, alg, bufferSize);
}
template <>
inline cusparseStatus_t cusparsespmm_bufferSize(cusparseHandle_t handle,
                                                cusparseOperation_t opA,
                                                cusparseOperation_t opB,
                                                const double* alpha,
                                                const cusparseSpMatDescr_t matA,
                                                const cusparseDnMatDescr_t matB,
                                                const double* beta,
                                                cusparseDnMatDescr_t matC,
                                                cusparseSpMMAlg_t alg,
                                                size_t* bufferSize,
                                                cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMM_bufferSize(
    handle, opA, opB, alpha, matA, matB, beta, matC, CUDA_R_64F, alg, bufferSize);
}
template <typename T>
inline cusparseStatus_t cusparsespmm(cusparseHandle_t handle,
                                     cusparseOperation_t opA,
                                     cusparseOperation_t opB,
                                     const T* alpha,
                                     const cusparseSpMatDescr_t matA,
                                     const cusparseDnMatDescr_t matB,
                                     const T* beta,
                                     cusparseDnMatDescr_t matC,
                                     cusparseSpMMAlg_t alg,
                                     T* externalBuffer,
                                     cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsespmm(cusparseHandle_t handle,
                                     cusparseOperation_t opA,
                                     cusparseOperation_t opB,
                                     const float* alpha,
                                     const cusparseSpMatDescr_t matA,
                                     const cusparseDnMatDescr_t matB,
                                     const float* beta,
                                     cusparseDnMatDescr_t matC,
                                     cusparseSpMMAlg_t alg,
                                     float* externalBuffer,
                                     cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMM(handle,
                      opA,
                      opB,
                      static_cast<void const*>(alpha),
                      matA,
                      matB,
                      static_cast<void const*>(beta),
                      matC,
                      CUDA_R_32F,
                      alg,
                      static_cast<void*>(externalBuffer));
}
template <>
inline cusparseStatus_t cusparsespmm(cusparseHandle_t handle,
                                     cusparseOperation_t opA,
                                     cusparseOperation_t opB,
                                     const double* alpha,
                                     const cusparseSpMatDescr_t matA,
                                     const cusparseDnMatDescr_t matB,
                                     const double* beta,
                                     cusparseDnMatDescr_t matC,
                                     cusparseSpMMAlg_t alg,
                                     double* externalBuffer,
                                     cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSpMM(handle,
                      opA,
                      opB,
                      static_cast<void const*>(alpha),
                      matA,
                      matB,
                      static_cast<void const*>(beta),
                      matC,
                      CUDA_R_64F,
                      alg,
                      static_cast<void*>(externalBuffer));
}
/** @} */
#else
/**
* @defgroup Csrmm cusparse csrmm operations
* @{
*/
// Legacy csrmm wrappers (pre-generic-API toolkits only; see the #if above):
// Y = alpha * op(A) * X + beta * Y with CSR A and dense X/Y.
template <typename T>
cusparseStatus_t cusparsecsrmm(  // NOLINT
  cusparseHandle_t handle,
  cusparseOperation_t trans,
  int m,
  int n,
  int k,
  int nnz,
  const T* alpha,
  const cusparseMatDescr_t descr,
  const T* csrVal,
  const int* csrRowPtr,
  const int* csrColInd,
  const T* x,
  const int ldx,
  const T* beta,
  T* y,
  const int ldy,
  cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsecsrmm(cusparseHandle_t handle,
                                      cusparseOperation_t trans,
                                      int m,
                                      int n,
                                      int k,
                                      int nnz,
                                      const float* alpha,
                                      const cusparseMatDescr_t descr,
                                      const float* csrVal,
                                      const int* csrRowPtr,
                                      const int* csrColInd,
                                      const float* x,
                                      const int ldx,
                                      const float* beta,
                                      float* y,
                                      const int ldy,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseScsrmm(
    handle, trans, m, n, k, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, ldx, beta, y, ldy);
}
template <>
inline cusparseStatus_t cusparsecsrmm(cusparseHandle_t handle,
                                      cusparseOperation_t trans,
                                      int m,
                                      int n,
                                      int k,
                                      int nnz,
                                      const double* alpha,
                                      const cusparseMatDescr_t descr,
                                      const double* csrVal,
                                      const int* csrRowPtr,
                                      const int* csrColInd,
                                      const double* x,
                                      const int ldx,
                                      const double* beta,
                                      double* y,
                                      const int ldy,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseDcsrmm(
    handle, trans, m, n, k, nnz, alpha, descr, csrVal, csrRowPtr, csrColInd, x, ldx, beta, y, ldy);
}
/** @} */
#endif
/**
* @defgroup Gemmi cusparse gemmi operations
* @{
*/
#if CUDART_VERSION < 12000
// Direct wrappers around the (deprecated, removed in CUDA 12) cusparse<t>gemmi:
// C = alpha * A * B + beta * C with dense A/C and CSC-format sparse B.
// The deprecation warning is suppressed locally via GCC diagnostic pragmas.
template <typename T>
cusparseStatus_t cusparsegemmi(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int k,
  int nnz,
  const T* alpha,
  const T* A,
  int lda,
  const T* cscValB,
  const int* cscColPtrB,
  const int* cscRowIndB,
  const T* beta,
  T* C,
  int ldc,
  cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsegemmi(cusparseHandle_t handle,
                                      int m,
                                      int n,
                                      int k,
                                      int nnz,
                                      const float* alpha,
                                      const float* A,
                                      int lda,
                                      const float* cscValB,
                                      const int* cscColPtrB,
                                      const int* cscRowIndB,
                                      const float* beta,
                                      float* C,
                                      int ldc,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  return cusparseSgemmi(
    handle, m, n, k, nnz, alpha, A, lda, cscValB, cscColPtrB, cscRowIndB, beta, C, ldc);
#pragma GCC diagnostic pop
}
template <>
inline cusparseStatus_t cusparsegemmi(cusparseHandle_t handle,
                                      int m,
                                      int n,
                                      int k,
                                      int nnz,
                                      const double* alpha,
                                      const double* A,
                                      int lda,
                                      const double* cscValB,
                                      const int* cscColPtrB,
                                      const int* cscRowIndB,
                                      const double* beta,
                                      double* C,
                                      int ldc,
                                      cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  return cusparseDgemmi(
    handle, m, n, k, nnz, alpha, A, lda, cscValB, cscColPtrB, cscRowIndB, beta, C, ldc);
#pragma GCC diagnostic pop
}
#else // CUDART >= 12.0
/**
 * @brief Emulation of the removed cusparse<t>gemmi (CUDA >= 12.0):
 * C = alpha * A * B + beta * C with dense column-major A (m x k), CSC-format
 * sparse B (k x n) and dense column-major C (m x n).
 *
 * Implemented as C^T = alpha * B^T * A^T + beta * C^T via cusparseSpMM into a
 * temporary column-major buffer CT, which is then transposed into C.
 *
 * NOTE(review): `beta` is applied against the freshly allocated (uninitialized)
 * CT buffer rather than the caller's C — results for beta != 0 look suspect;
 * confirm callers only use beta == 0.
 */
template <typename T>
cusparseStatus_t cusparsegemmi(  // NOLINT
  cusparseHandle_t handle,
  int m,
  int n,
  int k,
  int nnz,
  const T* alpha,
  const T* A,
  int lda,
  const T* cscValB,
  const int* cscColPtrB,
  const int* cscRowIndB,
  const T* beta,
  T* C,
  int ldc,
  cudaStream_t stream)
{
  static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>, "Unsupported data type");
  cusparseDnMatDescr_t matA;
  cusparseSpMatDescr_t matB;
  cusparseDnMatDescr_t matC;
  // Temporary holding C^T (n x m, column-major).
  rmm::device_uvector<T> CT(m * n, stream);
  auto constexpr math_type = std::is_same_v<T, float> ? CUDA_R_32F : CUDA_R_64F;
  // Create sparse matrix B
  CUSPARSE_CHECK(cusparseCreateCsc(&matB,
                                   k,
                                   n,
                                   nnz,
                                   static_cast<void*>(const_cast<int*>(cscColPtrB)),
                                   static_cast<void*>(const_cast<int*>(cscRowIndB)),
                                   static_cast<void*>(const_cast<T*>(cscValB)),
                                   CUSPARSE_INDEX_32I,
                                   CUSPARSE_INDEX_32I,
                                   CUSPARSE_INDEX_BASE_ZERO,
                                   math_type));
  /**
   * Create dense matrices.
   * Note: Since this is replacing `cusparse_gemmi`, it assumes dense inputs are
   * column-ordered
   */
  CUSPARSE_CHECK(cusparseCreateDnMat(
    &matA, m, k, lda, static_cast<void*>(const_cast<T*>(A)), math_type, CUSPARSE_ORDER_COL));
  CUSPARSE_CHECK(cusparseCreateDnMat(
    &matC, n, m, n, static_cast<void*>(CT.data()), math_type, CUSPARSE_ORDER_COL));
  auto opA         = CUSPARSE_OPERATION_TRANSPOSE;
  auto opB         = CUSPARSE_OPERATION_TRANSPOSE;
  auto alg         = CUSPARSE_SPMM_CSR_ALG1;
  auto buffer_size = std::size_t{};
  CUSPARSE_CHECK(cusparsespmm_bufferSize(
    handle, opB, opA, alpha, matB, matA, beta, matC, alg, &buffer_size, stream));
  // Convert the byte count to an element count, rounding *up*: plain integer
  // division would under-allocate the workspace whenever buffer_size is not a
  // multiple of sizeof(T).
  buffer_size = (buffer_size + sizeof(T) - 1) / sizeof(T);
  rmm::device_uvector<T> external_buffer(buffer_size, stream);
  auto ext_buf = static_cast<T*>(static_cast<void*>(external_buffer.data()));
  auto return_value =
    cusparsespmm(handle, opB, opA, alpha, matB, matA, beta, matC, alg, ext_buf, stream);
  // Transpose CT (n x m) back into the caller's C (m x n).
  raft::resources rhandle;
  raft::linalg::transpose(rhandle, CT.data(), C, n, m, stream);
  // destroy matrix/vector descriptors
  CUSPARSE_CHECK(cusparseDestroyDnMat(matA));
  CUSPARSE_CHECK(cusparseDestroySpMat(matB));
  CUSPARSE_CHECK(cusparseDestroyDnMat(matC));
  return return_value;
}
#endif
/** @} */
/**
* @defgroup csr2coo cusparse CSR to COO converter methods
* @{
*/
// Expand a CSR row-pointer array into explicit COO row indices via
// cusparseXcsr2coo (zero-based indexing).
template <typename T>
void cusparsecsr2coo(  // NOLINT
  cusparseHandle_t handle,
  const int n,
  const int nnz,
  const T* csrRowPtr,
  T* cooRowInd,
  cudaStream_t stream);
template <>
inline void cusparsecsr2coo(cusparseHandle_t handle,
                            const int n,
                            const int nnz,
                            const int* csrRowPtr,
                            int* cooRowInd,
                            cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  CUSPARSE_CHECK(cusparseXcsr2coo(handle, csrRowPtr, nnz, n, cooRowInd, CUSPARSE_INDEX_BASE_ZERO));
}
/** @} */
/**
* @defgroup setpointermode cusparse set pointer mode method
* @{
*/
// Set the cuSPARSE pointer mode (host- vs device-side alpha/beta scalars)
// after binding the handle to @p stream.
// no T dependency...
// template <typename T>
// cusparseStatus_t cusparsesetpointermode(  // NOLINT
//   cusparseHandle_t handle,
//   cusparsePointerMode_t mode,
//   cudaStream_t stream);
// template<>
inline cusparseStatus_t cusparsesetpointermode(cusparseHandle_t handle,
                                               cusparsePointerMode_t mode,
                                               cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseSetPointerMode(handle, mode);
}
/** @} */
/**
* @defgroup Csr2cscEx2 cusparse csr->csc conversion
* @{
*/
// Query the workspace size (in bytes) for a subsequent cusparsecsr2csc call
// with identical arguments (cusparseCsr2cscEx2_bufferSize).
template <typename T>
cusparseStatus_t cusparsecsr2csc_bufferSize(cusparseHandle_t handle,
                                            int m,
                                            int n,
                                            int nnz,
                                            const T* csrVal,
                                            const int* csrRowPtr,
                                            const int* csrColInd,
                                            void* cscVal,
                                            int* cscColPtr,
                                            int* cscRowInd,
                                            cusparseAction_t copyValues,
                                            cusparseIndexBase_t idxBase,
                                            cusparseCsr2CscAlg_t alg,
                                            size_t* bufferSize,
                                            cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsecsr2csc_bufferSize(cusparseHandle_t handle,
                                                   int m,
                                                   int n,
                                                   int nnz,
                                                   const float* csrVal,
                                                   const int* csrRowPtr,
                                                   const int* csrColInd,
                                                   void* cscVal,
                                                   int* cscColPtr,
                                                   int* cscRowInd,
                                                   cusparseAction_t copyValues,
                                                   cusparseIndexBase_t idxBase,
                                                   cusparseCsr2CscAlg_t alg,
                                                   size_t* bufferSize,
                                                   cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseCsr2cscEx2_bufferSize(handle,
                                       m,
                                       n,
                                       nnz,
                                       csrVal,
                                       csrRowPtr,
                                       csrColInd,
                                       cscVal,
                                       cscColPtr,
                                       cscRowInd,
                                       CUDA_R_32F,
                                       copyValues,
                                       idxBase,
                                       alg,
                                       bufferSize);
}
template <>
inline cusparseStatus_t cusparsecsr2csc_bufferSize(cusparseHandle_t handle,
                                                   int m,
                                                   int n,
                                                   int nnz,
                                                   const double* csrVal,
                                                   const int* csrRowPtr,
                                                   const int* csrColInd,
                                                   void* cscVal,
                                                   int* cscColPtr,
                                                   int* cscRowInd,
                                                   cusparseAction_t copyValues,
                                                   cusparseIndexBase_t idxBase,
                                                   cusparseCsr2CscAlg_t alg,
                                                   size_t* bufferSize,
                                                   cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseCsr2cscEx2_bufferSize(handle,
                                       m,
                                       n,
                                       nnz,
                                       csrVal,
                                       csrRowPtr,
                                       csrColInd,
                                       cscVal,
                                       cscColPtr,
                                       cscRowInd,
                                       CUDA_R_64F,
                                       copyValues,
                                       idxBase,
                                       alg,
                                       bufferSize);
}
// CSR -> CSC conversion (equivalently a sparse transpose) via
// cusparseCsr2cscEx2, using the workspace sized by cusparsecsr2csc_bufferSize.
template <typename T>
cusparseStatus_t cusparsecsr2csc(cusparseHandle_t handle,
                                 int m,
                                 int n,
                                 int nnz,
                                 const T* csrVal,
                                 const int* csrRowPtr,
                                 const int* csrColInd,
                                 void* cscVal,
                                 int* cscColPtr,
                                 int* cscRowInd,
                                 cusparseAction_t copyValues,
                                 cusparseIndexBase_t idxBase,
                                 cusparseCsr2CscAlg_t alg,
                                 void* buffer,
                                 cudaStream_t stream);
template <>
inline cusparseStatus_t cusparsecsr2csc(cusparseHandle_t handle,
                                        int m,
                                        int n,
                                        int nnz,
                                        const float* csrVal,
                                        const int* csrRowPtr,
                                        const int* csrColInd,
                                        void* cscVal,
                                        int* cscColPtr,
                                        int* cscRowInd,
                                        cusparseAction_t copyValues,
                                        cusparseIndexBase_t idxBase,
                                        cusparseCsr2CscAlg_t alg,
                                        void* buffer,
                                        cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseCsr2cscEx2(handle,
                            m,
                            n,
                            nnz,
                            csrVal,
                            csrRowPtr,
                            csrColInd,
                            cscVal,
                            cscColPtr,
                            cscRowInd,
                            CUDA_R_32F,
                            copyValues,
                            idxBase,
                            alg,
                            buffer);
}
template <>
inline cusparseStatus_t cusparsecsr2csc(cusparseHandle_t handle,
                                        int m,
                                        int n,
                                        int nnz,
                                        const double* csrVal,
                                        const int* csrRowPtr,
                                        const int* csrColInd,
                                        void* cscVal,
                                        int* cscColPtr,
                                        int* cscRowInd,
                                        cusparseAction_t copyValues,
                                        cusparseIndexBase_t idxBase,
                                        cusparseCsr2CscAlg_t alg,
                                        void* buffer,
                                        cudaStream_t stream)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
  return cusparseCsr2cscEx2(handle,
                            m,
                            n,
                            nnz,
                            csrVal,
                            csrRowPtr,
                            csrColInd,
                            cscVal,
                            cscColPtr,
                            cscRowInd,
                            CUDA_R_64F,
                            copyValues,
                            idxBase,
                            alg,
                            buffer);
}
/** @} */
/**
* @defgroup csrgemm2 cusparse sparse gemm operations
* @{
*/
// Query the workspace size for cusparsecsr2dense. On CUDA >= 11.2 this goes
// through the generic SparseToDense API (temporary descriptors are created
// and destroyed); on older toolkits no workspace is needed and 0 is written.
// NOTE(review): @p descrA and @p stream are unused on the >= 11.2 path (no
// cusparseSetStream here) — confirm this is intentional.
template <typename T>
cusparseStatus_t cusparsecsr2dense_buffersize(cusparseHandle_t handle,
                                              int m,
                                              int n,
                                              int nnz,
                                              const cusparseMatDescr_t descrA,
                                              const T* csrValA,
                                              const int* csrRowPtrA,
                                              const int* csrColIndA,
                                              T* A,
                                              int lda,
                                              size_t* buffer_size,
                                              cudaStream_t stream,
                                              bool row_major = false);
template <>
inline cusparseStatus_t cusparsecsr2dense_buffersize(cusparseHandle_t handle,
                                                     int m,
                                                     int n,
                                                     int nnz,
                                                     const cusparseMatDescr_t descrA,
                                                     const float* csrValA,
                                                     const int* csrRowPtrA,
                                                     const int* csrColIndA,
                                                     float* A,
                                                     int lda,
                                                     size_t* buffer_size,
                                                     cudaStream_t stream,
                                                     bool row_major)
{
#if CUDART_VERSION >= 11020
  cusparseOrder_t order = row_major ? CUSPARSE_ORDER_ROW : CUSPARSE_ORDER_COL;
  cusparseSpMatDescr_t matA;
  cusparsecreatecsr(&matA,
                    static_cast<int64_t>(m),
                    static_cast<int64_t>(n),
                    static_cast<int64_t>(nnz),
                    const_cast<int*>(csrRowPtrA),
                    const_cast<int*>(csrColIndA),
                    const_cast<float*>(csrValA));
  cusparseDnMatDescr_t matB;
  cusparsecreatednmat(&matB,
                      static_cast<int64_t>(m),
                      static_cast<int64_t>(n),
                      static_cast<int64_t>(lda),
                      const_cast<float*>(A),
                      order);
  cusparseStatus_t result = cusparseSparseToDense_bufferSize(
    handle, matA, matB, CUSPARSE_SPARSETODENSE_ALG_DEFAULT, buffer_size);
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroySpMat(matA));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(matB));
#else
  cusparseStatus_t result = CUSPARSE_STATUS_SUCCESS;
  buffer_size[0]          = 0;
#endif
  return result;
}
template <>
inline cusparseStatus_t cusparsecsr2dense_buffersize(cusparseHandle_t handle,
                                                     int m,
                                                     int n,
                                                     int nnz,
                                                     const cusparseMatDescr_t descrA,
                                                     const double* csrValA,
                                                     const int* csrRowPtrA,
                                                     const int* csrColIndA,
                                                     double* A,
                                                     int lda,
                                                     size_t* buffer_size,
                                                     cudaStream_t stream,
                                                     bool row_major)
{
#if CUDART_VERSION >= 11020
  cusparseOrder_t order = row_major ? CUSPARSE_ORDER_ROW : CUSPARSE_ORDER_COL;
  cusparseSpMatDescr_t matA;
  cusparsecreatecsr(&matA,
                    static_cast<int64_t>(m),
                    static_cast<int64_t>(n),
                    static_cast<int64_t>(nnz),
                    const_cast<int*>(csrRowPtrA),
                    const_cast<int*>(csrColIndA),
                    const_cast<double*>(csrValA));
  cusparseDnMatDescr_t matB;
  cusparsecreatednmat(&matB,
                      static_cast<int64_t>(m),
                      static_cast<int64_t>(n),
                      static_cast<int64_t>(lda),
                      const_cast<double*>(A),
                      order);
  cusparseStatus_t result = cusparseSparseToDense_bufferSize(
    handle, matA, matB, CUSPARSE_SPARSETODENSE_ALG_DEFAULT, buffer_size);
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroySpMat(matA));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(matB));
#else
  cusparseStatus_t result = CUSPARSE_STATUS_SUCCESS;
  buffer_size[0]          = 0;
#endif
  return result;
}
// Convert a CSR matrix into a dense matrix A (column-major by default,
// row-major when @p row_major on CUDA >= 11.2). On CUDA >= 11.2 this uses the
// generic SparseToDense API with the workspace from
// cusparsecsr2dense_buffersize; older toolkits fall back to the legacy
// cusparse<t>csr2dense (which ignores @p buffer and @p row_major).
template <typename T>
cusparseStatus_t cusparsecsr2dense(cusparseHandle_t handle,
                                   int m,
                                   int n,
                                   int nnz,
                                   const cusparseMatDescr_t descrA,
                                   const T* csrValA,
                                   const int* csrRowPtrA,
                                   const int* csrColIndA,
                                   T* A,
                                   int lda,
                                   void* buffer,
                                   cudaStream_t stream,
                                   bool row_major = false);
template <>
inline cusparseStatus_t cusparsecsr2dense(cusparseHandle_t handle,
                                          int m,
                                          int n,
                                          int nnz,
                                          const cusparseMatDescr_t descrA,
                                          const float* csrValA,
                                          const int* csrRowPtrA,
                                          const int* csrColIndA,
                                          float* A,
                                          int lda,
                                          void* buffer,
                                          cudaStream_t stream,
                                          bool row_major)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
#if CUDART_VERSION >= 11020
  cusparseOrder_t order = row_major ? CUSPARSE_ORDER_ROW : CUSPARSE_ORDER_COL;
  cusparseSpMatDescr_t matA;
  cusparsecreatecsr(&matA,
                    static_cast<int64_t>(m),
                    static_cast<int64_t>(n),
                    static_cast<int64_t>(nnz),
                    const_cast<int*>(csrRowPtrA),
                    const_cast<int*>(csrColIndA),
                    const_cast<float*>(csrValA));
  cusparseDnMatDescr_t matB;
  cusparsecreatednmat(&matB,
                      static_cast<int64_t>(m),
                      static_cast<int64_t>(n),
                      static_cast<int64_t>(lda),
                      const_cast<float*>(A),
                      order);
  cusparseStatus_t result =
    cusparseSparseToDense(handle, matA, matB, CUSPARSE_SPARSETODENSE_ALG_DEFAULT, buffer);
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroySpMat(matA));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(matB));
  return result;
#else
  return cusparseScsr2dense(handle, m, n, descrA, csrValA, csrRowPtrA, csrColIndA, A, lda);
#endif
}
template <>
inline cusparseStatus_t cusparsecsr2dense(cusparseHandle_t handle,
                                          int m,
                                          int n,
                                          int nnz,
                                          const cusparseMatDescr_t descrA,
                                          const double* csrValA,
                                          const int* csrRowPtrA,
                                          const int* csrColIndA,
                                          double* A,
                                          int lda,
                                          void* buffer,
                                          cudaStream_t stream,
                                          bool row_major)
{
  CUSPARSE_CHECK(cusparseSetStream(handle, stream));
#if CUDART_VERSION >= 11020
  cusparseOrder_t order = row_major ? CUSPARSE_ORDER_ROW : CUSPARSE_ORDER_COL;
  cusparseSpMatDescr_t matA;
  cusparsecreatecsr(&matA,
                    static_cast<int64_t>(m),
                    static_cast<int64_t>(n),
                    static_cast<int64_t>(nnz),
                    const_cast<int*>(csrRowPtrA),
                    const_cast<int*>(csrColIndA),
                    const_cast<double*>(csrValA));
  cusparseDnMatDescr_t matB;
  cusparsecreatednmat(&matB,
                      static_cast<int64_t>(m),
                      static_cast<int64_t>(n),
                      static_cast<int64_t>(lda),
                      const_cast<double*>(A),
                      order);
  cusparseStatus_t result =
    cusparseSparseToDense(handle, matA, matB, CUSPARSE_SPARSETODENSE_ALG_DEFAULT, buffer);
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroySpMat(matA));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(matB));
  return result;
#else
  return cusparseDcsr2dense(handle, m, n, descrA, csrValA, csrRowPtrA, csrColIndA, A, lda);
#endif
}
/** @} */
} // namespace detail
} // namespace sparse
} // namespace raft
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This file is deprecated and will be removed in a future release.
 * Please use raft/sparse/neighbors/knn_graph.cuh instead.
 */
/**
 * DISCLAIMER: this file is deprecated: use raft/sparse/neighbors/knn_graph.cuh instead
 */
#pragma once

// Fix: the old message pointed users at the (itself removed) "sparse/spatial"
// headers; the actual replacement is raft/sparse/neighbors, as included below.
#pragma message(__FILE__                                                  \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/sparse/neighbors version instead.")

#include <raft/sparse/neighbors/knn_graph.cuh>

// Backwards-compatible alias for the relocated implementation.
namespace raft::sparse::selection {
using raft::sparse::neighbors::knn_graph;
}
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/selection/knn.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
* DISCLAIMER: this file is deprecated: use knn.cuh instead
*/
#pragma once

// Fix: the old message pointed users at the (itself removed) "sparse/spatial"
// headers; the actual replacement is raft/sparse/neighbors, as included below.
#pragma message(__FILE__                                                  \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/sparse/neighbors version instead.")

#include <raft/sparse/neighbors/knn.cuh>

// Backwards-compatible alias for the relocated implementation.
namespace raft::sparse::selection {
using raft::sparse::neighbors::brute_force_knn;
} | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/selection/cross_component_nn.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
* DISCLAIMER: this file is deprecated: use cross_component_nn.cuh instead
*/
#pragma once

// Fix: the old message pointed users at the (itself removed) "sparse/spatial"
// headers; the actual replacement is raft/sparse/neighbors, as included below.
#pragma message(__FILE__                                                  \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/sparse/neighbors version instead.")

#include <raft/sparse/neighbors/cross_component_nn.cuh>

// Backwards-compatible aliases for the relocated implementations.
namespace raft::linkage {
using raft::sparse::neighbors::cross_component_nn;
using raft::sparse::neighbors::FixConnectivitiesRedOp;
using raft::sparse::neighbors::get_n_components;
} // namespace raft::linkage | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/mst/mst.cuh |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#pragma message(__FILE__ \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/sparse/solver version instead.")
#include <raft/sparse/mst/mst_solver.cuh>
#include <raft/sparse/solver/mst.cuh>
// Backwards-compatible alias: the MST entry point now lives in raft::sparse::solver.
namespace raft::mst {
using raft::sparse::solver::mst;
} | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/mst/mst.hpp |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft/sparse/solver version instead.")
#include <raft/sparse/mst/mst.cuh>
#include <raft/sparse/mst/mst_solver.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/mst/mst_solver.cuh |
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#pragma message(__FILE__ \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/sparse/solver version instead.")
#include <raft/sparse/solver/mst_solver.cuh>
// Backwards-compatible aliases: these types now live in raft::sparse::solver.
namespace raft {
using raft::sparse::solver::Graph_COO;
}
namespace raft::mst {
using raft::sparse::solver::MST_solver;
} | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/distance.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_DIST_H
#define __SPARSE_DIST_H
#pragma once
#include "detail/common.hpp"
#include <unordered_set>
#include <raft/core/device_csr_matrix.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/distance/detail/bin_distance.cuh>
#include <raft/sparse/distance/detail/ip_distance.cuh>
#include <raft/sparse/distance/detail/l2_distance.cuh>
#include <raft/sparse/distance/detail/lp_distance.cuh>
namespace raft {
namespace sparse {
namespace distance {
// The set of metrics that have a sparse pairwise implementation below; callers
// can consult this before dispatching to pairwiseDistance (which throws on
// anything not listed here).
static const std::unordered_set<raft::distance::DistanceType> supportedDistance{
  raft::distance::DistanceType::L2Expanded,
  raft::distance::DistanceType::L2Unexpanded,
  raft::distance::DistanceType::L2SqrtExpanded,
  raft::distance::DistanceType::L2SqrtUnexpanded,
  raft::distance::DistanceType::InnerProduct,
  raft::distance::DistanceType::L1,
  raft::distance::DistanceType::Canberra,
  raft::distance::DistanceType::Linf,
  raft::distance::DistanceType::LpUnexpanded,
  raft::distance::DistanceType::JaccardExpanded,
  raft::distance::DistanceType::CosineExpanded,
  raft::distance::DistanceType::HellingerExpanded,
  raft::distance::DistanceType::DiceExpanded,
  raft::distance::DistanceType::CorrelationExpanded,
  raft::distance::DistanceType::RusselRaoExpanded,
  raft::distance::DistanceType::HammingUnexpanded,
  raft::distance::DistanceType::JensenShannon,
  raft::distance::DistanceType::KLDivergence};
/**
 * Compute pairwise distances between A and B, using the provided
 * input configuration and distance function. This is a pure dispatch:
 * each case constructs the matching detail:: distance object from
 * `input_config` and writes the dense result into `out`.
 *
 * @tparam value_idx index type
 * @tparam value_t value type
 * @param[out] out dense output array (size A.nrows * B.nrows)
 * @param[in] input_config input argument configuration
 * @param[in] metric distance metric to use; must be one of `supportedDistance`
 * @param[in] metric_arg metric argument (only used for Minkowski/Lp distance)
 *
 * @throws for any metric not listed in `supportedDistance`
 */
template <typename value_idx = int, typename value_t = float>
void pairwiseDistance(value_t* out,
                      detail::distances_config_t<value_idx, value_t> input_config,
                      raft::distance::DistanceType metric,
                      float metric_arg)
{
  switch (metric) {
    case raft::distance::DistanceType::L2Expanded:
      detail::l2_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::L2SqrtExpanded:
      detail::l2_sqrt_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::InnerProduct:
      detail::ip_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::L2Unexpanded:
      detail::l2_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::L2SqrtUnexpanded:
      detail::l2_sqrt_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::L1:
      detail::l1_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::LpUnexpanded:
      // The only metric that consumes metric_arg (the Minkowski exponent p).
      detail::lp_unexpanded_distances_t<value_idx, value_t>(input_config, metric_arg).compute(out);
      break;
    case raft::distance::DistanceType::Linf:
      detail::linf_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::Canberra:
      detail::canberra_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::JaccardExpanded:
      detail::jaccard_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::CosineExpanded:
      detail::cosine_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::HellingerExpanded:
      detail::hellinger_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::DiceExpanded:
      detail::dice_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::CorrelationExpanded:
      detail::correlation_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::RusselRaoExpanded:
      detail::russelrao_expanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::HammingUnexpanded:
      detail::hamming_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::JensenShannon:
      detail::jensen_shannon_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    case raft::distance::DistanceType::KLDivergence:
      detail::kl_divergence_unexpanded_distances_t<value_idx, value_t>(input_config).compute(out);
      break;
    default: THROW("Unsupported distance: %d", metric);
  }
}
/**
 * @defgroup sparse_distance Sparse Pairwise Distance
 * @{
 */

/**
 * @brief Compute pairwise distances between x and y, using the provided
 * input configuration and distance function.
 *
 * @code{.cpp}
 * #include <raft/core/device_resources.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 * #include <raft/core/device_mdspan.hpp>
 *
 * int x_n_rows = 100000;
 * int y_n_rows = 50000;
 * int n_cols = 10000;
 *
 * raft::device_resources handle;
 * auto x = raft::make_device_csr_matrix<float>(handle, x_n_rows, n_cols);
 * auto y = raft::make_device_csr_matrix<float>(handle, y_n_rows, n_cols);
 *
 * ...
 * // populate data
 * ...
 *
 * auto out = raft::make_device_matrix<float>(handle, x_n_rows, y_n_rows);
 * auto metric = raft::distance::DistanceType::L2Expanded;
 * raft::sparse::distance::pairwise_distance(handle, x.view(), y.view(), out, metric);
 * @endcode
 *
 * @tparam DeviceCSRMatrix raft::device_csr_matrix or raft::device_csr_matrix_view
 * @tparam ElementType data-type of inputs and output
 * @tparam IndexType data-type for indexing
 *
 * @param[in] handle raft::resources
 * @param[in] x raft::device_csr_matrix_view
 * @param[in] y raft::device_csr_matrix_view
 * @param[out] dist raft::device_matrix_view dense matrix, shape (x rows, y rows)
 * @param[in] metric distance metric to use
 * @param[in] metric_arg metric argument (used for Minkowski distance)
 */
template <typename DeviceCSRMatrix,
          typename ElementType,
          typename IndexType,
          typename = std::enable_if_t<raft::is_device_csr_matrix_view_v<DeviceCSRMatrix>>>
void pairwise_distance(raft::resources const& handle,
                       DeviceCSRMatrix x,
                       DeviceCSRMatrix y,
                       raft::device_matrix_view<ElementType, IndexType, raft::row_major> dist,
                       raft::distance::DistanceType metric,
                       float metric_arg = 2.0f)
{
  auto x_structure = x.structure_view();
  auto y_structure = y.structure_view();

  // Validate that the feature dimensions agree and the output is (x rows, y rows).
  RAFT_EXPECTS(x_structure.get_n_cols() == y_structure.get_n_cols(),
               "Number of columns must be equal");
  RAFT_EXPECTS(dist.extent(0) == x_structure.get_n_rows(),
               "Number of rows in output must be equal to "
               "number of rows in X");
  RAFT_EXPECTS(dist.extent(1) == y_structure.get_n_rows(),
               "Number of columns in output must be equal to "
               "number of rows in Y");

  // Translate the mdspan/csr views into the raw-pointer config the detail
  // implementations consume. The const_casts are needed because the config
  // stores mutable pointers; NOTE(review): some metrics appear to assume they
  // may transform values in place -- confirm before passing read-only data.
  detail::distances_config_t<IndexType, ElementType> input_config(handle);
  input_config.a_nrows = x_structure.get_n_rows();
  input_config.a_ncols = x_structure.get_n_cols();
  input_config.a_nnz = x_structure.get_nnz();
  input_config.a_indptr = const_cast<IndexType*>(x_structure.get_indptr().data());
  input_config.a_indices = const_cast<IndexType*>(x_structure.get_indices().data());
  input_config.a_data = const_cast<ElementType*>(x.get_elements().data());
  input_config.b_nrows = y_structure.get_n_rows();
  input_config.b_ncols = y_structure.get_n_cols();
  input_config.b_nnz = y_structure.get_nnz();
  input_config.b_indptr = const_cast<IndexType*>(y_structure.get_indptr().data());
  input_config.b_indices = const_cast<IndexType*>(y_structure.get_indices().data());
  input_config.b_data = const_cast<ElementType*>(y.get_elements().data());

  pairwiseDistance(dist.data_handle(), input_config, metric, metric_arg);
}
/** @} */ // end of sparse_distance
}; // namespace distance
}; // namespace sparse
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/utils.cuh | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * Computes the maximum number of columns that can be stored
 * in dense form in shared memory for a block of `tpb` threads and
 * element type `value_t`.
 *
 * The per-block shared-memory budget is reduced by the scratch space the
 * CUB block reduction needs (one value_t slot per warp); everything left
 * over is available for dense columns.
 *
 * @return the maximum number of columns that can be stored in smem
 */
template <typename value_idx, typename value_t, int tpb = 1024>
inline int max_cols_per_block()
{
  const int warps_per_block = tpb / raft::warp_size();
  const int reduction_bytes = warps_per_block * sizeof(value_t);
  const int available_bytes = raft::getSharedMemPerBlock() - reduction_bytes;
  return available_bytes / sizeof(value_t);
}
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/l2_distance.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/spatial/knn/knn.cuh>
#include "common.hpp"
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/distance/detail/ip_distance.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <nvfunctional>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
// @TODO: Move this into sparse prims (coo_norm)
/**
 * Accumulates per-row sums of squared values from a COO matrix:
 * out[row] += data[i]^2 for every nonzero i. Launched with one thread per
 * nonzero; `out` must be zero-initialized by the caller before launch.
 */
template <typename value_idx, typename value_t>
RAFT_KERNEL compute_row_norm_kernel(value_t* out,
                                    const value_idx* __restrict__ coo_rows,
                                    const value_t* __restrict__ data,
                                    value_idx nnz)
{
  value_idx i = blockDim.x * blockIdx.x + threadIdx.x;
  // Atomic because several nonzeros (threads) can share the same row.
  if (i < nnz) { atomicAdd(&out[coo_rows[i]], data[i] * data[i]); }
}
/**
 * Accumulates per-row sums of values from a COO matrix:
 * out[row] += data[i] for every nonzero i. Launched with one thread per
 * nonzero; `out` must be zero-initialized by the caller before launch.
 */
template <typename value_idx, typename value_t>
RAFT_KERNEL compute_row_sum_kernel(value_t* out,
                                   const value_idx* __restrict__ coo_rows,
                                   const value_t* __restrict__ data,
                                   value_idx nnz)
{
  value_idx i = blockDim.x * blockIdx.x + threadIdx.x;
  // Atomic because several nonzeros (threads) can share the same row.
  if (i < nnz) { atomicAdd(&out[coo_rows[i]], data[i]); }
}
/**
 * Finalizes an expanded L2-family distance in place. For each entry of the
 * dense dot-product matrix C (n_rows x n_cols, row-major), combines C[i][j]
 * with the squared row norms Q_sq_norms[i] and R_sq_norms[j] through
 * `expansion_func`. One thread per output element.
 */
template <typename value_idx, typename value_t, typename expansion_f>
RAFT_KERNEL compute_euclidean_warp_kernel(value_t* __restrict__ C,
                                          const value_t* __restrict__ Q_sq_norms,
                                          const value_t* __restrict__ R_sq_norms,
                                          value_idx n_rows,
                                          value_idx n_cols,
                                          expansion_f expansion_func)
{
  // NOTE(review): tid is stored in size_t but the expression is evaluated in
  // 32-bit unsigned arithmetic before widening -- confirm grids stay below
  // 2^32 elements.
  std::size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  value_idx i = tid / n_cols;
  value_idx j = tid % n_cols;
  if (i >= n_rows || j >= n_cols) return;
  value_t dot = C[(size_t)i * n_cols + j];
  // e.g. Euclidean expansion func = -2.0 * dot + q_norm + r_norm
  value_t val = expansion_func(dot, Q_sq_norms[i], R_sq_norms[j]);
  // correct for small instabilities: snap |val| < 1e-4 to exactly zero
  C[(size_t)i * n_cols + j] = val * (fabs(val) >= 0.0001);
}
/**
 * Finalizes Pearson correlation distances in place. For each entry of the
 * dense dot-product matrix C (n_rows x n_cols, row-major), combines the dot
 * product with the row sums (Q_norms/R_norms) and row sums-of-squares
 * (Q_sq_norms/R_sq_norms):
 *
 *   dist = 1 - (n*dot - sum_q*sum_r) /
 *              sqrt((n*qq - sum_q^2) * (n*rr - sum_r^2))
 *
 * where n is the number of features. One thread per output element.
 */
template <typename value_idx, typename value_t>
RAFT_KERNEL compute_correlation_warp_kernel(value_t* __restrict__ C,
                                            const value_t* __restrict__ Q_sq_norms,
                                            const value_t* __restrict__ R_sq_norms,
                                            const value_t* __restrict__ Q_norms,
                                            const value_t* __restrict__ R_norms,
                                            value_idx n_rows,
                                            value_idx n_cols,
                                            value_idx n)
{
  // NOTE(review): tid is stored in size_t but the expression is evaluated in
  // 32-bit unsigned arithmetic before widening -- confirm grids stay below
  // 2^32 elements.
  std::size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  value_idx i = tid / n_cols;
  value_idx j = tid % n_cols;
  if (i >= n_rows || j >= n_cols) return;
  value_t dot = C[(size_t)i * n_cols + j];
  // L1 terms are row sums; L2 terms are row sums-of-squares.
  value_t Q_l1 = Q_norms[i];
  value_t R_l1 = R_norms[j];
  value_t Q_l2 = Q_sq_norms[i];
  value_t R_l2 = R_sq_norms[j];
  value_t numer = n * dot - (Q_l1 * R_l1);
  value_t Q_denom = n * Q_l2 - (Q_l1 * Q_l1);
  value_t R_denom = n * R_l2 - (R_l1 * R_l1);
  value_t val = 1 - (numer / raft::sqrt(Q_denom * R_denom));
  // correct for small instabilities: snap |val| < 1e-4 to exactly zero
  C[(size_t)i * n_cols + j] = val * (fabs(val) >= 0.0001);
}
/**
 * Host-side launcher for compute_euclidean_warp_kernel: applies
 * `expansion_func` to every element of the dense n_rows x n_cols matrix C
 * using the precomputed squared row norms, with one thread per element.
 */
template <typename value_idx, typename value_t, int tpb = 256, typename expansion_f>
void compute_euclidean(value_t* C,
                       const value_t* Q_sq_norms,
                       const value_t* R_sq_norms,
                       value_idx n_rows,
                       value_idx n_cols,
                       cudaStream_t stream,
                       expansion_f expansion_func)
{
  const size_t n_elements = (size_t)n_rows * n_cols;
  const int n_blocks      = raft::ceildiv<size_t>(n_elements, tpb);
  compute_euclidean_warp_kernel<<<n_blocks, tpb, 0, stream>>>(
    C, Q_sq_norms, R_sq_norms, n_rows, n_cols, expansion_func);
}
/**
 * Computes an expanded L2-family distance: accumulates the squared row norms
 * of Q and R from their COO representations, then folds them into the dense
 * m x n dot-product matrix `out` via `expansion_func`
 * (e.g. -2 * dot + q_norm + r_norm for squared Euclidean).
 * All work is ordered on `stream`.
 */
template <typename value_idx, typename value_t, int tpb = 256, typename expansion_f>
void compute_l2(value_t* out,
                const value_idx* Q_coo_rows,
                const value_t* Q_data,
                value_idx Q_nnz,
                const value_idx* R_coo_rows,
                const value_t* R_data,
                value_idx R_nnz,
                value_idx m,
                value_idx n,
                cudaStream_t stream,
                expansion_f expansion_func)
{
  rmm::device_uvector<value_t> Q_sq_norms(m, stream);
  rmm::device_uvector<value_t> R_sq_norms(n, stream);

  // Zero the accumulators on `stream`. The previous code omitted the stream
  // argument, enqueueing the memsets on the default stream, which races with
  // the stream-ordered allocations above and the kernels below.
  RAFT_CUDA_TRY(
    cudaMemsetAsync(Q_sq_norms.data(), 0, Q_sq_norms.size() * sizeof(value_t), stream));
  RAFT_CUDA_TRY(
    cudaMemsetAsync(R_sq_norms.data(), 0, R_sq_norms.size() * sizeof(value_t), stream));

  // One thread per nonzero accumulates the squared row norms.
  compute_row_norm_kernel<<<raft::ceildiv(Q_nnz, tpb), tpb, 0, stream>>>(
    Q_sq_norms.data(), Q_coo_rows, Q_data, Q_nnz);
  compute_row_norm_kernel<<<raft::ceildiv(R_nnz, tpb), tpb, 0, stream>>>(
    R_sq_norms.data(), R_coo_rows, R_data, R_nnz);
  // Surface any launch-configuration errors from the kernels above.
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  compute_euclidean(out, Q_sq_norms.data(), R_sq_norms.data(), m, n, stream, expansion_func);
}
/**
 * Host-side launcher for compute_correlation_warp_kernel: turns the dense
 * n_rows x n_cols dot-product matrix C into correlation distances using the
 * precomputed row sums and row sums-of-squares, one thread per element.
 */
template <typename value_idx, typename value_t, int tpb = 256>
void compute_correlation(value_t* C,
                         const value_t* Q_sq_norms,
                         const value_t* R_sq_norms,
                         const value_t* Q_norms,
                         const value_t* R_norms,
                         value_idx n_rows,
                         value_idx n_cols,
                         value_idx n,
                         cudaStream_t stream)
{
  const size_t n_elements = (size_t)n_rows * n_cols;
  const int n_blocks      = raft::ceildiv<size_t>(n_elements, tpb);
  compute_correlation_warp_kernel<<<n_blocks, tpb, 0, stream>>>(
    C, Q_sq_norms, R_sq_norms, Q_norms, R_norms, n_rows, n_cols, n);
}
/**
 * Computes the moments needed for Pearson correlation distance (row sums and
 * row sums-of-squares of both Q and R, accumulated from their COO
 * representations) and applies the correlation expansion to the dense
 * m x n dot-product matrix `out` in place. All work is ordered on `stream`.
 *
 * @param[inout] out dense dot-product matrix (m x n), overwritten with distances
 * @param[in] Q_coo_rows / Q_data / Q_nnz COO rows, values and nnz of Q
 * @param[in] R_coo_rows / R_data / R_nnz COO rows, values and nnz of R
 * @param[in] m number of rows in Q
 * @param[in] n number of rows in R
 * @param[in] n_cols number of features (columns)
 * @param[in] stream stream to order all work on
 */
template <typename value_idx, typename value_t, int tpb = 256>
void compute_corr(value_t* out,
                  const value_idx* Q_coo_rows,
                  const value_t* Q_data,
                  value_idx Q_nnz,
                  const value_idx* R_coo_rows,
                  const value_t* R_data,
                  value_idx R_nnz,
                  value_idx m,
                  value_idx n,
                  value_idx n_cols,
                  cudaStream_t stream)
{
  // sum_sq for std dev
  rmm::device_uvector<value_t> Q_sq_norms(m, stream);
  rmm::device_uvector<value_t> R_sq_norms(n, stream);

  // sum for mean
  rmm::device_uvector<value_t> Q_norms(m, stream);
  rmm::device_uvector<value_t> R_norms(n, stream);

  // Zero all accumulators on `stream`. The previous code omitted the stream
  // argument, enqueueing these memsets on the default stream, which races
  // with the stream-ordered allocations above and the kernels below.
  RAFT_CUDA_TRY(
    cudaMemsetAsync(Q_sq_norms.data(), 0, Q_sq_norms.size() * sizeof(value_t), stream));
  RAFT_CUDA_TRY(
    cudaMemsetAsync(R_sq_norms.data(), 0, R_sq_norms.size() * sizeof(value_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(Q_norms.data(), 0, Q_norms.size() * sizeof(value_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(R_norms.data(), 0, R_norms.size() * sizeof(value_t), stream));

  // One thread per nonzero accumulates the per-row moments.
  compute_row_norm_kernel<<<raft::ceildiv(Q_nnz, tpb), tpb, 0, stream>>>(
    Q_sq_norms.data(), Q_coo_rows, Q_data, Q_nnz);
  compute_row_norm_kernel<<<raft::ceildiv(R_nnz, tpb), tpb, 0, stream>>>(
    R_sq_norms.data(), R_coo_rows, R_data, R_nnz);

  compute_row_sum_kernel<<<raft::ceildiv(Q_nnz, tpb), tpb, 0, stream>>>(
    Q_norms.data(), Q_coo_rows, Q_data, Q_nnz);
  compute_row_sum_kernel<<<raft::ceildiv(R_nnz, tpb), tpb, 0, stream>>>(
    R_norms.data(), R_coo_rows, R_data, R_nnz);
  // Surface any launch-configuration errors from the kernels above.
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  compute_correlation(out,
                      Q_sq_norms.data(),
                      R_sq_norms.data(),
                      Q_norms.data(),
                      R_norms.data(),
                      m,
                      n,
                      n_cols,
                      stream);
}
/**
 * L2 distance using the expanded form: sum(x_k)^2 + sum(y_k)^2 - 2 * sum(x_k * y_k)
 * The expanded form is more efficient for sparse data.
 */
template <typename value_idx = int, typename value_t = float>
class l2_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit l2_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), ip_dists(config)
  {
  }

  // Writes the dense a_nrows x b_nrows squared-L2 distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Step 1: dot products A * B^T, computed in place into out_dists.
    ip_dists.compute(out_dists);

    // COO view of B, cached by the inner-product computation above.
    value_idx* b_indices = ip_dists.b_rows_coo();
    value_t* b_data = ip_dists.b_data_coo();

    // Expand A's CSR row pointer into explicit COO row indices.
    rmm::device_uvector<value_idx> search_coo_rows(config_->a_nnz,
                                                   resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                      config_->a_nrows,
                                      search_coo_rows.data(),
                                      config_->a_nnz,
                                      resource::get_cuda_stream(config_->handle));

    // Step 2: add the squared row norms to -2 * dot, in place.
    compute_l2(out_dists,
               search_coo_rows.data(),
               config_->a_data,
               config_->a_nnz,
               b_indices,
               b_data,
               config_->b_nnz,
               config_->a_nrows,
               config_->b_nrows,
               resource::get_cuda_stream(config_->handle),
               [] __device__ __host__(value_t dot, value_t q_norm, value_t r_norm) {
                 return -2 * dot + q_norm + r_norm;
               });
  }

  ~l2_expanded_distances_t() = default;

 protected:
  // Borrowed pointer to the caller-owned configuration; must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
  ip_distances_t<value_idx, value_t> ip_dists;
};
/**
 * L2 sqrt distance performing the sqrt operation after the distance computation
 * The expanded form is more efficient for sparse data.
 */
template <typename value_idx = int, typename value_t = float>
class l2_sqrt_expanded_distances_t : public l2_expanded_distances_t<value_idx, value_t> {
 public:
  explicit l2_sqrt_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : l2_expanded_distances_t<value_idx, value_t>(config)
  {
  }

  // Computes squared L2 distances via the base class, then takes the
  // element-wise square root in place.
  void compute(value_t* out_dists) override
  {
    l2_expanded_distances_t<value_idx, value_t>::compute(out_dists);
    // Sqrt Post-processing
    raft::linalg::unaryOp<value_t>(
      out_dists,
      out_dists,
      this->config_->a_nrows * this->config_->b_nrows,
      [] __device__(value_t input) {
        int neg = input < 0 ? -1 : 1;
        // NOTE(review): `abs(input) * neg` algebraically reconstructs `input`,
        // so slightly-negative totals (floating-point error from the expanded
        // form) still reach sqrt as negatives and yield NaN. Possibly
        // `raft::sqrt(abs(input)) * neg` or a clamp to zero was intended --
        // confirm.
        return raft::sqrt(abs(input) * neg);
      },
      resource::get_cuda_stream(this->config_->handle));
  }

  ~l2_sqrt_expanded_distances_t() = default;
};
/**
 * Pearson correlation distance computed from the expanded form: the
 * precomputed dot products are combined with row sums and row
 * sums-of-squares of both inputs (see compute_corr).
 */
template <typename value_idx, typename value_t>
class correlation_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit correlation_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), ip_dists(config)
  {
  }

  // Writes the dense a_nrows x b_nrows correlation-distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Step 1: dot products A * B^T, computed in place into out_dists.
    ip_dists.compute(out_dists);

    // COO view of B, cached by the inner-product computation above.
    value_idx* b_indices = ip_dists.b_rows_coo();
    value_t* b_data = ip_dists.b_data_coo();

    // Expand A's CSR row pointer into explicit COO row indices.
    rmm::device_uvector<value_idx> search_coo_rows(config_->a_nnz,
                                                   resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                      config_->a_nrows,
                                      search_coo_rows.data(),
                                      config_->a_nnz,
                                      resource::get_cuda_stream(config_->handle));

    // Step 2: fold the per-row moments into the dot products, in place.
    compute_corr(out_dists,
                 search_coo_rows.data(),
                 config_->a_data,
                 config_->a_nnz,
                 b_indices,
                 b_data,
                 config_->b_nnz,
                 config_->a_nrows,
                 config_->b_nrows,
                 config_->b_ncols,
                 resource::get_cuda_stream(config_->handle));
  }

  ~correlation_expanded_distances_t() = default;

 protected:
  // Borrowed pointer to the caller-owned configuration; must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
  ip_distances_t<value_idx, value_t> ip_dists;
};
/**
 * Cosine distance using the expanded form: 1 - ( sum(x_k * y_k) / (sqrt(sum(x_k)^2) *
 * sqrt(sum(y_k)^2))) The expanded form is more efficient for sparse data.
 */
template <typename value_idx = int, typename value_t = float>
class cosine_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit cosine_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), workspace(0, resource::get_cuda_stream(config.handle)), ip_dists(config)
  {
  }

  // Writes the dense a_nrows x b_nrows cosine-distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Step 1: dot products A * B^T, computed in place into out_dists.
    ip_dists.compute(out_dists);

    // COO view of B, cached by the inner-product computation above.
    value_idx* b_indices = ip_dists.b_rows_coo();
    value_t* b_data = ip_dists.b_data_coo();

    // Expand A's CSR row pointer into explicit COO row indices.
    rmm::device_uvector<value_idx> search_coo_rows(config_->a_nnz,
                                                   resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                      config_->a_nrows,
                                      search_coo_rows.data(),
                                      config_->a_nnz,
                                      resource::get_cuda_stream(config_->handle));

    // Step 2: normalize the dot products by the row-norm product, in place.
    // The expansion lambda receives squared row norms (q_norm, r_norm).
    compute_l2(out_dists,
               search_coo_rows.data(),
               config_->a_data,
               config_->a_nnz,
               b_indices,
               b_data,
               config_->b_nnz,
               config_->a_nrows,
               config_->b_nrows,
               resource::get_cuda_stream(config_->handle),
               [] __device__ __host__(value_t dot, value_t q_norm, value_t r_norm) {
                 value_t norms = raft::sqrt(q_norm) * raft::sqrt(r_norm);
                 // deal with potential for 0 in denominator by forcing 0/1 instead
                 value_t cos = ((norms != 0) * dot) / ((norms == 0) + norms);

                 // flip the similarity when both rows are 0
                 bool both_empty = (q_norm == 0) && (r_norm == 0);
                 return 1 - ((!both_empty * cos) + both_empty);
               });
  }

  ~cosine_expanded_distances_t() = default;

 private:
  // Borrowed pointer to the caller-owned configuration; must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
  rmm::device_uvector<char> workspace;
  ip_distances_t<value_idx, value_t> ip_dists;
};
/**
 * Hellinger distance using the expanded form: sqrt(1 - sum(sqrt(x_k) * sqrt(y_k)))
 * The expanded form is more efficient for sparse data.
 *
 * NOTE(review): an earlier version of this comment stated that A and B are
 * modified in place via sqrt/pow; the visible implementation computes
 * sqrt(a) * sqrt(b) on the fly inside the spmv product functor and does not
 * appear to mutate the inputs -- confirm.
 */
template <typename value_idx = int, typename value_t = float>
class hellinger_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit hellinger_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), workspace(0, resource::get_cuda_stream(config.handle))
  {
  }

  // Writes the dense a_nrows x b_nrows Hellinger-distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Expand B's CSR row pointer into explicit COO row indices. The buffer is
    // sized for the larger of the two nnz counts.
    rmm::device_uvector<value_idx> coo_rows(std::max(config_->b_nnz, config_->a_nnz),
                                            resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->b_indptr,
                                      config_->b_nrows,
                                      coo_rows.data(),
                                      config_->b_nnz,
                                      resource::get_cuda_stream(config_->handle));

    // Accumulate sum(sqrt(a_k) * sqrt(b_k)) for every row pair into out_dists.
    balanced_coo_pairwise_generalized_spmv<value_idx, value_t>(
      out_dists,
      *config_,
      coo_rows.data(),
      [] __device__(value_t a, value_t b) { return raft::sqrt(a) * raft::sqrt(b); },
      raft::add_op(),
      raft::atomic_add_op());

    // Finalize: dist = sqrt(1 - sum), clamped so FP error cannot produce NaN.
    raft::linalg::unaryOp<value_t>(
      out_dists,
      out_dists,
      config_->a_nrows * config_->b_nrows,
      [=] __device__(value_t input) {
        // Adjust to replace NaN in sqrt with 0 if input to sqrt is negative
        bool rectifier = (1 - input) > 0;
        return raft::sqrt(rectifier * (1 - input));
      },
      resource::get_cuda_stream(config_->handle));
  }

  ~hellinger_expanded_distances_t() = default;

 private:
  // Borrowed pointer to the caller-owned configuration; must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
  rmm::device_uvector<char> workspace;
};
/**
 * Russell-Rao distance from precomputed dot products:
 * dist = (n_cols - <x, y>) / n_cols, with the diagonal explicitly zeroed.
 */
template <typename value_idx = int, typename value_t = float>
class russelrao_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit russelrao_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), workspace(0, resource::get_cuda_stream(config.handle)), ip_dists(config)
  {
  }

  // Writes the dense a_nrows x b_nrows Russell-Rao distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Step 1: dot products A * B^T, computed in place into out_dists.
    ip_dists.compute(out_dists);

    // Feature count kept as value_t so the transform below stays in FP math.
    value_t n_cols = config_->a_ncols;
    value_t n_cols_inv = 1.0 / n_cols;
    raft::linalg::unaryOp<value_t>(
      out_dists,
      out_dists,
      config_->a_nrows * config_->b_nrows,
      [=] __device__(value_t input) { return (n_cols - input) * n_cols_inv; },
      resource::get_cuda_stream(config_->handle));

    // Step 2: force exact zeros on the diagonal.
    // NOTE(review): out_dists[i * b_nrows + i] only addresses the diagonal
    // when the output is square (a == b, i.e. a self-distance computation) --
    // confirm behavior for a_nrows != b_nrows.
    auto exec_policy = rmm::exec_policy(resource::get_cuda_stream(config_->handle));
    auto diags = thrust::counting_iterator<value_idx>(0);
    value_idx b_nrows = config_->b_nrows;
    thrust::for_each(exec_policy, diags, diags + config_->a_nrows, [=] __device__(value_idx input) {
      out_dists[input * b_nrows + input] = 0.0;
    });
  }

  ~russelrao_expanded_distances_t() = default;

 private:
  // Borrowed pointer to the caller-owned configuration; must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
  rmm::device_uvector<char> workspace;
  ip_distances_t<value_idx, value_t> ip_dists;
};
}; // END namespace detail
}; // END namespace distance
}; // END namespace sparse
}; // END namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv_kernel.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_radix_sort.cuh>
#include <cub/block/block_store.cuh>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
* Load-balanced sparse-matrix-sparse-matrix multiplication (SPMM) kernel with
* sparse-matrix-sparse-vector multiplication layout (SPMV).
* This is intended to be scheduled n_chunks_b times for each row of a.
* The steps are as follows:
*
* 1. Load row from A into dense vector in shared memory.
* This can be further chunked in the future if necessary to support larger
* column sizes.
* 2. Threads of block all step through chunks of B in parallel.
* When a new row is encountered in row_indices_b, a segmented
* reduction is performed across the warps and then across the
* block and the final value written out to host memory.
*
* Reference: https://www.icl.utk.edu/files/publications/2020/icl-utk-1421-2020.pdf
*
* @tparam value_idx index type
* @tparam value_t value type
* @tparam tpb threads per block configured on launch
* @tparam rev if this is true, the reduce/accumulate functions are only
* executed when A[col] == 0.0. when executed before/after !rev
* and A & B are reversed, this allows the full symmetric difference
* and intersection to be computed.
* @tparam kv_t data type stored in shared mem cache
* @tparam product_f reduce function type (semiring product() function).
* accepts two arguments of value_t and returns a value_t
* @tparam accum_f accumulation function type (semiring sum() function).
* accepts two arguments of value_t and returns a value_t
* @tparam write_f function to write value out. this should be mathematically
* equivalent to the accumulate function but implemented as
* an atomic operation on global memory. Accepts two arguments
* of value_t* and value_t and updates the value given by the
* pointer.
* @param[in] indptrA column pointer array for A
* @param[in] indicesA column indices array for A
* @param[in] dataA data array for A
* @param[in] rowsB coo row array for B
* @param[in] indicesB column indices array for B
* @param[in] dataB data array for B
* @param[in] m number of rows in A
* @param[in] n number of rows in B
* @param[in] dim number of features
* @param[in] nnz_b number of nonzeros in B
* @param[out] out array of size m*n
* @param[in] n_blocks_per_row number of blocks of B per row of A
* @param[in] chunk_size number of nnz for B to use for each row of A
* @param[in] buffer_size amount of smem to use for each row of A
* @param[in] product_func semiring product() function
* @param[in] accum_func semiring sum() function
* @param[in] write_func atomic semiring sum() function
*/
template <typename strategy_t,
          typename indptr_it,
          typename value_idx,
          typename value_t,
          bool rev,
          int tpb,
          typename product_f,
          typename accum_f,
          typename write_f>
RAFT_KERNEL balanced_coo_generalized_spmv_kernel(strategy_t strategy,
                                                 indptr_it indptrA,
                                                 value_idx* indicesA,
                                                 value_t* dataA,
                                                 value_idx nnz_a,
                                                 value_idx* rowsB,
                                                 value_idx* indicesB,
                                                 value_t* dataB,
                                                 value_idx m,
                                                 value_idx n,
                                                 int dim,
                                                 value_idx nnz_b,
                                                 value_t* out,
                                                 int n_blocks_per_row,
                                                 int chunk_size,
                                                 value_idx b_ncols,
                                                 product_f product_func,
                                                 accum_f accum_func,
                                                 write_f write_func)
{
  // Per-warp segmented reduction over each thread's running partial `c`.
  typedef cub::WarpReduce<value_t> warp_reduce;
  // Row of A this block works on, and which chunk of B's nonzeros it owns.
  value_idx cur_row_a        = indptrA.get_row_idx(n_blocks_per_row);
  value_idx cur_chunk_offset = blockIdx.x % n_blocks_per_row;
  // chunk starting offset
  value_idx ind_offset = cur_chunk_offset * chunk_size * tpb;
  // how many total cols will be processed by this block (should be <= chunk_size * n_threads)
  value_idx active_chunk_size = min(chunk_size * tpb, nnz_b - ind_offset);
  int tid     = threadIdx.x;
  int warp_id = tid / raft::warp_size();
  // compute id relative to current warp
  unsigned int lane_id = tid & (raft::warp_size() - 1);
  // first nonzero of B owned by this thread; advanced by blockDim.x below
  value_idx ind        = ind_offset + threadIdx.x;
  // Dynamic shared memory layout: the strategy's cache of A's current row
  // first, followed by one WarpReduce temp-storage slot per warp.
  extern __shared__ char smem[];
  typename strategy_t::smem_type A                = (typename strategy_t::smem_type)(smem);
  typename warp_reduce::TempStorage* temp_storage = (typename warp_reduce::TempStorage*)(A + dim);
  auto inserter = strategy.init_insert(A, dim);
  __syncthreads();
  value_idx start_offset_a, stop_offset_a;
  bool first_a_chunk, last_a_chunk;
  indptrA.get_row_offsets(
    cur_row_a, start_offset_a, stop_offset_a, n_blocks_per_row, first_a_chunk, last_a_chunk);
  // Convert current row vector in A to dense
  for (int i = tid; i <= (stop_offset_a - start_offset_a); i += blockDim.x) {
    strategy.insert(inserter, indicesA[start_offset_a + i], dataA[start_offset_a + i]);
  }
  __syncthreads();
  auto finder = strategy.init_find(A, dim);
  // Early exits must come only after the __syncthreads() barriers above,
  // otherwise lanes still inserting would deadlock the block.
  if (cur_row_a > m || cur_chunk_offset > n_blocks_per_row) return;
  if (ind >= nnz_b) return;
  value_idx start_index_a = 0, stop_index_a = b_ncols - 1;
  indptrA.get_indices_boundary(indicesA,
                               cur_row_a,
                               start_offset_a,
                               stop_offset_a,
                               start_index_a,
                               stop_index_a,
                               first_a_chunk,
                               last_a_chunk);
  value_idx cur_row_b = -1;
  value_t c           = 0.0;
  auto warp_red = warp_reduce(*(temp_storage + warp_id));
  // Seed each thread's partial with its first nonzero of B, multiplying it
  // against the cached A value at the same column (subject to `rev`).
  if (tid < active_chunk_size) {
    cur_row_b = rowsB[ind];
    auto index_b   = indicesB[ind];
    auto in_bounds = indptrA.check_indices_bounds(start_index_a, stop_index_a, index_b);
    if (in_bounds) {
      value_t a_col = strategy.find(finder, index_b);
      if (!rev || a_col == 0.0) { c = product_func(a_col, dataB[ind]); }
    }
  }
  // loop through chunks in parallel, reducing when a new row is
  // encountered by each thread
  for (int i = tid; i < active_chunk_size; i += blockDim.x) {
    value_idx ind_next   = ind + blockDim.x;
    value_idx next_row_b = -1;
    if (i + blockDim.x < active_chunk_size) next_row_b = rowsB[ind_next];
    bool diff_rows = next_row_b != cur_row_b;
    // Flush when any lane is about to cross a row boundary of B: lanes on
    // the same B row are grouped, head-segment-reduced, and each group's
    // leader merges its sum into global memory atomically.
    if (__any_sync(0xffffffff, diff_rows)) {
      // grab the threads currently participating in loops.
      // because any other threads should have returned already.
      unsigned int peer_group = __match_any_sync(0xffffffff, cur_row_b);
      bool is_leader          = get_lowest_peer(peer_group) == lane_id;
      value_t v               = warp_red.HeadSegmentedReduce(c, is_leader, accum_func);
      // thread with lowest lane id among peers writes out
      if (is_leader && v != 0.0) {
        // this conditional should be uniform, since rev is constant
        size_t idx = !rev ? (size_t)cur_row_a * n + cur_row_b : (size_t)cur_row_b * m + cur_row_a;
        write_func(out + idx, v);
      }
      c = 0.0;
    }
    // Advance to this thread's next nonzero of B (if any) and fold its
    // product into the running partial.
    if (next_row_b != -1) {
      ind            = ind_next;
      auto index_b   = indicesB[ind];
      auto in_bounds = indptrA.check_indices_bounds(start_index_a, stop_index_a, index_b);
      if (in_bounds) {
        value_t a_col = strategy.find(finder, index_b);
        if (!rev || a_col == 0.0) { c = accum_func(c, product_func(a_col, dataB[ind])); }
      }
      cur_row_b = next_row_b;
    }
  }
}
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/bin_distance.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits.h>
#include <raft/core/resource/cuda_stream.hpp>
#include "common.hpp"
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/distance/detail/ip_distance.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <nvfunctional>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
// @TODO: Move this into sparse prims (coo_norm)
// Accumulates, per row, the number of entries whose value is exactly 1.0 —
// the binary "norm" (population count) of each row. One thread per nonzero;
// `out` must be zero-initialized by the caller before launch.
template <typename value_idx, typename value_t>
RAFT_KERNEL compute_binary_row_norm_kernel(value_t* out,
                                           const value_idx* __restrict__ coo_rows,
                                           const value_t* __restrict__ data,
                                           value_idx nnz)
{
  value_idx tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= nnz) return;
  // Only entries equal to one contribute: stray explicit zeros may exist in
  // the sparse structure and pruning them up front would cost more than
  // this comparison.
  atomicAdd(&out[coo_rows[tid]], static_cast<value_t>(data[tid] == 1.0));
}
// Applies expansion_func(dot, Q_norms[i], R_norms[j]) elementwise to the
// dense row-major n_rows x n_cols matrix C, which holds the pairwise dot
// products on entry and the finished distances on exit. One thread per
// output element.
template <typename value_idx, typename value_t, typename expansion_f>
RAFT_KERNEL compute_binary_warp_kernel(value_t* __restrict__ C,
                                       const value_t* __restrict__ Q_norms,
                                       const value_t* __restrict__ R_norms,
                                       value_idx n_rows,
                                       value_idx n_cols,
                                       expansion_f expansion_func)
{
  // Widen BEFORE multiplying: the previous `blockDim.x * blockIdx.x` was
  // evaluated in 32-bit unsigned arithmetic and overflowed once the flat
  // thread count exceeded 2^32 (very large n_rows * n_cols outputs).
  std::size_t tid = (std::size_t)blockDim.x * blockIdx.x + threadIdx.x;
  value_idx i     = tid / n_cols;
  value_idx j     = tid % n_cols;
  if (i >= n_rows || j >= n_cols) return;
  value_t q_norm = Q_norms[i];
  value_t r_norm = R_norms[j];
  value_t dot    = C[(size_t)i * n_cols + j];
  C[(size_t)i * n_cols + j] = expansion_func(dot, q_norm, r_norm);
}
// Launches compute_binary_warp_kernel on `stream` with one thread per
// element of the n_rows x n_cols output matrix C.
template <typename value_idx, typename value_t, typename expansion_f, int tpb = 1024>
void compute_binary(value_t* C,
                    const value_t* Q_norms,
                    const value_t* R_norms,
                    value_idx n_rows,
                    value_idx n_cols,
                    expansion_f expansion_func,
                    cudaStream_t stream)
{
  size_t n_elements = (size_t)n_rows * n_cols;
  int n_blocks      = raft::ceildiv<size_t>(n_elements, tpb);
  compute_binary_warp_kernel<<<n_blocks, tpb, 0, stream>>>(
    C, Q_norms, R_norms, n_rows, n_cols, expansion_func);
}
/**
 * Computes binary (presence/absence) expanded distances between every row
 * of Q and every row of R:
 *   1. count the 1-valued entries of each row of Q and R (binary "norms"),
 *   2. expand the pairwise dot products already stored in `out` via
 *      expansion_func(dot, q_norm, r_norm).
 *
 * @param out        [in/out] dense m x n row-major matrix; dot products on
 *                   entry, distances on exit
 * @param Q_coo_rows COO row ids of Q's nonzeros
 * @param Q_data     values of Q's nonzeros
 * @param Q_nnz      number of nonzeros in Q
 * @param R_coo_rows COO row ids of R's nonzeros
 * @param R_data     values of R's nonzeros
 * @param R_nnz      number of nonzeros in R
 * @param m          number of rows in Q
 * @param n          number of rows in R
 * @param stream     CUDA stream all work is ordered on
 * @param expansion_func maps (dot, q_norm, r_norm) -> distance
 */
template <typename value_idx, typename value_t, typename expansion_f, int tpb = 1024>
void compute_bin_distance(value_t* out,
                          const value_idx* Q_coo_rows,
                          const value_t* Q_data,
                          value_idx Q_nnz,
                          const value_idx* R_coo_rows,
                          const value_t* R_data,
                          value_idx R_nnz,
                          value_idx m,
                          value_idx n,
                          cudaStream_t stream,
                          expansion_f expansion_func)
{
  rmm::device_uvector<value_t> Q_norms(m, stream);
  rmm::device_uvector<value_t> R_norms(n, stream);
  // Zero the accumulators on the SAME stream the kernels below run on.
  // The previous code omitted the stream argument, so the memsets ran on
  // the (legacy) default stream and could race with the norm kernels when
  // compiled with per-thread default streams.
  RAFT_CUDA_TRY(cudaMemsetAsync(Q_norms.data(), 0, Q_norms.size() * sizeof(value_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(R_norms.data(), 0, R_norms.size() * sizeof(value_t), stream));
  compute_binary_row_norm_kernel<<<raft::ceildiv(Q_nnz, tpb), tpb, 0, stream>>>(
    Q_norms.data(), Q_coo_rows, Q_data, Q_nnz);
  compute_binary_row_norm_kernel<<<raft::ceildiv(R_nnz, tpb), tpb, 0, stream>>>(
    R_norms.data(), R_coo_rows, R_data, R_nnz);
  compute_binary(out, Q_norms.data(), R_norms.data(), m, n, expansion_func, stream);
}
/**
* Jaccard distance using the expanded form:
* 1 - (sum(x_k * y_k) / ((sum(x_k) + sum(y_k)) - sum(x_k * y_k))
*/
template <typename value_idx = int, typename value_t = float>
class jaccard_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit jaccard_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), workspace(0, resource::get_cuda_stream(config.handle)), ip_dists(config)
  {
  }
  // Writes the dense a_nrows x b_nrows Jaccard distance matrix to out_dists.
  void compute(value_t* out_dists)
  {
    // Pairwise dot products (the intersection term for binary data) land
    // directly in out_dists; they are expanded into distances below.
    ip_dists.compute(out_dists);
    value_idx* b_indices = ip_dists.b_rows_coo();
    value_t* b_data      = ip_dists.b_data_coo();
    // COO row ids of A's nonzeros, needed by the row-norm kernel.
    rmm::device_uvector<value_idx> search_coo_rows(config_->a_nnz,
                                                   resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                      config_->a_nrows,
                                      search_coo_rows.data(),
                                      config_->a_nnz,
                                      resource::get_cuda_stream(config_->handle));
    compute_bin_distance(out_dists,
                         search_coo_rows.data(),
                         config_->a_data,
                         config_->a_nnz,
                         b_indices,
                         b_data,
                         config_->b_nnz,
                         config_->a_nrows,
                         config_->b_nrows,
                         resource::get_cuda_stream(config_->handle),
                         // Branchless expansion: union = |x| + |y| - <x,y>.
                         // The (denom == 0) term forces the denominator to 1
                         // when the union is empty, avoiding 0/0 -> NaN.
                         [] __device__ __host__(value_t dot, value_t q_norm, value_t r_norm) {
                           value_t q_r_union = q_norm + r_norm;
                           value_t denom     = q_r_union - dot;
                           value_t jacc      = ((denom != 0) * dot) / ((denom == 0) + denom);
                           // flip the similarity when both rows are 0
                           bool both_empty = q_r_union == 0;
                           return 1 - ((!both_empty * jacc) + both_empty);
                         });
  }
  ~jaccard_expanded_distances_t() = default;
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
  // Scratch buffer; sized 0 at construction and not used in this class view.
  rmm::device_uvector<char> workspace;
  // Computes the pairwise inner-product term of the expanded distance.
  ip_distances_t<value_idx, value_t> ip_dists;
};
/**
* Dice distance using the expanded form:
* 1 - ((2 * sum(x_k * y_k)) / (sum(x_k) + sum(y_k)))
*/
template <typename value_idx = int, typename value_t = float>
class dice_expanded_distances_t : public distances_t<value_t> {
 public:
  explicit dice_expanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), workspace(0, resource::get_cuda_stream(config.handle)), ip_dists(config)
  {
  }
  // Writes the dense a_nrows x b_nrows Dice distance matrix to out_dists:
  //   dice(x, y) = 1 - (2 * <x, y>) / (|x| + |y|)
  void compute(value_t* out_dists)
  {
    // Pairwise dot products land directly in out_dists and are expanded
    // into distances below.
    ip_dists.compute(out_dists);
    value_idx* b_indices = ip_dists.b_rows_coo();
    value_t* b_data      = ip_dists.b_data_coo();
    // COO row ids of A's nonzeros, needed by the row-norm kernel.
    rmm::device_uvector<value_idx> search_coo_rows(config_->a_nnz,
                                                   resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                      config_->a_nrows,
                                      search_coo_rows.data(),
                                      config_->a_nnz,
                                      resource::get_cuda_stream(config_->handle));
    compute_bin_distance(out_dists,
                         search_coo_rows.data(),
                         config_->a_data,
                         config_->a_nnz,
                         b_indices,
                         b_data,
                         config_->b_nnz,
                         config_->a_nrows,
                         config_->b_nrows,
                         resource::get_cuda_stream(config_->handle),
                         [] __device__ __host__(value_t dot, value_t q_norm, value_t r_norm) {
                           value_t q_r_union = q_norm + r_norm;
                           bool both_empty   = q_r_union == 0;
                           // Force the denominator to 1 when both rows are
                           // empty: the previous code divided 0/0 here and
                           // the resulting NaN survived the both_empty
                           // selection below (0 * NaN == NaN). This matches
                           // the jaccard kernel's guard and yields distance
                           // 0 for two empty rows.
                           value_t dice = (2 * dot) / (q_r_union + both_empty);
                           return 1 - ((!both_empty * dice) + both_empty);
                         });
  }
  ~dice_expanded_distances_t() = default;
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
  // Scratch buffer; sized 0 at construction and not used in this class view.
  rmm::device_uvector<char> workspace;
  // Computes the pairwise inner-product term of the expanded distance.
  ip_distances_t<value_idx, value_t> ip_dists;
};
} // END namespace detail
}; // END namespace distance
}; // END namespace sparse
}; // END namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "coo_spmv_strategies/dense_smem_strategy.cuh"
#include "coo_spmv_strategies/hash_strategy.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include "../../csr.hpp"
#include "../../detail/utils.h"
#include "common.hpp"
#include <limits.h>
#include <nvfunctional>
#include <cusparse_v2.h>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
// Variant taking an explicit, caller-constructed dispatch strategy. Clears
// the a_nrows x b_nrows output (the kernels accumulate into it atomically)
// and forwards to the strategy.
template <typename value_idx,
          typename value_t,
          int threads_per_block = 1024,
          typename product_f,
          typename accum_f,
          typename write_f,
          typename strategy_t>
inline void balanced_coo_pairwise_generalized_spmv(
  value_t* out_dists,
  const distances_config_t<value_idx, value_t>& config_,
  value_idx* coo_rows_b,
  product_f product_func,
  accum_f accum_func,
  write_f write_func,
  strategy_t strategy,
  int chunk_size = 500000)
{
  auto stream = resource::get_cuda_stream(config_.handle);
  uint64_t out_bytes =
    (uint64_t)sizeof(value_t) * (uint64_t)config_.a_nrows * (uint64_t)config_.b_nrows;
  RAFT_CUDA_TRY(cudaMemsetAsync(out_dists, 0, out_bytes, stream));
  strategy.dispatch(out_dists, coo_rows_b, product_func, accum_func, write_func, chunk_size);
};
/**
* Performs generalized sparse-matrix-sparse-matrix multiplication via a
* sparse-matrix-sparse-vector layout `out=A*B` where generalized product()
* and sum() operations can be used in place of the standard sum and product:
*
* out_ij = sum_k(product(A_ik, B_ik)) The sum goes through values of
* k=0..n_cols-1 where B_kj is nonzero.
*
* The product and sum operations shall form a semiring algebra with the
* following properties:
* 1. {+, 0} is a commutative sum reduction monoid with identity element 0
* 2. {*, 1} is a product monoid with identity element 1
* 3. Multiplication by 0 annihilates x. e.g. product(x, 0) = 0
*
* Each vector of A is loaded into shared memory in dense form and the
* non-zeros of B load balanced across the threads of each block.
* @tparam value_idx index type
* @tparam value_t value type
* @tparam threads_per_block block size
* @tparam product_f semiring product() function
* @tparam accum_f semiring sum() function
* @tparam write_f atomic semiring sum() function
* @param[out] out_dists dense array of out distances of size m * n in row-major
* format.
* @param[in] config_ distance config object
* @param[in] coo_rows_b coo row array for B
* @param[in] product_func semiring product() function
* @param[in] accum_func semiring sum() function
* @param[in] write_func atomic semiring sum() function
* @param[in] chunk_size number of nonzeros of B to process for each row of A
* this value was found through profiling and represents a reasonable
* setting for both large and small densities
*/
template <typename value_idx,
          typename value_t,
          int threads_per_block = 1024,
          typename product_f,
          typename accum_f,
          typename write_f>
inline void balanced_coo_pairwise_generalized_spmv(
  value_t* out_dists,
  const distances_config_t<value_idx, value_t>& config_,
  value_idx* coo_rows_b,
  product_f product_func,
  accum_f accum_func,
  write_f write_func,
  int chunk_size = 500000)
{
  // The kernels accumulate atomically into out_dists, so clear the whole
  // a_nrows x b_nrows accumulator first.
  auto stream = resource::get_cuda_stream(config_.handle);
  uint64_t out_bytes =
    (uint64_t)sizeof(value_t) * (uint64_t)config_.a_nrows * (uint64_t)config_.b_nrows;
  RAFT_CUDA_TRY(cudaMemsetAsync(out_dists, 0, out_bytes, stream));
  // Prefer the dense shared-memory strategy whenever a full row of A fits
  // in a block's shared memory; otherwise fall back to the hash-table
  // strategy.
  if (config_.a_ncols < max_cols_per_block<value_idx, value_t>()) {
    dense_smem_strategy<value_idx, value_t, threads_per_block> strategy(config_);
    strategy.dispatch(out_dists, coo_rows_b, product_func, accum_func, write_func, chunk_size);
  } else {
    hash_strategy<value_idx, value_t, threads_per_block> strategy(config_);
    strategy.dispatch(out_dists, coo_rows_b, product_func, accum_func, write_func, chunk_size);
  }
};
// Reverse-pass variant taking an explicit dispatch strategy. Unlike the
// forward pass, out_dists is NOT cleared here: this is meant to accumulate
// the A-B contribution on top of a preceding forward pass over the same
// output buffer.
template <typename value_idx,
          typename value_t,
          int threads_per_block = 1024,
          typename product_f,
          typename accum_f,
          typename write_f,
          typename strategy_t>
inline void balanced_coo_pairwise_generalized_spmv_rev(
  value_t* out_dists,
  const distances_config_t<value_idx, value_t>& config_,
  value_idx* coo_rows_a,
  product_f product_func,
  accum_f accum_func,
  write_f write_func,
  strategy_t strategy,
  int chunk_size = 500000)
{
  strategy.dispatch_rev(out_dists, coo_rows_a, product_func, accum_func, write_func, chunk_size);
};
/**
* Used for computing distances where the reduction (e.g. product()) function
* requires an implicit union (product(x, 0) = x) to capture the difference A-B.
* This is necessary in some applications because the standard semiring algebra
* endowed with the default multiplication product monoid will only
* compute the intersection & B-A.
*
* This particular function is meant to accompany the function
* `balanced_coo_pairwise_generalized_spmv` and executes the product operation
* on only those columns that exist in B and not A.
*
* The product and sum operations shall enable the computation of a
* non-annihilating semiring algebra with the following properties:
* 1. {+, 0} is a commutative sum reduction monoid with identity element 0
* 2. {*, 0} is a product monoid with identity element 0
* 3. Multiplication by 0 does not annihilate x. e.g. product(x, 0) = x
*
* Manattan distance sum(abs(x_k-y_k)) is a great example of when this type of
* execution pattern is necessary.
*
* @tparam value_idx index type
* @tparam value_t value type
* @tparam threads_per_block block size
* @tparam product_f semiring product() function
* @tparam accum_f semiring sum() function
* @tparam write_f atomic semiring sum() function
* @param[out] out_dists dense array of out distances of size m * n
* @param[in] config_ distance config object
* @param[in] coo_rows_a coo row array for A
* @param[in] product_func semiring product() function
* @param[in] accum_func semiring sum() function
* @param[in] write_func atomic semiring sum() function
* @param[in] chunk_size number of nonzeros of B to process for each row of A
* this value was found through profiling and represents a reasonable
* setting for both large and small densities
*/
template <typename value_idx,
          typename value_t,
          int threads_per_block = 1024,
          typename product_f,
          typename accum_f,
          typename write_f>
inline void balanced_coo_pairwise_generalized_spmv_rev(
  value_t* out_dists,
  const distances_config_t<value_idx, value_t>& config_,
  value_idx* coo_rows_a,
  product_f product_func,
  accum_f accum_func,
  write_f write_func,
  int chunk_size = 500000)
{
  // Prefer the dense shared-memory strategy whenever a full row of B fits
  // in a block's shared memory; otherwise use the hash-table strategy.
  // Note: no memset here -- this pass accumulates on top of the forward
  // pass's output.
  if (config_.b_ncols < max_cols_per_block<value_idx, value_t>()) {
    dense_smem_strategy<value_idx, value_t, threads_per_block> strategy(config_);
    strategy.dispatch_rev(out_dists, coo_rows_a, product_func, accum_func, write_func, chunk_size);
  } else {
    hash_strategy<value_idx, value_t, threads_per_block> strategy(config_);
    strategy.dispatch_rev(out_dists, coo_rows_a, product_func, accum_func, write_func, chunk_size);
  }
};
} // namespace detail
} // namespace distance
} // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/common.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resources.hpp>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * Describes the inputs to a pairwise sparse distance computation: two CSR
 * matrices, A (the "left"/query side) and B (the "right"/index side).
 * All pointers are non-owning device pointers supplied by the caller.
 *
 * Fields now carry zero/nullptr default member initializers: the previous
 * code left them indeterminate after construction, so a partially
 * populated config silently read garbage sizes/pointers.
 */
template <typename value_idx, typename value_t>
struct distances_config_t {
  distances_config_t(raft::resources const& handle_) : handle(handle_) {}
  // left side
  value_idx a_nrows{};            // number of rows in A
  value_idx a_ncols{};            // number of columns (features) in A
  value_idx a_nnz{};              // number of nonzeros in A
  value_idx* a_indptr{nullptr};   // CSR row offsets of A
  value_idx* a_indices{nullptr};  // CSR column indices of A
  value_t* a_data{nullptr};       // CSR values of A
  // right side
  value_idx b_nrows{};            // number of rows in B
  value_idx b_ncols{};            // number of columns (features) in B
  value_idx b_nnz{};              // number of nonzeros in B
  value_idx* b_indptr{nullptr};   // CSR row offsets of B
  value_idx* b_indices{nullptr};  // CSR column indices of B
  value_t* b_data{nullptr};       // CSR values of B
  raft::resources const& handle;
};
/**
 * Abstract interface for pairwise sparse distance computers. Implementations
 * write a dense distance matrix into the caller-allocated `out` buffer.
 */
template <typename value_t>
class distances_t {
 public:
  // Computes all pairwise distances into `out`; base implementation is a no-op.
  virtual void compute(value_t* out) {}
  virtual ~distances_t() = default;
};
}; // namespace detail
}; // namespace distance
}; // namespace sparse
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/ip_distance.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include "common.hpp"
#include <raft/core/operators.cuh>
#include <raft/core/operators.hpp>
#include <raft/sparse/convert/coo.cuh>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/distance/detail/coo_spmv.cuh>
#include <raft/sparse/linalg/transpose.cuh>
#include <rmm/device_uvector.hpp>
#include <nvfunctional>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
template <typename value_idx, typename value_t>
class ip_distances_t : public distances_t<value_t> {
 public:
  /**
   * Computes simple sparse inner product distances as sum(x_k * y_k)
   * using the load-balanced COO SPMV with a (multiply, add) semiring.
   * @param[in] config specifies inputs, outputs, and sizes
   */
  ip_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config), coo_rows_b(config.b_nnz, resource::get_cuda_stream(config.handle))
  {
    // Pre-compute B's COO row ids once; they are reused by every compute().
    raft::sparse::convert::csr_to_coo(config_->b_indptr,
                                      config_->b_nrows,
                                      coo_rows_b.data(),
                                      config_->b_nnz,
                                      resource::get_cuda_stream(config_->handle));
  }
  /**
   * Performs pairwise distance computation and computes output distances
   * @param out_distances dense output matrix (size a_nrows * b_nrows)
   */
  void compute(value_t* out_distances)
  {
    /**
     * Compute pairwise distances and return dense matrix in row-major format
     */
    balanced_coo_pairwise_generalized_spmv<value_idx, value_t>(out_distances,
                                                               *config_,
                                                               coo_rows_b.data(),
                                                               raft::mul_op(),
                                                               raft::add_op(),
                                                               raft::atomic_add_op());
  }
  // COO row ids of B's nonzeros (device memory, length b_nnz).
  value_idx* b_rows_coo() { return coo_rows_b.data(); }
  // Values of B's nonzeros, borrowed from the caller's config.
  value_t* b_data_coo() { return config_->b_data; }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
  // B's COO row ids, materialized from CSR at construction.
  rmm::device_uvector<value_idx> coo_rows_b;
};
}; // END namespace detail
}; // END namespace distance
}; // END namespace sparse
}; // END namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/lp_distance.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/operators.cuh>
#include <raft/core/operators.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/detail/utils.h>
#include "common.hpp"
#include <raft/sparse/convert/coo.cuh>
#include <nvfunctional>
#include <algorithm>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * Computes unexpanded (element-wise) Lp-style distances between all rows of
 * A and all rows of B in two passes over the same dense output:
 *
 *   1. Forward pass over B's nonzeros (also zeroes the output): covers
 *      every column present in B, i.e. the intersection and B-A terms.
 *   2. Reverse pass over A's nonzeros: adds the contribution of columns
 *      present in A but absent from B (A-B), which an annihilating
 *      product would otherwise miss.
 *
 * One scratch COO-rows buffer, sized for the larger nonzero count, is
 * shared by both passes.
 *
 * @param out_dists    dense a_nrows x b_nrows row-major output
 * @param config_      input matrices and sizes
 * @param product_func semiring product()
 * @param accum_func   semiring sum()
 * @param write_func   atomic semiring sum() for global-memory writes
 */
template <typename value_idx = int,
          typename value_t = float,
          typename product_f,
          typename accum_f,
          typename write_f>
void unexpanded_lp_distances(value_t* out_dists,
                             const distances_config_t<value_idx, value_t>* config_,
                             product_f product_func,
                             accum_f accum_func,
                             write_f write_func)
{
  rmm::device_uvector<value_idx> coo_rows(std::max(config_->b_nnz, config_->a_nnz),
                                          resource::get_cuda_stream(config_->handle));
  raft::sparse::convert::csr_to_coo(config_->b_indptr,
                                    config_->b_nrows,
                                    coo_rows.data(),
                                    config_->b_nnz,
                                    resource::get_cuda_stream(config_->handle));
  balanced_coo_pairwise_generalized_spmv<value_idx, value_t>(
    out_dists, *config_, coo_rows.data(), product_func, accum_func, write_func);
  // Reuse the scratch buffer for A's COO row ids in the reverse pass.
  raft::sparse::convert::csr_to_coo(config_->a_indptr,
                                    config_->a_nrows,
                                    coo_rows.data(),
                                    config_->a_nnz,
                                    resource::get_cuda_stream(config_->handle));
  balanced_coo_pairwise_generalized_spmv_rev<value_idx, value_t>(
    out_dists, *config_, coo_rows.data(), product_func, accum_func, write_func);
}
/**
* Computes L1 distances for sparse input. This does not have
* an equivalent expanded form, so it is only executed in
* an unexpanded form.
* @tparam value_idx
* @tparam value_t
*/
template <typename value_idx = int, typename value_t = float>
class l1_unexpanded_distances_t : public distances_t<value_t> {
 public:
  l1_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config) : config_(&config)
  {
  }
  // Manhattan distance: sum_k |x_k - y_k|, written densely to out_dists.
  void compute(value_t* out_dists)
  {
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists, config_, raft::absdiff_op(), raft::add_op(), raft::atomic_add_op());
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
};
template <typename value_idx = int, typename value_t = float>
class l2_unexpanded_distances_t : public distances_t<value_t> {
 public:
  l2_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config) : config_(&config)
  {
  }
  // Squared Euclidean distance: sum_k (x_k - y_k)^2 (no final sqrt; see
  // l2_sqrt_unexpanded_distances_t for the rooted variant).
  void compute(value_t* out_dists)
  {
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists, config_, raft::sqdiff_op(), raft::add_op(), raft::atomic_add_op());
  }
 protected:
  // Non-owning pointer; protected so the sqrt subclass can reuse it.
  const distances_config_t<value_idx, value_t>* config_;
};
template <typename value_idx = int, typename value_t = float>
class l2_sqrt_unexpanded_distances_t : public l2_unexpanded_distances_t<value_idx, value_t> {
 public:
  l2_sqrt_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : l2_unexpanded_distances_t<value_idx, value_t>(config)
  {
  }
  // Euclidean distance: elementwise sqrt of the squared-L2 matrix.
  void compute(value_t* out_dists)
  {
    // Squared L2 distances first, then an in-place sqrt over all entries.
    l2_unexpanded_distances_t<value_idx, value_t>::compute(out_dists);
    uint64_t n = (uint64_t)this->config_->a_nrows * (uint64_t)this->config_->b_nrows;
    // Sqrt Post-processing
    // NOTE(review): `abs(input) * neg` is algebraically identical to
    // `input` (neg is -1 exactly when input < 0), so a negative entry would
    // still reach sqrt() and yield NaN. Negative values should be
    // unreachable here since the accumulated terms are squares -- confirm
    // the intent of this sign manipulation.
    raft::linalg::unaryOp<value_t>(
      out_dists,
      out_dists,
      n,
      [] __device__(value_t input) {
        int neg = input < 0 ? -1 : 1;
        return raft::sqrt(abs(input) * neg);
      },
      resource::get_cuda_stream(this->config_->handle));
  }
};
template <typename value_idx = int, typename value_t = float>
class linf_unexpanded_distances_t : public distances_t<value_t> {
 public:
  explicit linf_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config)
  {
  }
  // Chebyshev (L-infinity) distance: max_k |x_k - y_k|, using a
  // (absdiff, max) semiring with atomicMax writes.
  void compute(value_t* out_dists)
  {
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists, config_, raft::absdiff_op(), raft::max_op(), raft::atomic_max_op());
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
};
template <typename value_idx = int, typename value_t = float>
class canberra_unexpanded_distances_t : public distances_t<value_t> {
 public:
  explicit canberra_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config)
  {
  }
  // Canberra distance: sum_k |x_k - y_k| / (|x_k| + |y_k|), with terms
  // where both values are zero contributing 0.
  void compute(value_t* out_dists)
  {
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists,
      config_,
      // Branchless per-coordinate term: when d == 0 the numerator factor
      // (d != 0) is 0 and the denominator is forced to 1, so the term is
      // exactly 0 instead of 0/0.
      [] __device__(value_t a, value_t b) {
        value_t d = fabs(a) + fabs(b);
        // deal with potential for 0 in denominator by
        // forcing 1/0 instead
        return ((d != 0) * fabs(a - b)) / (d + (d == 0));
      },
      raft::add_op(),
      raft::atomic_add_op());
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
};
template <typename value_idx = int, typename value_t = float>
class lp_unexpanded_distances_t : public distances_t<value_t> {
 public:
  /**
   * Minkowski distance: dist(x, y) = (sum_k |x_k - y_k|^p)^(1/p).
   * @param config input matrices and sizes
   * @param p_     order of the norm
   */
  explicit lp_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config,
                                     value_t p_)
    : config_(&config), p(p_)
  {
  }
  void compute(value_t* out_dists)
  {
    // Accumulate sum(|x - y|^p). The previous code composed pow with plain
    // sub_op, i.e. pow(x - y, p): for odd integer p the signed terms cancel
    // incorrectly, and for fractional p pow() of a negative base is NaN.
    // Minkowski requires the absolute difference.
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists,
      config_,
      raft::compose_op(raft::pow_const_op<value_t>(p), raft::absdiff_op()),
      raft::add_op(),
      raft::atomic_add_op());
    // Final (1/p)-th root, applied elementwise in place.
    uint64_t n         = (uint64_t)this->config_->a_nrows * (uint64_t)this->config_->b_nrows;
    value_t one_over_p = value_t{1} / p;
    raft::linalg::unaryOp<value_t>(out_dists,
                                   out_dists,
                                   n,
                                   raft::pow_const_op<value_t>(one_over_p),
                                   resource::get_cuda_stream(config_->handle));
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
  value_t p;  // norm order
};
template <typename value_idx = int, typename value_t = float>
class hamming_unexpanded_distances_t : public distances_t<value_t> {
 public:
  // Hamming distance: the fraction of features on which two rows disagree.
  explicit hamming_unexpanded_distances_t(const distances_config_t<value_idx, value_t>& config)
    : config_(&config)
  {
  }
  void compute(value_t* out_dists)
  {
    // Count mismatching coordinates for every pair of rows.
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists, config_, raft::notequal_op(), raft::add_op(), raft::atomic_add_op());
    // Normalize the mismatch counts by the number of features.
    uint64_t n_out     = (uint64_t)config_->a_nrows * (uint64_t)config_->b_nrows;
    value_t inv_n_cols = 1.0 / config_->a_ncols;
    raft::linalg::unaryOp<value_t>(out_dists,
                                   out_dists,
                                   n_out,
                                   raft::mul_const_op<value_t>(inv_n_cols),
                                   resource::get_cuda_stream(config_->handle));
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
};
template <typename value_idx = int, typename value_t = float>
class jensen_shannon_unexpanded_distances_t : public distances_t<value_t> {
 public:
  explicit jensen_shannon_unexpanded_distances_t(
    const distances_config_t<value_idx, value_t>& config)
    : config_(&config)
  {
  }
  // Jensen-Shannon distance: sqrt(0.5 * (KL(a || m) + KL(b || m))) with
  // m = (a + b) / 2, accumulated coordinate-wise.
  void compute(value_t* out_dists)
  {
    unexpanded_lp_distances<value_idx, value_t>(
      out_dists,
      config_,
      // Per-coordinate term: -a*log(m/a) - b*log(m/b) == a*log(a/m) +
      // b*log(b/m). Branchless guards: when a (or b) is 0 its ratio is
      // forced to 0 and the matching log factor is zeroed, so zero
      // coordinates contribute 0 instead of NaN/inf.
      [] __device__(value_t a, value_t b) {
        value_t m      = 0.5f * (a + b);
        bool a_zero    = a == 0;
        bool b_zero    = b == 0;
        value_t x = (!a_zero * m) / (a_zero + a);
        value_t y = (!b_zero * m) / (b_zero + b);
        bool x_zero = x == 0;
        bool y_zero = y == 0;
        return (-a * (!x_zero * log(x + x_zero))) + (-b * (!y_zero * log(y + y_zero)));
      },
      raft::add_op(),
      raft::atomic_add_op());
    // Final transform: distance = sqrt(0.5 * accumulated divergence).
    uint64_t n = (uint64_t)this->config_->a_nrows * (uint64_t)this->config_->b_nrows;
    raft::linalg::unaryOp<value_t>(
      out_dists,
      out_dists,
      n,
      [=] __device__(value_t input) { return raft::sqrt(0.5 * input); },
      resource::get_cuda_stream(config_->handle));
  }
 private:
  // Non-owning pointer to the caller-supplied distance configuration.
  const distances_config_t<value_idx, value_t>* config_;
};
/**
 * KL-divergence-based distances between all pairs of rows of two sparse
 * matrices: per-column terms a*log(a/b) are accumulated with a sum, then all
 * outputs are scaled by 0.5.
 */
template <typename value_idx = int, typename value_t = float>
class kl_divergence_unexpanded_distances_t : public distances_t<value_t> {
 public:
  // Only a pointer to `config` is retained; it must outlive this object.
  explicit kl_divergence_unexpanded_distances_t(
    const distances_config_t<value_idx, value_t>& config)
    : config_(&config)
  {
  }

  /**
   * Computes the dense (a_nrows x b_nrows) distance matrix into `out_dists`
   * (device memory).
   */
  void compute(value_t* out_dists)
  {
    // COO row indices for B, derived from its CSR indptr. Sized to the larger
    // of the two nnz counts so the buffer can be reused for either matrix.
    rmm::device_uvector<value_idx> coo_rows(std::max(config_->b_nnz, config_->a_nnz),
                                            resource::get_cuda_stream(config_->handle));
    raft::sparse::convert::csr_to_coo(config_->b_indptr,
                                      config_->b_nrows,
                                      coo_rows.data(),
                                      config_->b_nnz,
                                      resource::get_cuda_stream(config_->handle));

    // Accumulate sum_k a_ik * log(a_ik / b_jk) per row pair.
    // NOTE(review): the lambda evaluates log(a / b) without zero guards, so it
    // relies on the caller/SPMV only pairing explicit nonzeros — confirm.
    balanced_coo_pairwise_generalized_spmv<value_idx, value_t>(
      out_dists,
      *config_,
      coo_rows.data(),
      [] __device__(value_t a, value_t b) { return a * log(a / b); },
      raft::add_op(),
      raft::atomic_add_op());

    // Scale all outputs by 0.5. Element count computed in 64 bits.
    uint64_t n = (uint64_t)this->config_->a_nrows * (uint64_t)this->config_->b_nrows;
    raft::linalg::unaryOp<value_t>(out_dists,
                                   out_dists,
                                   n,
                                   raft::mul_const_op<value_t>(0.5),
                                   resource::get_cuda_stream(config_->handle));
  }

 private:
  // Non-owning; the referenced config must outlive this object.
  const distances_config_t<value_idx, value_t>* config_;
};
}; // END namespace detail
}; // END namespace distance
}; // END namespace sparse
}; // END namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv_strategies/dense_smem_strategy.cuh | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "base_strategy.cuh"
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * COO SPMV dispatch strategy that caches one full dense row of the "A" side
 * (or "B" side in the reverse pass) in shared memory, plus one reduction slot
 * per warp. Appropriate when an entire row of `n_cols` values fits in the
 * shared memory available per block.
 *
 * Fix: the thread count was hard-coded as 1024 in smem_per_block(),
 * dispatch() and dispatch_rev(), while the kernels are actually launched with
 * `tpb` threads (see coo_spmv_strategy::_dispatch_base) and the sibling
 * hash_strategy derives its chunking from `tpb`. The constants are now tied
 * to the `tpb` template parameter; behavior is unchanged for the existing
 * tpb == 1024 instantiations.
 *
 * @tparam value_idx index type of the sparse matrices
 * @tparam value_t value type of the sparse matrices
 * @tparam tpb threads per block used for the kernel launches
 */
template <typename value_idx, typename value_t, int tpb>
class dense_smem_strategy : public coo_spmv_strategy<value_idx, value_t, tpb> {
 public:
  // The shared-memory "map" is just a dense array of value_t, one slot per
  // column; insert and find views are the same raw pointer.
  using smem_type   = value_t*;
  using insert_type = smem_type;
  using find_type   = smem_type;

  dense_smem_strategy(const distances_config_t<value_idx, value_t>& config_)
    : coo_spmv_strategy<value_idx, value_t, tpb>(config_)
  {
  }

  /**
   * Shared memory required per block: one dense row of `n_cols` values plus
   * one partial-reduction slot per warp of the launch.
   */
  inline static int smem_per_block(int n_cols)
  {
    return (n_cols * sizeof(value_t)) + ((tpb / raft::warp_size()) * sizeof(value_t));
  }

  /**
   * Launches the forward pass: rows of A cached in shared memory, nonzeros of
   * B streamed in chunks of `chunk_size * tpb` elements.
   */
  template <typename product_f, typename accum_f, typename write_f>
  void dispatch(value_t* out_dists,
                value_idx* coo_rows_b,
                product_f product_func,
                accum_f accum_func,
                write_f write_func,
                int chunk_size)
  {
    // Each block processes chunk_size * tpb nonzeros of B per row of A.
    auto n_blocks_per_row = raft::ceildiv(this->config.b_nnz, chunk_size * tpb);
    auto n_blocks         = this->config.a_nrows * n_blocks_per_row;

    // Unmasked iterator: every row of A participates.
    mask_row_it<value_idx> a_indptr(this->config.a_indptr, this->config.a_nrows);

    this->_dispatch_base(*this,
                         this->config.b_ncols,
                         a_indptr,
                         out_dists,
                         coo_rows_b,
                         product_func,
                         accum_func,
                         write_func,
                         chunk_size,
                         n_blocks,
                         n_blocks_per_row);
  }

  /**
   * Launches the reverse pass (roles of A and B swapped): rows of B cached in
   * shared memory, nonzeros of A streamed in chunks.
   */
  template <typename product_f, typename accum_f, typename write_f>
  void dispatch_rev(value_t* out_dists,
                    value_idx* coo_rows_a,
                    product_f product_func,
                    accum_f accum_func,
                    write_f write_func,
                    int chunk_size)
  {
    auto n_blocks_per_row = raft::ceildiv(this->config.a_nnz, chunk_size * tpb);
    auto n_blocks         = this->config.b_nrows * n_blocks_per_row;

    mask_row_it<value_idx> b_indptr(this->config.b_indptr, this->config.b_nrows);

    this->_dispatch_base_rev(*this,
                             this->config.a_ncols,
                             b_indptr,
                             out_dists,
                             coo_rows_a,
                             product_func,
                             accum_func,
                             write_func,
                             chunk_size,
                             n_blocks,
                             n_blocks_per_row);
  }

  /** Zero-fills the dense shared-memory row cooperatively across the block. */
  __device__ inline insert_type init_insert(smem_type cache, const value_idx& cache_size)
  {
    for (int k = threadIdx.x; k < cache_size; k += blockDim.x) {
      cache[k] = 0.0;
    }
    return cache;
  }

  /** Stores `value` at column `key` of the cached row. */
  __device__ inline void insert(insert_type cache, const value_idx& key, const value_t& value)
  {
    cache[key] = value;
  }

  /** No setup needed for lookups; the cache is read directly. */
  __device__ inline find_type init_find(smem_type cache, const value_idx& cache_size)
  {
    return cache;
  }

  /** Returns the cached value for column `key` (0 if never inserted). */
  __device__ inline value_t find(find_type cache, const value_idx& key) { return cache[key]; }
};
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv_strategies/coo_mask_row_iterators.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common.hpp"
#include "../utils.cuh"
#include <rmm/device_uvector.hpp>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * Simple row iterator over a CSR indptr array, optionally restricted to a
 * subset of rows via `mask_row_idx`. Each CUDA block maps to one row via
 * blockIdx.x / n_blocks_nnz_b; a full row is processed in one piece (no
 * chunking), so the chunk-related hooks are no-ops here.
 */
template <typename value_idx>
class mask_row_it {
 public:
  /**
   * @param full_indptr_ CSR row offsets (device pointer)
   * @param n_rows_ number of rows covered by this iterator
   * @param mask_row_idx_ optional device array mapping block groups to row
   *        ids; when NULL, rows are addressed directly by block group
   *
   * NOTE(review): `n_rows` is stored as a reference to the caller's variable
   * (see the member declaration below); the caller's argument must outlive
   * this object — confirm no temporaries are ever passed.
   */
  mask_row_it(const value_idx* full_indptr_,
              const value_idx& n_rows_,
              value_idx* mask_row_idx_ = NULL)
    : full_indptr(full_indptr_), mask_row_idx(mask_row_idx_), n_rows(n_rows_)
  {
  }

  /** Returns the row this block works on (through the mask when present). */
  __device__ inline value_idx get_row_idx(const int& n_blocks_nnz_b)
  {
    if (mask_row_idx != NULL) {
      return mask_row_idx[blockIdx.x / n_blocks_nnz_b];
    } else {
      return blockIdx.x / n_blocks_nnz_b;
    }
  }

  /**
   * Fills [start_offset, stop_offset] (stop inclusive: indptr[row+1] - 1)
   * for the whole row. The chunk flags are unused by this iterator.
   */
  __device__ inline void get_row_offsets(const value_idx& row_idx,
                                         value_idx& start_offset,
                                         value_idx& stop_offset,
                                         const value_idx& n_blocks_nnz_b,
                                         bool& first_a_chunk,
                                         bool& last_a_chunk)
  {
    start_offset = full_indptr[row_idx];
    stop_offset  = full_indptr[row_idx + 1] - 1;
  }

  // No chunking: the column-index boundary hook is a no-op.
  __device__ constexpr inline void get_indices_boundary(const value_idx* indices,
                                                        value_idx& indices_len,
                                                        value_idx& start_offset,
                                                        value_idx& stop_offset,
                                                        value_idx& start_index,
                                                        value_idx& stop_index,
                                                        bool& first_a_chunk,
                                                        bool& last_a_chunk)
  {
    // do nothing;
  }

  // No chunking: every column index is considered in-bounds.
  __device__ constexpr inline bool check_indices_bounds(value_idx& start_index_a,
                                                        value_idx& stop_index_a,
                                                        value_idx& index_b)
  {
    return true;
  }

  // n_rows is a *reference* member bound to the constructor argument (see the
  // lifetime note above). full_indptr and mask_row_idx are non-owning device
  // pointers.
  const value_idx *full_indptr, &n_rows;
  value_idx* mask_row_idx;
};
/**
 * Expands per-row chunk offsets into a chunk -> row lookup table.
 *
 * `n_chunks_per_row` holds the inclusive prefix sum of chunk counts with a
 * leading 0, so row `tid` owns chunk slots
 * [n_chunks_per_row[tid], n_chunks_per_row[tid + 1]); each of those slots in
 * `chunk_indices` is filled with `tid`.
 *
 * Launch: 1D grid with at least n_rows threads; one thread per row.
 *
 * Fix: the loop counter was declared `int`, silently truncating the
 * value_idx chunk offsets when value_idx is a 64-bit type; it now uses
 * value_idx.
 */
template <typename value_idx>
RAFT_KERNEL fill_chunk_indices_kernel(value_idx* n_chunks_per_row,
                                      value_idx* chunk_indices,
                                      value_idx n_rows)
{
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < n_rows) {
    auto start = n_chunks_per_row[tid];
    auto end   = n_chunks_per_row[tid + 1];
    for (value_idx i = start; i < end; i++) {
      chunk_indices[i] = tid;
    }
  }
}
/**
 * Row iterator that splits each masked row into fixed-size chunks of nonzeros
 * (`row_chunk_size`), so that very long rows can be processed by multiple
 * blocks. Chunk bookkeeping (`n_chunks_per_row` prefix sums and the
 * chunk -> row table `chunk_indices`) must be precomputed with init().
 *
 * NOTE(review): `total_row_blocks` is an inline *static* member, i.e. shared
 * across all instances of the same template instantiation — concurrent init()
 * calls on different data would race on it. Confirm single-threaded usage.
 */
template <typename value_idx>
class chunked_mask_row_it : public mask_row_it<value_idx> {
 public:
  /**
   * @param full_indptr_ CSR row offsets (device pointer)
   * @param n_rows_ number of masked rows
   * @param mask_row_idx_ device array of masked row ids
   * @param row_chunk_size_ nonzeros per chunk
   * @param n_chunks_per_row_ prefix sums produced by init() (device pointer)
   * @param chunk_indices_ chunk -> row table produced by init() (device ptr)
   * @param stream_ stream all work is ordered on
   */
  chunked_mask_row_it(const value_idx* full_indptr_,
                      const value_idx& n_rows_,
                      value_idx* mask_row_idx_,
                      int row_chunk_size_,
                      const value_idx* n_chunks_per_row_,
                      const value_idx* chunk_indices_,
                      const cudaStream_t stream_)
    : mask_row_it<value_idx>(full_indptr_, n_rows_, mask_row_idx_),
      row_chunk_size(row_chunk_size_),
      n_chunks_per_row(n_chunks_per_row_),
      chunk_indices(chunk_indices_),
      stream(stream_)
  {
  }

  /**
   * Precomputes the chunk bookkeeping: per-row chunk counts, their inclusive
   * prefix sum (with leading 0) into `n_chunks_per_row`, the grand total into
   * the static `total_row_blocks`, and the chunk -> row table into
   * `chunk_indices` (resized as needed).
   */
  static void init(const value_idx* indptr,
                   const value_idx* mask_row_idx,
                   const value_idx& n_rows,
                   const int row_chunk_size,
                   rmm::device_uvector<value_idx>& n_chunks_per_row,
                   rmm::device_uvector<value_idx>& chunk_indices,
                   cudaStream_t stream)
  {
    auto policy = rmm::exec_policy(stream);

    // n_chunks_per_row[0] = 0, then ceil(degree / row_chunk_size) per masked
    // row, prefix-summed in place.
    constexpr value_idx first_element = 0;
    n_chunks_per_row.set_element_async(0, first_element, stream);
    n_chunks_per_row_functor chunk_functor(indptr, row_chunk_size);
    thrust::transform(
      policy, mask_row_idx, mask_row_idx + n_rows, n_chunks_per_row.begin() + 1, chunk_functor);

    thrust::inclusive_scan(
      policy, n_chunks_per_row.begin() + 1, n_chunks_per_row.end(), n_chunks_per_row.begin() + 1);

    // NOTE(review): update_host is stream-ordered, yet total_row_blocks is
    // read on the host inside fill_chunk_indices() with no visible stream
    // sync in between — confirm update_host (or an upstream caller)
    // synchronizes before the value is consumed.
    raft::update_host(&total_row_blocks, n_chunks_per_row.data() + n_rows, 1, stream);

    fill_chunk_indices(n_rows, n_chunks_per_row, chunk_indices, stream);
  }

  /** Maps this block to its row through the chunk -> row table and the mask. */
  __device__ inline value_idx get_row_idx(const int& n_blocks_nnz_b)
  {
    return this->mask_row_idx[chunk_indices[blockIdx.x / n_blocks_nnz_b]];
  }

  /**
   * Fills [start_offset, stop_offset] (stop inclusive) for this block's chunk
   * of the row, and flags whether it is the first and/or last chunk.
   */
  __device__ inline void get_row_offsets(const value_idx& row_idx,
                                         value_idx& start_offset,
                                         value_idx& stop_offset,
                                         const int& n_blocks_nnz_b,
                                         bool& first_a_chunk,
                                         bool& last_a_chunk)
  {
    auto chunk_index    = blockIdx.x / n_blocks_nnz_b;
    auto chunk_val      = chunk_indices[chunk_index];
    // How many chunks belong to rows before this one -> this chunk's ordinal
    // within its own row.
    auto prev_n_chunks  = n_chunks_per_row[chunk_val];
    auto relative_chunk = chunk_index - prev_n_chunks;
    first_a_chunk       = relative_chunk == 0;

    start_offset = this->full_indptr[row_idx] + relative_chunk * row_chunk_size;
    stop_offset  = start_offset + row_chunk_size;

    // Clamp the final chunk to the row end; stop_offset is inclusive.
    auto final_stop_offset = this->full_indptr[row_idx + 1];

    last_a_chunk = stop_offset >= final_stop_offset;
    stop_offset  = last_a_chunk ? final_stop_offset - 1 : stop_offset - 1;
  }

  /**
   * Derives the column-index window [start_index, stop_index] covered by this
   * chunk: interior chunks start one past the previous chunk's last column
   * and end at the first column of the next chunk.
   */
  __device__ inline void get_indices_boundary(const value_idx* indices,
                                              value_idx& row_idx,
                                              value_idx& start_offset,
                                              value_idx& stop_offset,
                                              value_idx& start_index,
                                              value_idx& stop_index,
                                              bool& first_a_chunk,
                                              bool& last_a_chunk)
  {
    start_index = first_a_chunk ? start_index : indices[start_offset - 1] + 1;
    stop_index  = last_a_chunk ? stop_index : indices[stop_offset];
  }

  /** True when column index_b falls inside this chunk's index window. */
  __device__ inline bool check_indices_bounds(value_idx& start_index_a,
                                              value_idx& stop_index_a,
                                              value_idx& index_b)
  {
    return (index_b >= start_index_a && index_b <= stop_index_a);
  }

  // Shared across instances of the same instantiation (see class NOTE).
  inline static value_idx total_row_blocks = 0;
  const cudaStream_t stream;
  // Non-owning device pointers populated by init().
  const value_idx *n_chunks_per_row, *chunk_indices;
  value_idx row_chunk_size;

  /** Functor: number of chunks needed for row i (ceil of degree / chunk). */
  struct n_chunks_per_row_functor {
   public:
    n_chunks_per_row_functor(const value_idx* indptr_, value_idx row_chunk_size_)
      : indptr(indptr_), row_chunk_size(row_chunk_size_)
    {
    }
    __host__ __device__ value_idx operator()(const value_idx& i)
    {
      auto degree = indptr[i + 1] - indptr[i];
      return raft::ceildiv(degree, (value_idx)row_chunk_size);
    }
    const value_idx* indptr;
    value_idx row_chunk_size;
  };

 private:
  /** Launches fill_chunk_indices_kernel to build the chunk -> row table. */
  static void fill_chunk_indices(const value_idx& n_rows,
                                 rmm::device_uvector<value_idx>& n_chunks_per_row,
                                 rmm::device_uvector<value_idx>& chunk_indices,
                                 cudaStream_t stream)
  {
    // NOTE(review): std::min(n_rows, 256) only deduces when value_idx is int;
    // other instantiations would need an explicit cast — confirm intended use.
    auto n_threads = std::min(n_rows, 256);
    auto n_blocks  = raft::ceildiv(n_rows, (value_idx)n_threads);

    // total_row_blocks was copied from device in init(); see the sync NOTE.
    chunk_indices.resize(total_row_blocks, stream);

    fill_chunk_indices_kernel<value_idx>
      <<<n_blocks, n_threads, 0, stream>>>(n_chunks_per_row.data(), chunk_indices.data(), n_rows);
  }
};
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv_strategies/hash_strategy.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "base_strategy.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <cuco/static_map.cuh>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>
// this is needed by cuco as key, value must be bitwise comparable.
// compilers don't declare float/double as bitwise comparable
// but that is too strict
// for example, the following is true (or 0):
// float a = 5;
// float b = 5;
// memcmp(&a, &b, sizeof(float));
CUCO_DECLARE_BITWISE_COMPARABLE(float);
CUCO_DECLARE_BITWISE_COMPARABLE(double);
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * COO SPMV dispatch strategy backed by a per-block cuco::static_map in shared
 * memory. Rows whose nonzero count fits within `capacity_threshold *
 * map_size` are processed whole ("less" path); longer rows are split into
 * chunks of that size via chunked_mask_row_it ("more" path).
 */
template <typename value_idx, typename value_t, int tpb>
class hash_strategy : public coo_spmv_strategy<value_idx, value_t, tpb> {
 public:
  // Block-scoped cuco hash map views: mutable for insertion, read-only for
  // lookup; smem_type is the raw slot array placed in shared memory.
  using insert_type =
    typename cuco::static_map<value_idx, value_t, cuda::thread_scope_block>::device_mutable_view;
  using smem_type = typename insert_type::slot_type*;
  using find_type =
    typename cuco::static_map<value_idx, value_t, cuda::thread_scope_block>::device_view;

  /**
   * @param config_ distance configuration (matrices, dims, handle)
   * @param capacity_threshold_ fraction of map_size a row may occupy before
   *        it is chunked (keeps the hash map's load factor bounded)
   * @param map_size_ number of map slots that fit in shared memory
   */
  hash_strategy(const distances_config_t<value_idx, value_t>& config_,
                float capacity_threshold_ = 0.5,
                int map_size_ = get_map_size())
    : coo_spmv_strategy<value_idx, value_t, tpb>(config_),
      capacity_threshold(capacity_threshold_),
      map_size(map_size_)
  {
  }

  /**
   * Partitions row ids into `mask_indptr`: first the rows whose degree fits
   * in the hash map, then the rows that must be chunked. The two partition
   * sizes are returned in `n_rows_divided` (fitting count, chunked count).
   */
  void chunking_needed(const value_idx* indptr,
                       const value_idx n_rows,
                       rmm::device_uvector<value_idx>& mask_indptr,
                       std::tuple<value_idx, value_idx>& n_rows_divided,
                       cudaStream_t stream)
  {
    auto policy = resource::get_thrust_policy(this->config.handle);

    // Rows with degree in [0, capacity_threshold * map_size).
    auto less = thrust::copy_if(policy,
                                thrust::make_counting_iterator(value_idx(0)),
                                thrust::make_counting_iterator(n_rows),
                                mask_indptr.data(),
                                fits_in_hash_table(indptr, 0, capacity_threshold * map_size));

    std::get<0>(n_rows_divided) = less - mask_indptr.data();

    // Remaining rows, appended right after the fitting ones.
    auto more = thrust::copy_if(
      policy,
      thrust::make_counting_iterator(value_idx(0)),
      thrust::make_counting_iterator(n_rows),
      less,
      fits_in_hash_table(
        indptr, capacity_threshold * map_size, std::numeric_limits<value_idx>::max()));

    std::get<1>(n_rows_divided) = more - less;
  }

  /**
   * Forward pass: rows of A are hashed into shared memory, nonzeros of B are
   * streamed. Small and chunked rows are dispatched in separate launches.
   */
  template <typename product_f, typename accum_f, typename write_f>
  void dispatch(value_t* out_dists,
                value_idx* coo_rows_b,
                product_f product_func,
                accum_f accum_func,
                write_f write_func,
                int chunk_size)
  {
    auto n_blocks_per_row = raft::ceildiv(this->config.b_nnz, chunk_size * tpb);
    rmm::device_uvector<value_idx> mask_indptr(this->config.a_nrows,
                                               resource::get_cuda_stream(this->config.handle));
    std::tuple<value_idx, value_idx> n_rows_divided;

    chunking_needed(this->config.a_indptr,
                    this->config.a_nrows,
                    mask_indptr,
                    n_rows_divided,
                    resource::get_cuda_stream(this->config.handle));

    // Rows that fit in the hash map: one launch over whole rows.
    auto less_rows = std::get<0>(n_rows_divided);
    if (less_rows > 0) {
      mask_row_it<value_idx> less(this->config.a_indptr, less_rows, mask_indptr.data());

      auto n_less_blocks = less_rows * n_blocks_per_row;
      this->_dispatch_base(*this,
                           map_size,
                           less,
                           out_dists,
                           coo_rows_b,
                           product_func,
                           accum_func,
                           write_func,
                           chunk_size,
                           n_less_blocks,
                           n_blocks_per_row);
    }

    // Oversized rows: build chunk bookkeeping, then launch one block group
    // per row chunk.
    auto more_rows = std::get<1>(n_rows_divided);
    if (more_rows > 0) {
      rmm::device_uvector<value_idx> n_chunks_per_row(
        more_rows + 1, resource::get_cuda_stream(this->config.handle));
      rmm::device_uvector<value_idx> chunk_indices(0,
                                                   resource::get_cuda_stream(this->config.handle));
      chunked_mask_row_it<value_idx>::init(this->config.a_indptr,
                                           mask_indptr.data() + less_rows,
                                           more_rows,
                                           capacity_threshold * map_size,
                                           n_chunks_per_row,
                                           chunk_indices,
                                           resource::get_cuda_stream(this->config.handle));

      chunked_mask_row_it<value_idx> more(this->config.a_indptr,
                                          more_rows,
                                          mask_indptr.data() + less_rows,
                                          capacity_threshold * map_size,
                                          n_chunks_per_row.data(),
                                          chunk_indices.data(),
                                          resource::get_cuda_stream(this->config.handle));

      auto n_more_blocks = more.total_row_blocks * n_blocks_per_row;
      this->_dispatch_base(*this,
                           map_size,
                           more,
                           out_dists,
                           coo_rows_b,
                           product_func,
                           accum_func,
                           write_func,
                           chunk_size,
                           n_more_blocks,
                           n_blocks_per_row);
    }
  }

  /**
   * Reverse pass (roles of A and B swapped): rows of B are hashed, nonzeros
   * of A are streamed. Mirrors dispatch() otherwise.
   */
  template <typename product_f, typename accum_f, typename write_f>
  void dispatch_rev(value_t* out_dists,
                    value_idx* coo_rows_a,
                    product_f product_func,
                    accum_f accum_func,
                    write_f write_func,
                    int chunk_size)
  {
    auto n_blocks_per_row = raft::ceildiv(this->config.a_nnz, chunk_size * tpb);
    rmm::device_uvector<value_idx> mask_indptr(this->config.b_nrows,
                                               resource::get_cuda_stream(this->config.handle));
    std::tuple<value_idx, value_idx> n_rows_divided;

    chunking_needed(this->config.b_indptr,
                    this->config.b_nrows,
                    mask_indptr,
                    n_rows_divided,
                    resource::get_cuda_stream(this->config.handle));

    auto less_rows = std::get<0>(n_rows_divided);
    if (less_rows > 0) {
      mask_row_it<value_idx> less(this->config.b_indptr, less_rows, mask_indptr.data());

      auto n_less_blocks = less_rows * n_blocks_per_row;
      this->_dispatch_base_rev(*this,
                               map_size,
                               less,
                               out_dists,
                               coo_rows_a,
                               product_func,
                               accum_func,
                               write_func,
                               chunk_size,
                               n_less_blocks,
                               n_blocks_per_row);
    }

    auto more_rows = std::get<1>(n_rows_divided);
    if (more_rows > 0) {
      rmm::device_uvector<value_idx> n_chunks_per_row(
        more_rows + 1, resource::get_cuda_stream(this->config.handle));
      rmm::device_uvector<value_idx> chunk_indices(0,
                                                   resource::get_cuda_stream(this->config.handle));
      chunked_mask_row_it<value_idx>::init(this->config.b_indptr,
                                           mask_indptr.data() + less_rows,
                                           more_rows,
                                           capacity_threshold * map_size,
                                           n_chunks_per_row,
                                           chunk_indices,
                                           resource::get_cuda_stream(this->config.handle));

      chunked_mask_row_it<value_idx> more(this->config.b_indptr,
                                          more_rows,
                                          mask_indptr.data() + less_rows,
                                          capacity_threshold * map_size,
                                          n_chunks_per_row.data(),
                                          chunk_indices.data(),
                                          resource::get_cuda_stream(this->config.handle));

      auto n_more_blocks = more.total_row_blocks * n_blocks_per_row;
      this->_dispatch_base_rev(*this,
                               map_size,
                               more,
                               out_dists,
                               coo_rows_a,
                               product_func,
                               accum_func,
                               write_func,
                               chunk_size,
                               n_more_blocks,
                               n_blocks_per_row);
    }
  }

  /**
   * Constructs the block's mutable hash-map view over the raw shared-memory
   * slots. Key -1 marks an empty slot, so -1 must never be a real column id.
   */
  __device__ inline insert_type init_insert(smem_type cache, const value_idx& cache_size)
  {
    return insert_type::make_from_uninitialized_slots(cooperative_groups::this_thread_block(),
                                                      cache,
                                                      cache_size,
                                                      cuco::sentinel::empty_key{value_idx{-1}},
                                                      cuco::sentinel::empty_value{value_t{0}});
  }

  /** Inserts (key, value); the success flag is intentionally ignored. */
  __device__ inline void insert(insert_type cache, const value_idx& key, const value_t& value)
  {
    auto success = cache.insert(cuco::pair<value_idx, value_t>(key, value));
  }

  /** Wraps the populated slots in a read-only view for lookups. */
  __device__ inline find_type init_find(smem_type cache, const value_idx& cache_size)
  {
    return find_type(cache,
                     cache_size,
                     cuco::sentinel::empty_key{value_idx{-1}},
                     cuco::sentinel::empty_value{value_t{0}});
  }

  /** Returns the value stored for `key`, or 0 when the key is absent. */
  __device__ inline value_t find(find_type cache, const value_idx& key)
  {
    auto a_pair = cache.find(key);

    value_t a_col = 0.0;

    if (a_pair != cache.end()) { a_col = a_pair->second; }

    return a_col;
  }

  /**
   * Predicate: true when row i's degree lies in [degree_l, degree_r).
   * Used with thrust::copy_if to partition rows in chunking_needed().
   */
  struct fits_in_hash_table {
   public:
    fits_in_hash_table(const value_idx* indptr_, value_idx degree_l_, value_idx degree_r_)
      : indptr(indptr_), degree_l(degree_l_), degree_r(degree_r_)
    {
    }

    __host__ __device__ bool operator()(const value_idx& i)
    {
      auto degree = indptr[i + 1] - indptr[i];

      return degree >= degree_l && degree < degree_r;
    }

   private:
    const value_idx* indptr;
    const value_idx degree_l, degree_r;
  };

  /**
   * Number of hash-map slots that fit in per-block shared memory after
   * reserving one value_t per warp of reduction scratch.
   */
  inline static int get_map_size()
  {
    return (raft::getSharedMemPerBlock() - ((tpb / raft::warp_size()) * sizeof(value_t))) /
           sizeof(typename insert_type::slot_type);
  }

 private:
  float capacity_threshold;  // max fraction of map_size a single row may use
  int map_size;              // hash-map slots per block
};
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail | rapidsai_public_repos/raft/cpp/include/raft/sparse/distance/detail/coo_spmv_strategies/base_strategy.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common.hpp"
#include "../coo_spmv_kernel.cuh"
#include "../utils.cuh"
#include "coo_mask_row_iterators.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace sparse {
namespace distance {
namespace detail {
/**
 * Base class for balanced COO SPMV dispatch strategies. Holds the shared
 * distance configuration and the per-block dynamic shared-memory budget, and
 * provides the two kernel-launch helpers used by the concrete strategies
 * (dense shared-memory and hash-table based).
 */
template <typename value_idx, typename value_t, int tpb>
class coo_spmv_strategy {
 public:
  coo_spmv_strategy(const distances_config_t<value_idx, value_t>& config_) : config(config_)
  {
    // Request the full per-block shared memory as the dynamic smem size for
    // every launch made by _dispatch_base / _dispatch_base_rev.
    smem = raft::getSharedMemPerBlock();
  }

  /**
   * Launches the forward SPMV kernel: rows of A (via `a_indptr`) are cached
   * per block using `strategy`'s insert/find interface, while the COO
   * nonzeros of B are streamed in chunks.
   *
   * @param strategy concrete strategy providing the smem cache interface
   * @param smem_dim cache capacity handed to the kernel (columns or map slots)
   * @param a_indptr row iterator over A (masked/chunked as needed)
   * @param out_dists dense output (a_nrows x b_nrows, device memory)
   * @param coo_rows_b COO row indices of B's nonzeros
   * @param product_func per-element product, accum_func reduction,
   *        write_func output write (e.g. atomic add)
   * @param chunk_size nonzeros of B processed per thread
   * @param n_blocks total blocks; n_blocks_per_row blocks per row of A
   */
  template <typename strategy_t,
            typename indptr_it,
            typename product_f,
            typename accum_f,
            typename write_f>
  void _dispatch_base(strategy_t& strategy,
                      int smem_dim,
                      indptr_it& a_indptr,
                      value_t* out_dists,
                      value_idx* coo_rows_b,
                      product_f product_func,
                      accum_f accum_func,
                      write_f write_func,
                      int chunk_size,
                      int n_blocks,
                      int n_blocks_per_row)
  {
    // Prefer shared memory over L1 since the kernel's cache lives in smem.
    RAFT_CUDA_TRY(cudaFuncSetCacheConfig(balanced_coo_generalized_spmv_kernel<strategy_t,
                                                                              indptr_it,
                                                                              value_idx,
                                                                              value_t,
                                                                              false,
                                                                              tpb,
                                                                              product_f,
                                                                              accum_f,
                                                                              write_f>,
                                         cudaFuncCachePreferShared));

    balanced_coo_generalized_spmv_kernel<strategy_t, indptr_it, value_idx, value_t, false, tpb>
      <<<n_blocks, tpb, smem, resource::get_cuda_stream(config.handle)>>>(strategy,
                                                                          a_indptr,
                                                                          config.a_indices,
                                                                          config.a_data,
                                                                          config.a_nnz,
                                                                          coo_rows_b,
                                                                          config.b_indices,
                                                                          config.b_data,
                                                                          config.a_nrows,
                                                                          config.b_nrows,
                                                                          smem_dim,
                                                                          config.b_nnz,
                                                                          out_dists,
                                                                          n_blocks_per_row,
                                                                          chunk_size,
                                                                          config.b_ncols,
                                                                          product_func,
                                                                          accum_func,
                                                                          write_func);
  }

  /**
   * Reverse-pass counterpart of _dispatch_base: roles of A and B are swapped
   * (rows of B cached, nonzeros of A streamed), selected via the `rev = true`
   * kernel template argument.
   */
  template <typename strategy_t,
            typename indptr_it,
            typename product_f,
            typename accum_f,
            typename write_f>
  void _dispatch_base_rev(strategy_t& strategy,
                          int smem_dim,
                          indptr_it& b_indptr,
                          value_t* out_dists,
                          value_idx* coo_rows_a,
                          product_f product_func,
                          accum_f accum_func,
                          write_f write_func,
                          int chunk_size,
                          int n_blocks,
                          int n_blocks_per_row)
  {
    RAFT_CUDA_TRY(cudaFuncSetCacheConfig(balanced_coo_generalized_spmv_kernel<strategy_t,
                                                                              indptr_it,
                                                                              value_idx,
                                                                              value_t,
                                                                              true,
                                                                              tpb,
                                                                              product_f,
                                                                              accum_f,
                                                                              write_f>,
                                         cudaFuncCachePreferShared));

    balanced_coo_generalized_spmv_kernel<strategy_t, indptr_it, value_idx, value_t, true, tpb>
      <<<n_blocks, tpb, smem, resource::get_cuda_stream(config.handle)>>>(strategy,
                                                                          b_indptr,
                                                                          config.b_indices,
                                                                          config.b_data,
                                                                          config.b_nnz,
                                                                          coo_rows_a,
                                                                          config.a_indices,
                                                                          config.a_data,
                                                                          config.b_nrows,
                                                                          config.a_nrows,
                                                                          smem_dim,
                                                                          config.a_nnz,
                                                                          out_dists,
                                                                          n_blocks_per_row,
                                                                          chunk_size,
                                                                          config.a_ncols,
                                                                          product_func,
                                                                          accum_func,
                                                                          write_func);
  }

 protected:
  int smem;  // dynamic shared-memory bytes per block used for every launch
  // Non-owning reference; the config must outlive the strategy.
  const distances_config_t<value_idx, value_t>& config;
};
} // namespace detail
} // namespace distance
} // namespace sparse
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/knn_graph.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/neighbors/detail/knn_graph.cuh>
#include <cstdint>
namespace raft::sparse::neighbors {
/**
 * Builds a (symmetrized) k-nearest-neighbors edge list from a dense input
 * matrix.
 *
 * Note: the resulting graph is not guaranteed to be connected.
 *
 * @tparam value_idx index type of the output edge list
 * @tparam value_t   value type of the input data and distances
 *
 * @param[in]  handle raft handle providing resources and stream ordering
 * @param[in]  X      dense matrix of samples/observations (device memory)
 * @param[in]  m      number of samples (rows) in X
 * @param[in]  n      number of observations (columns) in X
 * @param[in]  metric distance metric used to build neighborhoods
 * @param[out] out    COO edge list receiving the knn graph
 * @param[in]  c      implementation tuning parameter forwarded to
 *                    detail::knn_graph
 */
template <typename value_idx = int, typename value_t = float>
void knn_graph(raft::resources const& handle,
               const value_t* X,
               std::size_t m,
               std::size_t n,
               raft::distance::DistanceType metric,
               raft::sparse::COO<value_t, value_idx>& out,
               int c = 15)
{
  // Public API surface: forward directly to the detail implementation.
  detail::knn_graph(handle, X, m, n, metric, out, c);
}
}; // namespace raft::sparse::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/knn.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this header is deprecated and will be removed in a future
 * release. Please use raft/sparse/neighbors/brute_force.cuh instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the sparse/spatial version instead.")
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/sparse/neighbors/brute_force.cuh>
namespace raft::sparse::neighbors {
/**
 * Deprecated forwarding shim: searches a sparse index for the k-nearest
 * neighbors of a set of sparse query vectors using the given distance
 * implementation. Prefer raft::sparse::neighbors::brute_force::knn directly.
 *
 * @param[in]  idxIndptr     CSR indptr of the index matrix (n_idx_rows + 1)
 * @param[in]  idxIndices    CSR column indices of the index matrix (idxNNZ)
 * @param[in]  idxData       CSR values of the index matrix (idxNNZ)
 * @param[in]  idxNNZ        nonzero count of the index matrix
 * @param[in]  n_idx_rows    number of samples in the index matrix
 * @param[in]  n_idx_cols    number of features in the index matrix
 * @param[in]  queryIndptr   CSR indptr of the query matrix (n_query_rows + 1)
 * @param[in]  queryIndices  CSR column indices of the query matrix (queryNNZ)
 * @param[in]  queryData     CSR values of the query matrix (queryNNZ)
 * @param[in]  queryNNZ      nonzero count of the query matrix
 * @param[in]  n_query_rows  number of samples in the query matrix
 * @param[in]  n_query_cols  number of features in the query matrix
 * @param[out] output_indices dense output indices (n_query_rows x k)
 * @param[out] output_dists   dense output distances (n_query_rows x k)
 * @param[in]  k             neighbors to retrieve per query row
 * @param[in]  handle        raft handle ordering all work
 * @param[in]  batch_size_index max index rows per batch
 * @param[in]  batch_size_query max query rows per batch
 * @param[in]  metric        distance metric/measure
 * @param[in]  metricArg     metric argument (currently unused)
 */
template <typename value_idx = int, typename value_t = float, int TPB_X = 32>
void brute_force_knn(const value_idx* idxIndptr,
                     const value_idx* idxIndices,
                     const value_t* idxData,
                     size_t idxNNZ,
                     int n_idx_rows,
                     int n_idx_cols,
                     const value_idx* queryIndptr,
                     const value_idx* queryIndices,
                     const value_t* queryData,
                     size_t queryNNZ,
                     int n_query_rows,
                     int n_query_cols,
                     value_idx* output_indices,
                     value_t* output_dists,
                     int k,
                     raft::resources const& handle,
                     size_t batch_size_index = 2 << 14,  // approx 1M
                     size_t batch_size_query = 2 << 14,
                     raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded,
                     float metricArg = 0)
{
  // Pure pass-through; every argument is forwarded unchanged.
  brute_force::knn<value_idx, value_t>(idxIndptr, idxIndices, idxData, idxNNZ,
                                       n_idx_rows, n_idx_cols,
                                       queryIndptr, queryIndices, queryData, queryNNZ,
                                       n_query_rows, n_query_cols,
                                       output_indices, output_dists, k,
                                       handle,
                                       batch_size_index, batch_size_query,
                                       metric, metricArg);
}
}; // namespace raft::sparse::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/cross_component_nn.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/neighbors/detail/cross_component_nn.cuh>
namespace raft::sparse::neighbors {
template <typename value_idx, typename value_t>
using FixConnectivitiesRedOp = detail::FixConnectivitiesRedOp<value_idx, value_t>;
/**
 * Counts the number of distinct components in an array of colors/labels.
 * The labels need not be drawn from a monotonically increasing set.
 *
 * @tparam value_idx label/index type
 * @param[in] colors device array of component labels
 * @param[in] n_rows length of `colors`
 * @param[in] stream CUDA stream ordering the computation
 * @return number of unique components found
 */
template <typename value_idx>
value_idx get_n_components(value_idx* colors, size_t n_rows, cudaStream_t stream)
{
  // Public API surface: forward directly to the detail implementation.
  return detail::get_n_components(colors, n_rows, stream);
}
/**
* Connects the components of an otherwise unconnected knn graph
* by computing a 1-nn to neighboring components of each data point
* (e.g. component(nn) != component(self)) and reducing the results to
* include the set of smallest destination components for each source
* component. The result will not necessarily contain
* n_components^2 - n_components number of elements because many components
* will likely not be contained in the neighborhoods of 1-nns.
* @tparam value_idx
* @tparam value_t
* @param[in] handle raft handle
* @param[out] out output edge list containing nearest cross-component
* edges.
* @param[in] X original (row-major) dense matrix for which knn graph should be constructed.
* @param[in] orig_colors array containing component number for each row of X
* @param[in] n_rows number of rows in X
* @param[in] n_cols number of cols in X
* @param[in] reduction_op reduction operation for computing nearest neighbors. The reduction
* operation must have `gather` and `scatter` functions defined
* @param[in] row_batch_size the batch size for computing nearest neighbors. This parameter controls
* the number of samples for which the nearest neighbors are computed at once. Therefore, it affects
* the memory consumption mainly by reducing the size of the adjacency matrix for masked nearest
* neighbors computation
* @param[in] col_batch_size the input data is sorted and 'unsorted' based on color. An additional
* scratch space buffer of shape (n_rows, col_batch_size) is created for this. Usually, this
* parameter affects the memory consumption more drastically than the row_batch_size with a marginal
* increase in compute time as the col_batch_size is reduced
* @param[in] metric distance metric
*/
template <typename value_idx, typename value_t, typename red_op>
void cross_component_nn(
  raft::resources const& handle,
  raft::sparse::COO<value_t, value_idx>& out,
  const value_t* X,
  const value_idx* orig_colors,
  size_t n_rows,
  size_t n_cols,
  red_op reduction_op,
  size_t row_batch_size = 0,
  size_t col_batch_size = 0,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded)
{
  // Public entry point; all of the actual work happens in the detail namespace.
  detail::cross_component_nn<value_idx, value_t, red_op>(handle,
                                                         out,
                                                         X,
                                                         orig_colors,
                                                         n_rows,
                                                         n_cols,
                                                         reduction_op,
                                                         row_batch_size,
                                                         col_batch_size,
                                                         metric);
}
}; // end namespace raft::sparse::neighbors | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/specializations.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/brute_force.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/neighbors/detail/knn.cuh>
namespace raft::sparse::neighbors::brute_force {
/**
* Search the sparse kNN for the k-nearest neighbors of a set of sparse query vectors
* using some distance implementation
* @param[in] idxIndptr csr indptr of the index matrix (size n_idx_rows + 1)
* @param[in] idxIndices csr column indices array of the index matrix (size n_idx_nnz)
* @param[in] idxData csr data array of the index matrix (size idxNNZ)
* @param[in] idxNNZ number of non-zeros for sparse index matrix
* @param[in] n_idx_rows number of data samples in index matrix
 * @param[in] n_idx_cols number of features in index matrix
* @param[in] queryIndptr csr indptr of the query matrix (size n_query_rows + 1)
* @param[in] queryIndices csr indices array of the query matrix (size queryNNZ)
* @param[in] queryData csr data array of the query matrix (size queryNNZ)
* @param[in] queryNNZ number of non-zeros for sparse query matrix
* @param[in] n_query_rows number of data samples in query matrix
* @param[in] n_query_cols number of features in query matrix
* @param[out] output_indices dense matrix for output indices (size n_query_rows * k)
* @param[out] output_dists dense matrix for output distances (size n_query_rows * k)
* @param[in] k the number of neighbors to query
 * @param[in] handle raft resources handle; all operations are ordered with respect to its CUDA
 * stream
* @param[in] batch_size_index maximum number of rows to use from index matrix per batch
* @param[in] batch_size_query maximum number of rows to use from query matrix per batch
* @param[in] metric distance metric/measure to use
* @param[in] metricArg potential argument for metric (currently unused)
*/
template <typename value_idx = int, typename value_t = float>
void knn(const value_idx* idxIndptr,
         const value_idx* idxIndices,
         const value_t* idxData,
         size_t idxNNZ,
         int n_idx_rows,
         int n_idx_cols,
         const value_idx* queryIndptr,
         const value_idx* queryIndices,
         const value_t* queryData,
         size_t queryNNZ,
         int n_query_rows,
         int n_query_cols,
         value_idx* output_indices,
         value_t* output_dists,
         int k,
         raft::resources const& handle,
         size_t batch_size_index = 2 << 14,  // approx 1M
         size_t batch_size_query = 2 << 14,
         raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded,
         float metricArg = 0)
{
  // Build the batching sparse-knn object once, then execute the full search.
  detail::sparse_knn_t<value_idx, value_t> knn_search(idxIndptr,
                                                      idxIndices,
                                                      idxData,
                                                      idxNNZ,
                                                      n_idx_rows,
                                                      n_idx_cols,
                                                      queryIndptr,
                                                      queryIndices,
                                                      queryData,
                                                      queryNNZ,
                                                      n_query_rows,
                                                      n_query_cols,
                                                      output_indices,
                                                      output_dists,
                                                      k,
                                                      handle,
                                                      batch_size_index,
                                                      batch_size_query,
                                                      metric,
                                                      metricArg);
  knn_search.run();
}
}; // namespace raft::sparse::neighbors::brute_force
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/detail/knn_graph.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/distance/distance_types.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <algorithm>
#include <limits>
namespace raft::sparse::neighbors::detail {
/**
 * Fills `indices` such that element `tid` holds `tid / m`, i.e. the row index
 * of each entry of a dense, row-major result with `m` entries per row.
 * @tparam value_idx
 * @param indices output array (size nnz)
 * @param m number of entries per row (the divisor)
 * @param nnz total number of entries to write
 */
template <typename value_idx>
RAFT_KERNEL fill_indices(value_idx* indices, size_t m, size_t nnz)
{
  // One thread per output element, with a guard for the grid tail.
  value_idx idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < nnz) { indices[idx] = idx / m; }
}
template <typename value_idx>
value_idx build_k(value_idx n_samples, int c)
{
  // Heuristic from "kNN-MST-Agglomerative: A fast & scalable graph-based data
  // clustering approach on GPU": k = floor(log2(n_samples)) + c, bounded
  // below by 2 and above by n_samples.
  value_idx log_based     = (value_idx)floor(log2(n_samples)) + c;
  value_idx lower_bounded = std::max((value_idx)2, log_based);
  return std::min(n_samples, lower_bounded);
}
template <typename in_t, typename out_t>
RAFT_KERNEL conv_indices_kernel(in_t* inds, out_t* out, size_t nnz)
{
  // Element-wise type conversion: one thread per element with a tail guard.
  size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < nnz) { out[idx] = static_cast<out_t>(inds[idx]); }
}
/**
 * Converts an array of indices from `in_t` to `out_t` on the given stream
 * (e.g. narrowing the 64-bit knn output down to 32-bit graph indices).
 * @tparam in_t input index type
 * @tparam out_t output index type
 * @tparam tpb threads per block for the conversion kernel
 * @param inds device array of input indices (size `size`)
 * @param out device array of output indices (size `size`)
 * @param size number of elements to convert
 * @param stream cuda stream on which to order the conversion
 */
template <typename in_t, typename out_t, int tpb = 256>
void conv_indices(in_t* inds, out_t* out, size_t size, cudaStream_t stream)
{
  // Nothing to convert; also avoids an invalid 0-block kernel launch.
  if (size == 0) { return; }
  size_t blocks = ceildiv(size, (size_t)tpb);
  conv_indices_kernel<<<blocks, tpb, 0, stream>>>(inds, out, size);
  // Surface launch-configuration errors immediately instead of at the next
  // synchronizing call.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* Constructs a (symmetrized) knn graph edge list from
* dense input vectors.
*
* Note: The resulting KNN graph is not guaranteed to be connected.
*
* @tparam value_idx
* @tparam value_t
* @param[in] handle raft handle
* @param[in] X dense matrix of input data samples and observations
* @param[in] m number of data samples (rows) in X
* @param[in] n number of observations (columns) in X
* @param[in] metric distance metric to use when constructing neighborhoods
 * @param[out] out output COO edge list (symmetrized knn graph)
* @param c
*/
template <typename value_idx = int, typename value_t = float>
void knn_graph(raft::resources const& handle,
               const value_t* X,
               size_t m,
               size_t n,
               raft::distance::DistanceType metric,
               raft::sparse::COO<value_t, value_idx>& out,
               int c = 15)
{
  // Heuristically choose k from the number of samples and the tuning constant c.
  size_t k = build_k(m, c);

  auto stream = resource::get_cuda_stream(handle);

  // One (row, col, dist) triple per neighbor of every sample.
  size_t nnz = m * k;

  rmm::device_uvector<value_idx> rows(nnz, stream);
  rmm::device_uvector<value_idx> indices(nnz, stream);
  rmm::device_uvector<value_t> data(nnz, stream);

  size_t blocks = ceildiv(nnz, (size_t)256);
  // rows[i] = i / k, i.e. the source vertex of each knn edge.
  fill_indices<value_idx><<<blocks, 256, 0, stream>>>(rows.data(), k, nnz);

  std::vector<value_t*> inputs;
  inputs.push_back(const_cast<value_t*>(X));

  std::vector<size_t> sizes;
  sizes.push_back(m);

  // This is temporary. Once faiss is updated, we should be able to
  // pass value_idx through to knn.
  rmm::device_uvector<int64_t> int64_indices(nnz, stream);

  // Query X against itself: X serves as both the index and the query set.
  raft::spatial::knn::brute_force_knn<int64_t, value_t, size_t>(handle,
                                                                inputs,
                                                                sizes,
                                                                n,
                                                                const_cast<value_t*>(X),
                                                                m,
                                                                int64_indices.data(),
                                                                data.data(),
                                                                k,
                                                                true,
                                                                true,
                                                                nullptr,
                                                                metric);

  // convert from current knn's 64-bit to 32-bit.
  conv_indices(int64_indices.data(), indices.data(), nnz, stream);

  // Symmetrize the (rows, indices, data) edge list into the output COO graph.
  raft::sparse::linalg::symmetrize(
    handle, rows.data(), indices.data(), data.data(), m, k, nnz, out);
}
}; // namespace raft::sparse::neighbors::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/detail/knn.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <rmm/device_uvector.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/csr.hpp>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/distance/distance.cuh>
#include <raft/sparse/op/slice.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <algorithm>
namespace raft::sparse::neighbors::detail {
/**
 * Helper for slicing a CSR matrix into contiguous row batches.
 * Call set_batch() to select a batch, then get_batch_csr_indptr_nnz()
 * followed by get_batch_csr_indices_data() to materialize its CSR arrays.
 * @tparam value_idx index type of the CSR arrays
 * @tparam value_t value type of the CSR data array
 */
template <typename value_idx, typename value_t>
struct csr_batcher_t {
  /**
   * @param batch_size maximum number of rows per batch
   * @param n_rows total number of rows in the CSR matrix
   * @param csr_indptr CSR row-offsets array (size n_rows + 1)
   * @param csr_indices CSR column-indices array
   * @param csr_data CSR values array
   */
  csr_batcher_t(value_idx batch_size,
                value_idx n_rows,
                const value_idx* csr_indptr,
                const value_idx* csr_indices,
                const value_t* csr_data)
    // Initializer list ordered to match member declaration order below
    // (members are always initialized in declaration order; a mismatched
    // list triggers -Wreorder and can mask ordering dependencies).
    : batch_size_(batch_size),
      batch_start_(0),
      batch_stop_(0),
      batch_rows_(0),
      total_rows_(n_rows),
      csr_indptr_(csr_indptr),
      csr_indices_(csr_indices),
      csr_data_(csr_data),
      batch_csr_start_offset_(0),
      batch_csr_stop_offset_(0)
  {
  }

  /** Select batch `batch_num`, computing its inclusive [start, stop] row range
   *  and clamping the final batch to the total number of rows. */
  void set_batch(int batch_num)
  {
    batch_start_ = batch_num * batch_size_;
    batch_stop_  = batch_start_ + batch_size_ - 1;  // zero-based indexing

    if (batch_stop_ >= total_rows_) batch_stop_ = total_rows_ - 1;  // zero-based indexing

    batch_rows_ = (batch_stop_ - batch_start_) + 1;
  }

  /** Slice the indptr array for the current batch into `batch_indptr` and
   *  return the number of nonzeros in the batch. Must be called before
   *  get_batch_csr_indices_data(), as it populates the internal offsets. */
  value_idx get_batch_csr_indptr_nnz(value_idx* batch_indptr, cudaStream_t stream)
  {
    raft::sparse::op::csr_row_slice_indptr(batch_start_,
                                           batch_stop_,
                                           csr_indptr_,
                                           batch_indptr,
                                           &batch_csr_start_offset_,
                                           &batch_csr_stop_offset_,
                                           stream);

    return batch_csr_stop_offset_ - batch_csr_start_offset_;
  }

  /** Copy the column indices and values of the current batch into the given
   *  output arrays (sized by the nnz returned from get_batch_csr_indptr_nnz). */
  void get_batch_csr_indices_data(value_idx* csr_indices, value_t* csr_data, cudaStream_t stream)
  {
    raft::sparse::op::csr_row_slice_populate(batch_csr_start_offset_,
                                             batch_csr_stop_offset_,
                                             csr_indices_,
                                             csr_data_,
                                             csr_indices,
                                             csr_data,
                                             stream);
  }

  /** Number of rows in the current batch. */
  value_idx batch_rows() const { return batch_rows_; }

  /** First row (inclusive) of the current batch. */
  value_idx batch_start() const { return batch_start_; }

  /** Last row (inclusive) of the current batch. */
  value_idx batch_stop() const { return batch_stop_; }

 private:
  value_idx batch_size_;
  value_idx batch_start_;
  value_idx batch_stop_;
  value_idx batch_rows_;

  value_idx total_rows_;

  const value_idx* csr_indptr_;
  const value_idx* csr_indices_;
  const value_t* csr_data_;

  value_idx batch_csr_start_offset_;
  value_idx batch_csr_stop_offset_;
};
/**
 * Executes a batched brute-force k-nearest-neighbors search between a sparse
 * CSR index matrix and a sparse CSR query matrix. Query rows are processed in
 * batches; for each query batch, index batches are searched one at a time and
 * their k-selections are merged into a running result.
 */
template <typename value_idx, typename value_t>
class sparse_knn_t {
 public:
  // NOTE(review): the member-initializer list order below differs from the
  // member declaration order (-Wreorder); harmless here since every member is
  // initialized from its own constructor parameter.
  sparse_knn_t(const value_idx* idxIndptr_,
               const value_idx* idxIndices_,
               const value_t* idxData_,
               size_t idxNNZ_,
               int n_idx_rows_,
               int n_idx_cols_,
               const value_idx* queryIndptr_,
               const value_idx* queryIndices_,
               const value_t* queryData_,
               size_t queryNNZ_,
               int n_query_rows_,
               int n_query_cols_,
               value_idx* output_indices_,
               value_t* output_dists_,
               int k_,
               raft::resources const& handle_,
               size_t batch_size_index_ = 2 << 14,  // approx 1M
               size_t batch_size_query_ = 2 << 14,
               raft::distance::DistanceType metric_ = raft::distance::DistanceType::L2Expanded,
               float metricArg_ = 0)
    : idxIndptr(idxIndptr_),
      idxIndices(idxIndices_),
      idxData(idxData_),
      idxNNZ(idxNNZ_),
      n_idx_rows(n_idx_rows_),
      n_idx_cols(n_idx_cols_),
      queryIndptr(queryIndptr_),
      queryIndices(queryIndices_),
      queryData(queryData_),
      queryNNZ(queryNNZ_),
      n_query_rows(n_query_rows_),
      n_query_cols(n_query_cols_),
      output_indices(output_indices_),
      output_dists(output_dists_),
      k(k_),
      handle(handle_),
      batch_size_index(batch_size_index_),
      batch_size_query(batch_size_query_),
      metric(metric_),
      metricArg(metricArg_)
  {
  }

  /**
   * Runs the full batched search, writing k indices and distances per query
   * row into output_indices / output_dists.
   */
  void run()
  {
    using namespace raft::sparse;

    int n_batches_query = raft::ceildiv((size_t)n_query_rows, batch_size_query);
    csr_batcher_t<value_idx, value_t> query_batcher(
      batch_size_query, n_query_rows, queryIndptr, queryIndices, queryData);

    size_t rows_processed = 0;

    for (int i = 0; i < n_batches_query; i++) {
      /**
       * Compute index batch info
       */
      query_batcher.set_batch(i);

      /**
       * Slice CSR to rows in batch
       */
      rmm::device_uvector<value_idx> query_batch_indptr(query_batcher.batch_rows() + 1,
                                                        resource::get_cuda_stream(handle));

      value_idx n_query_batch_nnz = query_batcher.get_batch_csr_indptr_nnz(
        query_batch_indptr.data(), resource::get_cuda_stream(handle));

      rmm::device_uvector<value_idx> query_batch_indices(n_query_batch_nnz,
                                                         resource::get_cuda_stream(handle));
      rmm::device_uvector<value_t> query_batch_data(n_query_batch_nnz,
                                                    resource::get_cuda_stream(handle));

      query_batcher.get_batch_csr_indices_data(
        query_batch_indices.data(), query_batch_data.data(), resource::get_cuda_stream(handle));

      // A 3-partition temporary merge space to scale the batching. 2 parts for subsequent
      // batches and 1 space for the results of the merge, which get copied back to the top
      rmm::device_uvector<value_idx> merge_buffer_indices(0, resource::get_cuda_stream(handle));
      rmm::device_uvector<value_t> merge_buffer_dists(0, resource::get_cuda_stream(handle));

      value_t* dists_merge_buffer_ptr;
      value_idx* indices_merge_buffer_ptr;

      int n_batches_idx = raft::ceildiv((size_t)n_idx_rows, batch_size_index);
      csr_batcher_t<value_idx, value_t> idx_batcher(
        batch_size_index, n_idx_rows, idxIndptr, idxIndices, idxData);

      for (int j = 0; j < n_batches_idx; j++) {
        idx_batcher.set_batch(j);

        merge_buffer_indices.resize(query_batcher.batch_rows() * k * 3,
                                    resource::get_cuda_stream(handle));
        merge_buffer_dists.resize(query_batcher.batch_rows() * k * 3,
                                  resource::get_cuda_stream(handle));

        /**
         * Slice CSR to rows in batch
         */
        rmm::device_uvector<value_idx> idx_batch_indptr(idx_batcher.batch_rows() + 1,
                                                        resource::get_cuda_stream(handle));
        rmm::device_uvector<value_idx> idx_batch_indices(0, resource::get_cuda_stream(handle));
        rmm::device_uvector<value_t> idx_batch_data(0, resource::get_cuda_stream(handle));

        value_idx idx_batch_nnz = idx_batcher.get_batch_csr_indptr_nnz(
          idx_batch_indptr.data(), resource::get_cuda_stream(handle));

        idx_batch_indices.resize(idx_batch_nnz, resource::get_cuda_stream(handle));
        idx_batch_data.resize(idx_batch_nnz, resource::get_cuda_stream(handle));

        idx_batcher.get_batch_csr_indices_data(
          idx_batch_indices.data(), idx_batch_data.data(), resource::get_cuda_stream(handle));

        /**
         * Compute distances
         */
        uint64_t dense_size =
          (uint64_t)idx_batcher.batch_rows() * (uint64_t)query_batcher.batch_rows();
        rmm::device_uvector<value_t> batch_dists(dense_size, resource::get_cuda_stream(handle));

        RAFT_CUDA_TRY(cudaMemset(batch_dists.data(), 0, batch_dists.size() * sizeof(value_t)));

        compute_distances(idx_batcher,
                          query_batcher,
                          idx_batch_nnz,
                          n_query_batch_nnz,
                          idx_batch_indptr.data(),
                          idx_batch_indices.data(),
                          idx_batch_data.data(),
                          query_batch_indptr.data(),
                          query_batch_indices.data(),
                          query_batch_data.data(),
                          batch_dists.data());

        // Build batch indices array
        rmm::device_uvector<value_idx> batch_indices(batch_dists.size(),
                                                     resource::get_cuda_stream(handle));

        // populate batch indices array
        value_idx batch_rows = query_batcher.batch_rows(), batch_cols = idx_batcher.batch_rows();

        iota_fill(batch_indices.data(), batch_rows, batch_cols, resource::get_cuda_stream(handle));

        /**
         * Perform k-selection on batch & merge with other k-selections
         */
        size_t merge_buffer_offset = batch_rows * k;
        dists_merge_buffer_ptr     = merge_buffer_dists.data() + merge_buffer_offset;
        indices_merge_buffer_ptr   = merge_buffer_indices.data() + merge_buffer_offset;

        perform_k_selection(idx_batcher,
                            query_batcher,
                            batch_dists.data(),
                            batch_indices.data(),
                            dists_merge_buffer_ptr,
                            indices_merge_buffer_ptr);

        value_t* dists_merge_buffer_tmp_ptr     = dists_merge_buffer_ptr;
        value_idx* indices_merge_buffer_tmp_ptr = indices_merge_buffer_ptr;

        // Merge results of different batches if necessary
        if (idx_batcher.batch_start() > 0) {
          size_t merge_buffer_tmp_out  = batch_rows * k * 2;
          dists_merge_buffer_tmp_ptr   = merge_buffer_dists.data() + merge_buffer_tmp_out;
          indices_merge_buffer_tmp_ptr = merge_buffer_indices.data() + merge_buffer_tmp_out;

          merge_batches(idx_batcher,
                        query_batcher,
                        merge_buffer_dists.data(),
                        merge_buffer_indices.data(),
                        dists_merge_buffer_tmp_ptr,
                        indices_merge_buffer_tmp_ptr);
        }

        // copy merged output back into merge buffer partition for next iteration
        raft::copy_async<value_idx>(merge_buffer_indices.data(),
                                    indices_merge_buffer_tmp_ptr,
                                    batch_rows * k,
                                    resource::get_cuda_stream(handle));
        raft::copy_async<value_t>(merge_buffer_dists.data(),
                                  dists_merge_buffer_tmp_ptr,
                                  batch_rows * k,
                                  resource::get_cuda_stream(handle));
      }

      // Copy final merged batch to output array
      raft::copy_async<value_idx>(output_indices + (rows_processed * k),
                                  merge_buffer_indices.data(),
                                  query_batcher.batch_rows() * k,
                                  resource::get_cuda_stream(handle));
      raft::copy_async<value_t>(output_dists + (rows_processed * k),
                                merge_buffer_dists.data(),
                                query_batcher.batch_rows() * k,
                                resource::get_cuda_stream(handle));

      rows_processed += query_batcher.batch_rows();
    }
  }

 private:
  // Merges the k-selection of the current index batch with the accumulated
  // result of previous index batches (2 partitions in, 1 partition out).
  void merge_batches(csr_batcher_t<value_idx, value_t>& idx_batcher,
                     csr_batcher_t<value_idx, value_t>& query_batcher,
                     value_t* merge_buffer_dists,
                     value_idx* merge_buffer_indices,
                     value_t* out_dists,
                     value_idx* out_indices)
  {
    // build translation buffer to shift resulting indices by the batch
    std::vector<value_idx> id_ranges;
    id_ranges.push_back(0);
    id_ranges.push_back(idx_batcher.batch_start());

    rmm::device_uvector<value_idx> trans(id_ranges.size(), resource::get_cuda_stream(handle));
    raft::update_device(
      trans.data(), id_ranges.data(), id_ranges.size(), resource::get_cuda_stream(handle));

    // combine merge buffers only if there's more than 1 partition to combine
    raft::spatial::knn::knn_merge_parts(merge_buffer_dists,
                                        merge_buffer_indices,
                                        out_dists,
                                        out_indices,
                                        query_batcher.batch_rows(),
                                        2,
                                        k,
                                        resource::get_cuda_stream(handle),
                                        trans.data());
  }

  // Selects the (up to) k nearest entries per query row from the dense
  // batch_dists matrix into the given output buffers.
  void perform_k_selection(csr_batcher_t<value_idx, value_t> idx_batcher,
                           csr_batcher_t<value_idx, value_t> query_batcher,
                           value_t* batch_dists,
                           value_idx* batch_indices,
                           value_t* out_dists,
                           value_idx* out_indices)
  {
    // populate batch indices array
    value_idx batch_rows = query_batcher.batch_rows(), batch_cols = idx_batcher.batch_rows();

    // build translation buffer to shift resulting indices by the batch
    std::vector<value_idx> id_ranges;
    id_ranges.push_back(0);
    id_ranges.push_back(idx_batcher.batch_start());

    // in the case where the number of idx rows in the batch is < k, we
    // want to adjust k.
    value_idx n_neighbors = std::min(static_cast<value_idx>(k), batch_cols);

    bool ascending = raft::distance::is_min_close(metric);

    // kernel to slice first (min) k cols and copy into batched merge buffer
    raft::spatial::knn::select_k(batch_dists,
                                 batch_indices,
                                 batch_rows,
                                 batch_cols,
                                 out_dists,
                                 out_indices,
                                 ascending,
                                 n_neighbors,
                                 resource::get_cuda_stream(handle));
  }

  // Computes the dense pairwise-distance matrix between the current query
  // batch (rows) and index batch (columns) via sparse pairwise distances.
  void compute_distances(csr_batcher_t<value_idx, value_t>& idx_batcher,
                         csr_batcher_t<value_idx, value_t>& query_batcher,
                         size_t idx_batch_nnz,
                         size_t query_batch_nnz,
                         value_idx* idx_batch_indptr,
                         value_idx* idx_batch_indices,
                         value_t* idx_batch_data,
                         value_idx* query_batch_indptr,
                         value_idx* query_batch_indices,
                         value_t* query_batch_data,
                         value_t* batch_dists)
  {
    /**
     * Compute distances
     */
    raft::sparse::distance::detail::distances_config_t<value_idx, value_t> dist_config(handle);
    dist_config.b_nrows   = idx_batcher.batch_rows();
    dist_config.b_ncols   = n_idx_cols;
    dist_config.b_nnz     = idx_batch_nnz;
    dist_config.b_indptr  = idx_batch_indptr;
    dist_config.b_indices = idx_batch_indices;
    dist_config.b_data    = idx_batch_data;

    dist_config.a_nrows   = query_batcher.batch_rows();
    dist_config.a_ncols   = n_query_cols;
    dist_config.a_nnz     = query_batch_nnz;
    dist_config.a_indptr  = query_batch_indptr;
    dist_config.a_indices = query_batch_indices;
    dist_config.a_data    = query_batch_data;

    if (raft::sparse::distance::supportedDistance.find(metric) ==
        raft::sparse::distance::supportedDistance.end())
      THROW("DistanceType not supported: %d", metric);

    raft::sparse::distance::pairwiseDistance(batch_dists, dist_config, metric, metricArg);
  }

  const value_idx *idxIndptr, *idxIndices, *queryIndptr, *queryIndices;
  value_idx* output_indices;
  const value_t *idxData, *queryData;
  value_t* output_dists;

  size_t idxNNZ, queryNNZ, batch_size_index, batch_size_query;

  raft::distance::DistanceType metric;

  float metricArg;

  int n_idx_rows, n_idx_cols, n_query_rows, n_query_cols, k;

  raft::resources const& handle;
};
}; // namespace raft::sparse::neighbors::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors | rapidsai_public_repos/raft/cpp/include/raft/sparse/neighbors/detail/cross_component_nn.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <cub/cub.cuh>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/masked_nn.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/matrix/scatter.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/sparse/op/reduce.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/fast_int_div.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <thrust/gather.h>
#include <thrust/scatter.h>
#include <cub/cub.cuh>
#include <limits>
namespace raft::sparse::neighbors::detail {
/**
* Base functor with reduction ops for performing masked 1-nn
* computation.
* @tparam value_idx
* @tparam value_t
*/
template <typename value_idx, typename value_t>
struct FixConnectivitiesRedOp {
  // row-index threshold: rows with index >= m never win the reduction
  value_idx m;

  // default constructor for cutlass
  DI FixConnectivitiesRedOp() : m(0) {}

  FixConnectivitiesRedOp(value_idx m_) : m(m_){};

  typedef typename raft::KeyValuePair<value_idx, value_t> KVP;

  // Reduce `other` into `out` for row `rit`: keep the candidate with the
  // smaller distance. Rows with rit >= m are left untouched.
  DI void operator()(value_idx rit, KVP* out, const KVP& other) const
  {
    if (rit < m && other.value < out->value) {
      out->key   = other.key;
      out->value = other.value;
    }
  }

  // Return whichever of `a`/`b` has the smaller distance for row `rit`;
  // rows with rit >= m always yield `b`.
  DI KVP operator()(value_idx rit, const KVP& a, const KVP& b) const
  {
    if (rit < m && a.value < b.value) {
      return a;
    } else
      return b;
  }

  // Initialize an output (plain distance or key-value pair) to the sentinel
  // maxVal; the KVP overload also marks the key invalid with -1.
  DI void init(value_t* out, value_t maxVal) const { *out = maxVal; }
  DI void init(KVP* out, value_t maxVal) const
  {
    out->key   = -1;
    out->value = maxVal;
  }

  // Seed the key of an output (no-op for the plain-distance overload).
  DI void init_key(value_t& out, value_idx idx) const { return; }
  DI void init_key(KVP& out, value_idx idx) const { out.key = idx; }

  // Extract the distance from either output representation.
  DI value_t get_value(KVP& out) const { return out.value; }
  DI value_t get_value(value_t& out) const { return out; }

  /** The gather and scatter ensure that operator() is still consistent after rearranging the data.
   * TODO (tarang-jain): refactor cross_component_nn API to separate out the gather and scatter
   * functions from the reduction op. Reference: https://github.com/rapidsai/raft/issues/1614 */
  void gather(const raft::resources& handle, value_idx* map) {}
  void scatter(const raft::resources& handle, value_idx* map) {}
};
/**
* Assumes 3-iterator tuple containing COO rows, cols, and
* a cub keyvalue pair object. Sorts the 3 arrays in
* ascending order: row->col->keyvaluepair
*/
struct TupleComp {
  template <typename one, typename two>
  __host__ __device__ bool operator()(const one& t1, const two& t2)
  {
    // sort first by each sample's color,
    if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true;
    if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false;

    // then by the color of each sample's closest neighbor,
    if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true;
    if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false;

    // then tie-break by the neighbor distance in ascending order.
    // (NOTE(review): the previous comment said "descending", but `<` on
    // .value sorts ascending — smallest distance first.)
    return thrust::get<2>(t1).value < thrust::get<2>(t2).value;
  }
};
template <typename LabelT, typename DataT>
struct CubKVPMinReduce {
  typedef raft::KeyValuePair<LabelT, DataT> KVP;

  // Keyed overload: the label argument is unused; only values are compared.
  DI KVP
  operator()(LabelT rit, const KVP& a, const KVP& b)
  {
    return b.value < a.value ? b : a;
  }

  // Return whichever pair has the strictly smaller value (ties keep `a`).
  DI KVP
  operator()(const KVP& a, const KVP& b)
  {
    return b.value < a.value ? b : a;
  }

};  // KVPMinReduce
/**
* Gets the number of unique components from array of
* colors or labels. This does not assume the components are
* drawn from a monotonically increasing set.
* @tparam value_idx
* @param[in] colors array of components
* @param[in] n_rows size of components array
* @param[in] stream cuda stream for which to order cuda operations
* @return total number of components
*/
template <typename value_idx>
value_idx get_n_components(value_idx* colors, size_t n_rows, cudaStream_t stream)
{
  // getUniquelabels sizes `unique_labels` internally and returns the count of
  // distinct labels found in `colors`.
  rmm::device_uvector<value_idx> unique_labels(0, stream);
  return raft::label::getUniquelabels(unique_labels, colors, n_rows, stream);
}
/**
* Functor to look up a component for a vertex
* @tparam value_idx
* @tparam value_t
*/
template <typename value_idx, typename value_t>
struct LookupColorOp {
  // device array mapping vertex id -> component (color)
  value_idx* colors;

  LookupColorOp(value_idx* colors_) : colors(colors_) {}

  // Return the component of the vertex stored in the pair's key.
  DI value_idx
  operator()(const raft::KeyValuePair<value_idx, value_t>& kvp)
  {
    return colors[kvp.key];
  }
};
/**
* Compute the cross-component 1-nearest neighbors for each row in X using
* the given array of components
* @tparam value_idx
* @tparam value_t
* @param[in] handle raft handle
* @param[out] kvp mapping of closest neighbor vertex and distance for each vertex in the given
* array of components
* @param[out] nn_colors components of nearest neighbors for each vertex
* @param[in] colors components of each vertex
* @param[in] X original dense data
* @param[in] n_rows number of rows in original dense data
* @param[in] n_cols number of columns in original dense data
* @param[in] row_batch_size row batch size for computing nearest neighbors
* @param[in] col_batch_size column batch size for sorting and 'unsorting'
* @param[in] reduction_op reduction operation for computing nearest neighbors
*/
template <typename value_idx, typename value_t, typename red_op>
void perform_1nn(raft::resources const& handle,
                 raft::KeyValuePair<value_idx, value_t>* kvp,
                 value_idx* nn_colors,
                 value_idx* colors,
                 const value_t* X,
                 size_t n_rows,
                 size_t n_cols,
                 size_t row_batch_size,
                 size_t col_batch_size,
                 red_op reduction_op)
{
  auto stream      = resource::get_cuda_stream(handle);
  auto exec_policy = resource::get_thrust_policy(handle);

  // Build an identity permutation and sort it by color so that rows of the
  // same component become contiguous; after the sort, sort_plan[i] is the
  // original index of the i-th row in color-sorted order.
  auto sort_plan = raft::make_device_vector<value_idx>(handle, (value_idx)n_rows);
  raft::linalg::map_offset(handle, sort_plan.view(), [] __device__(value_idx idx) { return idx; });

  thrust::sort_by_key(
    resource::get_thrust_policy(handle), colors, colors + n_rows, sort_plan.data_handle());

  // Modify the reduction operation based on the sort plan.
  reduction_op.gather(handle, sort_plan.data_handle());

  // Permute the rows of X in place to match the color-sorted order.
  auto X_mutable_view =
    raft::make_device_matrix_view<value_t, value_idx>(const_cast<value_t*>(X), n_rows, n_cols);
  auto sort_plan_const_view =
    raft::make_device_vector_view<const value_idx, value_idx>(sort_plan.data_handle(), n_rows);
  raft::matrix::gather(handle, X_mutable_view, sort_plan_const_view, (value_idx)col_batch_size);

  // Get the number of unique components from the array of colors
  value_idx n_components = get_n_components(colors, n_rows, stream);

  // colors_group_idxs is an array containing the *end* indices of each color
  // component in colors. That is, the value of colors_group_idxs[j] indicates
  // the start of color j + 1, i.e., it is the inclusive scan of the sizes of
  // the color components.
  auto colors_group_idxs = raft::make_device_vector<value_idx, value_idx>(handle, n_components + 1);
  raft::sparse::convert::sorted_coo_to_csr(
    colors, n_rows, colors_group_idxs.data_handle(), n_components + 1, stream);

  auto group_idxs_view = raft::make_device_vector_view<const value_idx, value_idx>(
    colors_group_idxs.data_handle() + 1, n_components);

  // Row norms are required by the masked L2 nearest-neighbor primitive below.
  auto x_norm = raft::make_device_vector<value_t, value_idx>(handle, (value_idx)n_rows);
  raft::linalg::rowNorm(
    x_norm.data_handle(), X, n_cols, n_rows, raft::linalg::L2Norm, true, stream);

  // Adjacency mask sized for one row batch; reused across batches.
  auto adj = raft::make_device_matrix<bool, value_idx>(handle, row_batch_size, n_components);

  using OutT   = raft::KeyValuePair<value_idx, value_t>;
  using ParamT = raft::distance::masked_l2_nn_params<red_op, red_op>;

  bool apply_sqrt      = true;
  bool init_out_buffer = true;
  ParamT params{reduction_op, reduction_op, apply_sqrt, init_out_buffer};

  auto X_full_view = raft::make_device_matrix_view<const value_t, value_idx>(X, n_rows, n_cols);

  // Process rows in batches to bound the size of the adjacency matrix.
  size_t n_batches = raft::ceildiv(n_rows, row_batch_size);

  for (size_t bid = 0; bid < n_batches; bid++) {
    size_t batch_offset   = bid * row_batch_size;
    size_t rows_per_batch = min(row_batch_size, n_rows - batch_offset);

    auto X_batch_view = raft::make_device_matrix_view<const value_t, value_idx>(
      X + batch_offset * n_cols, rows_per_batch, n_cols);

    auto x_norm_batch_view = raft::make_device_vector_view<const value_t, value_idx>(
      x_norm.data_handle() + batch_offset, rows_per_batch);

    // Mask out each row's own component so that only cross-component
    // neighbors are considered.
    auto mask_op = [colors,
                    n_components = raft::util::FastIntDiv(n_components),
                    batch_offset] __device__(value_idx idx) {
      value_idx row = idx / n_components;
      value_idx col = idx % n_components;
      return colors[batch_offset + row] != col;
    };

    auto adj_vector_view = raft::make_device_vector_view<bool, value_idx>(
      adj.data_handle(), rows_per_batch * n_components);

    raft::linalg::map_offset(handle, adj_vector_view, mask_op);

    auto adj_view = raft::make_device_matrix_view<const bool, value_idx>(
      adj.data_handle(), rows_per_batch, n_components);

    auto kvp_view =
      raft::make_device_vector_view<raft::KeyValuePair<value_idx, value_t>, value_idx>(
        kvp + batch_offset, rows_per_batch);

    raft::distance::masked_l2_nn<value_t, OutT, value_idx, red_op, red_op>(handle,
                                                                           params,
                                                                           X_batch_view,
                                                                           X_full_view,
                                                                           x_norm_batch_view,
                                                                           x_norm.view(),
                                                                           adj_view,
                                                                           group_idxs_view,
                                                                           kvp_view);
  }

  // Transform the keys so that they correctly point to the unpermuted indices.
  thrust::transform(exec_policy,
                    kvp,
                    kvp + n_rows,
                    kvp,
                    [sort_plan = sort_plan.data_handle()] __device__(OutT KVP) {
                      OutT res;
                      res.value = KVP.value;
                      res.key   = sort_plan[KVP.key];

                      return res;
                    });

  // Undo permutation of the rows of X by scattering in place.
  raft::matrix::scatter(handle, X_mutable_view, sort_plan_const_view, (value_idx)col_batch_size);

  // Undo permutation of the key-value pair and color vectors. This is not done
  // inplace, so using two temporary vectors.
  auto tmp_colors = raft::make_device_vector<value_idx>(handle, n_rows);
  auto tmp_kvp    = raft::make_device_vector<OutT>(handle, n_rows);

  thrust::scatter(exec_policy, kvp, kvp + n_rows, sort_plan.data_handle(), tmp_kvp.data_handle());
  thrust::scatter(
    exec_policy, colors, colors + n_rows, sort_plan.data_handle(), tmp_colors.data_handle());

  reduction_op.scatter(handle, sort_plan.data_handle());

  raft::copy_async(colors, tmp_colors.data_handle(), n_rows, stream);
  raft::copy_async(kvp, tmp_kvp.data_handle(), n_rows, stream);

  // Finally, record the component of each vertex's nearest cross-component
  // neighbor into nn_colors.
  LookupColorOp<value_idx, value_t> extract_colors_op(colors);
  thrust::transform(exec_policy, kvp, kvp + n_rows, nn_colors, extract_colors_op);
}
/**
 * Sort nearest neighboring components wrt component of source vertices
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle (supplies the thrust execution policy / stream)
 * @param[inout] colors components array of source vertices
 * @param[inout] nn_colors nearest neighbors components array
 * @param[inout] kvp nearest neighbor source vertex / distance array
 * @param[inout] src_indices array of source vertex indices which will become arg_sort
 *               indices
 * @param n_rows number of entries in each of the arrays above
 */
template <typename value_idx, typename value_t>
void sort_by_color(raft::resources const& handle,
                   value_idx* colors,
                   value_idx* nn_colors,
                   raft::KeyValuePair<value_idx, value_t>* kvp,
                   value_idx* src_indices,
                   size_t n_rows)
{
  auto exec_policy = resource::get_thrust_policy(handle);
  // src_indices starts as the identity permutation [0, n_rows); sort_by_key
  // below permutes it into the arg-sort of (colors, nn_colors, kvp).
  thrust::counting_iterator<value_idx> arg_sort_iter(0);
  thrust::copy(exec_policy, arg_sort_iter, arg_sort_iter + n_rows, src_indices);
  auto keys = thrust::make_zip_iterator(
    thrust::make_tuple(colors, nn_colors, (KeyValuePair<value_idx, value_t>*)kvp));
  auto vals = thrust::make_zip_iterator(thrust::make_tuple(src_indices));
  // get all the colors in contiguous locations so we can map them to warps.
  thrust::sort_by_key(exec_policy, keys, keys + n_rows, vals, TupleComp());
}
/**
 * Kernel: write one (row, col, val) edge per unique value of out_index.
 * Only the first entry of each run of equal out_index values writes its slot
 * (the caller pre-sorts the input so this entry is the one to keep; see
 * min_components_by_color).
 * Launch: 1D grid covering nnz threads, one thread per input entry.
 */
template <typename value_idx, typename value_t>
RAFT_KERNEL min_components_by_color_kernel(value_idx* out_rows,
                                           value_idx* out_cols,
                                           value_t* out_vals,
                                           const value_idx* out_index,
                                           const value_idx* indices,
                                           const raft::KeyValuePair<value_idx, value_t>* kvp,
                                           size_t nnz)
{
  // Promote blockIdx.x before the multiply so the flat index cannot overflow
  // 32-bit arithmetic for very large nnz.
  size_t tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;

  if (tid >= nnz) return;

  // Use value_idx (not int) to avoid narrowing when value_idx is 64-bit.
  value_idx idx = out_index[tid];

  // Write only at the start of each run of equal out_index values.
  if ((tid == 0 || (out_index[tid - 1] != idx))) {
    out_rows[idx] = indices[tid];
    out_cols[idx] = kvp[tid].key;
    out_vals[idx] = kvp[tid].value;
  }
}
/**
 * Computes the min set of unique components that neighbor the
 * components of each source vertex.
 * @tparam value_idx
 * @tparam value_t
 * @param[out] coo output edge list
 * @param[in] out_index output indptr for ordering edge list
 * @param[in] indices indices of source vertices for each component
 * @param[in] kvp indices and distances of each destination vertex for each component
 * @param[in] nnz number of entries in indices/kvp (one per source vertex)
 * @param[in] stream cuda stream for which to order cuda operations
 */
template <typename value_idx, typename value_t>
void min_components_by_color(raft::sparse::COO<value_t, value_idx>& coo,
                             const value_idx* out_index,
                             const value_idx* indices,
                             const raft::KeyValuePair<value_idx, value_t>* kvp,
                             size_t nnz,
                             cudaStream_t stream)
{
  /**
   * Arrays should be ordered by: colors_indptr->colors_n->kvp.value
   * so the last element of each column in the input CSR should be
   * the min.
   */
  min_components_by_color_kernel<<<raft::ceildiv(nnz, (size_t)256), 256, 0, stream>>>(
    coo.rows(), coo.cols(), coo.vals(), out_index, indices, kvp, nnz);
}
/**
* Connects the components of an otherwise unconnected knn graph
* by computing a 1-nn to neighboring components of each data point
* (e.g. component(nn) != component(self)) and reducing the results to
* include the set of smallest destination components for each source
* component. The result will not necessarily contain
* n_components^2 - n_components number of elements because many components
* will likely not be contained in the neighborhoods of 1-nns.
* @tparam value_idx
* @tparam value_t
* @param[in] handle raft handle
* @param[out] out output edge list containing nearest cross-component
* edges.
* @param[in] X original (row-major) dense matrix for which knn graph should be constructed.
* @param[in] orig_colors array containing component number for each row of X
* @param[in] n_rows number of rows in X
* @param[in] n_cols number of cols in X
* @param[in] reduction_op reduction operation for computing nearest neighbors. The reduction
* operation must have `gather` and `scatter` functions defined
* @param[in] row_batch_size the batch size for computing nearest neighbors. This parameter controls
* the number of samples for which the nearest neighbors are computed at once. Therefore, it affects
* the memory consumption mainly by reducing the size of the adjacency matrix for masked nearest
* neighbors computation. default 0 indicates that no batching is done
* @param[in] col_batch_size the input data is sorted and 'unsorted' based on color. An additional
* scratch space buffer of shape (n_rows, col_batch_size) is created for this. Usually, this
* parameter affects the memory consumption more drastically than the col_batch_size with a marginal
* increase in compute time as the col_batch_size is reduced. default 0 indicates that no batching
* is done
* @param[in] metric distance metric
*/
template <typename value_idx, typename value_t, typename red_op>
void cross_component_nn(
  raft::resources const& handle,
  raft::sparse::COO<value_t, value_idx>& out,
  const value_t* X,
  const value_idx* orig_colors,
  size_t n_rows,
  size_t n_cols,
  red_op reduction_op,
  size_t row_batch_size,
  size_t col_batch_size,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded)
{
  auto stream = resource::get_cuda_stream(handle);
  RAFT_EXPECTS(metric == raft::distance::DistanceType::L2SqrtExpanded,
               "Fixing connectivities for an unconnected k-NN graph only "
               "supports L2SqrtExpanded currently.");
  // A batch size of 0 (or one exceeding the data extent) disables batching.
  if (row_batch_size == 0 || row_batch_size > n_rows) { row_batch_size = n_rows; }
  if (col_batch_size == 0 || col_batch_size > n_cols) { col_batch_size = n_cols; }
  rmm::device_uvector<value_idx> colors(n_rows, stream);
  // Normalize colors so they are drawn from a monotonically increasing set
  constexpr bool zero_based = true;
  raft::label::make_monotonic(
    colors.data(), const_cast<value_idx*>(orig_colors), n_rows, stream, zero_based);
  /**
   * First compute 1-nn for all colors where the color of each data point
   * is guaranteed to be != color of its nearest neighbor.
   */
  rmm::device_uvector<value_idx> nn_colors(n_rows, stream);
  rmm::device_uvector<raft::KeyValuePair<value_idx, value_t>> temp_inds_dists(n_rows, stream);
  rmm::device_uvector<value_idx> src_indices(n_rows, stream);
  perform_1nn(handle,
              temp_inds_dists.data(),
              nn_colors.data(),
              colors.data(),
              X,
              n_rows,
              n_cols,
              row_batch_size,
              col_batch_size,
              reduction_op);
  /**
   * Sort data points by color (neighbors are not sorted)
   */
  // max_color + 1 = number of connected components
  // sort nn_colors by key w/ original colors
  sort_by_color(
    handle, colors.data(), nn_colors.data(), temp_inds_dists.data(), src_indices.data(), n_rows);
  /**
   * Take the min for any duplicate colors
   */
  // Compute mask of duplicates
  rmm::device_uvector<value_idx> out_index(n_rows + 1, stream);
  raft::sparse::op::compute_duplicates_mask(
    out_index.data(), colors.data(), nn_colors.data(), n_rows, stream);
  // Turn the duplicates mask into output offsets for the edge list.
  thrust::exclusive_scan(resource::get_thrust_policy(handle),
                         out_index.data(),
                         out_index.data() + out_index.size(),
                         out_index.data());
  // compute final size (copy the last scan value to host and wait for it)
  value_idx size = 0;
  raft::update_host(&size, out_index.data() + (out_index.size() - 1), 1, stream);
  resource::sync_stream(handle, stream);
  size++;
  raft::sparse::COO<value_t, value_idx> min_edges(stream);
  min_edges.allocate(size, n_rows, n_rows, true, stream);
  min_components_by_color(
    min_edges, out_index.data(), src_indices.data(), temp_inds_dists.data(), n_rows, stream);
  /**
   * Symmetrize resulting edge list
   */
  raft::sparse::linalg::symmetrize(
    handle, min_edges.rows(), min_edges.cols(), min_edges.vals(), n_rows, n_rows, size, out);
}
}; // end namespace raft::sparse::neighbors::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/degree.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_DEGREE_H
#define __SPARSE_DEGREE_H
#pragma once
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/detail/degree.cuh>
namespace raft {
namespace sparse {
namespace linalg {
/**
 * @brief Count the number of values for each row
 * @tparam T: type of the COO rows array and the output counts
 * @param rows: rows array of the COO matrix
 * @param nnz: size of the rows array
 * @param results: output result array (one count per row)
 * @param stream: cuda stream to use
 */
template <typename T = int>
void coo_degree(const T* rows, int nnz, T* results, cudaStream_t stream)
{
  detail::coo_degree<64, T>(rows, nnz, results, stream);
}
/**
 * @brief Count the number of values for each row
 * @tparam T: type name of underlying values array
 * @param in: input COO object for counting rows
 * @param results: output array with row counts (size=in->n_rows)
 * @param stream: cuda stream to use
 */
template <typename T>
void coo_degree(COO<T>* in, int* results, cudaStream_t stream)
{
  coo_degree(in->rows(), in->nnz, results, stream);
}
/**
 * @brief Count the number of values for each row that doesn't match a particular scalar
 * @tparam T: the type name of the underlying value arrays
 * @param rows: Input COO row array
 * @param vals: Input COO val arrays
 * @param nnz: size of input COO arrays
 * @param scalar: scalar to match for counting rows
 * @param results: output row counts
 * @param stream: cuda stream to use (defaults to the legacy default stream)
 */
template <typename T>
void coo_degree_scalar(
  const int* rows, const T* vals, int nnz, T scalar, int* results, cudaStream_t stream = 0)
{
  detail::coo_degree_scalar<64>(rows, vals, nnz, scalar, results, stream);
}
/**
 * @brief Count the number of values for each row that doesn't match a particular scalar
 * @tparam T: the type name of the underlying value arrays
 * @param in: Input COO array
 * @param scalar: scalar to match for counting rows
 * @param results: output row counts
 * @param stream: cuda stream to use
 */
template <typename T>
void coo_degree_scalar(COO<T>* in, T scalar, int* results, cudaStream_t stream)
{
  coo_degree_scalar(in->rows(), in->vals(), in->nnz, scalar, results, stream);
}
/**
 * @brief Count the number of nonzero values for each row
 * @tparam T: the type name of the underlying value arrays
 * @param rows: Input COO row array
 * @param vals: Input COO val arrays
 * @param nnz: size of input COO arrays
 * @param results: output row counts
 * @param stream: cuda stream to use
 */
template <typename T>
void coo_degree_nz(const int* rows, const T* vals, int nnz, int* results, cudaStream_t stream)
{
  detail::coo_degree_nz<64>(rows, vals, nnz, results, stream);
}
/**
 * @brief Count the number of nonzero values for each row
 * @tparam T: the type name of the underlying value arrays
 * @param in: Input COO array
 * @param results: output row counts
 * @param stream: cuda stream to use
 */
template <typename T>
void coo_degree_nz(COO<T>* in, int* results, cudaStream_t stream)
{
  coo_degree_nz(in->rows(), in->vals(), in->nnz, results, stream);
}
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/spectral.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_SPECTRAL_H
#define __SPARSE_SPECTRAL_H
#include <raft/core/resources.hpp>
#include <raft/sparse/linalg/detail/spectral.cuh>
namespace raft {
namespace sparse {
namespace spectral {
/**
 * @brief Fit a spectral embedding for a graph given in COO format.
 *        Thin public wrapper around detail::fit_embedding.
 * @tparam T floating-point type of the edge weights and output embedding
 * @param handle raft handle for managing expensive resources
 * @param rows COO row indices (device memory, size nnz)
 * @param cols COO column indices (device memory, size nnz)
 * @param vals COO edge weights (device memory, size nnz)
 * @param nnz number of nonzero entries in the COO arrays
 * @param n number of vertices in the graph
 * @param n_components number of embedding dimensions to compute
 * @param out output embedding (device memory; presumably n * n_components
 *        entries -- confirm against detail::fit_embedding)
 * @param seed seed for random initialization of the eigensolver
 */
template <typename T>
void fit_embedding(raft::resources const& handle,
                   int* rows,
                   int* cols,
                   T* vals,
                   int nnz,
                   int n,
                   int n_components,
                   T* out,
                   unsigned long long seed = 1234567)
{
  detail::fit_embedding(handle, rows, cols, vals, nnz, n, n_components, out, seed);
}
}; // namespace spectral
}; // namespace sparse
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/norm.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_NORM_H
#define __SPARSE_NORM_H
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/norm_types.hpp>
#include <raft/sparse/linalg/detail/norm.cuh>
namespace raft {
namespace sparse {
namespace linalg {
/**
 * @brief Perform L1 normalization on the rows of a given CSR-formatted sparse matrix
 *
 * @tparam T: the data type of the values array
 * @param ia: row_ind array
 * @param vals: data array
 * @param nnz: size of data array
 * @param m: size of row_ind array
 * @param result: l1 normalized data array
 * @param stream: cuda stream to use
 */
template <typename T>
void csr_row_normalize_l1(const int* ia,  // csr row ex_scan (sorted by row)
                          const T* vals,
                          int nnz,  // array of values and number of non-zeros
                          int m,    // num rows in csr
                          T* result,
                          cudaStream_t stream)
{  // output array
  detail::csr_row_normalize_l1(ia, vals, nnz, m, result, stream);
}
/**
 * @brief Perform L_inf normalization on a given CSR-formatted sparse matrix
 *
 * @tparam T: the data type of the values array
 * @param ia: row_ind array
 * @param vals: data array
 * @param nnz: size of data array
 * @param m: size of row_ind array
 * @param result: max normalized data array
 * @param stream: cuda stream to use
 */
template <typename T>
void csr_row_normalize_max(const int* ia,  // csr row ind array (sorted by row)
                           const T* vals,
                           int nnz,  // array of values and number of non-zeros
                           int m,    // num total rows in csr
                           T* result,
                           cudaStream_t stream)
{
  detail::csr_row_normalize_max(ia, vals, nnz, m, result, stream);
}
/**
 * @brief Compute row-wise norm of the input matrix and perform fin_op lambda
 *
 * Row-wise norm is useful while computing pairwise distance matrix, for
 * example.
 * This is used in many clustering algos like knn, kmeans, dbscan, etc...
 *
 * @tparam Type the data type
 * @tparam Lambda device final lambda
 * @tparam IdxType Integer type used to for addressing
 * @param handle raft handle
 * @param ia the input matrix row index array (CSR indptr; presumably N+1
 *        entries -- confirm against detail::rowNormCsrCaller)
 * @param data the input matrix nnz data
 * @param nnz number of elements in data
 * @param N number of rows
 * @param norm the output vector of row-wise norm, size [N]
 * @param type the type of norm to be applied
 * @param fin_op the final lambda op applied to each row's norm
 */
template <typename Type, typename IdxType = int, typename Lambda = raft::identity_op>
void rowNormCsr(raft::resources const& handle,
                const IdxType* ia,
                const Type* data,
                const IdxType nnz,
                const IdxType N,
                Type* norm,
                raft::linalg::NormType type,
                Lambda fin_op = raft::identity_op())
{
  detail::rowNormCsrCaller(ia, data, nnz, N, norm, type, fin_op, resource::get_cuda_stream(handle));
}
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/spmm.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPMM_H
#define __SPMM_H
#pragma once
#include "detail/spmm.hpp"
namespace raft {
namespace sparse {
namespace linalg {
/**
 * @brief Sparse * dense matrix multiply via cuSparse:
 * Z = alpha . X * Y + beta . Z
 * Handles all CSR * DENSE combinations of operand layouts accepted by
 * cuSparse, where X is a CSR device matrix view and Y, Z are dense device
 * matrix views.
 * @tparam ValueType Data type of input/output matrices (float/double)
 * @tparam IndexType Type of Y and Z
 * @tparam NZType Type of X
 * @tparam LayoutPolicyY layout of Y
 * @tparam LayoutPolicyZ layout of Z
 * @param[in] handle raft handle
 * @param[in] trans_x transpose operation for X
 * @param[in] trans_y transpose operation for Y
 * @param[in] alpha scalar
 * @param[in] x input raft::device_csr_matrix_view
 * @param[in] y input raft::device_matrix_view
 * @param[in] beta scalar
 * @param[out] z output raft::device_matrix_view
 */
template <typename ValueType,
          typename IndexType,
          typename NZType,
          typename LayoutPolicyY,
          typename LayoutPolicyZ>
void spmm(raft::resources const& handle,
          const bool trans_x,
          const bool trans_y,
          const ValueType* alpha,
          raft::device_csr_matrix_view<const ValueType, int, int, NZType> x,
          raft::device_matrix_view<const ValueType, IndexType, LayoutPolicyY> y,
          const ValueType* beta,
          raft::device_matrix_view<ValueType, IndexType, LayoutPolicyZ> z)
{
  // Y and Z must agree on a layout; resolve it once for both dense descriptors.
  const bool row_major = detail::is_row_major(y, z);

  // Build the cuSparse descriptors for the sparse operand and both dense operands.
  auto mat_x = detail::create_descriptor(x);
  auto mat_y = detail::create_descriptor(y, row_major);
  auto mat_z = detail::create_descriptor(z, row_major);

  detail::spmm(handle, trans_x, trans_y, row_major, alpha, mat_x, mat_y, beta, mat_z);

  // Tear the descriptors back down; the no-throw variants keep cleanup going
  // even if one destroy call reports an error.
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroySpMat(mat_x));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(mat_y));
  RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyDnMat(mat_z));
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // end namespace linalg
} // end namespace sparse
} // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/symmetrize.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SYMMETRIZE_H
#define __SYMMETRIZE_H
#pragma once
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/detail/symmetrize.cuh>
namespace raft {
namespace sparse {
namespace linalg {
/**
 * @brief takes a COO matrix which may not be symmetric and symmetrizes
 * it, running a custom reduction function against the each value
 * and its transposed value.
 *
 * @tparam T: the type of the value array
 * @tparam Lambda: type of the two-argument reduction functor
 * @param in: Input COO matrix
 * @param out: Output symmetrized COO matrix
 * @param reduction_op: a custom reduction function
 * @param stream: cuda stream to use
 */
template <typename T, typename Lambda>
void coo_symmetrize(COO<T>* in,
                    COO<T>* out,
                    Lambda reduction_op,  // two-argument reducer
                    cudaStream_t stream)
{
  detail::coo_symmetrize(in, out, reduction_op, stream);
}
/**
 * @brief Find how much space needed in each row.
 * We look through all datapoints and increment the count for each row.
 *
 * TODO: This isn't generalized. Remove in place of `symmetrize()`
 * @tparam value_idx index type
 * @tparam value_t value type
 * @param data: Input knn distances(n, k)
 * @param indices: Input knn indices(n, k)
 * @param n: Number of rows
 * @param k: Number of n_neighbors
 * @param row_sizes: Input empty row sum 1 array(n)
 * @param row_sizes2: Input empty row sum 2 array(n) for faster reduction
 */
template <typename value_idx = int64_t, typename value_t = float>
RAFT_KERNEL symmetric_find_size(const value_t* __restrict__ data,
                                const value_idx* __restrict__ indices,
                                const value_idx n,
                                const int k,
                                value_idx* __restrict__ row_sizes,
                                value_idx* __restrict__ row_sizes2)
{
  // NOTE: __restrict__ now placed after '*' so it qualifies the pointer (not
  // the pointee type), matching symmetric_sum below and standard usage.
  detail::symmetric_find_size(data, indices, n, k, row_sizes, row_sizes2);
}
/**
 * @brief Reduce sum(row_sizes) + k
 * Reduction for symmetric_find_size kernel. Allows algo to be faster.
 *
 * TODO: This isn't generalized. Remove in place of `symmetrize()`
 * @tparam value_idx index type
 * @param n: Number of rows
 * @param k: Number of n_neighbors
 * @param row_sizes: Input row sum 1 array(n)
 * @param row_sizes2: Input row sum 2 array(n) for faster reduction
 */
template <typename value_idx>
RAFT_KERNEL reduce_find_size(const value_idx n,
                             const int k,
                             value_idx* __restrict__ row_sizes,
                             const value_idx* __restrict__ row_sizes2)
{
  // NOTE: __restrict__ now placed after '*' so it qualifies the pointer (not
  // the pointee type), matching symmetric_sum below and standard usage.
  detail::reduce_find_size(n, k, row_sizes, row_sizes2);
}
/**
 * @brief Perform data + data.T operation.
 * Can only run once row_sizes from the CSR matrix of data + data.T has been
 * determined.
 *
 * TODO: This isn't generalized. Remove in place of `symmetrize()`
 *
 * @tparam value_idx index type
 * @tparam value_t value type
 * @param edges: Input row sum array(n) after reduction
 * @param data: Input knn distances(n, k)
 * @param indices: Input knn indices(n, k)
 * @param VAL: Output values for data + data.T
 * @param COL: Output column indices for data + data.T
 * @param ROW: Output row indices for data + data.T
 * @param n: Number of rows
 * @param k: Number of n_neighbors
 */
template <typename value_idx = int64_t, typename value_t = float>
RAFT_KERNEL symmetric_sum(value_idx* __restrict__ edges,
                          const value_t* __restrict__ data,
                          const value_idx* __restrict__ indices,
                          value_t* __restrict__ VAL,
                          value_idx* __restrict__ COL,
                          value_idx* __restrict__ ROW,
                          const value_idx n,
                          const int k)
{
  detail::symmetric_sum(edges, data, indices, VAL, COL, ROW, n, k);
}
/**
 * @brief Perform data + data.T on raw KNN data.
 * The following steps are invoked:
 * (1) Find how much space needed in each row
 * (2) Compute final space needed (n*k + sum(row_sizes)) == 2*n*k
 * (3) Allocate new space
 * (4) Prepare edges for each new row
 * (5) Perform final data + data.T operation
 * (6) Return summed up VAL, COL, ROW
 *
 * TODO: This isn't generalized. Remove in place of `symmetrize()`
 *
 * @param knn_indices: Input knn indices(n, k)
 * @param knn_dists: Input knn distances(n, k)
 * @param n: Number of rows
 * @param k: Number of n_neighbors
 * @param out: Output COO Matrix class
 * @param stream: Input cuda stream
 */
template <typename value_idx = int64_t, typename value_t = float, int TPB_X = 32, int TPB_Y = 32>
void from_knn_symmetrize_matrix(const value_idx* __restrict__ knn_indices,
                                const value_t* __restrict__ knn_dists,
                                const value_idx n,
                                const int k,
                                COO<value_t, value_idx>* out,
                                cudaStream_t stream)
{
  detail::from_knn_symmetrize_matrix(knn_indices, knn_dists, n, k, out, stream);
}
/**
 * Symmetrizes a COO matrix given as separate row/col/val arrays, writing the
 * result into `out`. Forwards to detail::symmetrize.
 * @tparam value_idx index type
 * @tparam value_t value type
 * @param[in] handle raft handle
 * @param[in] rows COO row indices array (size nnz)
 * @param[in] cols COO column indices array (size nnz)
 * @param[in] vals COO values array (size nnz)
 * @param[in] m number of rows
 * @param[in] n number of columns
 * @param[in] nnz number of nonzeros in the input arrays
 * @param[out] out output symmetrized COO matrix
 */
template <typename value_idx, typename value_t>
void symmetrize(raft::resources const& handle,
                const value_idx* rows,
                const value_idx* cols,
                const value_t* vals,
                size_t m,
                size_t n,
                size_t nnz,
                raft::sparse::COO<value_t, value_idx>& out)
{
  detail::symmetrize(handle, rows, cols, vals, m, n, nnz, out);
}
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/transpose.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/sparse/linalg/detail/transpose.h>
namespace raft {
namespace sparse {
namespace linalg {
/**
 * Transpose a set of CSR arrays into a set of CSC arrays.
 * @tparam value_idx : data type of the CSR index arrays
 * @tparam value_t : data type of the CSR data array
 * @param[in] handle : used for invoking cusparse
 * @param[in] csr_indptr : CSR row index array
 * @param[in] csr_indices : CSR column indices array
 * @param[in] csr_data : CSR data array
 * @param[out] csc_indptr : CSC row index array
 * @param[out] csc_indices : CSC column indices array
 * @param[out] csc_data : CSC data array
 * @param[in] csr_nrows : Number of rows in CSR
 * @param[in] csr_ncols : Number of columns in CSR
 * @param[in] nnz : Number of nonzeros of CSR
 * @param[in] stream : Cuda stream for ordering events
 * @note the output arrays must be allocated by the caller; this wrapper
 *       performs no allocation of its own.
 */
template <typename value_idx, typename value_t>
void csr_transpose(raft::resources const& handle,
                   const value_idx* csr_indptr,
                   const value_idx* csr_indices,
                   const value_t* csr_data,
                   value_idx* csc_indptr,
                   value_idx* csc_indices,
                   value_t* csc_data,
                   value_idx csr_nrows,
                   value_idx csr_ncols,
                   value_idx nnz,
                   cudaStream_t stream)
{
  detail::csr_transpose(resource::get_cusparse_handle(handle),
                        csr_indptr,
                        csr_indices,
                        csr_data,
                        csc_indptr,
                        csc_indices,
                        csc_data,
                        csr_nrows,
                        csr_ncols,
                        nnz,
                        stream);
}
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/add.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SPARSE_ADD_H
#define __SPARSE_ADD_H
#pragma once
#include <raft/sparse/linalg/detail/add.cuh>
namespace raft {
namespace sparse {
namespace linalg {
/**
 * @brief Calculate the CSR row_ind array that would result
 * from summing together two CSR matrices
 * @tparam T: the type of the value arrays
 * @param a_ind: left hand row_ind array
 * @param a_indptr: left hand index_ptr array
 * @param a_val: left hand data array
 * @param nnz1: size of left hand index_ptr and val arrays
 * @param b_ind: right hand row_ind array
 * @param b_indptr: right hand index_ptr array
 * @param b_val: right hand data array
 * @param nnz2: size of right hand index_ptr and val arrays
 * @param m: size of output array (number of rows in final matrix)
 * @param out_ind: output row_ind array
 * @param stream: cuda stream to use
 * @return size needed for the output value/index arrays of the sum
 *         (as computed by detail::csr_add_calc_inds)
 */
template <typename T>
size_t csr_add_calc_inds(const int* a_ind,
                         const int* a_indptr,
                         const T* a_val,
                         int nnz1,
                         const int* b_ind,
                         const int* b_indptr,
                         const T* b_val,
                         int nnz2,
                         int m,
                         int* out_ind,
                         cudaStream_t stream)
{
  return detail::csr_add_calc_inds(
    a_ind, a_indptr, a_val, nnz1, b_ind, b_indptr, b_val, nnz2, m, out_ind, stream);
}
/**
 * @brief Perform the actual sum of two CSR matrices, populating the output
 * arrays. Run after `csr_add_calc_inds` has computed the output row_ind.
 * @tparam T: the type of the value arrays
 * @param a_ind: left hand row_ind array
 * @param a_indptr: left hand index_ptr array
 * @param a_val: left hand data array
 * @param nnz1: size of left hand index_ptr and val arrays
 * @param b_ind: right hand row_ind array
 * @param b_indptr: right hand index_ptr array
 * @param b_val: right hand data array
 * @param nnz2: size of right hand index_ptr and val arrays
 * @param m: size of output array (number of rows in final matrix)
 * @param c_ind: output row_ind array
 * @param c_indptr: output ind_ptr array
 * @param c_val: output data array
 * @param stream: cuda stream to use
 */
template <typename T>
void csr_add_finalize(const int* a_ind,
                      const int* a_indptr,
                      const T* a_val,
                      int nnz1,
                      const int* b_ind,
                      const int* b_indptr,
                      const T* b_val,
                      int nnz2,
                      int m,
                      int* c_ind,
                      int* c_indptr,
                      T* c_val,
                      cudaStream_t stream)
{
  detail::csr_add_finalize(
    a_ind, a_indptr, a_val, nnz1, b_ind, b_indptr, b_val, nnz2, m, c_ind, c_indptr, c_val, stream);
}
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/spmm.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
/**
* @brief determine common data layout for both dense matrices
* @tparam ValueType Data type of Y,Z (float/double)
* @tparam IndexType Type of Y,Z
* @tparam LayoutPolicyY layout of Y
* @tparam LayoutPolicyZ layout of Z
* @param[in] x input raft::device_matrix_view
* @param[in] y input raft::device_matrix_view
* @returns dense matrix descriptor to be used by cuSparse API
*/
template <typename ValueType, typename IndexType, typename LayoutPolicyY, typename LayoutPolicyZ>
bool is_row_major(raft::device_matrix_view<const ValueType, IndexType, LayoutPolicyY>& y,
                  raft::device_matrix_view<ValueType, IndexType, LayoutPolicyZ>& z)
{
  // A unit stride along the innermost dimension identifies row-major storage;
  // a unit stride along the outermost identifies column-major.
  const bool row_major = (z.stride(1) == 1) && (y.stride(1) == 1);
  const bool col_major = (z.stride(0) == 1) && (y.stride(0) == 1);
  ASSERT(row_major || col_major, "Both matrices need to be either row or col major");
  return row_major;
}
/**
* @brief create a cuSparse dense descriptor
* @tparam ValueType Data type of dense_view (float/double)
* @tparam IndexType Type of dense_view
* @tparam LayoutPolicy layout of dense_view
* @param[in] dense_view input raft::device_matrix_view
* @param[in] is_row_major data layout of raft::device_matrix_view
* @returns dense matrix descriptor to be used by cuSparse API
*/
template <typename ValueType, typename IndexType, typename LayoutPolicy>
cusparseDnMatDescr_t create_descriptor(
  raft::device_matrix_view<ValueType, IndexType, LayoutPolicy>& dense_view, const bool is_row_major)
{
  // The leading dimension follows the chosen storage order.
  const IndexType leading_dim = is_row_major ? dense_view.stride(0) : dense_view.stride(1);
  const auto order            = is_row_major ? CUSPARSE_ORDER_ROW : CUSPARSE_ORDER_COL;
  // cuSparse takes a non-const pointer even for read-only operands.
  auto* data = const_cast<std::remove_const_t<ValueType>*>(dense_view.data_handle());

  cusparseDnMatDescr_t descr;
  RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecreatednmat(
    &descr, dense_view.extent(0), dense_view.extent(1), leading_dim, data, order));
  return descr;
}
/**
* @brief create a cuSparse sparse descriptor
* @tparam ValueType Data type of sparse_view (float/double)
* @tparam IndptrType Data type of csr_matrix_view index pointers
* @tparam IndicesType Data type of csr_matrix_view indices
* @tparam NZType Type of sparse_view
* @param[in] sparse_view input raft::device_csr_matrix_view of size M rows x K columns
* @returns sparse matrix descriptor to be used by cuSparse API
*/
template <typename ValueType, typename IndptrType, typename IndicesType, typename NZType>
cusparseSpMatDescr_t create_descriptor(
  raft::device_csr_matrix_view<ValueType, IndptrType, IndicesType, NZType>& sparse_view)
{
  auto csr_structure = sparse_view.structure_view();
  // cuSparse takes non-const pointers even for read-only operands.
  auto* indptr  = const_cast<IndptrType*>(csr_structure.get_indptr().data());
  auto* indices = const_cast<IndicesType*>(csr_structure.get_indices().data());
  auto* values  = const_cast<std::remove_const_t<ValueType>*>(sparse_view.get_elements().data());

  cusparseSpMatDescr_t descr;
  RAFT_CUSPARSE_TRY(
    raft::sparse::detail::cusparsecreatecsr(&descr,
                                            static_cast<int64_t>(csr_structure.get_n_rows()),
                                            static_cast<int64_t>(csr_structure.get_n_cols()),
                                            static_cast<int64_t>(csr_structure.get_nnz()),
                                            indptr,
                                            indices,
                                            values));
  return descr;
}
/**
* @brief SPMM function designed for handling all CSR * DENSE
* combinations of operand layouts for cuSparse.
* It computes the following equation: Z = alpha . X * Y + beta . Z
* where X is a CSR device matrix view and Y,Z are device matrix views
* @tparam ValueType Data type of input/output matrices (float/double)
* @tparam IndexType Type of Y and Z
* @tparam NZType Type of X
* @tparam LayoutPolicyY layout of Y
* @tparam LayoutPolicyZ layout of Z
* @param[in] handle raft handle
* @param[in] trans_x transpose operation for X
* @param[in] trans_y transpose operation for Y
* @param[in] is_row_major data layout of Y,Z
* @param[in] alpha scalar
* @param[in] descr_x input sparse descriptor
* @param[in] descr_y input dense descriptor
* @param[in] beta scalar
* @param[out] descr_z output dense descriptor
*/
template <typename ValueType>
void spmm(raft::resources const& handle,
          const bool trans_x,
          const bool trans_y,
          const bool is_row_major,
          const ValueType* alpha,
          cusparseSpMatDescr_t& descr_x,
          cusparseDnMatDescr_t& descr_y,
          const ValueType* beta,
          cusparseDnMatDescr_t& descr_z)
{
  auto opX = trans_x ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
  auto opY = trans_y ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
  auto alg = is_row_major ? CUSPARSE_SPMM_CSR_ALG2 : CUSPARSE_SPMM_CSR_ALG1;

  // Query the scratch space cuSparse needs for this SpMM configuration.
  size_t bufferSize;
  RAFT_CUSPARSE_TRY(
    raft::sparse::detail::cusparsespmm_bufferSize(resource::get_cusparse_handle(handle),
                                                  opX,
                                                  opY,
                                                  alpha,
                                                  descr_x,
                                                  descr_y,
                                                  beta,
                                                  descr_z,
                                                  alg,
                                                  &bufferSize,
                                                  resource::get_cuda_stream(handle)));

  raft::interruptible::synchronize(resource::get_cuda_stream(handle));

  // Fix: bufferSize is reported in *bytes*; allocate a byte buffer instead of
  // `bufferSize` elements of ValueType, which over-allocated the scratch space
  // by a factor of sizeof(ValueType).
  rmm::device_uvector<char> tmp(bufferSize, resource::get_cuda_stream(handle));

  RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsespmm(resource::get_cusparse_handle(handle),
                                                       opX,
                                                       opY,
                                                       alpha,
                                                       descr_x,
                                                       descr_y,
                                                       beta,
                                                       descr_z,
                                                       alg,
                                                       tmp.data(),
                                                       resource::get_cuda_stream(handle)));
}
} // end namespace detail
} // end namespace linalg
} // end namespace sparse
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/degree.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>
#include <cuda_runtime.h>
#include <stdio.h>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
/**
* @brief Count all the rows in the coo row array and place them in the
* results matrix, indexed by row.
*
* @tparam TPB_X: number of threads to use per block
* @param rows the rows array of the coo matrix
* @param nnz the size of the rows array
* @param results array to place results
*/
template <int TPB_X = 64, typename T = int>
RAFT_KERNEL coo_degree_kernel(const T* rows, int nnz, T* results)
{
  // One thread per nonzero: bump the degree count of the row this entry
  // belongs to.
  const int idx = (blockIdx.x * TPB_X) + threadIdx.x;
  if (idx >= nnz) return;
  atomicAdd(results + rows[idx], (T)1);
}
/**
* @brief Count the number of values for each row
* @tparam TPB_X: number of threads to use per block
* @param rows: rows array of the COO matrix
* @param nnz: size of the rows array
* @param results: output result array
* @param stream: cuda stream to use
*/
template <int TPB_X = 64, typename T = int>
void coo_degree(const T* rows, int nnz, T* results, cudaStream_t stream)
{
  // One thread per COO entry.
  const dim3 blocks(raft::ceildiv(nnz, TPB_X), 1, 1);
  const dim3 threads(TPB_X, 1, 1);

  coo_degree_kernel<TPB_X><<<blocks, threads, 0, stream>>>(rows, nnz, results);
  RAFT_CUDA_TRY(cudaGetLastError());
}
template <int TPB_X = 64, typename T>
RAFT_KERNEL coo_degree_nz_kernel(const int* rows, const T* vals, int nnz, int* results)
{
  // One thread per nonzero: count only entries with a nonzero value.
  const int idx = (blockIdx.x * TPB_X) + threadIdx.x;
  if (idx >= nnz) return;
  if (vals[idx] != 0.0) { raft::myAtomicAdd(results + rows[idx], 1); }
}
template <int TPB_X = 64, typename T>
RAFT_KERNEL coo_degree_scalar_kernel(
  const int* rows, const T* vals, int nnz, T scalar, int* results)
{
  // One thread per nonzero: count only entries whose value differs from the
  // given scalar.
  const int idx = (blockIdx.x * TPB_X) + threadIdx.x;
  if (idx >= nnz) return;
  if (vals[idx] != scalar) { raft::myAtomicAdd(results + rows[idx], 1); }
}
/**
* @brief Count the number of values for each row that doesn't match a particular scalar
* @tparam TPB_X: number of threads to use per block
* @tparam T: the type name of the underlying value arrays
* @param rows: Input COO row array
* @param vals: Input COO val arrays
* @param nnz: size of input COO arrays
* @param scalar: scalar to match for counting rows
* @param results: output row counts
* @param stream: cuda stream to use
*/
template <int TPB_X = 64, typename T>
void coo_degree_scalar(
  const int* rows, const T* vals, int nnz, T scalar, int* results, cudaStream_t stream = 0)
{
  dim3 grid_rc(raft::ceildiv(nnz, TPB_X), 1, 1);
  dim3 blk_rc(TPB_X, 1, 1);
  coo_degree_scalar_kernel<TPB_X, T>
    <<<grid_rc, blk_rc, 0, stream>>>(rows, vals, nnz, scalar, results);
  // Fix: surface launch-configuration errors immediately, consistent with
  // coo_degree() above (this check was missing here).
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
* @brief Count the number of nonzeros for each row
* @tparam TPB_X: number of threads to use per block
* @tparam T: the type name of the underlying value arrays
* @param rows: Input COO row array
* @param vals: Input COO val arrays
* @param nnz: size of input COO arrays
* @param results: output row counts
* @param stream: cuda stream to use
*/
template <int TPB_X = 64, typename T>
void coo_degree_nz(const int* rows, const T* vals, int nnz, int* results, cudaStream_t stream)
{
  dim3 grid_rc(raft::ceildiv(nnz, TPB_X), 1, 1);
  dim3 blk_rc(TPB_X, 1, 1);
  coo_degree_nz_kernel<TPB_X, T><<<grid_rc, blk_rc, 0, stream>>>(rows, vals, nnz, results);
  // Fix: surface launch-configuration errors immediately, consistent with
  // coo_degree() above (this check was missing here).
  RAFT_CUDA_TRY(cudaGetLastError());
}
}; // end NAMESPACE detail
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/spectral.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft/spectral/cluster_solvers.cuh>
#include <raft/spectral/eigen_solvers.cuh>
#include <raft/spectral/partition.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
namespace raft {
namespace sparse {
namespace spectral {
namespace detail {
/**
 * @brief Compute a spectral embedding from a COO adjacency matrix.
 *
 * Converts the COO input to CSR, runs the Lanczos eigensolver via
 * raft::spectral::partition (with a no-op cluster solver, since only the
 * eigenvectors are of interest), and copies n_components eigenvectors to
 * `out`, skipping the first computed eigenvector.
 *
 * @param handle raft resources handle
 * @param rows COO row indices (device, nnz entries)
 * @param cols COO column indices (device, nnz entries)
 * @param vals COO values (device, nnz entries)
 * @param nnz number of nonzeros
 * @param n number of vertices (rows of the square matrix)
 * @param n_components number of embedding dimensions to produce
 * @param out output embedding (device, n * n_components entries)
 * @param seed seed for the eigensolver's random initialization
 */
template <typename T>
void fit_embedding(raft::resources const& handle,
                   int* rows,
                   int* cols,
                   T* vals,
                   int nnz,
                   int n,
                   int n_components,
                   T* out,
                   unsigned long long seed = 1234567)
{
  auto stream = resource::get_cuda_stream(handle);

  // Convert the COO input to CSR for the spectral solver.
  rmm::device_uvector<int> src_offsets(n + 1, stream);
  rmm::device_uvector<int> dst_cols(nnz, stream);
  rmm::device_uvector<T> dst_vals(nnz, stream);
  convert::coo_to_csr(
    handle, rows, cols, vals, nnz, n, src_offsets.data(), dst_cols.data(), dst_vals.data());

  // n_components + 1 eigenpairs are computed; the first eigenvector is
  // skipped when copying to `out` below.
  rmm::device_uvector<T> eigVals(n_components + 1, stream);
  rmm::device_uvector<T> eigVecs(n * (n_components + 1), stream);
  rmm::device_uvector<int> labels(n, stream);

  resource::sync_stream(handle, stream);

  /**
   * Raft spectral clustering
   */
  using index_type = int;
  using value_type = T;

  index_type* ro = src_offsets.data();
  index_type* ci = dst_cols.data();
  value_type* vs = dst_vals.data();

  raft::spectral::matrix::sparse_matrix_t<index_type, value_type> const r_csr_m{
    handle, ro, ci, vs, n, nnz};

  index_type neigvs       = n_components + 1;
  index_type maxiter      = 4000;  // default reset value (when set to 0);
  value_type tol          = 0.01;
  index_type restart_iter = 15 + neigvs;  // what cugraph is using

  raft::spectral::eigen_solver_config_t<index_type, value_type> cfg{
    neigvs, maxiter, restart_iter, tol};

  cfg.seed = seed;

  raft::spectral::lanczos_solver_t<index_type, value_type> eig_solver{cfg};

  // cluster computation here is irrelevant,
  // hence define a no-op such solver to
  // feed partition():
  //
  struct no_op_cluster_solver_t {
    using index_type_t = index_type;
    using size_type_t  = index_type;
    using value_type_t = value_type;

    std::pair<value_type_t, index_type_t> solve(raft::resources const& handle,
                                                size_type_t n_obs_vecs,
                                                size_type_t dim,
                                                value_type_t const* __restrict__ obs,
                                                index_type_t* __restrict__ codes) const
    {
      return std::make_pair<value_type_t, index_type_t>(0, 0);
    }
  };

  raft::spectral::partition(handle,
                            r_csr_m,
                            eig_solver,
                            no_op_cluster_solver_t{},
                            labels.data(),
                            eigVals.data(),
                            eigVecs.data());

  // Copy n_components eigenvectors to the output, skipping the first computed
  // eigenvector (offset by n values).
  raft::copy<T>(out, eigVecs.data() + n, n * n_components, stream);

  RAFT_CUDA_TRY(cudaGetLastError());
}
}; // namespace detail
}; // namespace spectral
}; // namespace sparse
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/norm.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/common/nvtx.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/norm_types.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/op/row_op.cuh>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
#include <limits>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
template <int TPB_X = 64, typename T>
RAFT_KERNEL csr_row_normalize_l1_kernel(
  // @TODO: This can be done much more parallel by
  // having threads in a warp compute the sum in parallel
  // over each row and then divide the values in parallel.
  const int* ia,  // csr row ex_scan (sorted by row)
  const T* vals,
  int nnz,  // array of values and number of non-zeros
  int m,    // num rows in csr
  T* result)
{
  // one thread per row
  const int row = (blockIdx.x * TPB_X) + threadIdx.x;
  if (row >= m) return;

  // locate this row's value range; the final row ends at nnz
  const int row_start = ia[row];
  const int row_end   = (row < m - 1) ? ia[row + 1] : nnz;

  // accumulate the row's L1 norm
  T sum = T(0.0);
  for (int j = row_start; j < row_end; j++) {
    sum += fabs(vals[j]);
  }

  // scale each entry by the norm; a zero-norm row is written as all zeros
  for (int j = row_start; j < row_end; j++) {
    if (sum != 0.0) {
      result[j] = vals[j] / sum;
    } else {
      result[j] = 0.0;
    }
  }
}
/**
* @brief Perform L1 normalization on the rows of a given CSR-formatted sparse matrix
*
* @param ia: row_ind array
* @param vals: data array
* @param nnz: size of data array
* @param m: size of row_ind array
* @param result: l1 normalized data array
* @param stream: cuda stream to use
*/
template <int TPB_X = 64, typename T>
void csr_row_normalize_l1(const int* ia,  // csr row ex_scan (sorted by row)
                          const T* vals,
                          int nnz,  // array of values and number of non-zeros
                          int m,    // num rows in csr
                          T* result,
                          cudaStream_t stream)
{
  // launch one thread per row
  const dim3 blocks(raft::ceildiv(m, TPB_X), 1, 1);
  const dim3 threads(TPB_X, 1, 1);

  csr_row_normalize_l1_kernel<TPB_X, T><<<blocks, threads, 0, stream>>>(ia, vals, nnz, m, result);
  RAFT_CUDA_TRY(cudaGetLastError());
}
template <int TPB_X = 64, typename T>
RAFT_KERNEL csr_row_normalize_max_kernel(
  // @TODO: This can be done much more parallel by
  // having threads in a warp compute the sum in parallel
  // over each row and then divide the values in parallel.
  const int* ia,  // csr row ind array (sorted by row)
  const T* vals,
  int nnz,  // array of values and number of non-zeros
  int m,    // num total rows in csr
  T* result)
{  // output array

  // row-based matrix 1 thread per row
  int row = (blockIdx.x * TPB_X) + threadIdx.x;

  // find max across columns and divide
  if (row < m) {
    int start_idx = ia[row];
    int stop_idx  = 0;
    if (row < m - 1) {
      stop_idx = ia[row + 1];
    } else
      stop_idx = nnz;

    // Fix: use numeric_limits<T> rather than the hard-coded
    // numeric_limits<float>, which was incorrect for T = double.
    T max = std::numeric_limits<T>::min();

    for (int j = start_idx; j < stop_idx; j++) {
      if (vals[j] > max) max = vals[j];
    }

    // divide nonzeros in current row by max; rows whose max never exceeded the
    // sentinel (e.g. all non-positive values) are written as zeros
    for (int j = start_idx; j < stop_idx; j++) {
      if (max != 0.0 && max > std::numeric_limits<T>::min()) {
        T val     = vals[j];
        result[j] = val / max;
      } else {
        result[j] = 0.0;
      }
    }
  }
}
/**
* @brief Perform L_inf normalization on a given CSR-formatted sparse matrix
*
* @param ia: row_ind array
* @param vals: data array
* @param nnz: size of data array
* @param m: size of row_ind array
* @param result: l1 normalized data array
* @param stream: cuda stream to use
*/
template <int TPB_X = 64, typename T>
void csr_row_normalize_max(const int* ia,  // csr row ind array (sorted by row)
                           const T* vals,
                           int nnz,  // array of values and number of non-zeros
                           int m,    // num total rows in csr
                           T* result,
                           cudaStream_t stream)
{
  // launch one thread per row
  const dim3 blocks(raft::ceildiv(m, TPB_X), 1, 1);
  const dim3 threads(TPB_X, 1, 1);

  csr_row_normalize_max_kernel<TPB_X, T><<<blocks, threads, 0, stream>>>(ia, vals, nnz, m, result);
  RAFT_CUDA_TRY(cudaGetLastError());
}
template <typename Type,
          typename IdxType      = int,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void csr_row_op_wrapper(const IdxType* ia,
                        const Type* data,
                        IdxType nnz,
                        IdxType N,
                        Type init,
                        Type* norm,
                        cudaStream_t stream,
                        MainLambda main_op     = raft::identity_op(),
                        ReduceLambda reduce_op = raft::add_op(),
                        FinalLambda final_op   = raft::identity_op())
{
  op::csr_row_op<IdxType>(
    ia,
    N,
    nnz,
    [data, init, norm, main_op, reduce_op, final_op] __device__(
      IdxType row, IdxType start_idx, IdxType stop_idx) {
      // Fix: accumulate first and apply final_op exactly once per row.
      // Previously final_op was applied on every iteration, which corrupts the
      // running reduction for non-identity finalizers (e.g. sqrt for L2 norm).
      Type acc = init;
      for (IdxType i = start_idx; i < stop_idx; i++)
        acc = reduce_op(acc, main_op(data[i]));
      norm[row] = final_op(acc);
    },
    stream);
}
template <typename Type, typename IdxType, typename Lambda>
void rowNormCsrCaller(const IdxType* ia,
                      const Type* data,
                      IdxType nnz,
                      IdxType N,
                      Type* norm,
                      raft::linalg::NormType type,
                      Lambda fin_op,
                      cudaStream_t stream)
{
  // Dispatch on the requested norm: each case differs only in the per-element
  // transform and the pairwise reducer handed to the generic row-op wrapper.
  const Type zero = (Type)0;
  switch (type) {
    case raft::linalg::NormType::L1Norm:
      csr_row_op_wrapper(
        ia, data, nnz, N, zero, norm, stream, raft::abs_op(), raft::add_op(), fin_op);
      break;
    case raft::linalg::NormType::L2Norm:
      csr_row_op_wrapper(
        ia, data, nnz, N, zero, norm, stream, raft::sq_op(), raft::add_op(), fin_op);
      break;
    case raft::linalg::NormType::LinfNorm:
      csr_row_op_wrapper(
        ia, data, nnz, N, zero, norm, stream, raft::abs_op(), raft::max_op(), fin_op);
      break;
    default: THROW("Unsupported norm type: %d", type);
  };
}
}; // end NAMESPACE detail
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/symmetrize.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/sparse/op/sort.cuh>
#include <raft/util/device_atomics.cuh>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/op/reduce.cuh>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
// TODO: value_idx param needs to be used for this once FAISS is updated to use float32
// for indices so that the index types can be uniform
template <int TPB_X = 128, typename T, typename Lambda>
RAFT_KERNEL coo_symmetrize_kernel(int* row_ind,
                                  int* rows,
                                  int* cols,
                                  T* vals,
                                  int* orows,
                                  int* ocols,
                                  T* ovals,
                                  int n,
                                  int cnnz,
                                  Lambda reduction_op)
{
  int row = (blockIdx.x * TPB_X) + threadIdx.x;

  if (row < n) {
    int start_idx = row_ind[row];  // each thread processes one row
    int stop_idx  = get_stop_idx(row, n, cnnz, row_ind);

    int row_nnz       = 0;
    int out_start_idx = start_idx * 2;

    for (int idx = 0; idx < stop_idx - start_idx; idx++) {
      int cur_row = rows[idx + start_idx];
      int cur_col = cols[idx + start_idx];
      T cur_val   = vals[idx + start_idx];

      int lookup_row = cur_col;
      int t_start    = row_ind[lookup_row];  // Start at
      int t_stop     = get_stop_idx(lookup_row, n, cnnz, row_ind);

      T transpose = 0.0;

      bool found_match = false;
      for (int t_idx = t_start; t_idx < t_stop; t_idx++) {
        // If we find a match, let's get out of the loop. We won't
        // need to modify the transposed value, since that will be
        // done in a different thread.
        if (cols[t_idx] == cur_row && rows[t_idx] == cur_col) {
          // If it exists already, set transposed value to existing value
          transpose   = vals[t_idx];
          found_match = true;
          break;
        }
      }

      // Custom reduction op on value and its transpose, which enables
      // specialized weighting.
      // If only simple X+X.T is desired, this op can just sum
      // the two values.
      T res = reduction_op(cur_row, cur_col, cur_val, transpose);

      // if we didn't find an exact match, we need to add
      // the computed res into our current matrix to guarantee
      // symmetry.
      // Note that if we did find a match, we don't need to
      // compute `res` on it here because it will be computed
      // in a different thread.
      // Fix: test the current entry's value (cur_val). The previous check,
      // `vals[idx] != 0.0`, indexed the value array with a loop-local offset
      // (0..row_len-1) instead of idx + start_idx, reading an unrelated entry.
      if (!found_match && cur_val != 0.0) {
        orows[out_start_idx + row_nnz] = cur_col;
        ocols[out_start_idx + row_nnz] = cur_row;
        ovals[out_start_idx + row_nnz] = res;
        ++row_nnz;
      }

      if (res != 0.0) {
        orows[out_start_idx + row_nnz] = cur_row;
        ocols[out_start_idx + row_nnz] = cur_col;
        ovals[out_start_idx + row_nnz] = res;
        ++row_nnz;
      }
    }
  }
}
/**
* @brief takes a COO matrix which may not be symmetric and symmetrizes
* it, running a custom reduction function against the each value
* and its transposed value.
*
* @param in: Input COO matrix
* @param out: Output symmetrized COO matrix
* @param reduction_op: a custom reduction function
* @param stream: cuda stream to use
*/
template <int TPB_X = 128, typename T, typename Lambda>
void coo_symmetrize(COO<T>* in,
                    COO<T>* out,
                    Lambda reduction_op,  // two-argument reducer
                    cudaStream_t stream)
{
  ASSERT(!out->validate_mem(), "Expecting unallocated COO for output");

  // Build a CSR row index over the sorted input so each kernel thread can
  // locate its row's value range.
  rmm::device_uvector<int> in_row_ind(in->n_rows, stream);
  convert::sorted_coo_to_csr(in, in_row_ind.data(), stream);

  // Worst case: every entry and its transposed counterpart are distinct.
  out->allocate(in->nnz * 2, in->n_rows, in->n_cols, true, stream);

  const dim3 grid(raft::ceildiv(in->n_rows, TPB_X), 1, 1);
  const dim3 blk(TPB_X, 1, 1);
  coo_symmetrize_kernel<TPB_X, T><<<grid, blk, 0, stream>>>(in_row_ind.data(),
                                                            in->rows(),
                                                            in->cols(),
                                                            in->vals(),
                                                            out->rows(),
                                                            out->cols(),
                                                            out->vals(),
                                                            in->n_rows,
                                                            in->nnz,
                                                            reduction_op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Find how much space needed in each row.
* We look through all datapoints and increment the count for each row.
*
* @param data: Input knn distances(n, k)
* @param indices: Input knn indices(n, k)
* @param n: Number of rows
* @param k: Number of n_neighbors
* @param row_sizes: Input empty row sum 1 array(n)
* @param row_sizes2: Input empty row sum 2 array(n) for faster reduction
*/
template <typename value_idx = int64_t, typename value_t = float>
RAFT_KERNEL symmetric_find_size(const value_t* __restrict__ data,
                                const value_idx* __restrict__ indices,
                                const value_idx n,
                                const int k,
                                value_idx* __restrict__ row_sizes,
                                value_idx* __restrict__ row_sizes2)
{
  // 2D launch: x indexes the row, y indexes the neighbor within the row.
  const auto row = blockIdx.x * blockDim.x + threadIdx.x;
  const auto j   = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= n || j >= k) return;

  // Count each entry against the row of its transpose; odd/even neighbors go
  // to separate counter arrays (merged later by reduce_find_size).
  const auto col     = indices[row * k + j];
  value_idx* counter = (j % 2) ? row_sizes : row_sizes2;
  atomicAdd(&counter[col], value_idx(1));
}
/**
* @brief Reduce sum(row_sizes) + k
* Reduction for symmetric_find_size kernel. Allows algo to be faster.
*
* @param n: Number of rows
* @param k: Number of n_neighbors
* @param row_sizes: Input row sum 1 array(n)
* @param row_sizes2: Input row sum 2 array(n) for faster reduction
*/
template <typename value_idx>
RAFT_KERNEL reduce_find_size(const value_idx n,
                             const int k,
                             value_idx* __restrict__ row_sizes,
                             const value_idx* __restrict__ row_sizes2)
{
  const auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  // Fold the second partial-count array into the first and add k
  // (each row also contributes its own k neighbors).
  row_sizes[idx] += row_sizes2[idx] + k;
}
/**
* @brief Perform data + data.T operation.
* Can only run once row_sizes from the CSR matrix of data + data.T has been
* determined.
*
* @param edges: Input row sum array(n) after reduction
* @param data: Input knn distances(n, k)
* @param indices: Input knn indices(n, k)
* @param VAL: Output values for data + data.T
* @param COL: Output column indices for data + data.T
* @param ROW: Output row indices for data + data.T
* @param n: Number of rows
* @param k: Number of n_neighbors
*/
template <typename value_idx = int64_t, typename value_t = float>
RAFT_KERNEL symmetric_sum(value_idx* __restrict__ edges,
                          const value_t* __restrict__ data,
                          const value_idx* __restrict__ indices,
                          value_t* __restrict__ VAL,
                          value_idx* __restrict__ COL,
                          value_idx* __restrict__ ROW,
                          const value_idx n,
                          const int k)
{
  // 2D launch: x indexes the row, y indexes the neighbor within the row.
  const auto row = blockIdx.x * blockDim.x + threadIdx.x;
  const auto j   = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= n || j >= k) return;

  const auto col = indices[row * k + j];
  // Atomically claim one output slot for the edge and one for its transpose.
  const auto fwd_pos = atomicAdd(&edges[row], value_idx(1));
  const auto bwd_pos = atomicAdd(&edges[col], value_idx(1));

  const value_t dist = data[row * k + j];
  VAL[fwd_pos]       = dist;
  VAL[bwd_pos]       = dist;

  ROW[fwd_pos] = row;
  COL[fwd_pos] = col;
  // row/col swapped for the transposed entry
  ROW[bwd_pos] = col;
  COL[bwd_pos] = row;
}
/**
* @brief Perform data + data.T on raw KNN data.
* The following steps are invoked:
* (1) Find how much space needed in each row
* (2) Compute final space needed (n*k + sum(row_sizes)) == 2*n*k
* (3) Allocate new space
* (4) Prepare edges for each new row
* (5) Perform final data + data.T operation
* (6) Return summed up VAL, COL, ROW
*
* @param knn_indices: Input knn distances(n, k)
* @param knn_dists: Input knn indices(n, k)
* @param n: Number of rows
* @param k: Number of n_neighbors
* @param out: Output COO Matrix class
* @param stream: Input cuda stream
*/
template <typename value_idx = int64_t, typename value_t = float, int TPB_X = 32, int TPB_Y = 32>
void from_knn_symmetrize_matrix(const value_idx* __restrict__ knn_indices,
                                const value_t* __restrict__ knn_dists,
                                const value_idx n,
                                const int k,
                                COO<value_t, value_idx>* out,
                                cudaStream_t stream)
{
  // (1) Find how much space needed in each row
  // We look through all datapoints and increment the count for each row.
  const dim3 threadsPerBlock(TPB_X, TPB_Y);
  const dim3 numBlocks(raft::ceildiv(n, (value_idx)TPB_X), raft::ceildiv(k, TPB_Y));

  // Notice n+1 since we can reuse these arrays for transpose_edges, original_edges in step (4)
  rmm::device_uvector<value_idx> row_sizes(n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(row_sizes.data(), 0, sizeof(value_idx) * n, stream));

  rmm::device_uvector<value_idx> row_sizes2(n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(row_sizes2.data(), 0, sizeof(value_idx) * n, stream));

  // NOTE(review): knn_dists/knn_indices are passed in the kernel's
  // (data, indices) order here; this function's own doc comment swaps the
  // meanings of knn_indices/knn_dists — verify against callers.
  symmetric_find_size<<<numBlocks, threadsPerBlock, 0, stream>>>(
    knn_dists, knn_indices, n, k, row_sizes.data(), row_sizes2.data());
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  reduce_find_size<<<raft::ceildiv(n, (value_idx)1024), 1024, 0, stream>>>(
    n, k, row_sizes.data(), row_sizes2.data());
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // (2) Compute final space needed (n*k + sum(row_sizes)) == 2*n*k
  // Notice we don't do any merging and leave the result as 2*NNZ
  const auto NNZ = 2 * n * k;

  // (3) Allocate new space
  out->allocate(NNZ, n, n, true, stream);

  // (4) Prepare edges for each new row
  // This mirrors CSR matrix's row Pointer, were maximum bounds for each row
  // are calculated as the cumulative rolling sum of the previous rows.
  // Notice reusing old row_sizes2 memory
  value_idx* edges                          = row_sizes2.data();
  thrust::device_ptr<value_idx> __edges     = thrust::device_pointer_cast(edges);
  thrust::device_ptr<value_idx> __row_sizes = thrust::device_pointer_cast(row_sizes.data());

  // Rolling cumulative sum
  thrust::exclusive_scan(rmm::exec_policy(stream), __row_sizes, __row_sizes + n, __edges);

  // (5) Perform final data + data.T operation in tandem with memcpying
  symmetric_sum<<<numBlocks, threadsPerBlock, 0, stream>>>(
    edges, knn_dists, knn_indices, out->vals(), out->cols(), out->rows(), n, k);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* Symmetrizes a COO matrix
*/
template <typename value_idx, typename value_t>
void symmetrize(raft::resources const& handle,
                const value_idx* rows,
                const value_idx* cols,
                const value_t* vals,
                size_t m,
                size_t n,
                size_t nnz,
                raft::sparse::COO<value_t, value_idx>& out)
{
  auto stream = resource::get_cuda_stream(handle);

  // Concatenate the COO with its transpose: [rows|cols], [cols|rows], [vals|vals].
  rmm::device_uvector<value_idx> all_rows(nnz * 2, stream);
  rmm::device_uvector<value_idx> all_cols(nnz * 2, stream);
  rmm::device_uvector<value_t> all_vals(nnz * 2, stream);

  raft::copy_async(all_rows.data(), rows, nnz, stream);
  raft::copy_async(all_rows.data() + nnz, cols, nnz, stream);
  raft::copy_async(all_cols.data(), cols, nnz, stream);
  raft::copy_async(all_cols.data() + nnz, rows, nnz, stream);
  raft::copy_async(all_vals.data(), vals, nnz, stream);
  raft::copy_async(all_vals.data() + nnz, vals, nnz, stream);

  // Sort so duplicate (row, col) pairs become adjacent, then keep the maximum
  // value of each duplicate group in the output.
  raft::sparse::op::coo_sort((value_idx)m,
                             (value_idx)n,
                             (value_idx)nnz * 2,
                             all_rows.data(),
                             all_cols.data(),
                             all_vals.data(),
                             stream);

  raft::sparse::op::max_duplicates(
    handle, out, all_rows.data(), all_cols.data(), all_vals.data(), nnz * 2, m, n);
}
}; // end NAMESPACE detail
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/transpose.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
/**
* Transpose a set of CSR arrays into a set of CSC arrays.
* @tparam value_idx : data type of the CSR index arrays
* @tparam value_t : data type of the CSR data array
* @param[in] handle : used for invoking cusparse
* @param[in] csr_indptr : CSR row index array
* @param[in] csr_indices : CSR column indices array
* @param[in] csr_data : CSR data array
* @param[out] csc_indptr : CSC row index array
* @param[out] csc_indices : CSC column indices array
* @param[out] csc_data : CSC data array
* @param[in] csr_nrows : Number of rows in CSR
* @param[in] csr_ncols : Number of columns in CSR
* @param[in] nnz : Number of nonzeros of CSR
* @param[in] stream : Cuda stream for ordering events
*/
template <typename value_idx, typename value_t>
void csr_transpose(cusparseHandle_t handle,
                   const value_idx* csr_indptr,
                   const value_idx* csr_indices,
                   const value_t* csr_data,
                   value_idx* csc_indptr,
                   value_idx* csc_indices,
                   value_t* csc_data,
                   value_idx csr_nrows,
                   value_idx csr_ncols,
                   value_idx nnz,
                   cudaStream_t stream)
{
  // Query the scratch-space size cuSparse needs for the CSR -> CSC conversion.
  size_t convert_csc_workspace_size = 0;

  RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsr2csc_bufferSize(handle,
                                                                     csr_nrows,
                                                                     csr_ncols,
                                                                     nnz,
                                                                     csr_data,
                                                                     csr_indptr,
                                                                     csr_indices,
                                                                     csc_data,
                                                                     csc_indptr,
                                                                     csc_indices,
                                                                     CUSPARSE_ACTION_NUMERIC,
                                                                     CUSPARSE_INDEX_BASE_ZERO,
                                                                     CUSPARSE_CSR2CSC_ALG1,
                                                                     &convert_csc_workspace_size,
                                                                     stream));

  // Allocate the workspace and run the conversion. CUSPARSE_ACTION_NUMERIC
  // converts values as well as indices; indexing is zero-based.
  rmm::device_uvector<char> convert_csc_workspace(convert_csc_workspace_size, stream);

  RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsr2csc(handle,
                                                          csr_nrows,
                                                          csr_ncols,
                                                          nnz,
                                                          csr_data,
                                                          csr_indptr,
                                                          csr_indices,
                                                          csc_data,
                                                          csc_indptr,
                                                          csc_indices,
                                                          CUSPARSE_ACTION_NUMERIC,
                                                          CUSPARSE_INDEX_BASE_ZERO,
                                                          CUSPARSE_CSR2CSC_ALG1,
                                                          convert_csc_workspace.data(),
                                                          stream));
}
}; // end NAMESPACE detail
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg | rapidsai_public_repos/raft/cpp/include/raft/sparse/linalg/detail/add.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace linalg {
namespace detail {
/**
 * @brief Count, for each row, the number of unique columns in the union of
 * that row of A and that row of B (i.e. the per-row nnz of A + B).
 *
 * One thread processes one row. The grand total nnz of the output matrix is
 * additionally accumulated atomically into out_rowcounts[m].
 *
 * Naming note: in this file the *_ind arrays hold the CSR row offsets and the
 * *_indptr arrays hold the column indices. a_val / b_val are unused here;
 * only the sparsity patterns matter for counting.
 */
template <typename T, int TPB_X = 128>
RAFT_KERNEL csr_add_calc_row_counts_kernel(const int* a_ind,
                                           const int* a_indptr,
                                           const T* a_val,
                                           int nnz1,
                                           const int* b_ind,
                                           const int* b_indptr,
                                           const T* b_val,
                                           int nnz2,
                                           int m,
                                           int* out_rowcounts)
{
  // loop through columns in each set of rows and
  // calculate number of unique cols across both rows
  int row = (blockIdx.x * TPB_X) + threadIdx.x;

  if (row < m) {
    int a_start_idx = a_ind[row];
    int a_stop_idx  = get_stop_idx(row, m, nnz1, a_ind);

    int b_start_idx = b_ind[row];
    int b_stop_idx  = get_stop_idx(row, m, nnz2, b_ind);

    /**
     * Union of columns within each row of A and B so that we can scan through
     * them, adding their values together.
     */
    int max_size = (a_stop_idx - a_start_idx) + (b_stop_idx - b_start_idx);

    // NOTE(review): device-side `new` returns nullptr when the device heap is
    // exhausted and that is not checked here -- TODO confirm callers keep row
    // lengths within the default device-heap budget.
    int* arr        = new int[max_size];
    int cur_arr_idx = 0;
    // Seed the scratch array with all of A's columns for this row.
    for (int j = a_start_idx; j < a_stop_idx; j++) {
      arr[cur_arr_idx] = a_indptr[j];
      cur_arr_idx++;
    }

    int arr_size   = cur_arr_idx;
    int final_size = arr_size;

    // Each column of B contributes to the count only if it does not already
    // appear in A's row (linear search; quadratic in row length).
    for (int j = b_start_idx; j < b_stop_idx; j++) {
      int cur_col = b_indptr[j];
      bool found  = false;
      for (int k = 0; k < arr_size; k++) {
        if (arr[k] == cur_col) {
          found = true;
          break;
        }
      }

      if (!found) { final_size++; }
    }

    out_rowcounts[row] = final_size;
    // out_rowcounts[m] accumulates the total nnz of the output matrix.
    raft::myAtomicAdd(out_rowcounts + m, final_size);

    delete[] arr;
  }
}
/**
 * @brief Compute C = A + B for two CSR matrices, one thread per row.
 *
 * out_ind must already contain the output row offsets (as produced by
 * csr_add_calc_inds). For each row, A's columns and values are copied into
 * the output first; B's entries are then merged in, summing values on
 * matching columns and appending unmatched columns at the end of the row.
 * Output columns within a row are therefore not sorted in general.
 *
 * Naming note: *_ind arrays hold CSR row offsets and *_indptr arrays hold
 * column indices in this file.
 */
template <typename T, int TPB_X = 128>
RAFT_KERNEL csr_add_kernel(const int* a_ind,
                           const int* a_indptr,
                           const T* a_val,
                           int nnz1,
                           const int* b_ind,
                           const int* b_indptr,
                           const T* b_val,
                           int nnz2,
                           int m,
                           int* out_ind,
                           int* out_indptr,
                           T* out_val)
{
  // 1 thread per row
  int row = (blockIdx.x * TPB_X) + threadIdx.x;

  if (row < m) {
    int a_start_idx = a_ind[row];

    // TODO: Shouldn't need this if rowind is proper CSR
    int a_stop_idx = get_stop_idx(row, m, nnz1, a_ind);

    int b_start_idx = b_ind[row];
    int b_stop_idx  = get_stop_idx(row, m, nnz2, b_ind);

    int o_idx = out_ind[row];

    // Copy A's row verbatim into the output.
    int cur_o_idx = o_idx;
    for (int j = a_start_idx; j < a_stop_idx; j++) {
      out_indptr[cur_o_idx] = a_indptr[j];
      out_val[cur_o_idx]    = a_val[j];
      cur_o_idx++;
    }

    // arr_size tracks how many output slots this row currently occupies.
    int arr_size = cur_o_idx - o_idx;
    for (int j = b_start_idx; j < b_stop_idx; j++) {
      int cur_col = b_indptr[j];
      bool found  = false;
      for (int k = o_idx; k < o_idx + arr_size; k++) {
        // If we found a match, sum the two values
        if (out_indptr[k] == cur_col) {
          out_val[k] += b_val[j];
          found = true;
          break;
        }
      }

      // if we didn't find a match, add the value for b
      if (!found) {
        out_indptr[o_idx + arr_size] = cur_col;
        out_val[o_idx + arr_size]    = b_val[j];
        arr_size++;
      }
    }
  }
}
/**
* @brief Calculate the CSR row_ind array that would result
* from summing together two CSR matrices
* @param a_ind: left hand row_ind array
* @param a_indptr: left hand index_ptr array
* @param a_val: left hand data array
* @param nnz1: size of left hand index_ptr and val arrays
* @param b_ind: right hand row_ind array
* @param b_indptr: right hand index_ptr array
* @param b_val: right hand data array
* @param nnz2: size of right hand index_ptr and val arrays
 * @param m: size of output array (number of rows in final matrix)
 * @param out_ind: output row_ind array
 * @param stream: cuda stream to use
 * @return number of nonzero entries in the summed (output) matrix
 */
template <typename T, int TPB_X = 128>
size_t csr_add_calc_inds(const int* a_ind,
                         const int* a_indptr,
                         const T* a_val,
                         int nnz1,
                         const int* b_ind,
                         const int* b_indptr,
                         const T* b_val,
                         int nnz2,
                         int m,
                         int* out_ind,
                         cudaStream_t stream)
{
  dim3 grid(raft::ceildiv(m, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  // row_counts[0..m) receives the per-row output nnz; row_counts[m] is used
  // by the kernel as an atomic accumulator for the total output nnz.
  rmm::device_uvector<int> row_counts(m + 1, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(row_counts.data(), 0, (m + 1) * sizeof(int), stream));

  csr_add_calc_row_counts_kernel<T, TPB_X><<<grid, blk, 0, stream>>>(
    a_ind, a_indptr, a_val, nnz1, b_ind, b_indptr, b_val, nnz2, m, row_counts.data());
  // Surface launch-configuration errors immediately (consistent with
  // csr_add_finalize below).
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  int cnnz = 0;
  raft::update_host(&cnnz, row_counts.data() + m, 1, stream);
  // The host copy must complete before cnnz can be returned.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));

  // create csr compressed row index from row counts
  thrust::device_ptr<int> row_counts_d = thrust::device_pointer_cast(row_counts.data());
  thrust::device_ptr<int> c_ind_d      = thrust::device_pointer_cast(out_ind);
  exclusive_scan(rmm::exec_policy(stream), row_counts_d, row_counts_d + m, c_ind_d);

  return cnnz;
}
/**
 * @brief Compute the final column index and value arrays that result
 * from summing together two CSR matrices
* @param a_ind: left hand row_ind array
* @param a_indptr: left hand index_ptr array
* @param a_val: left hand data array
* @param nnz1: size of left hand index_ptr and val arrays
* @param b_ind: right hand row_ind array
* @param b_indptr: right hand index_ptr array
* @param b_val: right hand data array
* @param nnz2: size of right hand index_ptr and val arrays
* @param m: size of output array (number of rows in final matrix)
* @param c_ind: output row_ind array
* @param c_indptr: output ind_ptr array
* @param c_val: output data array
* @param stream: cuda stream to use
*/
template <typename T, int TPB_X = 128>
void csr_add_finalize(const int* a_ind,
                      const int* a_indptr,
                      const T* a_val,
                      int nnz1,
                      const int* b_ind,
                      const int* b_indptr,
                      const T* b_val,
                      int nnz2,
                      int m,
                      int* c_ind,
                      int* c_indptr,
                      T* c_val,
                      cudaStream_t stream)
{
  // Launch one thread per output row.
  dim3 nblocks(raft::ceildiv(m, TPB_X), 1, 1);
  dim3 nthreads(TPB_X, 1, 1);

  csr_add_kernel<T, TPB_X><<<nblocks, nthreads, 0, stream>>>(
    a_ind, a_indptr, a_val, nnz1, b_ind, b_indptr, b_val, nnz2, m, c_ind, c_indptr, c_val);
  // Catch launch-configuration errors without clearing the error state.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end NAMESPACE detail
}; // end NAMESPACE linalg
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/csr.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CSR_H
#define __CSR_H
#pragma once
#include <raft/sparse/convert/detail/adj_to_csr.cuh>
#include <raft/sparse/convert/detail/csr.cuh>
#include <raft/sparse/csr.hpp>
namespace raft {
namespace sparse {
namespace convert {
/**
 * @brief Convert a COO matrix (not necessarily sorted) into CSR format.
 *
 * @param handle raft resources handle (provides the stream and cusparse handle)
 * @param srcRows COO row indices array (size nnz)
 * @param srcCols COO column indices array (size nnz)
 * @param srcVals COO values array (size nnz)
 * @param nnz number of nonzeros
 * @param m number of rows
 * @param dst_offsets output CSR row offsets array (size m + 1)
 * @param dstCols output CSR column indices array (size nnz)
 * @param dstVals output CSR values array (size nnz)
 *
 * NOTE(review): the detail implementation passes m as both the row and column
 * dimension to the cusparse COO sort -- presumably column indices must be < m;
 * verify for non-square inputs.
 */
template <typename value_t>
void coo_to_csr(raft::resources const& handle,
                const int* srcRows,
                const int* srcCols,
                const value_t* srcVals,
                int nnz,
                int m,
                int* dst_offsets,
                int* dstCols,
                value_t* dstVals)
{
  detail::coo_to_csr(handle, srcRows, srcCols, srcVals, nnz, m, dst_offsets, dstCols, dstVals);
}
/**
* @brief Generate the row indices array for a sorted COO matrix
*
* @param rows: COO rows array
* @param nnz: size of COO rows array
* @param row_ind: output row indices array
* @param m: number of rows in dense matrix
* @param stream: cuda stream to use
*/
template <typename T>
void sorted_coo_to_csr(const T* rows, int nnz, T* row_ind, int m, cudaStream_t stream)
{
  // Thin public wrapper; the work happens in the detail namespace.
  detail::sorted_coo_to_csr<T>(rows, nnz, row_ind, m, stream);
}
/**
* @brief Generate the row indices array for a sorted COO matrix
*
* @param coo: Input COO matrix
* @param row_ind: output row indices array
* @param stream: cuda stream to use
*/
template <typename T>
void sorted_coo_to_csr(COO<T>* coo, int* row_ind, cudaStream_t stream)
{
  // Unpack the COO container and forward to the pointer-based implementation.
  auto* coo_rows = coo->rows();
  auto n_rows    = coo->n_rows;
  detail::sorted_coo_to_csr(coo_rows, coo->nnz, row_ind, n_rows, stream);
}
/**
* @brief Converts a boolean adjacency matrix into unsorted CSR format.
*
* The conversion supports non-square matrices.
*
* @tparam index_t Indexing arithmetic type
*
* @param[in] handle RAFT handle
* @param[in] adj A num_rows x num_cols boolean matrix in contiguous row-major
* format.
* @param[in] row_ind An array of length num_rows that indicates at which index
* a row starts in out_col_ind. Equivalently, it is the
* exclusive scan of the number of non-zeros in each row of
* adj.
* @param[in] num_rows Number of rows of adj.
* @param[in] num_cols Number of columns of adj.
* @param tmp A pre-allocated array of size num_rows.
* @param[out] out_col_ind An array containing the column indices of the
* non-zero values in adj. Size should be at least the
* number of non-zeros in adj.
*/
template <typename index_t = int>
void adj_to_csr(raft::resources const& handle,
                const bool* adj,         // Row-major adjacency matrix
                const index_t* row_ind,  // Precomputed row indices
                index_t num_rows,        // # rows of adj
                index_t num_cols,        // # cols of adj
                index_t* tmp,  // Pre-allocated atomic counters. Minimum size: num_rows elements.
                index_t* out_col_ind  // Output column indices
)
{
  // Thin public wrapper; validation and the kernel launch live in detail.
  detail::adj_to_csr<index_t>(handle, adj, row_ind, num_rows, num_cols, tmp, out_col_ind);
}
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/coo.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COO_H
#define __COO_H
#pragma once
#include <raft/sparse/convert/detail/coo.cuh>
namespace raft {
namespace sparse {
namespace convert {
/**
* @brief Convert a CSR row_ind array to a COO rows array
* @param row_ind: Input CSR row_ind array
* @param m: size of row_ind array
* @param coo_rows: Output COO row array
* @param nnz: size of output COO row array
* @param stream: cuda stream to use
*/
template <typename value_idx = int>
void csr_to_coo(
  const value_idx* row_ind, value_idx m, value_idx* coo_rows, value_idx nnz, cudaStream_t stream)
{
  // Forward to the detail implementation with its default 32-thread blocks.
  detail::csr_to_coo<value_idx, 32>(row_ind, m, coo_rows, nnz, stream);
}
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/dense.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DENSE_H
#define __DENSE_H
#pragma once
#include <raft/sparse/convert/detail/dense.cuh>
namespace raft {
namespace sparse {
namespace convert {
/**
* Convert CSR arrays to a dense matrix in either row-
* or column-major format. A custom kernel is used when
* row-major output is desired since cusparse does not
* output row-major.
* @tparam value_idx : data type of the CSR index arrays
* @tparam value_t : data type of the CSR value array
* @param[in] handle : cusparse handle for conversion
* @param[in] nrows : number of rows in CSR
* @param[in] ncols : number of columns in CSR
* @param[in] nnz : number of nonzeros in CSR
* @param[in] csr_indptr : CSR row index pointer array
* @param[in] csr_indices : CSR column indices array
* @param[in] csr_data : CSR data array
* @param[in] lda : Leading dimension (used for col-major only)
* @param[out] out : Dense output array of size nrows * ncols
* @param[in] stream : Cuda stream for ordering events
* @param[in] row_major : Is row-major output desired?
*/
template <typename value_idx, typename value_t>
void csr_to_dense(cusparseHandle_t handle,
                  value_idx nrows,
                  value_idx ncols,
                  value_idx nnz,
                  const value_idx* csr_indptr,
                  const value_idx* csr_indices,
                  const value_t* csr_data,
                  value_idx lda,
                  value_t* out,
                  cudaStream_t stream,
                  bool row_major = true)
{
  // Thin public wrapper; cusparse (col-major) vs. custom-kernel (row-major)
  // dispatch happens inside the detail implementation.
  detail::csr_to_dense<value_idx, value_t>(
    handle, nrows, ncols, nnz, csr_indptr, csr_indices, csr_data, lda, out, stream, row_major);
}
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/convert | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/detail/csr.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
#include <raft/sparse/linalg/degree.cuh>
#include <raft/sparse/op/row_op.cuh>
namespace raft {
namespace sparse {
namespace convert {
namespace detail {
/**
 * @brief Convert a COO matrix (not necessarily sorted) into CSR format.
 *
 * Copies the COO rows/cols, sorts the copies by row with cusparse, gathers
 * the values through the resulting permutation, and compresses the sorted
 * row indices into CSR offsets. Blocks until the conversion is complete.
 *
 * @param handle raft resources handle (provides stream and cusparse handle)
 * @param srcRows COO row indices (size nnz)
 * @param srcCols COO column indices (size nnz)
 * @param srcVals COO values (size nnz)
 * @param nnz number of nonzeros
 * @param m number of rows (also passed as the column-dimension hint to the
 *        cusparse sort -- assumes column indices are < m; TODO confirm)
 * @param dst_offsets output CSR row offsets (size m + 1)
 * @param dstCols output CSR column indices (size nnz)
 * @param dstVals output CSR values (size nnz)
 */
template <typename value_t>
void coo_to_csr(raft::resources const& handle,
                const int* srcRows,
                const int* srcCols,
                const value_t* srcVals,
                int nnz,
                int m,
                int* dst_offsets,
                int* dstCols,
                value_t* dstVals)
{
  auto stream         = resource::get_cuda_stream(handle);
  auto cusparseHandle = resource::get_cusparse_handle(handle);
  rmm::device_uvector<int> dstRows(nnz, stream);
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(dstRows.data(), srcRows, sizeof(int) * nnz, cudaMemcpyDeviceToDevice, stream));
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(dstCols, srcCols, sizeof(int) * nnz, cudaMemcpyDeviceToDevice, stream));
  auto buffSize = raft::sparse::detail::cusparsecoosort_bufferSizeExt(
    cusparseHandle, m, m, nnz, srcRows, srcCols, stream);
  rmm::device_uvector<char> pBuffer(buffSize, stream);
  rmm::device_uvector<int> P(nnz, stream);
  RAFT_CUSPARSE_TRY(cusparseCreateIdentityPermutation(cusparseHandle, nnz, P.data()));
  raft::sparse::detail::cusparsecoosortByRow(
    cusparseHandle, m, m, nnz, dstRows.data(), dstCols, P.data(), pBuffer.data(), stream);
  raft::sparse::detail::cusparsegthr(cusparseHandle, nnz, srcVals, dstVals, P.data(), stream);
  raft::sparse::detail::cusparsecoo2csr(cusparseHandle, dstRows.data(), nnz, m, dst_offsets, stream);
  // All work above is enqueued on `stream`; synchronize only that stream
  // instead of the whole device so other streams are not stalled.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
}
/**
* @brief Generate the row indices array for a sorted COO matrix
*
* @param rows: COO rows array
* @param nnz: size of COO rows array
* @param row_ind: output row indices array
* @param m: number of rows in dense matrix
* @param stream: cuda stream to use
*/
template <typename T>
void sorted_coo_to_csr(const T* rows, int nnz, T* row_ind, int m, cudaStream_t stream)
{
  // Per-row nonzero counts, zero-initialized on the stream.
  rmm::device_uvector<T> counts(m, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(counts.data(), 0, m * sizeof(T), stream));
  linalg::coo_degree(rows, nnz, counts.data(), stream);

  // The CSR row pointer is the exclusive prefix sum of the per-row counts.
  thrust::device_ptr<T> counts_begin = thrust::device_pointer_cast(counts.data());
  thrust::device_ptr<T> out_begin    = thrust::device_pointer_cast(row_ind);
  exclusive_scan(rmm::exec_policy(stream), counts_begin, counts_begin + m, out_begin);
}
}; // end NAMESPACE detail
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/convert | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/detail/coo.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/detail/utils.h>
namespace raft {
namespace sparse {
namespace convert {
namespace detail {
template <typename value_idx = int, int TPB_X = 32>
RAFT_KERNEL csr_to_coo_kernel(const value_idx* row_ind,
                              value_idx m,
                              value_idx* coo_rows,
                              value_idx nnz)
{
  // One thread expands one CSR row into its COO row entries.
  value_idx tid = (blockIdx.x * TPB_X) + threadIdx.x;
  if (tid >= m) return;

  value_idx begin = row_ind[tid];
  value_idx end   = get_stop_idx(tid, m, nnz, row_ind);
  for (value_idx out = begin; out < end; out++) {
    coo_rows[out] = tid;
  }
}
/**
* @brief Convert a CSR row_ind array to a COO rows array
* @param row_ind: Input CSR row_ind array
* @param m: size of row_ind array
* @param coo_rows: Output COO row array
* @param nnz: size of output COO row array
* @param stream: cuda stream to use
*/
template <typename value_idx = int, int TPB_X = 32>
void csr_to_coo(
  const value_idx* row_ind, value_idx m, value_idx* coo_rows, value_idx nnz, cudaStream_t stream)
{
  // @TODO: Use cusparse for this.
  // One thread per CSR row.
  dim3 nblocks(raft::ceildiv(m, (value_idx)TPB_X), 1, 1);
  dim3 nthreads(TPB_X, 1, 1);

  csr_to_coo_kernel<value_idx, TPB_X><<<nblocks, nthreads, 0, stream>>>(row_ind, m, coo_rows, nnz);
  RAFT_CUDA_TRY(cudaGetLastError());
}
}; // end NAMESPACE detail
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/convert | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/detail/adj_to_csr.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cooperative_groups.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>
#include <raft/util/vectorized.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace sparse {
namespace convert {
namespace detail {
// Threads per block in adj_to_csr_kernel.
static const constexpr int adj_to_csr_tpb = 512;
/**
* @brief Convert dense adjacency matrix into unsorted CSR format.
*
* The adj_to_csr kernel converts a boolean adjacency matrix into CSR
* format. High performance comes at the cost of non-deterministic output: the
* column indices are not guaranteed to be stored in order.
*
* The kernel has been optimized to handle matrices that are non-square, for
* instance subsets of a full adjacency matrix. In practice, these matrices can
* be very wide and not very tall. In principle, each row is assigned to one
* block. If there are more SMs than rows, multiple blocks operate on a single
* row. To enable cooperation between these blocks, each row is provided a
* counter where the current output index can be cooperatively (atomically)
* incremented. As a result, the order of the output indices is not guaranteed.
*
* @param[in] adj A num_rows x num_cols boolean matrix in contiguous row-major
* format.
* @param[in] row_ind An array of length num_rows that indicates at which index
* a row starts in out_col_ind. Equivalently, it is the
* exclusive scan of the number of non-zeros in each row of
* adj.
* @param[in] num_rows Number of rows of adj.
* @param[in] num_cols Number of columns of adj.
* @param[in,out] row_counters A temporary zero-initialized array of length num_rows.
* @param[out] out_col_ind An array containing the column indices of the
* non-zero values in `adj`. Size should be at least
* the number of non-zeros in `adj`.
*/
template <typename index_t>
RAFT_KERNEL __launch_bounds__(adj_to_csr_tpb)
  adj_to_csr_kernel(const bool* adj,         // row-major adjacency matrix
                    const index_t* row_ind,  // precomputed row indices
                    index_t num_rows,        // # rows of adj
                    index_t num_cols,        // # cols of adj
                    index_t* row_counters,   // pre-allocated (zeroed) atomic counters
                    index_t* out_col_ind     // output column indices
  )
{
  // Each vectorized load reads 16 bools (16 bytes) at once.
  const int chunk_size = 16;
  typedef raft::TxN_t<bool, chunk_size> chunk_bool;

  // Blocks stride over rows in the y-direction; several blocks along x may
  // cooperate on the same row via the shared atomic counter row_counters[i].
  for (index_t i = blockIdx.y; i < num_rows; i += gridDim.y) {
    // Load row information
    index_t row_base   = row_ind[i];
    index_t* row_count = row_counters + i;
    const bool* row    = adj + i * num_cols;

    // Peeling: process the first j0 elements that are not aligned to a chunk_size-byte
    // boundary. Only the first block along x handles this prefix.
    index_t j0 = (chunk_size - (((uintptr_t)(const void*)row) % chunk_size)) % chunk_size;
    j0         = min(j0, num_cols);
    if (threadIdx.x < j0 && blockIdx.x == 0) {
      // atomicIncWarp claims the next free slot in this row's output range.
      if (row[threadIdx.x]) { out_col_ind[row_base + atomicIncWarp(row_count)] = threadIdx.x; }
    }

    // Process the rest of the row in chunk_size byte chunks starting at j0.
    // This is a grid-stride loop.
    index_t j = j0 + chunk_size * (blockIdx.x * blockDim.x + threadIdx.x);
    for (; j + chunk_size - 1 < num_cols; j += chunk_size * (blockDim.x * gridDim.x)) {
      chunk_bool chunk;
      chunk.load(row, j);
      for (int k = 0; k < chunk_size; ++k) {
        if (chunk.val.data[k]) { out_col_ind[row_base + atomicIncWarp(row_count)] = j + k; }
      }
    }

    // Remainder: process the last j1 bools in the row individually.
    // Again handled only by the first block along x.
    index_t j1 = (num_cols - j0) % chunk_size;
    if (threadIdx.x < j1 && blockIdx.x == 0) {
      int j = num_cols - j1 + threadIdx.x;
      if (row[j]) { out_col_ind[row_base + atomicIncWarp(row_count)] = j; }
    }
  }
}
/**
* @brief Converts a boolean adjacency matrix into unsorted CSR format.
*
* The conversion supports non-square matrices.
*
* @tparam index_t Indexing arithmetic type
*
* @param[in] handle RAFT handle
* @param[in] adj A num_rows x num_cols boolean matrix in contiguous row-major
* format.
* @param[in] row_ind An array of length num_rows that indicates at which index
* a row starts in out_col_ind. Equivalently, it is the
* exclusive scan of the number of non-zeros in each row of
* adj.
* @param[in] num_rows Number of rows of adj.
* @param[in] num_cols Number of columns of adj.
* @param tmp A pre-allocated array of size num_rows.
* @param[out] out_col_ind An array containing the column indices of the
* non-zero values in adj. Size should be at least the
* number of non-zeros in adj.
*/
template <typename index_t = int>
void adj_to_csr(raft::resources const& handle,
                const bool* adj,         // row-major adjacency matrix
                const index_t* row_ind,  // precomputed row indices
                index_t num_rows,        // # rows of adj
                index_t num_cols,        // # cols of adj
                index_t* tmp,            // pre-allocated atomic counters
                index_t* out_col_ind     // output column indices
)
{
  auto stream = resource::get_cuda_stream(handle);

  // Check inputs and return early if possible.
  if (num_rows == 0 || num_cols == 0) { return; }
  RAFT_EXPECTS(tmp != nullptr, "adj_to_csr: tmp workspace may not be null.");

  // Zero-fill a temporary vector that is be used by the kernel to keep track of
  // the number of entries added to a row.
  RAFT_CUDA_TRY(cudaMemsetAsync(tmp, 0, num_rows * sizeof(index_t), stream));

  // Split the grid in the row direction (since each row can be processed
  // independently). If the maximum number of active blocks (num_sms *
  // occupancy) exceeds the number of rows, assign multiple blocks to a single
  // row.
  int dev_id, sm_count, blocks_per_sm;
  // Check the return codes of the device queries: an earlier sticky CUDA
  // error would otherwise silently yield a garbage launch configuration.
  RAFT_CUDA_TRY(cudaGetDevice(&dev_id));
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, dev_id));
  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &blocks_per_sm, adj_to_csr_kernel<index_t>, adj_to_csr_tpb, 0));

  index_t max_active_blocks = sm_count * blocks_per_sm;
  index_t blocks_per_row    = raft::ceildiv(max_active_blocks, num_rows);
  index_t grid_rows         = raft::ceildiv(max_active_blocks, blocks_per_row);
  dim3 block(adj_to_csr_tpb, 1);
  dim3 grid(blocks_per_row, grid_rows);

  adj_to_csr_kernel<index_t>
    <<<grid, block, 0, stream>>>(adj, row_ind, num_rows, num_cols, tmp, out_col_ind);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end NAMESPACE detail
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/sparse/convert | rapidsai_public_repos/raft/cpp/include/raft/sparse/convert/detail/dense.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/sparse/detail/cusparse_wrappers.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <raft/sparse/detail/utils.h>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace sparse {
namespace convert {
namespace detail {
/**
 * @brief Scatter one CSR row into a dense row-major output buffer.
 *
 * Launched with one block per row; threads in the block stride over the
 * row's nonzeros. The output buffer is expected to be zeroed beforehand.
 */
template <typename value_t>
RAFT_KERNEL csr_to_dense_warp_per_row_kernel(
  int n_cols, const value_t* csrVal, const int* csrRowPtr, const int* csrColInd, value_t* a)
{
  int row       = blockIdx.x;
  int row_begin = csrRowPtr[row];
  int row_nnz   = csrRowPtr[row + 1] - row_begin;

  for (int k = threadIdx.x; k < row_nnz; k += blockDim.x) {
    // idx is always < csrRowPtr[row + 1] since k < row_nnz.
    int idx                          = row_begin + k;
    a[row * n_cols + csrColInd[idx]] = csrVal[idx];
  }
}
/**
* Convert CSR arrays to a dense matrix in either row-
* or column-major format. A custom kernel is used when
* row-major output is desired since cusparse does not
* output row-major.
* @tparam value_idx : data type of the CSR index arrays
* @tparam value_t : data type of the CSR value array
* @param[in] handle : cusparse handle for conversion
* @param[in] nrows : number of rows in CSR
* @param[in] ncols : number of columns in CSR
* @param[in] nnz : the number of nonzeros in CSR
* @param[in] csr_indptr : CSR row index pointer array
* @param[in] csr_indices : CSR column indices array
* @param[in] csr_data : CSR data array
* @param[in] lda : Leading dimension (used for col-major only)
* @param[out] out : Dense output array of size nrows * ncols
* @param[in] stream : Cuda stream for ordering events
* @param[in] row_major : Is row-major output desired?
*/
template <typename value_idx, typename value_t>
void csr_to_dense(cusparseHandle_t handle,
                  value_idx nrows,
                  value_idx ncols,
                  value_idx nnz,
                  const value_idx* csr_indptr,
                  const value_idx* csr_indices,
                  const value_t* csr_data,
                  value_idx lda,
                  value_t* out,
                  cudaStream_t stream,
                  bool row_major = true)
{
  if (!row_major) {
    /**
     * If we need col-major, use cusparse.
     */
    cusparseMatDescr_t out_mat;
    RAFT_CUSPARSE_TRY(cusparseCreateMatDescr(&out_mat));
    RAFT_CUSPARSE_TRY(cusparseSetMatIndexBase(out_mat, CUSPARSE_INDEX_BASE_ZERO));
    RAFT_CUSPARSE_TRY(cusparseSetMatType(out_mat, CUSPARSE_MATRIX_TYPE_GENERAL));

    size_t buffer_size;
    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsr2dense_buffersize(handle,
                                                                         nrows,
                                                                         ncols,
                                                                         nnz,
                                                                         out_mat,
                                                                         csr_data,
                                                                         csr_indptr,
                                                                         csr_indices,
                                                                         out,
                                                                         lda,
                                                                         &buffer_size,
                                                                         stream));

    rmm::device_uvector<char> buffer(buffer_size, stream);

    RAFT_CUSPARSE_TRY(raft::sparse::detail::cusparsecsr2dense(handle,
                                                              nrows,
                                                              ncols,
                                                              nnz,
                                                              out_mat,
                                                              csr_data,
                                                              csr_indptr,
                                                              csr_indices,
                                                              out,
                                                              lda,
                                                              buffer.data(),
                                                              stream));

    RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroyMatDescr(out_mat));
  } else {
    int blockdim = block_dim(ncols);
    // Cast to size_t before multiplying: value_idx * value_idx could overflow
    // for large matrices before being widened by sizeof().
    RAFT_CUDA_TRY(
      cudaMemsetAsync(out, 0, static_cast<size_t>(nrows) * ncols * sizeof(value_t), stream));
    csr_to_dense_warp_per_row_kernel<<<nrows, blockdim, 0, stream>>>(
      ncols, csr_data, csr_indptr, csr_indices, out);
    // Surface launch-configuration errors from the custom kernel.
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
}; // namespace detail
}; // end NAMESPACE convert
}; // end NAMESPACE sparse
}; // end NAMESPACE raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/sample_without_replacement.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/rng_impl.cuh"
#include "rng_state.hpp"
#include <cassert>
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <type_traits>
#include <variant>
namespace raft::random {
namespace sample_without_replacement_impl {
// Maps the (possibly reference-qualified, decayed by the caller) type of the
// optional weights argument to the element type that the detail implementation
// should use for weights.
template <typename T>
struct weight_alias {};
// No weights supplied (std::nullopt): fall back to double-precision weights.
template <>
struct weight_alias<std::nullopt_t> {
  using type = double;
};
// A weights vector was supplied: use its element type.
template <typename ElementType, typename IndexType>
struct weight_alias<std::optional<raft::device_vector_view<ElementType, IndexType>>> {
  using type = typename raft::device_vector_view<ElementType, IndexType>::value_type;
};
// Convenience alias for weight_alias<T>::type.
template <typename T>
using weight_t = typename weight_alias<T>::type;
} // namespace sample_without_replacement_impl
/**
* \defgroup sample_without_replacement Sampling without Replacement
* @{
*/
/**
 * @brief Sample the input vector without replacement, optionally based on the
 * input weight vector for each element in the array.
 *
 * The implementation is based on the `one-pass sampling` algorithm described in
 * ["Accelerating weighted random sampling without
 * replacement,"](https://www.ethz.ch/content/dam/ethz/special-interest/baug/ivt/ivt-dam/vpl/reports/1101-1200/ab1141.pdf)
 * a technical report by Kirill Mueller.
 *
 * When no weights vector is given, elements are sampled uniformly. When
 * weights are given, the sampled elements always appear in increasing order
 * of their weights as computed using the exponential distribution; if the
 * output order matters (e.g., array permutations), this may not be the right
 * choice.
 *
 * @tparam DataT type of each element of the input array @c in
 * @tparam IdxT type of the dimensions of the arrays; output index type
 * @tparam WeightsVectorType std::optional<raft::device_vector_view<const weight_type, IdxT>> of
 * each elements of the weights array @c weights_opt
 * @tparam OutIndexVectorType std::optional<raft::device_vector_view<IdxT, IdxT>> of output indices
 * @c outIdx_opt
 *
 * @note Please do not specify template parameters explicitly,
 * as the compiler can deduce them from the arguments.
 *
 * @param[in] handle RAFT handle containing (among other resources)
 * the CUDA stream on which to run.
 * @param[inout] rng_state Pseudorandom number generator state.
 * @param[in] in Input vector to be sampled.
 * @param[in] weights_opt std::optional weights vector.
 * If not provided, uniform sampling will be used.
 * @param[out] out Vector of samples from the input vector.
 * @param[out] outIdx_opt std::optional vector of the indices
 * sampled from the input array.
 *
 * @pre The number of samples `out.extent(0)`
 * is less than or equal to the number of inputs `in.extent(0)`.
 *
 * @pre If weights are provided, their number `weights_opt->extent(0)`
 * equals the number of inputs `in.extent(0)`.
 */
template <typename DataT, typename IdxT, typename WeightsVectorType, class OutIndexVectorType>
void sample_without_replacement(raft::resources const& handle,
                                RngState& rng_state,
                                raft::device_vector_view<const DataT, IdxT> in,
                                WeightsVectorType&& weights_opt,
                                raft::device_vector_view<DataT, IdxT> out,
                                OutIndexVectorType&& outIdx_opt)
{
  static_assert(std::is_integral<IdxT>::value, "IdxT must be an integral type.");

  // Element type to use for weights, derived from the (decayed) optional type.
  using weight_type = sample_without_replacement_impl::weight_t<
    std::remove_const_t<std::remove_reference_t<WeightsVectorType>>>;

  // Normalize the forwarded arguments into concrete std::optional views.
  std::optional<raft::device_vector_view<const weight_type, IdxT>> weights_view =
    std::forward<WeightsVectorType>(weights_opt);
  std::optional<raft::device_vector_view<IdxT, IdxT>> indices_view =
    std::forward<OutIndexVectorType>(outIdx_opt);

  const IdxT n_sampled = out.extent(0);
  const IdxT n_input   = in.extent(0);

  RAFT_EXPECTS(n_sampled <= n_input,
               "sampleWithoutReplacement: "
               "sampledLen (out.extent(0)) must be <= len (in.extent(0))");
  RAFT_EXPECTS(n_input == 0 || in.data_handle() != nullptr,
               "sampleWithoutReplacement: "
               "If in.extents(0) != 0, then in.data_handle() must be nonnull");
  RAFT_EXPECTS(n_sampled == 0 || out.data_handle() != nullptr,
               "sampleWithoutReplacement: "
               "If out.extents(0) != 0, then out.data_handle() must be nonnull");

  // Optional output-index buffer: validate its length before use.
  IdxT* out_idx_ptr = nullptr;
  if (indices_view.has_value()) {
    RAFT_EXPECTS((*indices_view).extent(0) == n_sampled,
                 "sampleWithoutReplacement: "
                 "If outIdx is provided, its extent(0) must equal out.extent(0)");
    out_idx_ptr = (*indices_view).data_handle();
  }

  // Optional weights: validate their length before use.
  const weight_type* weights_ptr = nullptr;
  if (weights_view.has_value()) {
    RAFT_EXPECTS((*weights_view).extent(0) == n_input,
                 "sampleWithoutReplacement: "
                 "If wts is provided, its extent(0) must equal in.extent(0)");
    weights_ptr = (*weights_view).data_handle();
  }

  detail::sampleWithoutReplacement(rng_state,
                                   out.data_handle(),
                                   out_idx_ptr,
                                   in.data_handle(),
                                   weights_ptr,
                                   n_sampled,
                                   n_input,
                                   resource::get_cuda_stream(handle));
}
/**
 * @brief Overload of `sample_without_replacement` to help the
 * compiler find the above overload, in case users pass in
 * `std::nullopt` for one or both of the optional arguments.
 *
 * Please see above for documentation of `sample_without_replacement`.
 *
 * @note Takes forwarding references (`Args&&...`) rather than values: the
 * main overload receives `rng_state` by non-const lvalue reference, so a
 * by-value parameter pack would forward it as an rvalue (which cannot bind
 * to `RngState&`) and would also copy the `raft::resources` handle.
 */
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 5>>
void sample_without_replacement(Args&&... args)
{
  sample_without_replacement(std::forward<Args>(args)..., std::nullopt);
}
/** @} */
} // end namespace raft::random | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/make_regression.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Adapted from scikit-learn
* https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
*/
#ifndef __MAKE_REGRESSION_H
#define __MAKE_REGRESSION_H
#pragma once
#include <algorithm>
#include <optional>
#include <raft/core/mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include "detail/make_regression.cuh"
namespace raft::random {
/**
 * @brief GPU-equivalent of sklearn.datasets.make_regression as documented at:
 * https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html
 *
 * @tparam DataT Scalar type
 * @tparam IdxT Index type
 *
 * @param[in] handle RAFT handle
 * @param[out] out Row-major (samples, features) matrix to store
 * the problem data
 * @param[out] values Row-major (samples, targets) matrix to store
 * the values for the regression problem
 * @param[in] n_rows Number of samples
 * @param[in] n_cols Number of features
 * @param[in] n_informative Number of informative features (non-zero
 * coefficients)
 * @param[in] stream CUDA stream
 * @param[out] coef Row-major (features, targets) matrix to store
 * the coefficients used to generate the values
 * for the regression problem; pass nullptr to
 * skip writing them
 * @param[in] n_targets Number of targets (generated values per sample)
 * @param[in] bias A scalar that will be added to the values
 * @param[in] effective_rank The approximate rank of the data matrix (used
 * to create correlations in the data); -1 means
 * well-conditioned data
 * @param[in] tail_strength The relative importance of the fat noisy tail
 * of the singular values profile if
 * effective_rank is not -1
 * @param[in] noise Standard deviation of the Gaussian noise
 * applied to the output
 * @param[in] shuffle Shuffle the samples and the features
 * @param[in] seed Seed for the random number generator
 * @param[in] type Random generator type
 */
template <typename DataT, typename IdxT>
void make_regression(raft::resources const& handle,
                     DataT* out,
                     DataT* values,
                     IdxT n_rows,
                     IdxT n_cols,
                     IdxT n_informative,
                     cudaStream_t stream,
                     DataT* coef = nullptr,
                     IdxT n_targets = (IdxT)1,
                     DataT bias = (DataT)0.0,
                     IdxT effective_rank = (IdxT)-1,
                     DataT tail_strength = (DataT)0.5,
                     DataT noise = (DataT)0.0,
                     bool shuffle = true,
                     uint64_t seed = 0ULL,
                     GeneratorType type = GenPC)
{
  // Thin forwarder; all of the work happens in the detail namespace.
  detail::make_regression_caller(handle,
                                 out,
                                 values,
                                 n_rows,
                                 n_cols,
                                 n_informative,
                                 stream,
                                 coef,
                                 n_targets,
                                 bias,
                                 effective_rank,
                                 tail_strength,
                                 noise,
                                 shuffle,
                                 seed,
                                 type);
}
/**
* @defgroup make_regression Generate Dataset for Regression Model
* @{
*/
/**
 * @brief GPU-equivalent of sklearn.datasets.make_regression as documented at:
 * https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html
 *
 * @tparam DataT Scalar type
 * @tparam IdxT Index type
 *
 * @param[in] handle RAFT handle
 * @param[out] out Row-major (samples, features) matrix to store
 * the problem data
 * @param[out] values Row-major (samples, targets) matrix to store
 * the values for the regression problem
 * @param[in] n_informative Number of informative features (non-zero
 * coefficients)
 * @param[out] coef If present, a row-major (features, targets) matrix
 * to store the coefficients used to generate the values
 * for the regression problem
 * @param[in] bias A scalar that will be added to the values
 * @param[in] effective_rank The approximate rank of the data matrix (used
 * to create correlations in the data). -1 is the
 * code to use well-conditioned data
 * @param[in] tail_strength The relative importance of the fat noisy tail
 * of the singular values profile if
 * effective_rank is not -1
 * @param[in] noise Standard deviation of the Gaussian noise
 * applied to the output
 * @param[in] shuffle Shuffle the samples and the features
 * @param[in] seed Seed for the random number generator
 * @param[in] type Random generator type
 */
template <typename DataT, typename IdxT>
void make_regression(raft::resources const& handle,
                     raft::device_matrix_view<DataT, IdxT, raft::row_major> out,
                     raft::device_matrix_view<DataT, IdxT, raft::row_major> values,
                     IdxT n_informative,
                     std::optional<raft::device_matrix_view<DataT, IdxT, raft::row_major>> coef,
                     DataT bias = DataT{},
                     IdxT effective_rank = static_cast<IdxT>(-1),
                     DataT tail_strength = DataT{0.5},
                     DataT noise = DataT{},
                     bool shuffle = true,
                     uint64_t seed = 0ULL,
                     GeneratorType type = GenPC)
{
  const auto n_samples  = out.extent(0);
  const auto n_features = out.extent(1);
  const auto n_targets  = values.extent(1);
  // Validate shapes with RAFT_EXPECTS rather than assert, so the checks also
  // fire in release (NDEBUG) builds, consistent with the rest of raft::random
  // (e.g. sample_without_replacement).
  RAFT_EXPECTS(values.extent(0) == n_samples,
               "make_regression: values.extent(0) must equal out.extent(0)");
  DataT* coef_ptr = nullptr;
  if (coef.has_value()) {
    RAFT_EXPECTS((*coef).extent(0) == n_features,
                 "make_regression: coef.extent(0) must equal out.extent(1)");
    RAFT_EXPECTS((*coef).extent(1) == n_targets,
                 "make_regression: coef.extent(1) must equal values.extent(1)");
    coef_ptr = (*coef).data_handle();
  }
  detail::make_regression_caller(handle,
                                 out.data_handle(),
                                 values.data_handle(),
                                 n_samples,
                                 n_features,
                                 n_informative,
                                 resource::get_cuda_stream(handle),
                                 coef_ptr,
                                 n_targets,
                                 bias,
                                 effective_rank,
                                 tail_strength,
                                 noise,
                                 shuffle,
                                 seed,
                                 type);
}
/** @} */ // end group make_regression
} // namespace raft::random
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/rng.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/rng_impl.cuh"
#include "detail/rng_impl_deprecated.cuh" // necessary for now (to be removed)
#include "rng_state.hpp"
#include <cassert>
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <type_traits>
#include <variant>
namespace raft::random {
/**
* \defgroup univariate_random_sampling Univariate random sampling
* @{
*/
/**
 * @brief Generate uniformly distributed numbers in the given range
 *
 * @tparam OutputValueType Data type of output random number
 * @tparam IndexType Data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output array
 * @param[in] start start of the range
 * @param[in] end end of the range
 */
template <typename OutputValueType, typename IndexType>
void uniform(raft::resources const& handle,
             RngState& rng_state,
             raft::device_vector_view<OutputValueType, IndexType> out,
             OutputValueType start,
             OutputValueType end)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::uniform(rng_state, out.data_handle(), out.extent(0), start, end, stream);
}
/**
* @}
*/
/**
 * @brief Legacy overload of `uniform` taking raw pointers
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] start start of the range
 * @param[in] end end of the range
 */
template <typename OutType, typename LenType = int>
void uniform(raft::resources const& handle,
             RngState& rng_state,
             OutType* ptr,
             LenType len,
             OutType start,
             OutType end)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::uniform(rng_state, ptr, len, start, end, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate uniformly distributed integers in the given range
 *
 * @tparam OutputValueType Integral type; value type of the output vector
 * @tparam IndexType Type used to represent length of the output vector
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output vector of random numbers
 * @param[in] start start of the range
 * @param[in] end end of the range
 */
template <typename OutputValueType, typename IndexType>
void uniformInt(raft::resources const& handle,
                RngState& rng_state,
                raft::device_vector_view<OutputValueType, IndexType> out,
                OutputValueType start,
                OutputValueType end)
{
  static_assert(
    std::is_same<OutputValueType, typename std::remove_cv<OutputValueType>::type>::value,
    "uniformInt: The output vector must be a view of nonconst, "
    "so that we can write to it.");
  static_assert(std::is_integral<OutputValueType>::value,
                "uniformInt: The elements of the output vector must have integral type.");
  auto stream = resource::get_cuda_stream(handle);
  detail::uniformInt(rng_state, out.data_handle(), out.extent(0), start, end, stream);
}
/**
 * @brief Legacy raw pointer overload of `uniformInt`
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] start start of the range
 * @param[in] end end of the range
 */
template <typename OutType, typename LenType = int>
void uniformInt(raft::resources const& handle,
                RngState& rng_state,
                OutType* ptr,
                LenType len,
                OutType start,
                OutType end)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::uniformInt(rng_state, ptr, len, start, end, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate normal distributed numbers
 * with a given mean and standard deviation
 *
 * @tparam OutputValueType data type of output random number
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output array
 * @param[in] mu mean of the distribution
 * @param[in] sigma std-dev of the distribution
 */
template <typename OutputValueType, typename IndexType>
void normal(raft::resources const& handle,
            RngState& rng_state,
            raft::device_vector_view<OutputValueType, IndexType> out,
            OutputValueType mu,
            OutputValueType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::normal(rng_state, out.data_handle(), out.extent(0), mu, sigma, stream);
}
/**
 * @brief Legacy raw pointer overload of `normal`.
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] mu mean of the distribution
 * @param[in] sigma std-dev of the distribution
 */
template <typename OutType, typename LenType = int>
void normal(raft::resources const& handle,
            RngState& rng_state,
            OutType* ptr,
            LenType len,
            OutType mu,
            OutType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::normal(rng_state, ptr, len, mu, sigma, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate normal distributed integers
 *
 * @tparam OutputValueType Integral type; value type of the output vector
 * @tparam IndexType Integral type of the output vector's length
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output array
 * @param[in] mu mean of the distribution
 * @param[in] sigma standard deviation of the distribution
 */
template <typename OutputValueType, typename IndexType>
void normalInt(raft::resources const& handle,
               RngState& rng_state,
               raft::device_vector_view<OutputValueType, IndexType> out,
               OutputValueType mu,
               OutputValueType sigma)
{
  static_assert(
    std::is_same<OutputValueType, typename std::remove_cv<OutputValueType>::type>::value,
    "normalInt: The output vector must be a view of nonconst, "
    "so that we can write to it.");
  static_assert(std::is_integral<OutputValueType>::value,
                "normalInt: The output vector's value type must be an integer.");
  auto stream = resource::get_cuda_stream(handle);
  detail::normalInt(rng_state, out.data_handle(), out.extent(0), mu, sigma, stream);
}
/**
 * @brief Legacy raw pointer overload of `normalInt`
 *
 * @tparam IntType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] mu mean of the distribution
 * @param[in] sigma std-dev of the distribution
 */
template <typename IntType, typename LenType = int>
void normalInt(raft::resources const& handle,
               RngState& rng_state,
               IntType* ptr,
               LenType len,
               IntType mu,
               IntType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::normalInt(rng_state, ptr, len, mu, sigma, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate normal distributed table according to the given set of
 * means and scalar standard deviations.
 *
 * Each row in this table conforms to a normally distributed n-dim vector
 * whose mean is the input vector and standard deviation is the corresponding
 * vector or scalar. Correlations among the dimensions itself are assumed to
 * be absent.
 *
 * @tparam OutputValueType data type of output random number
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[in] mu_vec mean vector (of length `out.extent(1)`)
 * @param[in] sigma Either the standard-deviation vector
 * (of length `out.extent(1)`) of each component,
 * or a scalar standard deviation for all components.
 * @param[out] out the output table
 */
template <typename OutputValueType, typename IndexType>
void normalTable(
  raft::resources const& handle,
  RngState& rng_state,
  raft::device_vector_view<const OutputValueType, IndexType> mu_vec,
  std::variant<raft::device_vector_view<const OutputValueType, IndexType>, OutputValueType> sigma,
  raft::device_matrix_view<OutputValueType, IndexType, raft::row_major> out)
{
  using vec_view_t = raft::device_vector_view<const OutputValueType, IndexType>;

  const IndexType n_rows = out.extent(0);
  const IndexType n_cols = out.extent(1);

  // Decode the sigma variant: either a per-column vector or a single scalar.
  const OutputValueType* sigma_ptr = nullptr;
  OutputValueType sigma_scalar{};
  if (std::holds_alternative<vec_view_t>(sigma)) {
    auto sigma_vec = std::get<vec_view_t>(sigma);
    RAFT_EXPECTS(sigma_vec.extent(0) == n_cols,
                 "normalTable: The sigma vector "
                 "has length %zu, which does not equal the number of columns "
                 "in the output table %zu.",
                 static_cast<size_t>(sigma_vec.extent(0)),
                 static_cast<size_t>(n_cols));
    // The extra length check makes this work even if sigma_vec views a std::vector,
    // where .data() need not return nullptr even if .size() is zero.
    sigma_ptr = sigma_vec.extent(0) == 0 ? nullptr : sigma_vec.data_handle();
  } else {
    sigma_scalar = std::get<OutputValueType>(sigma);
  }

  RAFT_EXPECTS(mu_vec.extent(0) == n_cols,
               "normalTable: The mu vector "
               "has length %zu, which does not equal the number of columns "
               "in the output table %zu.",
               static_cast<size_t>(mu_vec.extent(0)),
               static_cast<size_t>(n_cols));

  detail::normalTable(rng_state,
                      out.data_handle(),
                      n_rows,
                      n_cols,
                      mu_vec.data_handle(),
                      sigma_ptr,
                      sigma_scalar,
                      resource::get_cuda_stream(handle));
}
/**
 * @brief Legacy raw pointer overload of `normalTable`.
 *
 * Each row in this table conforms to a normally distributed n-dim vector
 * whose mean is the input vector and standard deviation is the corresponding
 * vector or scalar. Correlations among the dimensions itself are assumed to
 * be absent.
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output table (dim = n_rows x n_cols)
 * @param[in] n_rows number of rows in the table
 * @param[in] n_cols number of columns in the table
 * @param[in] mu_vec mean vector (dim = n_cols x 1).
 * @param[in] sigma_vec std-dev vector of each component (dim = n_cols x 1). Pass
 * a nullptr to use the same scalar 'sigma' across all components
 * @param[in] sigma scalar sigma to be used if 'sigma_vec' is nullptr
 */
template <typename OutType, typename LenType = int>
void normalTable(raft::resources const& handle,
                 RngState& rng_state,
                 OutType* ptr,
                 LenType n_rows,
                 LenType n_cols,
                 const OutType* mu_vec,
                 const OutType* sigma_vec,
                 OutType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::normalTable(rng_state, ptr, n_rows, n_cols, mu_vec, sigma_vec, sigma, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Fill a vector with the given value
 *
 * @tparam OutputValueType Value type of the output vector
 * @tparam IndexType Integral type used to represent length of the output vector
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[in] val value with which to fill the output vector
 * @param[out] out the output vector
 */
template <typename OutputValueType, typename IndexType>
void fill(raft::resources const& handle,
          RngState& rng_state,
          OutputValueType val,
          raft::device_vector_view<OutputValueType, IndexType> out)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::fill(rng_state, out.data_handle(), out.extent(0), val, stream);
}
/**
 * @brief Legacy raw pointer overload of `fill`
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] val value to be filled
 */
template <typename OutType, typename LenType = int>
void fill(
  raft::resources const& handle, RngState& rng_state, OutType* ptr, LenType len, OutType val)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::fill(rng_state, ptr, len, val, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate bernoulli distributed boolean array
 *
 * @tparam OutputValueType Type of each element of the output vector;
 * must be able to represent boolean values (e.g., `bool`)
 * @tparam IndexType Integral type of the output vector's length
 * @tparam Type Data type in which to compute the probabilities
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output vector
 * @param[in] prob coin-toss probability for heads
 */
template <typename OutputValueType, typename IndexType, typename Type>
void bernoulli(raft::resources const& handle,
               RngState& rng_state,
               raft::device_vector_view<OutputValueType, IndexType> out,
               Type prob)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::bernoulli(rng_state, out.data_handle(), out.extent(0), prob, stream);
}
/**
 * @brief Legacy raw pointer overload of `bernoulli`
 *
 * @tparam Type data type in which to compute the probabilities
 * @tparam OutType output data type
 * @tparam LenType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] prob coin-toss probability for heads
 */
template <typename Type, typename OutType = bool, typename LenType = int>
void bernoulli(
  raft::resources const& handle, RngState& rng_state, OutType* ptr, LenType len, Type prob)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::bernoulli(rng_state, ptr, len, prob, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate bernoulli distributed array and applies scale
 *
 * @tparam OutputValueType Data type in which to compute the probabilities
 * @tparam IndexType Integral type of the output vector's length
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output vector
 * @param[in] prob coin-toss probability for heads
 * @param[in] scale scaling factor
 */
template <typename OutputValueType, typename IndexType>
void scaled_bernoulli(raft::resources const& handle,
                      RngState& rng_state,
                      raft::device_vector_view<OutputValueType, IndexType> out,
                      OutputValueType prob,
                      OutputValueType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::scaled_bernoulli(rng_state, out.data_handle(), out.extent(0), prob, scale, stream);
}
/**
 * @brief Legacy raw pointer overload of `scaled_bernoulli`
 *
 * @tparam OutType data type in which to compute the probabilities
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] prob coin-toss probability for heads
 * @param[in] scale scaling factor
 */
template <typename OutType, typename LenType = int>
void scaled_bernoulli(raft::resources const& handle,
                      RngState& rng_state,
                      OutType* ptr,
                      LenType len,
                      OutType prob,
                      OutType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::scaled_bernoulli(rng_state, ptr, len, prob, scale, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate Gumbel distributed random numbers
 *
 * @tparam OutputValueType data type of output random number
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out output array
 * @param[in] mu mean value
 * @param[in] beta scale value
 * @note https://en.wikipedia.org/wiki/Gumbel_distribution
 */
template <typename OutputValueType, typename IndexType = int>
void gumbel(raft::resources const& handle,
            RngState& rng_state,
            raft::device_vector_view<OutputValueType, IndexType> out,
            OutputValueType mu,
            OutputValueType beta)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::gumbel(rng_state, out.data_handle(), out.extent(0), mu, beta, stream);
}
/**
 * @brief Legacy raw pointer overload of `gumbel`.
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr output array
 * @param[in] len number of elements in the output array
 * @param[in] mu mean value
 * @param[in] beta scale value
 * @note https://en.wikipedia.org/wiki/Gumbel_distribution
 */
template <typename OutType, typename LenType = int>
void gumbel(raft::resources const& handle,
            RngState& rng_state,
            OutType* ptr,
            LenType len,
            OutType mu,
            OutType beta)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::gumbel(rng_state, ptr, len, mu, beta, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate lognormal distributed numbers
 *
 * @tparam OutputValueType data type of output random number
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out the output array
 * @param[in] mu mean of the distribution
 * @param[in] sigma standard deviation of the distribution
 */
template <typename OutputValueType, typename IndexType>
void lognormal(raft::resources const& handle,
               RngState& rng_state,
               raft::device_vector_view<OutputValueType, IndexType> out,
               OutputValueType mu,
               OutputValueType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::lognormal(rng_state, out.data_handle(), out.extent(0), mu, sigma, stream);
}
/**
 * @brief Legacy raw pointer overload of `lognormal`.
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr the output array
 * @param[in] len the number of elements in the output
 * @param[in] mu mean of the distribution
 * @param[in] sigma standard deviation of the distribution
 */
template <typename OutType, typename LenType = int>
void lognormal(raft::resources const& handle,
               RngState& rng_state,
               OutType* ptr,
               LenType len,
               OutType mu,
               OutType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::lognormal(rng_state, ptr, len, mu, sigma, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Generate logistic distributed random numbers
 *
 * @tparam OutputValueType data type of output random number
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] out output array
 * @param[in] mu mean value
 * @param[in] scale scale value
 */
template <typename OutputValueType, typename IndexType = int>
void logistic(raft::resources const& handle,
              RngState& rng_state,
              raft::device_vector_view<OutputValueType, IndexType> out,
              OutputValueType mu,
              OutputValueType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::logistic(rng_state, out.data_handle(), out.extent(0), mu, scale, stream);
}
/**
 * @brief Legacy raw pointer overload of `logistic`.
 *
 * @tparam OutType data type of output random number
 * @tparam LenType data type used to represent length of the arrays
 * @param[in] handle raft handle for resource management
 * @param[in] rng_state random number generator state
 * @param[out] ptr output array
 * @param[in] len number of elements in the output array
 * @param[in] mu mean value
 * @param[in] scale scale value
 */
template <typename OutType, typename LenType = int>
void logistic(raft::resources const& handle,
              RngState& rng_state,
              OutType* ptr,
              LenType len,
              OutType mu,
              OutType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::logistic(rng_state, ptr, len, mu, scale, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Fill `out` with exponentially distributed random numbers.
 *
 * @tparam OutputValueType value type of the generated numbers
 * @tparam IndexType integer type describing the vector length
 *
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] out vector to fill
 * @param[in] lambda rate parameter of the exponential distribution
 */
template <typename OutputValueType, typename IndexType>
void exponential(raft::resources const& handle,
                 RngState& rng_state,
                 raft::device_vector_view<OutputValueType, IndexType> out,
                 OutputValueType lambda)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::exponential(rng_state, out.data_handle(), out.extent(0), lambda, stream);
}
/**
 * @brief Raw-pointer variant of `exponential`, kept for backward compatibility.
 *
 * @tparam OutType value type of the generated numbers
 * @tparam LenType integer type used for the array length
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] ptr pointer to the output array
 * @param[in] len number of elements to generate
 * @param[in] lambda rate parameter of the exponential distribution
 */
template <typename OutType, typename LenType = int>
void exponential(
  raft::resources const& handle, RngState& rng_state, OutType* ptr, LenType len, OutType lambda)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::exponential(rng_state, ptr, len, lambda, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Fill `out` with Rayleigh-distributed random numbers.
 *
 * @tparam OutputValueType value type of the generated numbers
 * @tparam IndexType integer type describing the vector length
 *
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] out vector to fill
 * @param[in] sigma sigma parameter of the distribution
 */
template <typename OutputValueType, typename IndexType>
void rayleigh(raft::resources const& handle,
              RngState& rng_state,
              raft::device_vector_view<OutputValueType, IndexType> out,
              OutputValueType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::rayleigh(rng_state, out.data_handle(), out.extent(0), sigma, stream);
}
/**
 * @brief Raw-pointer variant of `rayleigh`, kept for backward compatibility.
 *
 * @tparam OutType value type of the generated numbers
 * @tparam LenType integer type used for the array length
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] ptr pointer to the output array
 * @param[in] len number of elements to generate
 * @param[in] sigma sigma parameter of the distribution
 */
template <typename OutType, typename LenType = int>
void rayleigh(
  raft::resources const& handle, RngState& rng_state, OutType* ptr, LenType len, OutType sigma)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::rayleigh(rng_state, ptr, len, sigma, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Fill `out` with Laplace-distributed random numbers.
 *
 * @tparam OutputValueType value type of the generated numbers
 * @tparam IndexType integer type describing the vector length
 *
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] out vector to fill
 * @param[in] mu mean of the distribution
 * @param[in] scale scale of the distribution
 */
template <typename OutputValueType, typename IndexType>
void laplace(raft::resources const& handle,
             RngState& rng_state,
             raft::device_vector_view<OutputValueType, IndexType> out,
             OutputValueType mu,
             OutputValueType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::laplace(rng_state, out.data_handle(), out.extent(0), mu, scale, stream);
}
/**
 * @brief Raw-pointer variant of `laplace`, kept for backward compatibility.
 *
 * @tparam OutType value type of the generated numbers
 * @tparam LenType integer type used for the array length
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] ptr pointer to the output array
 * @param[in] len number of elements to generate
 * @param[in] mu mean of the distribution
 * @param[in] scale scale of the distribution
 */
template <typename OutType, typename LenType = int>
void laplace(raft::resources const& handle,
             RngState& rng_state,
             OutType* ptr,
             LenType len,
             OutType mu,
             OutType scale)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::laplace(rng_state, ptr, len, mu, scale, stream);
}
/**
 * @ingroup univariate_random_sampling
 * @brief Draw random integers where value `i` is picked with probability
 * `weights[i] / sum(weights)`.
 *
 * Usage example:
 * @code{.cpp}
 * #include <raft/core/device_mdarray.hpp>
 * #include <raft/core/resources.hpp>
 * #include <raft/random/rng.cuh>
 *
 * raft::resources handle;
 * ...
 * raft::random::RngState rng(seed);
 * auto indices = raft::make_device_vector<int>(handle, n_samples);
 * raft::random::discrete(handle, rng, indices.view(), weights);
 * @endcode
 *
 * Only participates in overload resolution when `OutType` is integral.
 *
 * @tparam OutType integer output type
 * @tparam WeightType weight type
 * @tparam IndexType data type used to represent length of the arrays
 *
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] out vector of sampled indices
 * @param[in] weights unnormalized weight of each index
 */
template <typename OutType, typename WeightType, typename IndexType>
std::enable_if_t<std::is_integral_v<OutType>> discrete(
  raft::resources const& handle,
  RngState& rng_state,
  raft::device_vector_view<OutType, IndexType> out,
  raft::device_vector_view<const WeightType, IndexType> weights)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::discrete(
    rng_state, out.data_handle(), weights.data_handle(), out.extent(0), weights.extent(0), stream);
}
/**
 * @brief Raw-array variant of @c sample_without_replacement (see above),
 * kept for backward compatibility.
 *
 * @tparam DataT data type
 * @tparam WeightsT weights type
 * @tparam IdxT index type
 *
 * @param[in] handle raft handle providing the CUDA stream
 * @param[in] rng_state state of the random number generator
 * @param[out] out sampled values (length `sampledLen`)
 * @param[out] outIdx positions of the sampled values (length `sampledLen`);
 *                    may be `nullptr` if the indices are not needed
 * @param[in] in values to sample from (length `len`)
 * @param[in] wts per-element weights (length `len`); pass `nullptr` for
 *                uniform sampling
 * @param[in] sampledLen number of samples to draw
 * @param[in] len number of input elements
 */
template <typename DataT, typename WeightsT, typename IdxT = int>
void sampleWithoutReplacement(raft::resources const& handle,
                              RngState& rng_state,
                              DataT* out,
                              IdxT* outIdx,
                              const DataT* in,
                              const WeightsT* wts,
                              IdxT sampledLen,
                              IdxT len)
{
  auto stream = resource::get_cuda_stream(handle);
  detail::sampleWithoutReplacement(rng_state, out, outIdx, in, wts, sampledLen, len, stream);
}
/**
 * @brief Generates the 'a' and 'b' parameters for a modulo affine
 * transformation equation: `(ax + b) % n`
 *
 * @tparam IdxT integer type
 *
 * @param[in] rng_state random number generator state
 * @param[in] n the modulo range
 * @param[out] a slope parameter
 * @param[out] b intercept parameter
 */
template <typename IdxT>
void affine_transform_params(RngState const& rng_state, IdxT n, IdxT& a, IdxT& b)
{
  // Note: `rng_state` is taken by const reference, so the generator state is
  // not advanced by this call. No CUDA stream is involved — presumably the
  // parameters are computed on the host (confirm in detail::affine_transform_params).
  detail::affine_transform_params(rng_state, n, a, b);
}
///////////////////////////////////////////////////////////////////////////
// Everything below this point is deprecated and will be removed //
///////////////////////////////////////////////////////////////////////////
// without the macro, clang-format seems to go insane
#define DEPR [[deprecated("Use 'RngState' with the new flat functions instead")]]
// Re-export the legacy stream-based detail:: overloads so that existing
// callers keep compiling. New code should use the RngState-based functions
// defined above instead.
using detail::bernoulli;
using detail::exponential;
using detail::fill;
using detail::gumbel;
using detail::laplace;
using detail::logistic;
using detail::lognormal;
using detail::normal;
using detail::normalInt;
using detail::normalTable;
using detail::rayleigh;
using detail::scaled_bernoulli;
using detail::uniform;
using detail::uniformInt;
using detail::sampleWithoutReplacement;
/**
 * @brief Deprecated random number generator class: a thin wrapper whose
 * methods all forward directly to detail::RngImpl. Prefer `RngState`
 * together with the flat `raft::random::*` functions defined above.
 */
class DEPR Rng : public detail::RngImpl {
 public:
  /**
   * @brief ctor
   * @param _s 64b seed used to initialize the RNG
   * @param _t backend device RNG generator type
   * @note Refer to the `Rng::seed` method for details about seeding the engine
   */
  Rng(uint64_t _s, GeneratorType _t = GenPhilox) : detail::RngImpl(_s, _t) {}
  /**
   * @brief Generates the 'a' and 'b' parameters for a modulo affine
   * transformation equation: `(ax + b) % n`
   *
   * @tparam IdxT integer type
   *
   * @param[in] n the modulo range
   * @param[out] a slope parameter
   * @param[out] b intercept parameter
   */
  template <typename IdxT>
  void affine_transform_params(IdxT n, IdxT& a, IdxT& b)
  {
    detail::RngImpl::affine_transform_params(n, a, b);
  }
  /**
   * @brief Generate uniformly distributed numbers in the given range
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output array
   * @param len the number of elements in the output
   * @param start start of the range
   * @param end end of the range
   * @param stream stream where to launch the kernel
   * @{
   */
  template <typename OutType, typename LenType = int>
  void uniform(OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
  {
    detail::RngImpl::uniform(ptr, len, start, end, stream);
  }
  template <typename OutType, typename LenType = int>
  void uniformInt(OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
  {
    detail::RngImpl::uniformInt(ptr, len, start, end, stream);
  }
  /** @} */
  /**
   * @brief Generate normal distributed numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output array
   * @param len the number of elements in the output
   * @param mu mean of the distribution
   * @param sigma std-dev of the distribution
   * @param stream stream where to launch the kernel
   * @{
   */
  template <typename OutType, typename LenType = int>
  void normal(OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
  {
    detail::RngImpl::normal(ptr, len, mu, sigma, stream);
  }
  template <typename IntType, typename LenType = int>
  void normalInt(IntType* ptr, LenType len, IntType mu, IntType sigma, cudaStream_t stream)
  {
    detail::RngImpl::normalInt(ptr, len, mu, sigma, stream);
  }
  /** @} */
  /**
   * @brief Generate normal distributed table according to the given set of
   * means and scalar standard deviations.
   *
   * Each row in this table conforms to a normally distributed n-dim vector
   * whose mean is the input vector and standard deviation is the corresponding
   * vector or scalar. Correlations among the dimensions itself is assumed to
   * be absent.
   *
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output table (dim = n_rows x n_cols)
   * @param n_rows number of rows in the table
   * @param n_cols number of columns in the table
   * @param mu_vec mean vector (dim = n_cols x 1).
   * @param sigma_vec std-dev vector of each component (dim = n_cols x 1). Pass
   * a nullptr to use the same scalar 'sigma' across all components
   * @param sigma scalar sigma to be used if 'sigma_vec' is nullptr
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void normalTable(OutType* ptr,
                   LenType n_rows,
                   LenType n_cols,
                   const OutType* mu_vec,
                   const OutType* sigma_vec,
                   OutType sigma,
                   cudaStream_t stream)
  {
    detail::RngImpl::normalTable(ptr, n_rows, n_cols, mu_vec, sigma_vec, sigma, stream);
  }
  /**
   * @brief Fill an array with the given value
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output array
   * @param len the number of elements in the output
   * @param val value to be filled
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void fill(OutType* ptr, LenType len, OutType val, cudaStream_t stream)
  {
    detail::RngImpl::fill(ptr, len, val, stream);
  }
  /**
   * @brief Generate bernoulli distributed boolean array
   *
   * @tparam Type data type in which to compute the probabilities
   * @tparam OutType output data type
   * @tparam LenType data type used to represent length of the arrays
   *
   * @param[out] ptr the output array
   * @param[in] len the number of elements in the output
   * @param[in] prob coin-toss probability for heads
   * @param[in] stream stream where to launch the kernel
   */
  template <typename Type, typename OutType = bool, typename LenType = int>
  void bernoulli(OutType* ptr, LenType len, Type prob, cudaStream_t stream)
  {
    detail::RngImpl::bernoulli(ptr, len, prob, stream);
  }
  /**
   * @brief Generate bernoulli distributed array and applies scale
   * @tparam Type data type in which to compute the probabilities
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output array
   * @param len the number of elements in the output
   * @param prob coin-toss probability for heads
   * @param scale scaling factor
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void scaled_bernoulli(OutType* ptr, LenType len, OutType prob, OutType scale, cudaStream_t stream)
  {
    detail::RngImpl::scaled_bernoulli(ptr, len, prob, scale, stream);
  }
  /**
   * @brief Generate Gumbel distributed random numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr output array
   * @param len number of elements in the output array
   * @param mu mean value
   * @param beta scale value
   * @param stream stream where to launch the kernel
   * @note https://en.wikipedia.org/wiki/Gumbel_distribution
   */
  template <typename OutType, typename LenType = int>
  void gumbel(OutType* ptr, LenType len, OutType mu, OutType beta, cudaStream_t stream)
  {
    detail::RngImpl::gumbel(ptr, len, mu, beta, stream);
  }
  /**
   * @brief Generate lognormal distributed numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr the output array
   * @param len the number of elements in the output
   * @param mu mean of the distribution
   * @param sigma std-dev of the distribution
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void lognormal(OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
  {
    detail::RngImpl::lognormal(ptr, len, mu, sigma, stream);
  }
  /**
   * @brief Generate logistic distributed random numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr output array
   * @param len number of elements in the output array
   * @param mu mean value
   * @param scale scale value
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void logistic(OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
  {
    detail::RngImpl::logistic(ptr, len, mu, scale, stream);
  }
  /**
   * @brief Generate exponentially distributed random numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr output array
   * @param len number of elements in the output array
   * @param lambda the lambda
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void exponential(OutType* ptr, LenType len, OutType lambda, cudaStream_t stream)
  {
    detail::RngImpl::exponential(ptr, len, lambda, stream);
  }
  /**
   * @brief Generate rayleigh distributed random numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr output array
   * @param len number of elements in the output array
   * @param sigma the sigma
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void rayleigh(OutType* ptr, LenType len, OutType sigma, cudaStream_t stream)
  {
    detail::RngImpl::rayleigh(ptr, len, sigma, stream);
  }
  /**
   * @brief Generate laplace distributed random numbers
   * @tparam Type data type of output random number
   * @tparam LenType data type used to represent length of the arrays
   * @param ptr output array
   * @param len number of elements in the output array
   * @param mu the mean
   * @param scale the scale
   * @param stream stream where to launch the kernel
   */
  template <typename OutType, typename LenType = int>
  void laplace(OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
  {
    detail::RngImpl::laplace(ptr, len, mu, scale, stream);
  }
  // Skip the generator ahead; parameter semantics are defined by the backend
  // implementation (see detail::RngImpl::advance).
  void advance(uint64_t max_streams, uint64_t max_calls_per_subsequence)
  {
    detail::RngImpl::advance(max_streams, max_calls_per_subsequence);
  }
  /**
   * @brief Sample the input array without replacement, optionally based on the
   * input weight vector for each element in the array
   *
   * Implementation here is based on the `one-pass sampling` algo described here:
   * https://www.ethz.ch/content/dam/ethz/special-interest/baug/ivt/ivt-dam/vpl/reports/1101-1200/ab1141.pdf
   *
   * @note In the sampled array the elements which are picked will always appear
   * in the increasing order of their weights as computed using the exponential
   * distribution. So, if you're particular about the order (for eg. array
   * permutations), then this might not be the right choice!
   *
   * @tparam DataT data type
   * @tparam WeightsT weights type
   * @tparam IdxT index type
   * @param handle
   * @param out output sampled array (of length 'sampledLen')
   * @param outIdx indices of the sampled array (of length 'sampledLen'). Pass
   * a nullptr if this is not required.
   * @param in input array to be sampled (of length 'len')
   * @param wts weights array (of length 'len'). Pass a nullptr if uniform
   * sampling is desired
   * @param sampledLen output sampled array length
   * @param len input array length
   * @param stream cuda stream
   */
  template <typename DataT, typename WeightsT, typename IdxT = int>
  void sampleWithoutReplacement(raft::resources const& handle,
                                DataT* out,
                                IdxT* outIdx,
                                const DataT* in,
                                const WeightsT* wts,
                                IdxT sampledLen,
                                IdxT len,
                                cudaStream_t stream)
  {
    detail::RngImpl::sampleWithoutReplacement(
      handle, out, outIdx, in, wts, sampledLen, len, stream);
  }
};
#undef DEPR
}; // end namespace raft::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/rmat_rectangular_generator.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/rmat_rectangular_generator.cuh"
#include <raft/core/resources.hpp>
namespace raft::random {
/**
* @defgroup rmat RMAT Rectangular Generator
* @{
*/
/**
* @brief Generate a bipartite RMAT graph for a rectangular adjacency matrix.
*
* This is the most general of several overloads of `rmat_rectangular_gen`
* in this file, and thus has the most detailed documentation.
*
* @tparam IdxT Type of each node index
* @tparam ProbT Data type used for probability distributions (either fp32 or fp64)
*
* @param[in] handle RAFT handle, containing the CUDA stream on which to schedule work
* @param[in] r underlying state of the random generator. Especially useful when
* one wants to call this API for multiple times in order to generate
* a larger graph. For that case, just create this object with the
* initial seed once and after every call continue to pass the same
* object for the successive calls.
* @param[out] out Generated edgelist [on device], packed in array-of-structs fashion.
* In each row, the first element is the source node id,
* and the second element is the destination node id.
* @param[out] out_src Source node id's [on device].
* @param[out] out_dst Destination node id's [on device]. `out_src` and `out_dst`
* together form the struct-of-arrays representation of the same
* output data as `out`.
* @param[in] theta distribution of each quadrant at each level of resolution.
* Since these are probabilities, each of the 2x2 matrices for
* each level of the RMAT must sum to one. [on device]
* [dim = max(r_scale, c_scale) x 2 x 2]. Of course, it is assumed
* that each of the group of 2 x 2 numbers all sum up to 1.
* @param[in] r_scale 2^r_scale represents the number of source nodes
* @param[in] c_scale 2^c_scale represents the number of destination nodes
*
* @pre `out.extent(0) == 2 * `out_src.extent(0)` is `true`
* @pre `out_src.extent(0) == out_dst.extent(0)` is `true`
*
* We call the `r_scale != c_scale` case the "rectangular adjacency matrix" case
* (in other words, generating bipartite graphs). In this case, at `depth >= r_scale`,
* the distribution is assumed to be:
*
* `[theta[4 * depth] + theta[4 * depth + 2], theta[4 * depth + 1] + theta[4 * depth + 3]; 0, 0]`.
*
* Then for `depth >= c_scale`, the distribution is assumed to be:
*
* `[theta[4 * depth] + theta[4 * depth + 1], 0; theta[4 * depth + 2] + theta[4 * depth + 3], 0]`.
*
* @note This can generate duplicate edges and self-loops. It is the responsibility of the
* caller to clean them up accordingly.
*
* @note This also only generates directed graphs. If undirected graphs are needed, then a
* separate post-processing step is expected to be done by the caller.
*
* @{
*/
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(
  raft::resources const& handle,
  raft::random::RngState& r,
  raft::device_vector_view<const ProbT, IdxT> theta,
  raft::device_mdspan<IdxT, raft::extents<IdxT, raft::dynamic_extent, 2>, raft::row_major> out,
  raft::device_vector_view<IdxT, IdxT> out_src,
  raft::device_vector_view<IdxT, IdxT> out_dst,
  IdxT r_scale,
  IdxT c_scale)
{
  // Bundle the array-of-structs view (`out`) and the struct-of-arrays pair
  // (`out_src`/`out_dst`); the detail implementation fills every view held
  // by `output`.
  detail::rmat_rectangular_gen_output<IdxT> output(out, out_src, out_dst);
  detail::rmat_rectangular_gen_impl(handle, r, theta, output, r_scale, c_scale);
}
/**
 * @brief `rmat_rectangular_gen` overload producing only the
 * struct-of-arrays (two vectors) output representation.
 *
 * Fills `out_src` with the source node id and `out_dst` with the
 * destination node id of each generated edge; no combined
 * array-of-structs edge list is produced.
 *
 * @pre `out_src.extent(0) == out_dst.extent(0)` is `true`
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(raft::resources const& handle,
                          raft::random::RngState& r,
                          raft::device_vector_view<const ProbT, IdxT> theta,
                          raft::device_vector_view<IdxT, IdxT> out_src,
                          raft::device_vector_view<IdxT, IdxT> out_dst,
                          IdxT r_scale,
                          IdxT c_scale)
{
  auto output = detail::rmat_rectangular_gen_output<IdxT>(out_src, out_dst);
  detail::rmat_rectangular_gen_impl(handle, r, theta, output, r_scale, c_scale);
}
/**
 * @brief `rmat_rectangular_gen` overload producing only the
 * array-of-structs (one vector) output representation.
 *
 * Each row of `out` holds one edge: the first element is the source node
 * id and the second element is the destination node id.
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(
  raft::resources const& handle,
  raft::random::RngState& r,
  raft::device_vector_view<const ProbT, IdxT> theta,
  raft::device_mdspan<IdxT, raft::extents<IdxT, raft::dynamic_extent, 2>, raft::row_major> out,
  IdxT r_scale,
  IdxT c_scale)
{
  auto output = detail::rmat_rectangular_gen_output<IdxT>(out);
  detail::rmat_rectangular_gen_impl(handle, r, theta, output, r_scale, c_scale);
}
/**
 * @brief `rmat_rectangular_gen` overload that uses the same a, b, c, d
 * quadrant probabilities at every scale, and fills all three output
 * views: `out` (array-of-structs) as well as `out_src` / `out_dst`
 * (struct-of-arrays).
 *
 * `a`, `b`, and `c` take the place of the `theta` parameter of the
 * overloads above.
 *
 * @pre `out.extent(0) == 2 * `out_src.extent(0)` is `true`
 * @pre `out_src.extent(0) == out_dst.extent(0)` is `true`
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(
  raft::resources const& handle,
  raft::random::RngState& r,
  raft::device_mdspan<IdxT, raft::extents<IdxT, raft::dynamic_extent, 2>, raft::row_major> out,
  raft::device_vector_view<IdxT, IdxT> out_src,
  raft::device_vector_view<IdxT, IdxT> out_dst,
  ProbT a,
  ProbT b,
  ProbT c,
  IdxT r_scale,
  IdxT c_scale)
{
  auto output = detail::rmat_rectangular_gen_output<IdxT>(out, out_src, out_dst);
  detail::rmat_rectangular_gen_impl(handle, r, output, a, b, c, r_scale, c_scale);
}
/**
 * @brief `rmat_rectangular_gen` overload that uses the same a, b, c, d
 * quadrant probabilities at every scale, and produces only the
 * struct-of-arrays (two vectors) output representation.
 *
 * `a`, `b`, and `c` take the place of the `theta` parameter of the
 * overloads above.
 *
 * @pre `out_src.extent(0) == out_dst.extent(0)` is `true`
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(raft::resources const& handle,
                          raft::random::RngState& r,
                          raft::device_vector_view<IdxT, IdxT> out_src,
                          raft::device_vector_view<IdxT, IdxT> out_dst,
                          ProbT a,
                          ProbT b,
                          ProbT c,
                          IdxT r_scale,
                          IdxT c_scale)
{
  auto output = detail::rmat_rectangular_gen_output<IdxT>(out_src, out_dst);
  detail::rmat_rectangular_gen_impl(handle, r, output, a, b, c, r_scale, c_scale);
}
/**
 * @brief `rmat_rectangular_gen` overload that uses the same a, b, c, d
 * quadrant probabilities at every scale, and produces only the
 * array-of-structs (one vector) output representation.
 *
 * `a`, `b`, and `c` take the place of the `theta` parameter of the
 * overloads above.
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(
  raft::resources const& handle,
  raft::random::RngState& r,
  raft::device_mdspan<IdxT, raft::extents<IdxT, raft::dynamic_extent, 2>, raft::row_major> out,
  ProbT a,
  ProbT b,
  ProbT c,
  IdxT r_scale,
  IdxT c_scale)
{
  auto output = detail::rmat_rectangular_gen_output<IdxT>(out);
  detail::rmat_rectangular_gen_impl(handle, r, output, a, b, c, r_scale, c_scale);
}
/** @} */ // end group rmat
/**
 * @brief Legacy overload of `rmat_rectangular_gen`
 * taking raw arrays instead of mdspan.
 *
 * @tparam IdxT type of each node index
 * @tparam ProbT data type used for probability distributions (either fp32 or fp64)
 *
 * @param[out] out generated edgelist [on device] [dim = n_edges x 2]. In each row
 * the first element is the source node id, and the second element
 * is the destination node id. If you don't need this output
 * then pass a `nullptr` in its place.
 * @param[out] out_src list of source node id's [on device] [len = n_edges]. If you
 * don't need this output then pass a `nullptr` in its place.
 * @param[out] out_dst list of destination node id's [on device] [len = n_edges]. If
 * you don't need this output then pass a `nullptr` in its place.
 * @param[in] theta distribution of each quadrant at each level of resolution.
 * Since these are probabilities, each of the 2x2 matrices for
 * each level of the RMAT must sum to one. [on device]
 * [dim = max(r_scale, c_scale) x 2 x 2]. Of course, it is assumed
 * that each of the group of 2 x 2 numbers all sum up to 1.
 * @param[in] r_scale 2^r_scale represents the number of source nodes
 * @param[in] c_scale 2^c_scale represents the number of destination nodes
 * @param[in] n_edges number of edges to generate
 * @param[in] stream cuda stream on which to schedule the work
 * @param[in] r underlying state of the random generator. Especially useful when
 * one wants to call this API for multiple times in order to generate
 * a larger graph. For that case, just create this object with the
 * initial seed once and after every call continue to pass the same
 * object for the successive calls.
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(IdxT* out,
                          IdxT* out_src,
                          IdxT* out_dst,
                          const ProbT* theta,
                          IdxT r_scale,
                          IdxT c_scale,
                          IdxT n_edges,
                          cudaStream_t stream,
                          raft::random::RngState& r)
{
  // Straight pass-through to the legacy raw-pointer implementation; nullptr
  // outputs are documented as allowed and are handled by the callee.
  detail::rmat_rectangular_gen_caller(
    out, out_src, out_dst, theta, r_scale, c_scale, n_edges, stream, r);
}
/**
 * @brief Legacy overload of `rmat_rectangular_gen`
 * taking raw arrays instead of mdspan.
 * This overload assumes the same a, b, c, d probability distributions
 * across all the scales.
 *
 * See the raw-array overload above for the meaning of `out`, `out_src`,
 * `out_dst`, `r_scale`, `c_scale`, `n_edges`, `stream` and `r`. Here `a`,
 * `b`, and `c` replace the `theta` array (presumably with d = 1 - a - b - c,
 * matching the sums-to-one convention of `theta` — confirm against
 * detail::rmat_rectangular_gen_caller).
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen(IdxT* out,
                          IdxT* out_src,
                          IdxT* out_dst,
                          ProbT a,
                          ProbT b,
                          ProbT c,
                          IdxT r_scale,
                          IdxT c_scale,
                          IdxT n_edges,
                          cudaStream_t stream,
                          raft::random::RngState& r)
{
  detail::rmat_rectangular_gen_caller(
    out, out_src, out_dst, a, b, c, r_scale, c_scale, n_edges, stream, r);
}
/** @} */
} // end namespace raft::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/multi_variable_gaussian.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MVG_H
#define __MVG_H
#pragma once
#include "detail/multi_variable_gaussian.cuh"
#include <raft/core/resources.hpp>
#include <raft/random/random_types.hpp>
namespace raft::random {
/**
* \defgroup multi_variable_gaussian Compute multi-variable Gaussian
* @{
*/
/**
 * @brief Compute a multi-variable Gaussian sample matrix, using the given
 * device memory resource for the implementation's temporary allocations.
 *
 * @tparam ValueType element type of the matrices/vector
 * @param[in] handle raft handle for managing resources
 * @param[inout] mem_resource device memory resource used for workspace
 *   allocations inside the implementation
 * @param[in] x optional vector — NOTE(review): presumably the mean of the
 *   distribution; confirm against detail::compute_multi_variable_gaussian_impl
 * @param[inout] P column-major matrix, taken as a mutable view — presumably
 *   the covariance matrix, which the chosen decomposition may modify in
 *   place (confirm)
 * @param[out] X column-major output matrix receiving the generated samples
 * @param[in] method which matrix decomposition to use (see
 *   multi_variable_gaussian_decomposition_method)
 */
template <typename ValueType>
void multi_variable_gaussian(raft::resources const& handle,
                             rmm::mr::device_memory_resource& mem_resource,
                             std::optional<raft::device_vector_view<const ValueType, int>> x,
                             raft::device_matrix_view<ValueType, int, raft::col_major> P,
                             raft::device_matrix_view<ValueType, int, raft::col_major> X,
                             const multi_variable_gaussian_decomposition_method method)
{
  detail::compute_multi_variable_gaussian_impl(handle, mem_resource, x, P, X, method);
}
/**
 * @brief Overload of `multi_variable_gaussian` that draws its workspace
 * memory from the current RMM device memory resource.
 *
 * Behaves exactly like the overload taking an explicit
 * `rmm::mr::device_memory_resource&`, except that the resource is obtained
 * via `rmm::mr::get_current_device_resource()` (which is checked for null
 * defensively before use).
 */
template <typename ValueType>
void multi_variable_gaussian(raft::resources const& handle,
                             std::optional<raft::device_vector_view<const ValueType, int>> x,
                             raft::device_matrix_view<ValueType, int, raft::col_major> P,
                             raft::device_matrix_view<ValueType, int, raft::col_major> X,
                             const multi_variable_gaussian_decomposition_method method)
{
  auto* mr = rmm::mr::get_current_device_resource();
  RAFT_EXPECTS(mr != nullptr,
               "compute_multi_variable_gaussian: "
               "rmm::mr::get_current_device_resource() returned null; "
               "please report this bug to the RAPIDS RAFT developers.");
  detail::compute_multi_variable_gaussian_impl(handle, *mr, x, P, X, method);
}
/** @} */
}; // end of namespace raft::random
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/rng_device.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RNG_DEVICE_H
#define __RNG_DEVICE_H
#pragma once
#include "detail/rng_device.cuh"
#include "rng_state.hpp"
namespace raft {
namespace random {
using detail::DeviceState;
using detail::PCGenerator;
using detail::PhiloxGenerator;
using detail::BernoulliDistParams;
using detail::ExponentialDistParams;
using detail::GumbelDistParams;
using detail::InvariantDistParams;
using detail::LaplaceDistParams;
using detail::LogisticDistParams;
using detail::LogNormalDistParams;
using detail::NormalDistParams;
using detail::NormalIntDistParams;
using detail::NormalTableDistParams;
using detail::RayleighDistParams;
using detail::SamplingParams;
using detail::ScaledBernoulliDistParams;
using detail::UniformDistParams;
using detail::UniformIntDistParams;
// Technically reachable via C++ ADL, but re-exported here for clarity.
using detail::custom_next;
// Must be re-exported explicitly: its arguments are all primitive types,
// so ADL cannot find it in the detail namespace.
using detail::box_muller_transform;
}; // end namespace random
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/permute.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __PERMUTE_H
#define __PERMUTE_H
#pragma once
#include "detail/permute.cuh"
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <type_traits>
namespace raft::random {
namespace permute_impl {
// Metafunction mapping the caller-supplied `permsOut` argument type of the
// generic `permute` overload to the `device_vector_view` type that the
// fully-typed `permute` overload expects. Only the two specializations
// below are valid instantiations; any other `T` yields no `type` member
// and a compile error at the point of use.
template <typename T, typename InputOutputValueType, typename IdxType, typename Layout>
struct perms_out_view {};
// Case 1: the caller passed std::nullopt.
template <typename InputOutputValueType, typename IdxType, typename Layout>
struct perms_out_view<std::nullopt_t, InputOutputValueType, IdxType, Layout> {
  // permsOut won't have a value anyway,
  // so we can pick any integral value type we want.
  using type = raft::device_vector_view<IdxType, IdxType>;
};
// Case 2: the caller passed std::optional<device_vector_view<...>>;
// deduce the permutation index type from the view itself.
template <typename PermutationIndexType,
          typename InputOutputValueType,
          typename IdxType,
          typename Layout>
struct perms_out_view<std::optional<raft::device_vector_view<PermutationIndexType, IdxType>>,
                      InputOutputValueType,
                      IdxType,
                      Layout> {
  using type = raft::device_vector_view<PermutationIndexType, IdxType>;
};
// Convenience alias for the metafunction above.
template <typename T, typename InputOutputValueType, typename IdxType, typename Layout>
using perms_out_view_t = typename perms_out_view<T, InputOutputValueType, IdxType, Layout>::type;
}  // namespace permute_impl
/**
* \defgroup permute Permutation
* @{
*/
/**
* @brief Randomly permute the rows of the input matrix.
*
* We do not support in-place permutation, so that we can compute
* in parallel without race conditions. This function is useful
* for shuffling input data sets in machine learning algorithms.
*
* @tparam InputOutputValueType Type of each element of the input matrix,
* and the type of each element of the output matrix (if provided)
* @tparam IntType Integer type of each element of `permsOut`
* @tparam IdxType Integer type of the extents of the mdspan parameters
* @tparam Layout Either `raft::row_major` or `raft::col_major`
*
* @param[in] handle RAFT handle containing the CUDA stream
* on which to run.
* @param[in] in input matrix
* @param[out] permsOut If provided, the indices of the permutation.
* @param[out] out If provided, the output matrix, containing the
* permuted rows of the input matrix `in`. (Not providing this
* is only useful if you provide `permsOut`.)
*
* @pre If `permsOut.has_value()` is `true`,
* then `(*permsOut).extent(0) == in.extent(0)` is `true`.
*
* @pre If `out.has_value()` is `true`,
* then `(*out).extents() == in.extents()` is `true`.
*
* @note This is NOT a uniform permutation generator!
* It only generates a small fraction of all possible random permutations.
* If your application needs a high-quality permutation generator,
* then we recommend Knuth Shuffle.
*/
template <typename InputOutputValueType, typename IntType, typename IdxType, typename Layout>
void permute(raft::resources const& handle,
             raft::device_matrix_view<const InputOutputValueType, IdxType, Layout> in,
             std::optional<raft::device_vector_view<IntType, IdxType>> permsOut,
             std::optional<raft::device_matrix_view<InputOutputValueType, IdxType, Layout>> out)
{
  // Compile-time validation of the template arguments.
  static_assert(std::is_integral_v<IntType>,
                "permute: The type of each element "
                "of permsOut (if provided) must be an integral type.");
  static_assert(std::is_integral_v<IdxType>,
                "permute: The index type "
                "of each mdspan argument must be an integral type.");
  constexpr bool is_row_major = std::is_same_v<Layout, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<Layout, raft::col_major>;
  static_assert(is_row_major || is_col_major,
                "permute: Layout must be either "
                "raft::row_major or raft::col_major (or one of their aliases)");
  // Run-time validation: each provided output must match the input's shape.
  RAFT_EXPECTS(!permsOut.has_value() || permsOut.value().extent(0) == in.extent(0),
               "permute: If 'permsOut' is provided, then its extent(0) "
               "must equal the number of rows of the input matrix 'in'.");
  RAFT_EXPECTS(!out.has_value() || out.value().extents() == in.extents(),
               "permute: If 'out' is provided, then both its extents "
               "must match the extents of the input matrix 'in'.");
  // Unwrap the optionals into the raw pointers the detail layer expects;
  // nullptr means "this output was not requested."
  IntType* perms_ptr = permsOut.has_value() ? permsOut.value().data_handle() : nullptr;
  InputOutputValueType* dst_ptr = out.has_value() ? out.value().data_handle() : nullptr;
  // Nothing to do if the caller requested neither output.
  if (perms_ptr == nullptr && dst_ptr == nullptr) { return; }
  detail::permute<InputOutputValueType, IntType, IdxType>(perms_ptr,
                                                          dst_ptr,
                                                          in.data_handle(),
                                                          in.extent(1),
                                                          in.extent(0),
                                                          is_row_major,
                                                          resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `permute` that compiles if users pass in `std::nullopt`
* for either or both of `permsOut` and `out`.
*/
template <typename InputOutputValueType,
          typename IdxType,
          typename Layout,
          typename PermsOutType,
          typename OutType>
void permute(raft::resources const& handle,
             raft::device_matrix_view<const InputOutputValueType, IdxType, Layout> in,
             PermsOutType&& permsOut,
             OutType&& out)
{
  // If PermsOutType is std::optional<device_vector_view<T, IdxType>>
  // for some T, then that type T need not be related to any of the
  // other template parameters. Thus, we have to deduce it specially
  // (via the permute_impl::perms_out_view metafunction).
  using perms_out_view_type = permute_impl::
    perms_out_view_t<std::decay_t<PermsOutType>, InputOutputValueType, IdxType, Layout>;
  using out_view_type = raft::device_matrix_view<InputOutputValueType, IdxType, Layout>;
  // `out` needs no deduction helper, so constrain its type directly.
  static_assert(std::is_same_v<std::decay_t<OutType>, std::nullopt_t> ||
                  std::is_same_v<std::decay_t<OutType>, std::optional<out_view_type>>,
                "permute: The type of 'out' must be either std::optional<"
                "raft::device_matrix_view<InputOutputViewType, IdxType, Layout>>, "
                "or std::nullopt.");
  // Normalize both arguments to std::optional and dispatch to the
  // fully-typed overload above.
  std::optional<perms_out_view_type> permsOut_arg = std::forward<PermsOutType>(permsOut);
  std::optional<out_view_type> out_arg            = std::forward<OutType>(out);
  permute(handle, in, permsOut_arg, out_arg);
}
/** @} */
/**
* @brief Legacy overload of `permute` that takes raw arrays instead of mdspan.
*
* @tparam Type Type of each element of the input matrix to be permuted
* @tparam IntType Integer type of each element of the permsOut matrix
* @tparam IdxType Integer type of the dimensions of the matrices
* @tparam TPB threads per block (do not use any value other than the default)
*
* @param[out] perms If nonnull, the indices of the permutation
* @param[out] out If nonnull, the output matrix, containing the
* permuted rows of the input matrix @c in. (Not providing this
* is only useful if you provide @c perms.)
* @param[in] in input matrix
* @param[in] D number of columns in the matrices
* @param[in] N number of rows in the matrices
* @param[in] rowMajor true if the matrices are row major,
* false if they are column major
* @param[in] stream CUDA stream on which to run
*/
template <typename Type, typename IntType = int, typename IdxType = int, int TPB = 256>
void permute(IntType* perms,
             Type* out,
             const Type* in,
             IntType D,
             IntType N,
             bool rowMajor,
             cudaStream_t stream)
{
  // Thin forwarder to the detail implementation; see the Doxygen block
  // above for parameter semantics. `perms` and/or `out` may be nullptr.
  detail::permute<Type, IntType, IdxType, TPB>(perms, out, in, D, N, rowMajor, stream);
}
}; // end namespace raft::random
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/rng_state.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RNG_STATE_H
#define __RNG_STATE_H
#pragma once
#include <cstdint>
namespace raft {
namespace random {
/** All of the different generator types that the RNG supports. */
enum GeneratorType {
  /** curand-based philox generator */
  GenPhilox = 0,
  /** Permuted Congruential Generator (PCG); the default in RngState */
  GenPC
};
/**
 * The RNG state used to keep RNG state around on the host.
 */
struct RngState {
  /** Construct from a seed; generator type defaults to GenPC (see `type`). */
  explicit RngState(uint64_t _seed) : seed(_seed) {}
  /** Construct from a seed and an explicit generator type. */
  RngState(uint64_t _seed, GeneratorType _type) : seed(_seed), type(_type) {}
  /** Construct from a seed, a starting subsequence, and a generator type. */
  RngState(uint64_t _seed, uint64_t _base_subsequence, GeneratorType _type)
    : seed(_seed), base_subsequence(_base_subsequence), type(_type)
  {
  }
  // Seed for the generator.
  uint64_t seed{0};
  // First subsequence to draw from; advance() moves this forward.
  uint64_t base_subsequence{0};
  /**
   * The generator type. PCGenerator has been extensively tested and is faster
   * than Philox, thus we use it as the default.
   */
  GeneratorType type{GeneratorType::GenPC};
  /**
   * Advance the state past the subsequences consumed by a previous
   * operation, so that subsequent use of this state yields fresh streams.
   *
   * @param max_uniq_subsequences_used number of distinct subsequences the
   *   previous operation may have consumed.
   * @param max_numbers_generated_per_subsequence currently unused.
   *   NOTE(review): only the subsequence counter is advanced — confirm
   *   that ignoring this parameter is intentional.
   */
  void advance(uint64_t max_uniq_subsequences_used,
               uint64_t max_numbers_generated_per_subsequence = 0)
  {
    base_subsequence += max_uniq_subsequences_used;
  }
};
}; // end namespace random
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/random_types.hpp | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft::random {
/**
* \ingroup multi_variable_gaussian
* @{
*/
/**
* @brief Matrix decomposition method for `multi_variable_gaussian` to use.
*
* `multi_variable_gaussian` can use any of the following methods.
*
* - `CHOLESKY`: Uses Cholesky decomposition on the normal equations.
* This may be faster than the other two methods, but less accurate.
*
* - `JACOBI`: Uses the singular value decomposition (SVD) computed with
* cuSOLVER's gesvdj algorithm, which is based on the Jacobi method
* (sweeps of plane rotations). This exposes more parallelism
* for small and medium size matrices than the QR option below.
*
* - `QR`: Uses the SVD computed with cuSOLVER's gesvd algorithm,
* which is based on the QR algorithm.
*/
enum class multi_variable_gaussian_decomposition_method {
  CHOLESKY,  ///< Cholesky decomposition on the normal equations
  JACOBI,    ///< SVD via cuSOLVER's gesvdj (Jacobi method)
  QR         ///< SVD via cuSOLVER's gesvd (QR algorithm)
};
/** @} */
}; // end of namespace raft::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/random/make_blobs.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MAKE_BLOBS_H
#define __MAKE_BLOBS_H
#pragma once
#include "detail/make_blobs.cuh"
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
namespace raft::random {
/**
* @brief GPU-equivalent of sklearn.datasets.make_blobs
*
* @tparam DataT output data type
* @tparam IdxT indexing arithmetic type
*
* @param[out] out generated data [on device]
* [dim = n_rows x n_cols]
* @param[out] labels labels for the generated data [on device]
* [len = n_rows]
* @param[in] n_rows number of rows in the generated data
* @param[in] n_cols number of columns in the generated data
* @param[in] n_clusters number of clusters (or classes) to generate
* @param[in] stream cuda stream to schedule the work on
* @param[in] row_major whether input `centers` and output `out`
* buffers are to be stored in row or column
* major layout
* @param[in] centers centers of each of the cluster, pass a nullptr
* if you need this also to be generated randomly
* [on device] [dim = n_clusters x n_cols]
* @param[in] cluster_std standard deviation of each cluster center,
* pass a nullptr if this is to be read from the
* `cluster_std_scalar`. [on device]
* [len = n_clusters]
* @param[in] cluster_std_scalar if 'cluster_std' is nullptr, then use this as
* the std-dev across all dimensions.
* @param[in] shuffle shuffle the generated dataset and labels
* @param[in] center_box_min min value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] center_box_max max value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] seed seed for the RNG
* @param[in] type RNG type
*/
template <typename DataT, typename IdxT>
void make_blobs(DataT* out,
                IdxT* labels,
                IdxT n_rows,
                IdxT n_cols,
                IdxT n_clusters,
                cudaStream_t stream,
                bool row_major                 = true,
                const DataT* centers           = nullptr,
                const DataT* cluster_std       = nullptr,
                const DataT cluster_std_scalar = (DataT)1.0,
                bool shuffle                   = true,
                DataT center_box_min           = (DataT)-10.0,
                DataT center_box_max           = (DataT)10.0,
                uint64_t seed                  = 0ULL,
                GeneratorType type             = GenPC)
{
  // Thin forwarder: all work is enqueued on `stream` by the detail caller.
  // See the Doxygen block above for parameter semantics.
  detail::make_blobs_caller(out,
                            labels,
                            n_rows,
                            n_cols,
                            n_clusters,
                            stream,
                            row_major,
                            centers,
                            cluster_std,
                            cluster_std_scalar,
                            shuffle,
                            center_box_min,
                            center_box_max,
                            seed,
                            type);
}
/**
* @defgroup make_blobs Generate Isotropic Gaussian Clusters
* @{
*/
/**
* @brief GPU-equivalent of sklearn.datasets.make_blobs
*
* @tparam DataT output data type
* @tparam IdxT indexing arithmetic type
*
* @param[in] handle raft handle for managing expensive resources
* @param[out] out generated data [on device]
* [dim = n_rows x n_cols]
* @param[out] labels labels for the generated data [on device]
* [len = n_rows]
* @param[in] n_clusters number of clusters (or classes) to generate
* @param[in] centers centers of each of the cluster, pass a nullptr
* if you need this also to be generated randomly
* [on device] [dim = n_clusters x n_cols]
* @param[in] cluster_std standard deviation of each cluster center,
* pass a nullptr if this is to be read from the
* `cluster_std_scalar`. [on device]
* [len = n_clusters]
* @param[in] cluster_std_scalar if 'cluster_std' is nullptr, then use this as
* the std-dev across all dimensions.
* @param[in] shuffle shuffle the generated dataset and labels
* @param[in] center_box_min min value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] center_box_max max value of box from which to pick cluster
* centers. Useful only if 'centers' is nullptr
* @param[in] seed seed for the RNG
* @param[in] type RNG type
*/
template <typename DataT, typename IdxT, typename layout>
void make_blobs(
  raft::resources const& handle,
  raft::device_matrix_view<DataT, IdxT, layout> out,
  raft::device_vector_view<IdxT, IdxT> labels,
  IdxT n_clusters                                                      = 5,
  std::optional<raft::device_matrix_view<DataT, IdxT, layout>> centers = std::nullopt,
  std::optional<raft::device_vector_view<DataT, IdxT>> const cluster_std = std::nullopt,
  const DataT cluster_std_scalar = (DataT)1.0,
  bool shuffle                   = true,
  DataT center_box_min           = (DataT)-10.0,
  DataT center_box_max           = (DataT)10.0,
  uint64_t seed                  = 0ULL,
  GeneratorType type             = GenPC)
{
  // Validate that the optional per-cluster inputs match n_clusters.
  if (centers.has_value()) {
    RAFT_EXPECTS(centers.value().extent(0) == (IdxT)n_clusters,
                 "n_centers must equal size of centers");
  }
  if (cluster_std.has_value()) {
    RAFT_EXPECTS(cluster_std.value().extent(0) == (IdxT)n_clusters,
                 "n_centers must equal size of cluster_std");
  }
  // Fix: message previously read "the number of row in output matrix".
  RAFT_EXPECTS(out.extent(0) == labels.extent(0),
               "Number of labels must equal the number of rows in output matrix");
  RAFT_EXPECTS(out.is_exhaustive(), "Output must be contiguous.");
  // The detail API takes a row-major flag rather than a layout type.
  const bool row_major = std::is_same_v<layout, raft::layout_c_contiguous>;
  // Unwrap the optionals; nullptr tells the implementation to generate
  // the centers (resp. use cluster_std_scalar) instead.
  DataT* prm_centers     = centers.has_value() ? centers.value().data_handle() : nullptr;
  DataT* prm_cluster_std = cluster_std.has_value() ? cluster_std.value().data_handle() : nullptr;
  detail::make_blobs_caller(out.data_handle(),
                            labels.data_handle(),
                            (IdxT)out.extent(0),
                            (IdxT)out.extent(1),
                            n_clusters,
                            resource::get_cuda_stream(handle),
                            row_major,
                            prm_centers,
                            prm_cluster_std,
                            cluster_std_scalar,
                            shuffle,
                            center_box_min,
                            center_box_max,
                            seed,
                            type);
}
/** @} */ // end group make_blobs
} // end namespace raft::random
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/make_regression.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Adapted from scikit-learn
* https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
*/
#pragma once
#include <algorithm>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/init.cuh>
#include <raft/linalg/qr.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/diagonal.cuh>
#include <raft/random/permute.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::random {
namespace detail {
/* Internal auxiliary function to help build the singular profile */
template <typename DataT, typename IdxT>
RAFT_KERNEL _singular_profile_kernel(DataT* out, IdxT n, DataT tail_strength, IdxT rank)
{
  // One thread per entry of the singular-value profile; guard the tail.
  const IdxT idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) { return; }
  // Signal component decays as a Gaussian of the scaled index, the fat tail
  // decays exponentially; tail_strength blends the two.
  const DataT sval     = static_cast<DataT>(idx) / rank;
  const DataT low_rank = ((DataT)1.0 - tail_strength) * raft::exp(-sval * sval);
  const DataT tail     = tail_strength * raft::exp((DataT)-0.1 * sval);
  out[idx]             = low_rank + tail;
}
/* Internal auxiliary function to generate a low-rank matrix */
/* Internal auxiliary function to generate a low-rank matrix.
 *
 * Builds out = Q0 * diag(s) * Q1^T (row-major, n_rows x n_cols), where Q0
 * and Q1 are random orthonormal bases obtained by QR, and s is the
 * signal-plus-tail singular profile from _singular_profile_kernel.
 * All work is enqueued on `stream`.
 */
template <typename DataT, typename IdxT>
static void _make_low_rank_matrix(raft::resources const& handle,
                                  DataT* out,
                                  IdxT n_rows,
                                  IdxT n_cols,
                                  IdxT effective_rank,
                                  DataT tail_strength,
                                  raft::random::RngState& r,
                                  cudaStream_t stream)
{
  cublasHandle_t cublas_handle = resource::get_cublas_handle(handle);
  IdxT n = std::min(n_rows, n_cols);
  // Generate random (ortho normal) vectors with QR decomposition
  rmm::device_uvector<DataT> rd_mat_0(n_rows * n, stream);
  rmm::device_uvector<DataT> rd_mat_1(n_cols * n, stream);
  normal(r, rd_mat_0.data(), n_rows * n, (DataT)0.0, (DataT)1.0, stream);
  normal(r, rd_mat_1.data(), n_cols * n, (DataT)0.0, (DataT)1.0, stream);
  rmm::device_uvector<DataT> q0(n_rows * n, stream);
  rmm::device_uvector<DataT> q1(n_cols * n, stream);
  raft::linalg::qrGetQ(handle, rd_mat_0.data(), q0.data(), n_rows, n, stream);
  raft::linalg::qrGetQ(handle, rd_mat_1.data(), q1.data(), n_cols, n, stream);
  // Build the singular profile by assembling signal and noise components
  rmm::device_uvector<DataT> singular_vec(n, stream);
  _singular_profile_kernel<<<raft::ceildiv<IdxT>(n, 256), 256, 0, stream>>>(
    singular_vec.data(), n, tail_strength, effective_rank);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  rmm::device_uvector<DataT> singular_mat(n * n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(singular_mat.data(), 0, n * n * sizeof(DataT), stream));
  raft::matrix::set_diagonal(handle,
                             make_device_vector_view<const DataT, IdxT>(singular_vec.data(), n),
                             make_device_matrix_view<DataT, IdxT>(singular_mat.data(), n, n));
  // Generate the column-major matrix: temp_q0s = Q0 * diag(s),
  // then temp_out = temp_q0s * Q1^T.
  // Fix: check the cuBLAS status instead of silently discarding it
  // (consistent with the RAFT_CUBLAS_TRY usage in make_regression_caller).
  rmm::device_uvector<DataT> temp_q0s(n_rows * n, stream);
  rmm::device_uvector<DataT> temp_out(n_rows * n_cols, stream);
  DataT alpha = 1.0, beta = 0.0;
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_OP_N,
                                                   n_rows,
                                                   n,
                                                   n,
                                                   &alpha,
                                                   q0.data(),
                                                   n_rows,
                                                   singular_mat.data(),
                                                   n,
                                                   &beta,
                                                   temp_q0s.data(),
                                                   n_rows,
                                                   stream));
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                   CUBLAS_OP_N,
                                                   CUBLAS_OP_T,
                                                   n_rows,
                                                   n_cols,
                                                   n,
                                                   &alpha,
                                                   temp_q0s.data(),
                                                   n_rows,
                                                   q1.data(),
                                                   n_cols,
                                                   &beta,
                                                   temp_out.data(),
                                                   n_rows,
                                                   stream));
  // Transpose from column-major to row-major
  raft::linalg::transpose(handle, temp_out.data(), out, n_rows, n_cols, stream);
}
/* Internal auxiliary function to permute rows in the given matrix according
* to a given permutation vector */
template <typename DataT, typename IdxT>
RAFT_KERNEL _gather2d_kernel(
  DataT* out, const DataT* in, const IdxT* perms, IdxT n_rows, IdxT n_cols)
{
  // One thread copies one whole row: out[row] = in[perms[row]].
  const IdxT row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= n_rows) { return; }
  const DataT* src = in + n_cols * perms[row];
  DataT* dst       = out + n_cols * row;
  for (IdxT col = 0; col < n_cols; ++col) {
    dst[col] = src[col];
  }
}
/**
 * @brief Implementation of make_regression (GPU analogue of
 * sklearn.datasets.make_regression).
 *
 * Generates a random row-major feature matrix `out` (n_rows x n_cols) and
 * target values `values` (n_rows x n_targets) from a random linear model on
 * `n_informative` features, optionally adding a bias, white noise, a
 * low-effective-rank structure, and a random shuffle of samples/features.
 *
 * @param[in]  handle         raft handle (provides the cuBLAS handle)
 * @param[out] out            feature matrix [n_rows x n_cols]
 * @param[out] values         target values [n_rows x n_targets]
 * @param[in]  n_rows         number of samples
 * @param[in]  n_cols         number of features
 * @param[in]  n_informative  number of informative features (clamped to n_cols)
 * @param[in]  stream         CUDA stream on which all work is enqueued
 * @param[out] coef           if non-null, the ground-truth coefficients
 *                            [n_cols x n_targets]
 * @param[in]  n_targets      number of regression targets
 * @param[in]  bias           scalar added to all values
 * @param[in]  effective_rank if >= 0, generate a low-rank fat-tailed input
 * @param[in]  tail_strength  tail weight (used when effective_rank >= 0)
 * @param[in]  noise          std-dev of white noise added to the values
 * @param[in]  shuffle        whether to shuffle samples and features
 * @param[in]  seed           RNG seed
 * @param[in]  type           RNG type
 */
template <typename DataT, typename IdxT>
void make_regression_caller(raft::resources const& handle,
                            DataT* out,
                            DataT* values,
                            IdxT n_rows,
                            IdxT n_cols,
                            IdxT n_informative,
                            cudaStream_t stream,
                            DataT* coef                     = nullptr,
                            IdxT n_targets                  = (IdxT)1,
                            DataT bias                      = (DataT)0.0,
                            IdxT effective_rank             = (IdxT)-1,
                            DataT tail_strength             = (DataT)0.5,
                            DataT noise                     = (DataT)0.0,
                            bool shuffle                    = true,
                            uint64_t seed                   = 0ULL,
                            raft::random::GeneratorType type = raft::random::GenPC)
{
  n_informative = std::min(n_informative, n_cols);
  cublasHandle_t cublas_handle = resource::get_cublas_handle(handle);
  // The alpha/beta scalars below are passed by host pointer.
  // Fix: check the cuBLAS status instead of ignoring it (consistent with
  // the RAFT_CUBLAS_TRY-wrapped gemm call below).
  RAFT_CUBLAS_TRY(cublasSetPointerMode(cublas_handle, CUBLAS_POINTER_MODE_HOST));
  raft::random::RngState r(seed, type);
  if (effective_rank < 0) {
    // Randomly generate a well conditioned input set
    normal(r, out, n_rows * n_cols, (DataT)0.0, (DataT)1.0, stream);
  } else {
    // Randomly generate a low rank, fat tail input set
    _make_low_rank_matrix(handle, out, n_rows, n_cols, effective_rank, tail_strength, r, stream);
  }
  // Use the right output buffer for the values: when shuffling, the values
  // are first written to a temporary and gathered into `values` afterwards.
  rmm::device_uvector<DataT> tmp_values(0, stream);
  DataT* _values;
  if (shuffle) {
    tmp_values.resize(n_rows * n_targets, stream);
    _values = tmp_values.data();
  } else {
    _values = values;
  }
  // Create a column-major matrix of output values only if it has more
  // than 1 column
  rmm::device_uvector<DataT> values_col(0, stream);
  DataT* _values_col;
  if (n_targets > 1) {
    values_col.resize(n_rows * n_targets, stream);
    _values_col = values_col.data();
  } else {
    _values_col = _values;
  }
  // Use the right buffer for the coefficients (a temporary is needed when
  // they will be shuffled, or when the caller does not want them at all)
  rmm::device_uvector<DataT> tmp_coef(0, stream);
  DataT* _coef;
  if (coef != nullptr && !shuffle) {
    _coef = coef;
  } else {
    tmp_coef.resize(n_cols * n_targets, stream);
    _coef = tmp_coef.data();
  }
  // Generate a ground truth model with only n_informative features
  uniform(r, _coef, n_informative * n_targets, (DataT)1.0, (DataT)100.0, stream);
  if (coef && n_informative != n_cols) {
    // Zero the coefficients of the non-informative features, since the
    // caller will receive the full [n_cols x n_targets] array.
    RAFT_CUDA_TRY(cudaMemsetAsync(_coef + n_informative * n_targets,
                                  0,
                                  (n_cols - n_informative) * n_targets * sizeof(DataT),
                                  stream));
  }
  // Compute the output values
  DataT alpha = (DataT)1.0, beta = (DataT)0.0;
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_handle,
                                                   CUBLAS_OP_T,
                                                   CUBLAS_OP_T,
                                                   n_rows,
                                                   n_targets,
                                                   n_informative,
                                                   &alpha,
                                                   out,
                                                   n_cols,
                                                   _coef,
                                                   n_targets,
                                                   &beta,
                                                   _values_col,
                                                   n_rows,
                                                   stream));
  // Transpose the values from column-major to row-major if needed
  if (n_targets > 1) {
    raft::linalg::transpose(handle, _values_col, _values, n_rows, n_targets, stream);
  }
  if (bias != 0.0) {
    // Add bias
    raft::linalg::addScalar(_values, _values, bias, n_rows * n_targets, stream);
  }
  rmm::device_uvector<DataT> white_noise(0, stream);
  if (noise != 0.0) {
    // Add white noise
    white_noise.resize(n_rows * n_targets, stream);
    normal(r, white_noise.data(), n_rows * n_targets, (DataT)0.0, noise, stream);
    raft::linalg::add(_values, _values, white_noise.data(), n_rows * n_targets, stream);
  }
  if (shuffle) {
    rmm::device_uvector<DataT> tmp_out(n_rows * n_cols, stream);
    rmm::device_uvector<IdxT> perms_samples(n_rows, stream);
    rmm::device_uvector<IdxT> perms_features(n_cols, stream);
    constexpr IdxT Nthreads = 256;
    // Shuffle the samples from out to tmp_out
    raft::random::permute<DataT, IdxT, IdxT>(
      perms_samples.data(), tmp_out.data(), out, n_cols, n_rows, true, stream);
    IdxT nblks_rows = raft::ceildiv<IdxT>(n_rows, Nthreads);
    // Apply the same sample permutation to the values
    _gather2d_kernel<<<nblks_rows, Nthreads, 0, stream>>>(
      values, _values, perms_samples.data(), n_rows, n_targets);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // Shuffle the features from tmp_out to out
    raft::random::permute<DataT, IdxT, IdxT>(
      perms_features.data(), out, tmp_out.data(), n_rows, n_cols, false, stream);
    // Shuffle the coefficients accordingly
    if (coef != nullptr) {
      IdxT nblks_cols = raft::ceildiv<IdxT>(n_cols, Nthreads);
      _gather2d_kernel<<<nblks_cols, Nthreads, 0, stream>>>(
        coef, _coef, perms_features.data(), n_cols, n_targets);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }
}
} // namespace detail
} // namespace raft::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/curand_wrappers.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <curand.h>
namespace raft::random {
namespace detail {
// @todo: We probably want to scrape through and replace any consumers of
// these wrappers with our RNG
/** check for curand runtime API errors and assert accordingly */
#define CURAND_CHECK(call) \
do { \
curandStatus_t status = call; \
ASSERT(status == CURAND_STATUS_SUCCESS, "FAIL: curand-call='%s'. Reason:%d\n", #call, status); \
} while (0)
/**
* @defgroup normal curand normal random number generation operations
* @{
*/
// Type-generic front end over the cuRAND host API: dispatches to
// curandGenerateNormal (float) or curandGenerateNormalDouble (double).
template <typename T>
curandStatus_t curandGenerateNormal(
  curandGenerator_t generator, T* outputPtr, size_t n, T mean, T stddev);
template <>
inline curandStatus_t curandGenerateNormal(
  curandGenerator_t generator, float* outputPtr, size_t n, float mean, float stddev)
{
  // This resolves to the C API ::curandGenerateNormal (found through ADL on
  // curandGenerator_t); the non-template C function is preferred by overload
  // resolution over this template, so there is no self-recursion.
  return curandGenerateNormal(generator, outputPtr, n, mean, stddev);
}
template <>
inline curandStatus_t curandGenerateNormal(
  curandGenerator_t generator, double* outputPtr, size_t n, double mean, double stddev)
{
  // Double precision uses the distinct C entry point, so no ambiguity here.
  return curandGenerateNormalDouble(generator, outputPtr, n, mean, stddev);
}
/** @} */
}; // end namespace detail
}; // end namespace raft::random | 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/rmat_rectangular_generator_types.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/random/rng_device.cuh>
#include <raft/random/rng_state.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <optional>
#include <variant>
namespace raft {
namespace random {
namespace detail {
/**
* @brief Implementation detail for checking output vector parameter(s)
* of `raft::random::rmat_rectangular_gen`.
*
* `raft::random::rmat_rectangular_gen` lets users specify
* output vector(s) in three different ways.
*
* 1. One vector: `out`, an "array-of-structs" representation
* of the edge list.
*
* 2. Two vectors: `out_src` and `out_dst`, together forming
* a "struct of arrays" representation of the edge list.
*
* 3. Three vectors: `out`, `out_src`, and `out_dst`.
* `out` is as in (1),
* and `out_src` and `out_dst` are as in (2).
*
* This class prevents users from doing anything other than that,
* and makes it easier for the three cases to share a common implementation.
* It also prevents duplication of run-time vector length checking
* (`out` must have twice the number of elements as `out_src` and `out_dst`,
* and `out_src` and `out_dst` must have the same length).
*
* @tparam IdxT Type of each node index; must be integral.
*
* The following examples show how to create an output parameter.
*
* @code
* rmat_rectangular_gen_output<IdxT> output1(out);
* rmat_rectangular_gen_output<IdxT> output2(out_src, out_dst);
* rmat_rectangular_gen_output<IdxT> output3(out, out_src, out_dst);
* @endcode
*/
template <typename IdxT>
class rmat_rectangular_gen_output {
public:
using out_view_type =
raft::device_mdspan<IdxT, raft::extents<IdxT, raft::dynamic_extent, 2>, raft::row_major>;
using out_src_view_type = raft::device_vector_view<IdxT, IdxT>;
using out_dst_view_type = raft::device_vector_view<IdxT, IdxT>;
private:
  // "Struct of arrays" form of the edge list: separate source and
  // destination vectors of equal length.
  class output_pair {
   public:
    // Invariant enforced here: src and dst must have the same length,
    // since they are the two halves of the same edge list.
    output_pair(const out_src_view_type& src, const out_dst_view_type& dst) : src_(src), dst_(dst)
    {
      RAFT_EXPECTS(src.extent(0) == dst.extent(0),
                   "rmat_rectangular_gen: "
                   "out_src.extent(0) = %zu != out_dst.extent(0) = %zu",
                   static_cast<std::size_t>(src.extent(0)),
                   static_cast<std::size_t>(dst.extent(0)));
    }
    out_src_view_type out_src_view() const { return src_; }
    out_dst_view_type out_dst_view() const { return dst_; }
    // Edge count equals the (common) length of the two vectors.
    IdxT number_of_edges() const { return src_.extent(0); }
    bool empty() const { return src_.extent(0) == 0 && dst_.extent(0) == 0; }
   private:
    out_src_view_type src_;
    out_dst_view_type dst_;
  };
  // Combined form: the "array of structs" view `out` plus the
  // "struct of arrays" pair (src, dst) for the same edge list.
  class output_triple {
   public:
    // Invariants enforced: src/dst lengths match (checked by output_pair),
    // and out holds both columns, so out.extent(0) == 2 * dst.extent(0).
    output_triple(const out_view_type& out,
                  const out_src_view_type& src,
                  const out_dst_view_type& dst)
      : out_(out), pair_(src, dst)
    {
      RAFT_EXPECTS(out.extent(0) == IdxT(2) * dst.extent(0),
                   "rmat_rectangular_gen: "
                   "out.extent(0) = %zu != 2 * out_dst.extent(0) = %zu",
                   static_cast<std::size_t>(out.extent(0)),
                   static_cast<std::size_t>(IdxT(2) * dst.extent(0)));
    }
    out_view_type out_view() const { return out_; }
    // Pair accessors delegate to the wrapped output_pair.
    out_src_view_type out_src_view() const { return pair_.out_src_view(); }
    out_dst_view_type out_dst_view() const { return pair_.out_dst_view(); }
    IdxT number_of_edges() const { return pair_.number_of_edges(); }
    bool empty() const { return out_.extent(0) == 0 && pair_.empty(); }
   private:
    out_view_type out_;
    output_pair pair_;
  };
public:
/**
* @brief You're not allowed to construct this with no vectors.
*/
rmat_rectangular_gen_output() = delete;
/**
* @brief Constructor taking a single vector, that packs the source
* node ids and destination node ids in array-of-structs fashion.
*
* @param[out] out Generated edgelist [on device]. In each row, the
* first element is the source node id, and the second element is
* the destination node id.
*/
rmat_rectangular_gen_output(const out_view_type& out) : data_(out) {}
/**
* @brief Constructor taking two vectors, that store the source node
* ids and the destination node ids separately, in
* struct-of-arrays fashion.
*
* @param[out] out_src Source node id's [on device] [len = n_edges].
*
* @param[out] out_dst Destination node id's [on device] [len = n_edges].
*/
rmat_rectangular_gen_output(const out_src_view_type& src, const out_dst_view_type& dst)
: data_(output_pair(src, dst))
{
}
/**
* @brief Constructor taking all three vectors.
*
* @param[out] out Generated edgelist [on device]. In each row, the
* first element is the source node id, and the second element is
* the destination node id.
*
* @param[out] out_src Source node id's [on device] [len = n_edges].
*
* @param[out] out_dst Destination node id's [on device] [len = n_edges].
*/
rmat_rectangular_gen_output(const out_view_type& out,
const out_src_view_type& src,
const out_dst_view_type& dst)
: data_(output_triple(out, src, dst))
{
}
/**
* @brief Whether the vector(s) are all length zero.
*/
bool empty() const
{
if (std::holds_alternative<out_view_type>(data_)) {
return std::get<out_view_type>(data_).extent(0) == 0;
} else if (std::holds_alternative<output_pair>(data_)) {
return std::get<output_pair>(data_).empty();
} else { // std::holds_alternative<output_triple>(data_)
return std::get<output_triple>(data_).empty();
}
}
/**
* @brief Vector for the output single edgelist; the argument given
* to the one-argument constructor, or the first argument of the
* three-argument constructor; `std::nullopt` if not provided.
*/
std::optional<out_view_type> out_view() const
{
if (std::holds_alternative<out_view_type>(data_)) {
return std::get<out_view_type>(data_);
} else if (std::holds_alternative<output_triple>(data_)) {
return std::get<output_triple>(data_).out_view();
} else { // if (std::holds_alternative<>(output_pair))
return std::nullopt;
}
}
/**
* @brief Vector for the output source edgelist; the first argument
* given to the two-argument constructor, or the second argument
* of the three-argument constructor; `std::nullopt` if not provided.
*/
std::optional<out_src_view_type> out_src_view() const
{
if (std::holds_alternative<output_pair>(data_)) {
return std::get<output_pair>(data_).out_src_view();
} else if (std::holds_alternative<output_triple>(data_)) {
return std::get<output_triple>(data_).out_src_view();
} else { // if (std::holds_alternative<out_view_type>(data_))
return std::nullopt;
}
}
/**
* @brief Vector for the output destination edgelist; the second
* argument given to the two-argument constructor, or the third
* argument of the three-argument constructor;
* `std::nullopt` if not provided.
*/
std::optional<out_dst_view_type> out_dst_view() const
{
if (std::holds_alternative<output_pair>(data_)) {
return std::get<output_pair>(data_).out_dst_view();
} else if (std::holds_alternative<output_triple>(data_)) {
return std::get<output_triple>(data_).out_dst_view();
} else { // if (std::holds_alternative<out_view_type>(data_))
return std::nullopt;
}
}
/**
* @brief Number of edges in the graph; zero if no output vector
* was provided to the constructor.
*/
IdxT number_of_edges() const
{
if (std::holds_alternative<out_view_type>(data_)) {
return std::get<out_view_type>(data_).extent(0);
} else if (std::holds_alternative<output_pair>(data_)) {
return std::get<output_pair>(data_).number_of_edges();
} else { // if (std::holds_alternative<output_triple>(data_))
return std::get<output_triple>(data_).number_of_edges();
}
}
private:
std::variant<out_view_type, output_pair, output_triple> data_;
};
} // end namespace detail
} // end namespace random
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/rmat_rectangular_generator.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "rmat_rectangular_generator_types.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng_device.cuh>
#include <raft/random/rng_state.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace random {
namespace detail {
/**
 * @brief Descend one level of the RMAT recursion: draw a uniform variate,
 * pick one of the four quadrants via the cumulative probabilities
 * (a, ab = a+b, abc = a+b+c), and set the corresponding bit of the
 * source/destination node ids.
 */
template <typename IdxT, typename ProbT>
DI void gen_and_update_bits(IdxT& src_id,
                            IdxT& dst_id,
                            ProbT a,
                            ProbT ab,
                            ProbT abc,
                            IdxT r_scale,
                            IdxT c_scale,
                            IdxT curr_depth,
                            raft::random::PCGenerator& gen)
{
  ProbT val;
  gen.next(val);
  // Quadrants c and d (val > ab) select the lower half of the rows.
  const bool src_bit = val > ab;
  // Quadrant b (a < val <= ab) and quadrant d (val > abc) select the right half.
  const bool dst_bit = src_bit ? (val > abc) : (val > a);
  // Only contribute a bit while the recursion is still within each dimension's scale.
  if (curr_depth < r_scale) { src_id |= (IdxT(src_bit) << (r_scale - curr_depth - 1)); }
  if (curr_depth < c_scale) { dst_id |= (IdxT(dst_bit) << (c_scale - curr_depth - 1)); }
}
/**
 * @brief Write one generated edge to whichever output buffers were requested.
 *
 * Any of `out`, `out_src`, `out_dst` may be nullptr and is then skipped.
 * Threads whose `idx` lies past `n_edges` write nothing.
 */
template <typename IdxT>
DI void store_ids(
  IdxT* out, IdxT* out_src, IdxT* out_dst, IdxT src_id, IdxT dst_id, IdxT idx, IdxT n_edges)
{
  if (idx >= n_edges) { return; }
  if (out != nullptr) {
    // Interleaved (src, dst) layout; these stores are uncoalesced across the warp.
    const IdxT base = idx * 2;
    out[base]       = src_id;
    out[base + 1]   = dst_id;
  }
  if (out_src != nullptr) { out_src[idx] = src_id; }
  if (out_dst != nullptr) { out_dst[idx] = dst_id; }
}
/**
 * @brief RMAT generation kernel for the per-level `theta` overload.
 *
 * One thread generates one edge. Requires dynamic shared memory of
 * `sizeof(ProbT) * max_scale * 2 * 2` bytes (one 2x2 quadrant matrix per level).
 * Threads with idx >= n_edges still execute the full body (they must, so that
 * every thread reaches the __syncthreads() barriers) but skip the final store
 * inside store_ids().
 */
template <typename IdxT, typename ProbT>
RAFT_KERNEL rmat_gen_kernel(IdxT* out,
                            IdxT* out_src,
                            IdxT* out_dst,
                            const ProbT* theta,
                            IdxT r_scale,
                            IdxT c_scale,
                            IdxT n_edges,
                            IdxT max_scale,
                            raft::random::RngState r)
{
  IdxT idx = threadIdx.x + ((IdxT)blockIdx.x * blockDim.x);
  extern __shared__ ProbT s_theta[];
  auto theta_len = max_scale * 2 * 2;
  // load the probabilities into shared memory and then convert them into cdf's
  // currently there are smem bank conflicts due to the way these are accessed
  for (int i = threadIdx.x; i < theta_len; i += blockDim.x) {
    s_theta[i] = theta[i];
  }
  __syncthreads();
  // In-place prefix sum over each level's 4 probabilities: afterwards
  // s_theta[4i..4i+3] = {a, a+b, a+b+c, a+b+c+d}. Each thread owns distinct
  // levels i, so no synchronization is needed within this loop.
  for (int i = threadIdx.x; i < max_scale; i += blockDim.x) {
    auto a = s_theta[4 * i];
    auto b = s_theta[4 * i + 1];
    auto c = s_theta[4 * i + 2];
    s_theta[4 * i + 1] = a + b;
    s_theta[4 * i + 2] = a + b + c;
    s_theta[4 * i + 3] += a + b + c;
  }
  __syncthreads();
  IdxT src_id{0}, dst_id{0};
  // Each thread draws from its own PCG subsequence so edges are independent.
  raft::random::PCGenerator gen{r.seed, r.base_subsequence + idx, 0};
  // Walk down the recursion, one quadrant decision per level.
  for (IdxT i = 0; i < max_scale; ++i) {
    auto a = s_theta[i * 4], ab = s_theta[i * 4 + 1], abc = s_theta[i * 4 + 2];
    gen_and_update_bits(src_id, dst_id, a, ab, abc, r_scale, c_scale, i, gen);
  }
  store_ids(out, out_src, out_dst, src_id, dst_id, idx, n_edges);
}
/**
 * @brief Host launcher for the per-level `theta` RMAT kernel.
 *
 * Launches one thread per edge on `stream`, then advances the RNG state so a
 * subsequent call with the same RngState produces fresh edges. No-op when
 * n_edges <= 0.
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen_caller(IdxT* out,
                                 IdxT* out_src,
                                 IdxT* out_dst,
                                 const ProbT* theta,
                                 IdxT r_scale,
                                 IdxT c_scale,
                                 IdxT n_edges,
                                 cudaStream_t stream,
                                 raft::random::RngState& r)
{
  if (n_edges <= 0) { return; }
  static constexpr int N_THREADS = 512;
  const auto max_scale = max(r_scale, c_scale);
  // One 2x2 probability quadruple per level of resolution lives in shared memory.
  const size_t smem_size = sizeof(ProbT) * size_t(4) * max_scale;
  const auto n_blks      = raft::ceildiv<IdxT>(n_edges, N_THREADS);
  rmat_gen_kernel<<<n_blks, N_THREADS, smem_size, stream>>>(
    out, out_src, out_dst, theta, r_scale, c_scale, n_edges, max_scale, r);
  RAFT_CUDA_TRY(cudaGetLastError());
  // Each edge used one subsequence and at most max_scale draws.
  r.advance(n_edges, max_scale);
}
/**
 * @brief RMAT generation kernel for the scale-invariant overload: the same
 * quadrant probabilities a, b, c (and implicitly d = 1 - a - b - c) apply at
 * every level of the recursion.
 *
 * One thread generates one edge; no shared memory is required. Threads with
 * idx >= n_edges still run the loops but skip the store inside store_ids().
 */
template <typename IdxT, typename ProbT>
RAFT_KERNEL rmat_gen_kernel(IdxT* out,
                            IdxT* out_src,
                            IdxT* out_dst,
                            ProbT a,
                            ProbT b,
                            ProbT c,
                            IdxT r_scale,
                            IdxT c_scale,
                            IdxT n_edges,
                            IdxT max_scale,
                            raft::random::RngState r)
{
  IdxT idx = threadIdx.x + ((IdxT)blockIdx.x * blockDim.x);
  IdxT src_id{0}, dst_id{0};
  // Each thread draws from its own PCG subsequence so edges are independent.
  raft::random::PCGenerator gen{r.seed, r.base_subsequence + idx, 0};
  auto min_scale = min(r_scale, c_scale);
  IdxT i = 0;
  // Levels where both dimensions are active: full 2x2 quadrant choice with
  // cumulative thresholds (a, a+b, a+b+c).
  for (; i < min_scale; ++i) {
    gen_and_update_bits(src_id, dst_id, a, a + b, a + b + c, r_scale, c_scale, i, gen);
  }
  // Remaining row-only levels (when r_scale > c_scale): only the source bit
  // matters, set with the marginal probability c + d = 1 - (a + b).
  for (; i < r_scale; ++i) {
    gen_and_update_bits(src_id, dst_id, a + b, a + b, ProbT(1), r_scale, c_scale, i, gen);
  }
  // Remaining column-only levels (when c_scale > r_scale): only the destination
  // bit matters, set with the marginal probability b + d = 1 - (a + c).
  // At most one of these two tail loops executes.
  for (; i < c_scale; ++i) {
    gen_and_update_bits(src_id, dst_id, a + c, ProbT(1), ProbT(1), r_scale, c_scale, i, gen);
  }
  store_ids(out, out_src, out_dst, src_id, dst_id, idx, n_edges);
}
/**
 * @brief Host launcher for the scale-invariant (a, b, c) RMAT kernel.
 *
 * Launches one thread per edge on `stream`, then advances the RNG state so a
 * subsequent call with the same RngState produces fresh edges. No-op when
 * n_edges <= 0.
 */
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen_caller(IdxT* out,
                                 IdxT* out_src,
                                 IdxT* out_dst,
                                 ProbT a,
                                 ProbT b,
                                 ProbT c,
                                 IdxT r_scale,
                                 IdxT c_scale,
                                 IdxT n_edges,
                                 cudaStream_t stream,
                                 raft::random::RngState& r)
{
  if (n_edges <= 0) { return; }
  static constexpr int N_THREADS = 512;
  const auto max_scale = max(r_scale, c_scale);
  const auto n_blks    = raft::ceildiv<IdxT>(n_edges, N_THREADS);
  // The probabilities are identical at every level, so no shared-memory table
  // is needed (third launch parameter is 0).
  rmat_gen_kernel<<<n_blks, N_THREADS, 0, stream>>>(
    out, out_src, out_dst, a, b, c, r_scale, c_scale, n_edges, max_scale, r);
  RAFT_CUDA_TRY(cudaGetLastError());
  // Each edge used one subsequence and at most max_scale draws.
  r.advance(n_edges, max_scale);
}
/**
* @brief Implementation of `raft::random::rmat_rectangular_gen_impl`.
*
* @tparam IdxT type of each node index
* @tparam ProbT data type used for probability distributions (either fp32 or fp64)
* @param[in] handle RAFT handle, containing the CUDA stream on which to schedule work
* @param[in] r underlying state of the random generator. Especially useful when
* one wants to call this API for multiple times in order to generate
* a larger graph. For that case, just create this object with the
* initial seed once and after every call continue to pass the same
* object for the successive calls.
* @param[out] output Encapsulation of one, two, or three output vectors.
* @param[in] theta distribution of each quadrant at each level of resolution.
* Since these are probabilities, each of the 2x2 matrices for
* each level of the RMAT must sum to one. [on device]
* [dim = max(r_scale, c_scale) x 2 x 2]. Of course, it is assumed
* that each of the group of 2 x 2 numbers all sum up to 1.
* @param[in] r_scale 2^r_scale represents the number of source nodes
* @param[in] c_scale 2^c_scale represents the number of destination nodes
*/
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen_impl(raft::resources const& handle,
                               raft::random::RngState& r,
                               raft::device_vector_view<const ProbT, IdxT> theta,
                               raft::random::detail::rmat_rectangular_gen_output<IdxT> output,
                               IdxT r_scale,
                               IdxT c_scale)
{
  static_assert(std::is_integral_v<IdxT>,
                "rmat_rectangular_gen: "
                "Template parameter IdxT must be an integral type");
  // An empty output is a no-op, not an error.
  if (output.empty()) { return; }
  // theta holds one 2x2 probability matrix per level: 4 * max(r_scale, c_scale)
  // entries in total.
  const IdxT expected_theta_len = IdxT(4) * (r_scale >= c_scale ? r_scale : c_scale);
  RAFT_EXPECTS(theta.extent(0) == expected_theta_len,
               "rmat_rectangular_gen: "
               "theta.extent(0) = %zu != 2 * 2 * max(r_scale = %zu, c_scale = %zu) = %zu",
               static_cast<std::size_t>(theta.extent(0)),
               static_cast<std::size_t>(r_scale),
               static_cast<std::size_t>(c_scale),
               static_cast<std::size_t>(expected_theta_len));
  // Unpack whichever of the three output views were provided; absent views
  // map to nullptr, which the kernel treats as "do not write this buffer".
  const auto out     = output.out_view();
  const auto out_src = output.out_src_view();
  const auto out_dst = output.out_dst_view();
  IdxT* out_ptr      = out ? out->data_handle() : nullptr;
  IdxT* out_src_ptr  = out_src ? out_src->data_handle() : nullptr;
  IdxT* out_dst_ptr  = out_dst ? out_dst->data_handle() : nullptr;
  rmat_rectangular_gen_caller(out_ptr,
                              out_src_ptr,
                              out_dst_ptr,
                              theta.data_handle(),
                              r_scale,
                              c_scale,
                              output.number_of_edges(),
                              resource::get_cuda_stream(handle),
                              r);
}
/**
* @brief Overload of `rmat_rectangular_gen` that assumes the same
* a, b, c, d probability distributions across all the scales.
*
* `a`, `b, and `c` effectively replace the above overload's
* `theta` parameter.
*/
template <typename IdxT, typename ProbT>
void rmat_rectangular_gen_impl(raft::resources const& handle,
                               raft::random::RngState& r,
                               raft::random::detail::rmat_rectangular_gen_output<IdxT> output,
                               ProbT a,
                               ProbT b,
                               ProbT c,
                               IdxT r_scale,
                               IdxT c_scale)
{
  static_assert(std::is_integral_v<IdxT>,
                "rmat_rectangular_gen: "
                "Template parameter IdxT must be an integral type");
  // An empty output is a no-op, not an error.
  if (output.empty()) { return; }
  // Unpack whichever of the three output views were provided; absent views
  // map to nullptr, which the kernel treats as "do not write this buffer".
  const auto out     = output.out_view();
  const auto out_src = output.out_src_view();
  const auto out_dst = output.out_dst_view();
  IdxT* out_ptr      = out ? out->data_handle() : nullptr;
  IdxT* out_src_ptr  = out_src ? out_src->data_handle() : nullptr;
  IdxT* out_dst_ptr  = out_dst ? out_dst->data_handle() : nullptr;
  detail::rmat_rectangular_gen_caller(out_ptr,
                                      out_src_ptr,
                                      out_dst_ptr,
                                      a,
                                      b,
                                      c,
                                      r_scale,
                                      c_scale,
                                      output.number_of_edges(),
                                      resource::get_cuda_stream(handle),
                                      r);
}
} // end namespace detail
} // end namespace random
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/random | rapidsai_public_repos/raft/cpp/include/raft/random/detail/rng_impl_deprecated.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DISCLAIMER: this file is deprecated and will be removed in a future release
*/
#pragma once
#include "rng_device.cuh"
#include <curand_kernel.h>
#include <raft/core/resources.hpp>
#include <raft/random/rng_state.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/detail/cub_wrappers.cuh>
#include <raft/util/scatter.cuh>
#include <random>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace random {
namespace detail {
#define METHOD_DEPR(new_name) [[deprecated("Use the flat '" #new_name "' function instead")]]
// Legacy RNG implementation backing the deprecated raft::random::Rng class.
// Every distribution method is flagged with METHOD_DEPR; prefer the flat
// free functions in raft::random instead.
class RngImpl {
 public:
  // Construct with a seed and a generator type (Philox by default).
  RngImpl(uint64_t seed, GeneratorType _t = GenPhilox)
    : state{seed, 0, _t},
      type(_t),
      // simple heuristic to make sure all SMs will be occupied properly
      // and also not too many initialization calls will be made by each thread
      nBlocks(4 * getMultiProcessorCount())
  {
  }
  // Host-side helper: pick parameters (a, b) for the affine transform
  // i -> (a*i + b) % n, with gcd(a, n) == 1 so the map is a bijection on [0, n).
  // Uses a host mt19937_64 seeded from the device RNG state.
  template <typename IdxT>
  METHOD_DEPR(affine_transform_params)
  void affine_transform_params(IdxT n, IdxT& a, IdxT& b)
  {
    // always keep 'a' to be coprime to 'n'
    std::mt19937_64 mt_rng(state.seed + state.base_subsequence);
    a = mt_rng() % n;
    while (gcd(a, n) != 1) {
      ++a;
      if (a >= n) a = 0;
    }
    // the bias term 'b' can be any number in the range of [0, n)
    b = mt_rng() % n;
  }
  // Fill ptr[0..len) with uniform floating-point values in [start, end).
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(uniform)
  void uniform(OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
  {
    static_assert(std::is_floating_point<OutType>::value,
                  "Type for 'uniform' can only be floating point!");
    UniformDistParams<OutType> params;
    params.start = start;
    params.end   = end;
    kernel_dispatch<OutType, LenType, 1, UniformDistParams<OutType>>(ptr, len, stream, params);
  }
  // Fill ptr[0..len) with uniform integers in [start, end); the diff is carried
  // in a 32- or 64-bit unsigned depending on sizeof(OutType).
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(uniformInt)
  void uniformInt(OutType* ptr, LenType len, OutType start, OutType end, cudaStream_t stream)
  {
    static_assert(std::is_integral<OutType>::value, "Type for 'uniformInt' can only be integer!");
    ASSERT(end > start, "'end' must be greater than 'start'");
    if (sizeof(OutType) == 4) {
      UniformIntDistParams<OutType, uint32_t> params;
      params.start = start;
      params.end   = end;
      params.diff  = uint32_t(params.end - params.start);
      kernel_dispatch<OutType, LenType, 1, UniformIntDistParams<OutType, uint32_t>>(
        ptr, len, stream, params);
    } else {
      UniformIntDistParams<OutType, uint64_t> params;
      params.start = start;
      params.end   = end;
      params.diff  = uint64_t(params.end - params.start);
      kernel_dispatch<OutType, LenType, 1, UniformIntDistParams<OutType, uint64_t>>(
        ptr, len, stream, params);
    }
  }
  // Fill ptr[0..len) with normally distributed values (mean mu, stddev sigma).
  // ITEMS_PER_CALL = 2 because the underlying transform yields values in pairs.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(normal)
  void normal(OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
  {
    static_assert(std::is_floating_point<OutType>::value,
                  "Type for 'normal' can only be floating point!");
    NormalDistParams<OutType> params;
    params.mu    = mu;
    params.sigma = sigma;
    kernel_dispatch<OutType, LenType, 2, NormalDistParams<OutType>>(ptr, len, stream, params);
  }
  // Integer variant of normal(): values are rounded to the integral OutType.
  template <typename IntType, typename LenType = int>
  METHOD_DEPR(normalInt)
  void normalInt(IntType* ptr, LenType len, IntType mu, IntType sigma, cudaStream_t stream)
  {
    static_assert(std::is_integral<IntType>::value, "Type for 'normalInt' can only be integer!");
    NormalIntDistParams<IntType> params;
    params.mu    = mu;
    params.sigma = sigma;
    kernel_dispatch<IntType, LenType, 2, NormalIntDistParams<IntType>>(ptr, len, stream, params);
  }
  // Fill an n_rows x n_cols table with normal values: per-column means from
  // mu_vec; per-column stddevs from sigma_vec if non-null, else the scalar sigma.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(normalTable)
  void normalTable(OutType* ptr,
                   LenType n_rows,
                   LenType n_cols,
                   const OutType* mu_vec,
                   const OutType* sigma_vec,
                   OutType sigma,
                   cudaStream_t stream)
  {
    NormalTableDistParams<OutType, LenType> params;
    params.n_rows    = n_rows;
    params.n_cols    = n_cols;
    params.mu_vec    = mu_vec;
    params.sigma     = sigma;
    params.sigma_vec = sigma_vec;
    LenType len      = n_rows * n_cols;
    kernel_dispatch<OutType, LenType, 2, NormalTableDistParams<OutType, LenType>>(
      ptr, len, stream, params);
  }
  // Fill ptr[0..len) with the constant val.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(fill)
  void fill(OutType* ptr, LenType len, OutType val, cudaStream_t stream)
  {
    InvariantDistParams<OutType> params;
    params.const_val = val;
    kernel_dispatch<OutType, LenType, 1, InvariantDistParams<OutType>>(ptr, len, stream, params);
  }
  // Fill ptr[0..len) with Bernoulli(prob) draws (OutType defaults to bool).
  template <typename Type, typename OutType = bool, typename LenType = int>
  METHOD_DEPR(bernoulli)
  void bernoulli(OutType* ptr, LenType len, Type prob, cudaStream_t stream)
  {
    BernoulliDistParams<Type> params;
    params.prob = prob;
    kernel_dispatch<OutType, LenType, 1, BernoulliDistParams<Type>>(ptr, len, stream, params);
  }
  // Bernoulli(prob) scaled by +/- `scale` (floating-point output only).
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(scaled_bernoulli)
  void scaled_bernoulli(OutType* ptr, LenType len, OutType prob, OutType scale, cudaStream_t stream)
  {
    static_assert(std::is_floating_point<OutType>::value,
                  "Type for 'scaled_bernoulli' can only be floating point!");
    ScaledBernoulliDistParams<OutType> params;
    params.prob  = prob;
    params.scale = scale;
    kernel_dispatch<OutType, LenType, 1, ScaledBernoulliDistParams<OutType>>(
      ptr, len, stream, params);
  }
  // Gumbel distribution with location mu and scale beta.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(gumbel)
  void gumbel(OutType* ptr, LenType len, OutType mu, OutType beta, cudaStream_t stream)
  {
    GumbelDistParams<OutType> params;
    params.mu   = mu;
    params.beta = beta;
    kernel_dispatch<OutType, LenType, 1, GumbelDistParams<OutType>>(ptr, len, stream, params);
  }
  // Log-normal distribution with underlying normal parameters (mu, sigma).
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(lognormal)
  void lognormal(OutType* ptr, LenType len, OutType mu, OutType sigma, cudaStream_t stream)
  {
    LogNormalDistParams<OutType> params;
    params.mu    = mu;
    params.sigma = sigma;
    kernel_dispatch<OutType, LenType, 2, LogNormalDistParams<OutType>>(ptr, len, stream, params);
  }
  // Logistic distribution with location mu and scale.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(logistic)
  void logistic(OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
  {
    LogisticDistParams<OutType> params;
    params.mu    = mu;
    params.scale = scale;
    kernel_dispatch<OutType, LenType, 1, LogisticDistParams<OutType>>(ptr, len, stream, params);
  }
  // Exponential distribution with rate lambda.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(exponential)
  void exponential(OutType* ptr, LenType len, OutType lambda, cudaStream_t stream)
  {
    ExponentialDistParams<OutType> params;
    params.lambda = lambda;
    kernel_dispatch<OutType, LenType, 1, ExponentialDistParams<OutType>>(ptr, len, stream, params);
  }
  // Rayleigh distribution with scale sigma.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(rayleigh)
  void rayleigh(OutType* ptr, LenType len, OutType sigma, cudaStream_t stream)
  {
    RayleighDistParams<OutType> params;
    params.sigma = sigma;
    kernel_dispatch<OutType, LenType, 1, RayleighDistParams<OutType>>(ptr, len, stream, params);
  }
  // Laplace distribution with location mu and scale.
  template <typename OutType, typename LenType = int>
  METHOD_DEPR(laplace)
  void laplace(OutType* ptr, LenType len, OutType mu, OutType scale, cudaStream_t stream)
  {
    LaplaceDistParams<OutType> params;
    params.mu    = mu;
    params.scale = scale;
    kernel_dispatch<OutType, LenType, 1, LaplaceDistParams<OutType>>(ptr, len, stream, params);
  }
  // Advance the RNG state so the next call draws from fresh subsequences.
  void advance(uint64_t max_uniq_subsequences_used,
               uint64_t max_numbers_generated_per_subsequence = 0)
  {
    state.advance(max_uniq_subsequences_used, max_numbers_generated_per_subsequence);
  }
  // Launch fillKernel with the generator selected by state.type, then advance
  // the state. Unknown generator types are silently ignored (no launch).
  template <typename OutType, typename LenType, int ITEMS_PER_CALL, typename ParamType>
  void kernel_dispatch(OutType* ptr, LenType len, cudaStream_t stream, ParamType params)
  {
    switch (state.type) {
      case GenPhilox:
        fillKernel<OutType, LenType, PhiloxGenerator, ITEMS_PER_CALL>
          <<<nBlocks, nThreads, 0, stream>>>(
            state.seed, state.base_subsequence, 0, ptr, len, params);
        break;
      case GenPC:
        fillKernel<OutType, LenType, PCGenerator, ITEMS_PER_CALL><<<nBlocks, nThreads, 0, stream>>>(
          state.seed, state.base_subsequence, 0, ptr, len, params);
        break;
      default: break;
    }
    // The max_numbers_generated_per_subsequence parameter does not matter for now, using 16 for now
    advance(uint64_t(nBlocks) * nThreads, 16);
    return;
  }
  // Weighted sampling without replacement: perturb the weights randomly, sort,
  // and take the top sampledLen items. Note this deprecated signature takes both
  // a handle and an explicit stream.
  template <typename DataT, typename WeightsT, typename IdxT = int>
  METHOD_DEPR(sampleWithoutReplacement)
  void sampleWithoutReplacement(raft::resources const& handle,
                                DataT* out,
                                IdxT* outIdx,
                                const DataT* in,
                                const WeightsT* wts,
                                IdxT sampledLen,
                                IdxT len,
                                cudaStream_t stream)
  {
    ASSERT(sampledLen <= len, "sampleWithoutReplacement: 'sampledLen' cant be more than 'len'.");
    rmm::device_uvector<WeightsT> expWts(len, stream);
    rmm::device_uvector<WeightsT> sortedWts(len, stream);
    rmm::device_uvector<IdxT> inIdx(len, stream);
    rmm::device_uvector<IdxT> outIdxBuff(len, stream);
    auto* inIdxPtr = inIdx.data();
    // generate modified weights
    SamplingParams<WeightsT, IdxT> params;
    params.inIdxPtr = inIdxPtr;
    params.wts      = wts;
    kernel_dispatch<WeightsT, IdxT, 1, SamplingParams<WeightsT, IdxT>>(
      expWts.data(), len, stream, params);
    ///@todo: use a more efficient partitioning scheme instead of full sort
    // sort the array and pick the top sampledLen items
    IdxT* outIdxPtr = outIdxBuff.data();
    rmm::device_uvector<char> workspace(0, stream);
    sortPairs(workspace, expWts.data(), sortedWts.data(), inIdxPtr, outIdxPtr, (int)len, stream);
    if (outIdx != nullptr) {
      RAFT_CUDA_TRY(cudaMemcpyAsync(
        outIdx, outIdxPtr, sizeof(IdxT) * sampledLen, cudaMemcpyDeviceToDevice, stream));
    }
    scatter<DataT, IdxT>(out, in, outIdxPtr, sampledLen, stream);
  }
  // Current generator state (seed, base subsequence, generator type).
  RngState state;
  // Generator type requested at construction (mirrors state.type).
  GeneratorType type;
  /** number of blocks to launch */
  int nBlocks;
  static const int nThreads = 256;
};
#undef METHOD_DEPR
}; // end namespace detail
}; // end namespace random
}; // end namespace raft
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.