repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/stats/minmax.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MINMAX_H
#define __MINMAX_H
#pragma once
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/minmax.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <limits>
namespace raft {
namespace stats {
/**
* @brief Computes min/max across every column of the input matrix, as well as
* optionally allow to subsample based on the given row/col ID mapping vectors
*
* @tparam T the data type
* @tparam TPB number of threads per block
* @param data input data
* @param rowids actual row ID mappings. It is of length nrows. If you want to
* skip this index lookup entirely, pass nullptr
* @param colids actual col ID mappings. It is of length ncols. If you want to
* skip this index lookup entirely, pass nullptr
* @param nrows number of rows of data to be worked upon. The actual rows of the
* input "data" can be bigger than this!
* @param ncols number of cols of data to be worked upon. The actual cols of the
* input "data" can be bigger than this!
* @param row_stride stride (in number of elements) between 2 adjacent columns
* @param globalmin final col-wise global minimum (size = ncols)
* @param globalmax final col-wise global maximum (size = ncols)
* @param sampledcols output sampled data. Pass nullptr if you don't need this
* @param stream cuda stream
* @note This method makes the following assumptions:
* 1. input and output matrices are assumed to be col-major
* 2. ncols is small enough to fit the whole of min/max values across all cols
* in shared memory
*/
template <typename T, int TPB = 512>
void minmax(const T* data,
            const unsigned* rowids,
            const unsigned* colids,
            int nrows,
            int ncols,
            int row_stride,
            T* globalmin,
            T* globalmax,
            T* sampledcols,
            cudaStream_t stream)
{
  // Thin wrapper: all the work happens in the detail implementation.
  detail::minmax<T, TPB>(data,
                         rowids,
                         colids,
                         nrows,
                         ncols,
                         row_stride,
                         globalmin,
                         globalmax,
                         sampledcols,
                         stream);
}
/**
* @defgroup stats_minmax Min/Max
* @{
*/
/**
 * @brief Computes min/max across every column of the input matrix, as well as
 * optionally allow to subsample based on the given row/col ID mapping vectors
 *
 * @tparam value_t Data type of input matrix element.
 * @tparam idx_t Index type of matrix extent.
 * @param[in] handle the raft handle
 * @param[in] data input data col-major of size [nrows, ncols], unless rowids or
 * colids length is smaller
 * @param[in] rowids optional row ID mappings of length nrows. If you want to
 * skip this index lookup entirely, pass std::nullopt
 * @param[in] colids optional col ID mappings of length ncols. If you want to
 * skip this index lookup entirely, pass std::nullopt
 * @param[out] globalmin final col-wise global minimum (size = ncols)
 * @param[out] globalmax final col-wise global maximum (size = ncols)
 * @param[out] sampledcols output sampled data. Pass std::nullopt if you don't need this
 * @note This method makes the following assumptions:
 * 1. input and output matrices are assumed to be col-major
 * 2. ncols is small enough to fit the whole of min/max values across all cols
 * in shared memory
 */
template <typename value_t, typename idx_t>
void minmax(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t, raft::col_major> data,
            std::optional<raft::device_vector_view<const unsigned, idx_t>> rowids,
            std::optional<raft::device_vector_view<const unsigned, idx_t>> colids,
            raft::device_vector_view<value_t, idx_t> globalmin,
            raft::device_vector_view<value_t, idx_t> globalmax,
            std::optional<raft::device_vector_view<value_t, idx_t>> sampledcols)
{
  auto nrows      = data.extent(0);
  auto ncols      = data.extent(1);
  auto row_stride = data.stride(1);
  // When a subsampling view is supplied, the effective extent shrinks to the
  // mapping's length (which must not exceed the matrix extent).
  const unsigned* rowids_ptr = nullptr;
  if (rowids) {
    rowids_ptr = rowids->data_handle();
    RAFT_EXPECTS(rowids->extent(0) <= nrows, "Rowids size is greater than nrows");
    nrows = rowids->extent(0);
  }
  const unsigned* colids_ptr = nullptr;
  if (colids) {
    colids_ptr = colids->data_handle();
    RAFT_EXPECTS(colids->extent(0) <= ncols, "Colids size is greater than ncols");
    ncols = colids->extent(0);
  }
  value_t* sampledcols_ptr = sampledcols ? sampledcols->data_handle() : nullptr;
  RAFT_EXPECTS(globalmin.extent(0) == ncols, "Size mismatch between globalmin and ncols");
  RAFT_EXPECTS(globalmax.extent(0) == ncols, "Size mismatch between globalmax and ncols");
  detail::minmax<value_t>(data.data_handle(),
                          rowids_ptr,
                          colids_ptr,
                          nrows,
                          ncols,
                          row_stride,
                          globalmin.data_handle(),
                          globalmax.data_handle(),
                          sampledcols_ptr,
                          resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_minmax
}; // namespace stats
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/stats/meanvar.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MEANVAR_H
#define __MEANVAR_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/meanvar.cuh>
namespace raft::stats {
/**
* @brief Compute mean and variance for each column of a given matrix.
*
* The operation is performed in a single sweep. Consider using it when you need to compute
* both mean and variance, or when you need to compute variance but don't have the mean.
* It's almost twice faster than running `mean` and `vars` sequentially, because all three
* kernels are memory-bound.
*
* @tparam Type the data type
* @tparam IdxType Integer type used for addressing
* @param [out] mean the output mean vector of size D
* @param [out] var the output variance vector of size D
* @param [in] data the input matrix of size [N, D]
* @param [in] D number of columns of data
* @param [in] N number of rows of data
* @param [in] sample whether to evaluate sample variance or not. In other words, whether to
* normalize the variance using N-1 or N, for true or false respectively.
* @param [in] rowMajor whether the input data is row- or col-major, for true or false respectively.
* @param [in] stream
*/
template <typename Type, typename IdxType = int>
void meanvar(Type* mean,
             Type* var,
             const Type* data,
             IdxType D,
             IdxType N,
             bool sample,
             bool rowMajor,
             cudaStream_t stream)
{
  // Single-sweep mean + variance; all the work happens in the detail kernel.
  detail::meanvar(mean, var, data, D, N, sample, rowMajor, stream);
}
/**
* @defgroup stats_mean_var Mean and Variance
* @{
*/
/**
* @brief Compute mean and variance for each column of a given matrix.
*
* The operation is performed in a single sweep. Consider using it when you need to compute
* both mean and variance, or when you need to compute variance but don't have the mean.
* It's almost twice faster than running `mean` and `vars` sequentially, because all three
* kernels are memory-bound.
*
* @tparam value_t the data type
* @tparam idx_t Integer type used for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix of size [N, D]
* @param[out] mean the output mean vector of size D
* @param[out] var the output variance vector of size D
* @param[in] sample whether to evaluate sample variance or not. In other words, whether to
* normalize the variance using N-1 or N, for true or false respectively.
*/
template <typename value_t, typename idx_t, typename layout_t>
void meanvar(raft::resources const& handle,
             raft::device_matrix_view<const value_t, idx_t, layout_t> data,
             raft::device_vector_view<value_t, idx_t> mean,
             raft::device_vector_view<value_t, idx_t> var,
             bool sample)
{
  // Resolve the layout at compile time; only row- and col-major are handled.
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major, "Data layout not supported");
  RAFT_EXPECTS(data.extent(1) == var.extent(0), "Size mismatch between data and var");
  RAFT_EXPECTS(mean.size() == var.size(), "Size mismatch between mean and var");
  RAFT_EXPECTS(mean.is_exhaustive(), "mean must be contiguous");
  RAFT_EXPECTS(var.is_exhaustive(), "var must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  detail::meanvar(mean.data_handle(),
                  var.data_handle(),
                  data.data_handle(),
                  data.extent(1),
                  data.extent(0),
                  sample,
                  is_row_major,
                  resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_mean_var
}; // namespace raft::stats
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/stats/stats_types.hpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cudart_utils.hpp>
namespace raft::stats {
/**
 * @ingroup stats_histogram
 * @{
 */
/**
 * @brief Types of supported histogram implementations
 */
enum HistType {
  /** shared mem atomics but with bins to be 1b int's */
  HistTypeSmemBits1 = 1,
  /** shared mem atomics but with bins to be 2b int's */
  HistTypeSmemBits2 = 2,
  /** shared mem atomics but with bins to be 4b int's */
  HistTypeSmemBits4 = 4,
  /** shared mem atomics but with bins to be 1B int's */
  HistTypeSmemBits8 = 8,
  /** shared mem atomics but with bins to be 2B int's */
  HistTypeSmemBits16 = 16,
  /** use only global atomics */
  HistTypeGmem,
  /** uses shared mem atomics to reduce global traffic */
  HistTypeSmem,
  /**
   * uses shared mem atomics with match_any intrinsic to further reduce shared
   * memory traffic. This can only be enabled on Volta and later architectures.
   * If one tries to enable this for older arch's, it will fall back to
   * `HistTypeSmem`.
   * @note This is to be used only when the input dataset leads to a lot of
   *       repetitions in a given warp, else, this algo can be much slower than
   *       `HistTypeSmem`!
   */
  HistTypeSmemMatchAny,
  /** builds a hashmap of active bins in shared mem */
  HistTypeSmemHash,
  /** decide at runtime the best algo for the given inputs */
  HistTypeAuto
};
/** @} */
/**
 * @ingroup stats_information_criterion
 * @{
 */
/**
 * @brief Supported types of information criteria
 *
 * AIC  - Akaike information criterion
 * AICc - AIC with a correction for small sample sizes
 * BIC  - Bayesian information criterion
 */
enum IC_Type { AIC, AICc, BIC };
/** @} */
}; // end namespace raft::stats
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/mean.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace raft {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
/**
 * @brief Kernel accumulating per-column sums of a row-major matrix.
 *
 * Grid layout: blockIdx.y tiles the columns in chunks of ColsPerBlk and
 * blockIdx.x tiles the rows. Per-block partial sums are built in shared
 * memory and then atomically added into `mu`, so `mu` must be
 * zero-initialized before launch; the caller divides by N (or N - 1)
 * afterwards.
 */
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL meanKernelRowMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  const int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxType thisColId           = threadIdx.x % ColsPerBlk;
  IdxType thisRowId           = threadIdx.x / ColsPerBlk;
  IdxType colId               = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
  IdxType rowId               = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
  Type thread_data            = Type(0);
  const IdxType stride        = RowsPerBlkPerIter * gridDim.x;
  // Grid-stride accumulation over the rows assigned to this thread; threads
  // mapped past the last column contribute zero.
  for (IdxType i = rowId; i < N; i += stride)
    thread_data += (colId < D) ? data[i * D + colId] : Type(0);
  __shared__ Type smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = Type(0);
  __syncthreads();
  raft::myAtomicAdd(smu + thisColId, thread_data);
  __syncthreads();
  // Bug fix: also guard on colId < D. When D is not a multiple of ColsPerBlk,
  // the last y-block maps some threads past the end of `mu` and the unguarded
  // atomicAdd wrote out of bounds (even though the added value was zero).
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
/**
 * @brief Kernel computing the column-wise mean of a col-major matrix.
 *
 * Launched with one block per column (blockIdx.x indexes the column) and TPB
 * threads per block; each block reduces its column with CUB and thread 0
 * writes the mean. Note: `D` is unused inside the kernel — the grid size
 * carries the column count. Normalization is always by N here.
 */
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL meanKernelColMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  typedef cub::BlockReduce<Type, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  Type thread_data = Type(0);
  // First element of this block's column in the col-major buffer.
  IdxType colStart = N * blockIdx.x;
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    IdxType idx = colStart + i;
    thread_data += data[idx];
  }
  // Block-wide sum; only thread 0 holds the valid aggregate after this call.
  Type acc = BlockReduce(temp_storage).Sum(thread_data);
  if (threadIdx.x == 0) { mu[blockIdx.x] = acc / N; }
}
/**
 * @brief Compute the column-wise mean of the input matrix.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used for addressing
 * @param mu the output mean vector of size D (device pointer)
 * @param data the input matrix of size [N, D] (device pointer)
 * @param D number of columns of data
 * @param N number of rows of data
 * @param sample when true, normalize by N - 1 instead of N
 * @param rowMajor whether the input data is row- or col-major
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int>
void mean(
  Type* mu, const Type* data, IdxType D, IdxType N, bool sample, bool rowMajor, cudaStream_t stream)
{
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk    = 32;
    static const int RowsPerBlk    = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // The row-major kernel accumulates with atomics, so mu must start at zero.
    RAFT_CUDA_TRY(cudaMemsetAsync(mu, 0, sizeof(Type) * D, stream));
    meanKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(mu, data, D, N);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
    raft::linalg::scalarMultiply(mu, mu, ratio, D, stream);
  } else {
    meanKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(mu, data, D, N);
    // Bug fix: meanKernelColMajor always normalizes by N, so previously the
    // `sample` flag was silently ignored on this path while the row-major
    // path divided by N - 1. Rescale so both layouts honor the contract.
    if (sample && N > 1) {
      Type ratio = Type(N) / Type(N - 1);
      raft::linalg::scalarMultiply(mu, mu, ratio, D, stream);
    }
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/cov.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/stats/mean_center.cuh>
namespace raft {
namespace stats {
namespace detail {
/**
 * @brief Compute covariance of the input matrix
 *
 * Mean operation is assumed to be performed on a given column.
 *
 * @tparam Type the data type
 * @param handle the raft handle (used to fetch the cublas handle)
 * @param covar the output covariance matrix of size [D, D]
 * @param data the input matrix (this will get mean-centered at the end!)
 * @param mu mean vector of the input matrix
 * @param D number of columns of data
 * @param N number of rows of data
 * @param sample whether to evaluate sample covariance or not. In other words,
 * whether to normalize the output using N-1 or N, for true or false,
 * respectively
 * @param rowMajor whether the input data is row or col major
 * @param stable whether to run the slower-but-numerically-stable version or not
 * @param stream cuda stream
 * @note if stable=true, then the input data will be mean centered after this
 * function returns!
 * @note stable=false is currently unimplemented and asserts.
 */
template <typename Type>
void cov(raft::resources const& handle,
         Type* covar,
         Type* data,
         const Type* mu,
         std::size_t D,
         std::size_t N,
         bool sample,
         bool rowMajor,
         bool stable,
         cudaStream_t stream)
{
  if (stable) {
    cublasHandle_t cublas_h = resource::get_cublas_handle(handle);
    // since mean operation is assumed to be along a given column, broadcast
    // must be along rows!
    raft::stats::meanCenter(data, data, mu, D, N, rowMajor, true, stream);
    // Normalization factor: sample covariance divides by N - 1.
    Type alpha = Type(1) / (sample ? Type(N - 1) : Type(N));
    Type beta  = Type(0);
    if (rowMajor) {
      // covar = alpha * data * data^T in cublas' column-major view, which is
      // the D x D gram matrix of the centered row-major data.
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_h,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_T,
                                                       D,
                                                       D,
                                                       N,
                                                       &alpha,
                                                       data,
                                                       D,
                                                       data,
                                                       D,
                                                       &beta,
                                                       covar,
                                                       D,
                                                       stream));
    } else {
      raft::linalg::gemm(
        handle, data, N, D, data, covar, D, D, CUBLAS_OP_T, CUBLAS_OP_N, alpha, beta, stream);
    }
  } else {
    ///@todo: implement this using cutlass + customized epilogue!
    ASSERT(false, "cov: Implement stable=false case!");
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/contingencyMatrix.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <cub/cub.cuh>
#include <math.h>
namespace raft {
namespace stats {
namespace detail {
/** Strategy used to build the contingency matrix, chosen from the label count. */
typedef enum {
  // no strategy selected (sentinel)
  IMPL_NONE,
  // per-block shared-memory atomics, flushed to global memory
  SMEM_ATOMICS,
  // one global atomic per sample
  GLOBAL_ATOMICS,
  // radix-sort the pairs first, then global atomics (better locality)
  SORT_AND_GATOMICS
} ContingencyMatrixImplType;
/**
 * @brief Kernel: one global atomicAdd per sample into the contingency matrix.
 *
 * Each thread handles one (groundTruth, predicted) pair; labels are shifted
 * by outIdxOffset (the minimum label) so matrix indices start at 0.
 * outMat is assumed zero-initialized and of size outMatWidth * outMatWidth.
 */
template <typename T, typename OutT = int>
RAFT_KERNEL devConstructContingencyMatrix(const T* groundTruth,
                                          const T* predicted,
                                          int nSamples,
                                          OutT* outMat,
                                          int outIdxOffset,
                                          int outMatWidth)
{
  int elementId = threadIdx.x + blockDim.x * blockIdx.x;
  if (elementId < nSamples) {
    T gt = groundTruth[elementId];
    T pd = predicted[elementId];
    // row = ground truth, col = prediction, both rebased to start at 0
    auto outputIdx = (gt - outIdxOffset) * outMatWidth + pd - outIdxOffset;
    raft::myAtomicAdd(outMat + outputIdx, OutT(1));
  }
}
/**
 * @brief Launch the global-atomics contingency-matrix kernel.
 *
 * Prefers L1 cache since the kernel only touches global memory via atomics.
 */
template <typename T, typename OutT = int>
void computeCMatWAtomics(const T* groundTruth,
                         const T* predictedLabel,
                         int nSamples,
                         OutT* outMat,
                         int outIdxOffset,
                         int outDimN,
                         cudaStream_t stream)
{
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(devConstructContingencyMatrix<T, OutT>, cudaFuncCachePreferL1));
  constexpr int threadsPerBlock = 128;
  const int numBlocks           = raft::ceildiv(nSamples, threadsPerBlock);
  devConstructContingencyMatrix<T, OutT><<<numBlocks, threadsPerBlock, 0, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Kernel: accumulate a block-private contingency matrix in shared
 * memory, then flush it to global memory with one atomic per cell.
 *
 * Requires dynamic shared memory of outMatWidth * outMatWidth * sizeof(OutT)
 * bytes, supplied at launch.
 */
template <typename T, typename OutT = int>
RAFT_KERNEL devConstructContingencyMatrixSmem(const T* groundTruth,
                                              const T* predicted,
                                              int nSamples,
                                              OutT* outMat,
                                              int outIdxOffset,
                                              int outMatWidth)
{
  extern __shared__ char smem[];
  auto* sMemMatrix = reinterpret_cast<OutT*>(smem);
  // Cooperatively zero the block-private matrix (shared memory is
  // uninitialized on entry).
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    sMemMatrix[smemIdx] = 0;
  }
  __syncthreads();
  int elementId = threadIdx.x + blockDim.x * blockIdx.x;
  if (elementId < nSamples) {
    T gt = groundTruth[elementId];
    T pd = predicted[elementId];
    auto outputIdx = (gt - outIdxOffset) * outMatWidth + pd - outIdxOffset;
    raft::myAtomicAdd(sMemMatrix + outputIdx, OutT(1));
  }
  __syncthreads();
  // Merge this block's partial counts into the global matrix.
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    raft::myAtomicAdd(outMat + smemIdx, sMemMatrix[smemIdx]);
  }
}
/**
 * @brief Launch the shared-memory-atomics contingency-matrix kernel.
 *
 * Each block keeps a private outDimN x outDimN matrix in dynamic shared
 * memory, so this path is only viable for small label counts.
 */
template <typename T, typename OutT = int>
void computeCMatWSmemAtomics(const T* groundTruth,
                             const T* predictedLabel,
                             int nSamples,
                             OutT* outMat,
                             int outIdxOffset,
                             int outDimN,
                             cudaStream_t stream)
{
  constexpr int threadsPerBlock = 128;
  const int numBlocks           = raft::ceildiv(nSamples, threadsPerBlock);
  const size_t smemBytes        = outDimN * outDimN * sizeof(OutT);
  devConstructContingencyMatrixSmem<T, OutT><<<numBlocks, threadsPerBlock, smemBytes, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Sort (groundTruth, predicted) pairs by label first, then build the
 * matrix with global atomics on the sorted data for better update locality.
 *
 * Workspace layout (all in one allocation): [sorted keys | sorted values |
 * cub temp storage], with the first two sections 256-byte aligned.
 */
template <typename T, typename OutT = int>
void contingencyMatrixWSort(const T* groundTruth,
                            const T* predictedLabel,
                            int nSamples,
                            OutT* outMat,
                            T minLabel,
                            T maxLabel,
                            void* workspace,
                            size_t workspaceSize,
                            cudaStream_t stream)
{
  T* outKeys           = reinterpret_cast<T*>(workspace);
  auto alignedBufferSz = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
  T* outValue          = reinterpret_cast<T*>((size_t)workspace + alignedBufferSz);
  void* pWorkspaceCub  = reinterpret_cast<void*>((size_t)workspace + 2 * alignedBufferSz);
  // Number of significant key bits to sort: just enough to cover maxLabel.
  auto bitsToSort = log2<int>(maxLabel);
  if (!raft::isPo2(maxLabel)) ++bitsToSort;
  // we dont really need perfect sorting, should get by with some sort of
  // binning-reordering operation
  ///@todo: future work - explore "efficient" custom binning kernels vs cub sort
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(pWorkspaceCub,
                                                workspaceSize,
                                                groundTruth,
                                                outKeys,
                                                predictedLabel,
                                                outValue,
                                                nSamples,
                                                0,
                                                bitsToSort,
                                                stream));
  auto outDimM_N = int(maxLabel - minLabel + 1);
  computeCMatWAtomics<T, OutT>(outKeys, outValue, nSamples, outMat, minLabel, outDimM_N, stream);
}
/**
 * @brief Pick the contingency-matrix strategy for a given matrix width.
 *
 * Heuristic: use shared-memory atomics if a per-block matrix fits in shared
 * memory, else global atomics if the matrix fits in L2, else sort + global
 * atomics.
 */
template <typename OutT = int>
ContingencyMatrixImplType getImplVersion(OutT outDimN)
{
  int currDevice = 0;
  RAFT_CUDA_TRY(cudaGetDevice(&currDevice));
  int l2CacheSize = 0;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&l2CacheSize, cudaDevAttrL2CacheSize, currDevice));
  auto maxSmemPerBlock = raft::getSharedMemPerBlock();
  // no way to query this from CUDA APIs, value for CC 7.0, 3.0
  int maxBlocksResidentPerSM = 16;
  // keeping 8 block per SM to get good utilization
  // can go higher but reduced L1 size degrades perf
  OutT upperLimitSmemAtomics =
    std::floor(std::sqrt(maxSmemPerBlock / (sizeof(OutT) * (maxBlocksResidentPerSM / 2))));
  OutT upperLimitL2Atomics = std::floor(std::sqrt(l2CacheSize / sizeof(OutT)));
  if (outDimN <= upperLimitSmemAtomics) return SMEM_ATOMICS;
  if (outDimN <= upperLimitL2Atomics) return GLOBAL_ATOMICS;
  return SORT_AND_GATOMICS;
}
/**
* @brief use this to allocate output matrix size
* size of matrix = (maxLabel - minLabel + 1)^2 * sizeof(int)
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param nSamples: number of elements in input array
* @param stream: cuda stream for execution
* @param minLabel: [out] calculated min value in input array
* @param maxLabel: [out] calculated max value in input array
*/
template <typename T>
void getInputClassCardinality(
  const T* groundTruth, const int nSamples, cudaStream_t stream, T& minLabel, T& maxLabel)
{
  // Single pass over the labels: thrust returns (min iterator, max iterator).
  auto dLabels  = thrust::device_pointer_cast(groundTruth);
  auto extrema  = thrust::minmax_element(thrust::cuda::par.on(stream), dLabels, dLabels + nSamples);
  minLabel      = *extrema.first;
  maxLabel      = *extrema.second;
}
/**
 * @brief Calculate workspace size for running contingency matrix calculations
 * @tparam T label type
 * @tparam OutT output matrix type
 * @param nSamples: number of elements in input array
 * @param groundTruth: device 1-d array for ground truth (num of rows)
 * @param stream: cuda stream for execution
 * @param minLabel: Optional, min value in input array (the T-max sentinel
 *        means "compute it from groundTruth")
 * @param maxLabel: Optional, max value in input array (same sentinel)
 * @return required workspace size in bytes (0 unless the sort-based
 *         implementation is selected)
 */
template <typename T, typename OutT = int>
size_t getContingencyMatrixWorkspaceSize(int nSamples,
                                         const T* groundTruth,
                                         cudaStream_t stream,
                                         T minLabel = std::numeric_limits<T>::max(),
                                         T maxLabel = std::numeric_limits<T>::max())
{
  size_t workspaceSize = 0;
  // below is a redundant computation - can be avoided
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  auto outDimN                          = OutT(maxLabel - minLabel + 1);
  ContingencyMatrixImplType implVersion = getImplVersion<OutT>(outDimN);
  if (implVersion == SORT_AND_GATOMICS) {
    // Only the sort-based path needs scratch: two 256-byte-aligned staging
    // buffers for the sorted (key, value) pairs plus cub's temp storage.
    void* pWorkspaceCub{};
    size_t tmpStorageBytes = 0;
    // no-op pointers to get workspace size
    T* pTmpUnused{};
    RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(
      pWorkspaceCub, tmpStorageBytes, pTmpUnused, pTmpUnused, pTmpUnused, pTmpUnused, nSamples));
    auto tmpStagingMemorySize = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
    tmpStagingMemorySize *= 2;
    workspaceSize = tmpStagingMemorySize + tmpStorageBytes;
  }
  return workspaceSize;
}
/**
 * @brief construct contingency matrix given input ground truth and prediction
 *        labels. Users should call function getInputClassCardinality to find
 *        and allocate memory for output. Similarly workspace requirements
 *        should be checked using function getContingencyMatrixWorkspaceSize
 * @tparam T label type
 * @tparam OutT output matrix type
 * @param groundTruth: device 1-d array for ground truth (num of rows)
 * @param predictedLabel: device 1-d array for prediction (num of columns)
 * @param nSamples: number of elements in input array
 * @param outMat: output buffer for contingency matrix
 * @param stream: cuda stream for execution
 * @param workspace: Optional, workspace memory allocation
 * @param workspaceSize: Optional, size of workspace memory
 * @param minLabel: Optional, min value in input ground truth array
 * @param maxLabel: Optional, max value in input ground truth array
 */
template <typename T, typename OutT = int>
void contingencyMatrix(const T* groundTruth,
                       const T* predictedLabel,
                       int nSamples,
                       OutT* outMat,
                       cudaStream_t stream,
                       void* workspace      = nullptr,
                       size_t workspaceSize = 0,
                       T minLabel           = std::numeric_limits<T>::max(),
                       T maxLabel           = std::numeric_limits<T>::max())
{
  // assumptions:
  // output is not at par with scikit learn - output will be square matrix
  // always with numRows = numColumns = numOfClassesInTrueLabel
  // it is also assumed that true labels are monotically increasing
  // if for some reason groundTruth completely skips some labels
  // eg: {0,1,2,5} instead of {0,1,2,3}.
  // Output matrix will still have empty rows for label value {3,4}
  // Users can use "make_monotonic" to convert their discontinuous input label
  // range to a monotonically increasing one //
  // this also serves as way to measure co-occurrence/joint counts for NLP tasks which
  // can be used to then compute pointwise mutual information and mutual information
  //
  // The T-max sentinel on min/max label means "compute from groundTruth".
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  auto outDimM_N = OutT(maxLabel - minLabel + 1);
  // All implementations accumulate with atomics, so the output must be zeroed.
  RAFT_CUDA_TRY(cudaMemsetAsync(outMat, 0, sizeof(OutT) * outDimM_N * outDimM_N, stream));
  ContingencyMatrixImplType implVersion = getImplVersion<OutT>(outDimM_N);
  switch (implVersion) {
    case SMEM_ATOMICS:
      // smem atomics and then single global mem atomics only works
      // when all label count can fit in smem for a block
      // helps when GLOBAL_ATOMICS performance blocked by atomic update
      // serialization -when very less labels ~10 labels
      computeCMatWSmemAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    case GLOBAL_ATOMICS:
      // launch kernel - global atomic ops per (groundTruth,predictedValue) pair
      computeCMatWAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    // more L2 thrashing if atomic OPs land in completely different mem
    // segment - when more labels
    case SORT_AND_GATOMICS:
      contingencyMatrixWSort<T, OutT>(groundTruth,
                                      predictedLabel,
                                      nSamples,
                                      outMat,
                                      minLabel,
                                      maxLabel,
                                      workspace,
                                      workspaceSize,
                                      stream);
      break;
    case IMPL_NONE: break;
  }
}
}; // namespace detail
}; // namespace stats
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/trustworthiness_score.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance.cuh>
#include <raft/matrix/col_wise_sort.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#define N_THREADS 512
namespace raft {
namespace stats {
namespace detail {
/**
 * @brief Build the lookup table
 * @param[out] lookup_table: Lookup table giving nearest neighbor order
 *             of pairwise distance calculations given sample index
 * @param[in] X_ind: Sorted indexes of pairwise distance calculations of X
 * @param n: Number of samples
 * @param work: Number of elements to consider
 */
RAFT_KERNEL build_lookup_table(int* lookup_table, const int* X_ind, int n, int work)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= work) return;  // guard the grid tail
  int sample_idx = i / n;  // which query row this element belongs to
  int nn_idx     = i % n;  // rank of this element within its sorted row
  int idx        = X_ind[i];
  // Invert the sort: lookup_table[sample][original column] = neighbor rank.
  lookup_table[(sample_idx * n) + idx] = nn_idx;
}
/**
 * @brief Compute the rank contribution for the trustworthiness score
 * @param[out] rank: Resulting rank (accumulated atomically across threads)
 * @param[in] lookup_table: Lookup table giving nearest neighbor order
 *            of pairwise distance calculations given sample index
 * @param[in] emb_ind: Indexes of KNN on embeddings
 * @param n: Number of samples
 * @param n_neighbors: Number of neighbors considered by trustworthiness score
 * @param work: Batch to consider (to do it at once use n * n_neighbors)
 */
template <typename knn_index_t>
RAFT_KERNEL compute_rank(double* rank,
                         const int* lookup_table,
                         const knn_index_t* emb_ind,
                         int n,
                         int n_neighbors,
                         int work)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= work) return;  // guard the grid tail
  int sample_idx = i / n_neighbors;
  // Map this embedding-space neighbor back to its rank in the original space.
  knn_index_t emb_nn_ind = emb_ind[i];
  int r   = lookup_table[(sample_idx * n) + emb_nn_ind];
  int tmp = r - n_neighbors + 1;
  // Only neighbors ranked beyond n_neighbors in the original space add a
  // penalty; fold it into the shared accumulator atomically.
  if (tmp > 0) raft::myAtomicAdd<double>(rank, tmp);
}
/**
 * @brief Compute a kNN and return the indices of the nearest neighbors
 * @param h Raft handle
 * @param[in] input Input matrix containing the dataset (also used as queries)
 * @param n Number of samples
 * @param d Number of features
 * @param n_neighbors number of neighbors
 * @param[out] indices KNN indexes
 * @param[out] distances KNN distances
 */
template <raft::distance::DistanceType distance_type, typename math_t>
void run_knn(const raft::resources& h,
             math_t* input,
             int n,
             int d,
             int n_neighbors,
             int64_t* indices,
             math_t* distances)
{
  // The dataset is also the query set: one "part" containing all n rows.
  std::vector<math_t*> ptrs(1);
  std::vector<int> sizes(1);
  ptrs[0]  = input;
  sizes[0] = n;
  // Bug fix: instantiate with math_t instead of hard-coded float, so the
  // wrapper compiles and works for any element type this template is used
  // with (input/distances are math_t*).
  raft::spatial::knn::brute_force_knn<int64_t, math_t, int>(h,
                                                            ptrs,
                                                            sizes,
                                                            d,
                                                            input,
                                                            n,
                                                            indices,
                                                            distances,
                                                            n_neighbors,
                                                            true,
                                                            true,
                                                            nullptr,
                                                            distance_type);
}
/**
* @brief Compute the trustworthiness score
* @param h Raft handle
* @param X[in]: Data in original dimension
* @param X_embedded[in]: Data in target dimension (embedding)
* @param n: Number of samples
* @param m: Number of features in high/original dimension
* @param d: Number of features in low/embedded dimension
* @param n_neighbors Number of neighbors considered by trustworthiness score
* @param batchSize Batch size
* @return Trustworthiness score
*/
template <typename math_t, raft::distance::DistanceType distance_type>
double trustworthiness_score(const raft::resources& h,
                             const math_t* X,
                             math_t* X_embedded,
                             int n,
                             int m,
                             int d,
                             int n_neighbors,
                             int batchSize = 512)
{
  cudaStream_t stream = resource::get_cuda_stream(h);
  // kNN in the embedded space keeps n_neighbors + 1 entries per sample
  // (the extra one presumably accounts for the point itself — TODO confirm).
  const int KNN_ALLOC = n * (n_neighbors + 1);
  rmm::device_uvector<int64_t> emb_ind(KNN_ALLOC, stream);
  rmm::device_uvector<math_t> emb_dist(KNN_ALLOC, stream);
  run_knn<distance_type>(h, X_embedded, n, d, n_neighbors + 1, emb_ind.data(), emb_dist.data());
  // Per-batch buffers: curBatchSize x n pairwise distances / sorted indices.
  const int PAIRWISE_ALLOC = batchSize * n;
  rmm::device_uvector<int> X_ind(PAIRWISE_ALLOC, stream);
  rmm::device_uvector<math_t> X_dist(PAIRWISE_ALLOC, stream);
  rmm::device_uvector<int> lookup_table(PAIRWISE_ALLOC, stream);
  // Host-side accumulator of the rank penalty summed over all batches.
  double t = 0.0;
  rmm::device_scalar<double> t_dbuf(stream);
  int toDo = n;
  while (toDo > 0) {
    int curBatchSize = min(toDo, batchSize);
    // Takes at most batchSize vectors at a time
    raft::distance::pairwise_distance(
      h, &X[(n - toDo) * m], X, X_dist.data(), curBatchSize, n, m, distance_type);
    // Two-phase sort: first call queries the workspace size, second performs
    // the sort if a workspace is required.
    size_t colSortWorkspaceSize = 0;
    bool bAllocWorkspace = false;
    raft::matrix::sort_cols_per_row(X_dist.data(),
                                    X_ind.data(),
                                    curBatchSize,
                                    n,
                                    bAllocWorkspace,
                                    nullptr,
                                    colSortWorkspaceSize,
                                    stream);
    if (bAllocWorkspace) {
      rmm::device_uvector<char> sortColsWorkspace(colSortWorkspaceSize, stream);
      raft::matrix::sort_cols_per_row(X_dist.data(),
                                      X_ind.data(),
                                      curBatchSize,
                                      n,
                                      bAllocWorkspace,
                                      sortColsWorkspace.data(),
                                      colSortWorkspaceSize,
                                      stream);
    }
    // Invert the sorted index rows into a rank lookup table.
    int work = curBatchSize * n;
    int n_blocks = raft::ceildiv(work, N_THREADS);
    build_lookup_table<<<n_blocks, N_THREADS, 0, stream>>>(
      lookup_table.data(), X_ind.data(), n, work);
    RAFT_CUDA_TRY(cudaMemsetAsync(t_dbuf.data(), 0, sizeof(double), stream));
    // Accumulate rank penalties for this batch's embedded-space neighbors.
    work = curBatchSize * (n_neighbors + 1);
    n_blocks = raft::ceildiv(work, N_THREADS);
    compute_rank<<<n_blocks, N_THREADS, 0, stream>>>(
      t_dbuf.data(),
      lookup_table.data(),
      &emb_ind.data()[(n - toDo) * (n_neighbors + 1)],
      n,
      n_neighbors + 1,
      work);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // device_scalar::value() synchronizes the stream before reading.
    t += t_dbuf.value(stream);
    toDo -= curBatchSize;
  }
  // Normalize the accumulated penalty into the final trustworthiness score.
  t = 1.0 - ((2.0 / ((n * n_neighbors) * ((2.0 * n) - (3.0 * n_neighbors) - 1.0))) * t);
  return t;
}
} // namespace detail
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/v_measure.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file v_measure.cuh
*/
#include <raft/stats/homogeneity_score.cuh>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Function to calculate the v-measure between two clusters
*
* @param truthClusterArray: the array of truth classes of type T
* @param predClusterArray: the array of predicted classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
* @param beta: v_measure parameter
*/
template <typename T>
double v_measure(const T* truthClusterArray,
                 const T* predClusterArray,
                 int size,
                 T lowerLabelRange,
                 T upperLabelRange,
                 cudaStream_t stream,
                 double beta = 1.0)
{
  // homogeneity(truth, pred); completeness is homogeneity with the
  // arguments swapped.
  const double homogeneity = raft::stats::homogeneity_score(
    truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  const double completeness = raft::stats::homogeneity_score(
    predClusterArray, truthClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  // Guard the degenerate case before taking the weighted harmonic mean.
  if (homogeneity + completeness == 0.0) { return 0.0; }
  return (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness);
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/rand_index.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file rand_index.cuh
* @todo TODO(Ganesh Venkataramana):
* <pre>
* The below rand_index calculation implementation is a Brute force one that uses
(nElements*nElements) threads (2 dimensional grids and blocks)
* For small datasets, this will suffice; but for larger ones, work done by the threads increase
dramatically.
* A more mathematically intensive implementation that uses half the above threads can be done,
which will prove to be more efficient for larger datasets
* the idea is as follows:
* instead of 2D block and grid configuration with a total of (nElements*nElements) threads (where
each (i,j) through these threads represent an ordered pair selection of 2 data points), a 1D block
and grid configuration with a total of (nElements*(nElements))/2 threads (each thread index
represents an element part of the set of unordered pairwise selections from the dataset (nChoose2))
* In this setup, one has to generate a one-to-one mapping between this 1D thread index (for each
kernel) and the unordered pair of chosen datapoints.
* More specifically, thread0-> {dataPoint1, dataPoint0}, thread1-> {dataPoint2, dataPoint0},
thread2-> {dataPoint2, dataPoint1} ... thread((nElements*(nElements))/2 - 1)->
{dataPoint(nElements-1),dataPoint(nElements-2)}
* say ,
* threadNum: thread index | threadNum = threadIdx.x + BlockIdx.x*BlockDim.x,
* i : index of dataPoint i
* j : index of dataPoint j
* then the mapping is as follows:
* i = ceil((-1 + sqrt(1 + 8*(1 + threadNum)))/2) = floor((1 + sqrt(1 + 8*threadNum))/2)
* j = threadNum - i(i-1)/2
* after obtaining the the pair of datapoints, calculation of rand index is the same as done in
this implementation
* Caveat: since the kernel implementation involves use of emulated sqrt() operations:
* the number of instructions executed per kernel is ~40-50 times
* as the O(nElements*nElements) increase beyond the floating point limit, floating point
inaccuracies occur, and hence the above floor(...) != ceil(...)
* </pre>
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/core/interruptible.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief kernel to calculate the values of a and b
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points
* @param a: number of pairs of points that both the clusters have classified the same
* @param b: number of pairs of points that both the clusters have classified differently
*/
template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y>
RAFT_KERNEL computeTheNumerator(
  const T* firstClusterArray, const T* secondClusterArray, uint64_t size, uint64_t* a, uint64_t* b)
{
  // calculating the indices of pairs of datapoints compared by the current thread
  uint64_t j = threadIdx.x + blockIdx.x * blockDim.x;
  uint64_t i = threadIdx.y + blockIdx.y * blockDim.y;
  // thread-local variables to count a and b
  uint64_t myA = 0, myB = 0;
  // j < i restricts work to the strict lower triangle, i.e. each unordered
  // pair is counted exactly once.
  if (i < size && j < size && j < i) {
    // checking if the pair have been classified the same by both the clusters
    if (firstClusterArray[i] == firstClusterArray[j] &&
        secondClusterArray[i] == secondClusterArray[j]) {
      ++myA;
    }
    // checking if the pair have been classified differently by both the clusters
    else if (firstClusterArray[i] != firstClusterArray[j] &&
             secondClusterArray[i] != secondClusterArray[j]) {
      ++myB;
    }
  }
  // specialize blockReduce for a 2D block of 1024 threads of type uint64_t
  typedef cub::BlockReduce<uint64_t, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
    BlockReduce;
  // Allocate shared memory for blockReduce
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // summing up thread-local counts specific to a block
  // (temp_storage is reused for both reductions, hence the barriers between)
  myA = BlockReduce(temp_storage).Sum(myA);
  __syncthreads();
  myB = BlockReduce(temp_storage).Sum(myB);
  __syncthreads();
  // executed once per block
  if (threadIdx.x == 0 && threadIdx.y == 0) {
    raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)a, myA);
    raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)b, myB);
  }
}
/**
* @brief Function to calculate RandIndex
* <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a>
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points of type uint64_t
* @param stream: the cudaStream object
*/
template <typename T>
double compute_rand_index(const T* firstClusterArray,
                          const T* secondClusterArray,
                          uint64_t size,
                          cudaStream_t stream)
{
  // rand index for size less than 2 is not defined
  ASSERT(size >= 2, "Rand Index for size less than 2 not defined!");
  // allocating and initializing memory for a and b in the GPU
  // (arr_buf[0] = a, arr_buf[1] = b)
  rmm::device_uvector<uint64_t> arr_buf(2, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(arr_buf.data(), 0, 2 * sizeof(uint64_t), stream));
  // kernel configuration
  // NOTE(review): grid dims go through ceildiv<int> — very large `size`
  // would narrow/exceed grid limits; confirm expected input bounds.
  static const int BLOCK_DIM_Y = 16, BLOCK_DIM_X = 16;
  dim3 numThreadsPerBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
  dim3 numBlocks(raft::ceildiv<int>(size, numThreadsPerBlock.x),
                 raft::ceildiv<int>(size, numThreadsPerBlock.y));
  // calling the kernel
  computeTheNumerator<T, BLOCK_DIM_X, BLOCK_DIM_Y><<<numBlocks, numThreadsPerBlock, 0, stream>>>(
    firstClusterArray, secondClusterArray, size, arr_buf.data(), arr_buf.data() + 1);
  // synchronizing and updating the calculated values of a and b from device to host
  uint64_t ab_host[2] = {0};
  raft::update_host(ab_host, arr_buf.data(), 2, stream);
  raft::interruptible::synchronize(stream);
  // error handling
  RAFT_CUDA_TRY(cudaGetLastError());
  // denominator
  uint64_t nChooseTwo = size * (size - 1) / 2;
  // calculating the rand_index
  return (double)(((double)(ab_host[0] + ab_host[1])) / (double)nChooseTwo);
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/adjusted_rand_index.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file adjusted_rand_index.cuh
* @brief The adjusted Rand index is the corrected-for-chance version of the Rand index.
* Such a correction for chance establishes a baseline by using the expected similarity
* of all pair-wise comparisons between clusterings specified by a random model.
*/
#pragma once
#include "contingencyMatrix.cuh"
#include <cub/cub.cuh>
#include <math.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Lambda to calculate the number of unordered pairs in a given input
*
* @tparam Type: Data type of the input
* @param in: the input to the functional mapping
* @param i: the indexing(not used in this case)
*/
template <typename Type>
struct nCTwo {
  // Computes n-choose-2 == in*(in-1)/2. The even factor is halved first
  // (via shift) so the intermediate product cannot overflow the way a naive
  // in*(in-1) would; `i` is an unused index required by the map interface.
  HDI Type operator()(Type in, int i = 0)
  {
    return in % 2 ? ((in - 1) >> 1) * in : (in >> 1) * (in - 1);
  }
};
template <typename DataT, typename IdxT>
struct Binner {
  // Maps a label to a zero-based histogram bin by offsetting with the
  // smallest label; row/col are ignored.
  // NOTE(review): appears unused in this file — countUnique below uses an
  // equivalent device lambda; confirm before removing.
  Binner(DataT minL) : minLabel(minL) {}
  DI int operator()(DataT val, IdxT row, IdxT col) { return int(val - minLabel); }
 private:
  DataT minLabel;
};  // struct Binner
/**
* @brief Function to count the number of unique elements in the input array
*
* @tparam T data-type for input arrays
*
* @param[in] arr input array [on device] [len = size]
* @param[in] size the size of the input array
* @param[out] minLabel the lower bound of the range of labels
* @param[out] maxLabel the upper bound of the range of labels
* @param[in] stream cuda stream
*
* @return the number of unique elements in the array
*/
template <typename T>
int countUnique(const T* arr, int size, T& minLabel, T& maxLabel, cudaStream_t stream)
{
  // Find the label range on the device (blocking thrust call).
  auto ptr = thrust::device_pointer_cast(arr);
  auto minmax = thrust::minmax_element(thrust::cuda::par.on(stream), ptr, ptr + size);
  minLabel = *minmax.first;
  maxLabel = *minmax.second;
  auto totalLabels = int(maxLabel - minLabel + 1);
  rmm::device_uvector<int> labelCounts(totalLabels, stream);
  rmm::device_scalar<int> nUniq(stream);
  // Histogram over the [minLabel, maxLabel] range; the lambda shifts labels
  // to zero-based bins.
  raft::stats::histogram<T, int>(
    raft::stats::HistTypeAuto,
    labelCounts.data(),
    totalLabels,
    arr,
    size,
    1,
    stream,
    [minLabel] __device__(T val, int row, int col) { return int(val - minLabel); });
  // A label is "present" iff its bin count is non-zero; summing the
  // indicator gives the number of distinct labels.
  raft::linalg::mapThenSumReduce<int>(
    nUniq.data(),
    totalLabels,
    [] __device__(const T& val) { return val != 0; },
    stream,
    labelCounts.data());
  // device_scalar::value() synchronizes the stream before reading.
  auto numUniques = nUniq.value(stream);
  return numUniques;
}
/**
* @brief Function to calculate Adjusted RandIndex as described
* <a href="https://en.wikipedia.org/wiki/Rand_index">here</a>
* @tparam T data-type for input label arrays
* @tparam MathT integral data-type used for computing n-choose-r
* @param firstClusterArray: the array of classes
* @param secondClusterArray: the array of classes
* @param size: the size of the data points of type int
* @param stream: the cudaStream object
*/
template <typename T, typename MathT = int>
double compute_adjusted_rand_index(const T* firstClusterArray,
                                   const T* secondClusterArray,
                                   int size,
                                   cudaStream_t stream)
{
  ASSERT(size >= 2, "Rand Index for size less than 2 not defined!");
  // Determine the combined label range of both clusterings.
  T minFirst, maxFirst, minSecond, maxSecond;
  auto nUniqFirst = countUnique(firstClusterArray, size, minFirst, maxFirst, stream);
  auto nUniqSecond = countUnique(secondClusterArray, size, minSecond, maxSecond, stream);
  auto lowerLabelRange = std::min(minFirst, minSecond);
  auto upperLabelRange = std::max(maxFirst, maxSecond);
  auto nClasses = upperLabelRange - lowerLabelRange + 1;
  // degenerate case of single cluster or clusters each with just one element
  if (nUniqFirst == nUniqSecond) {
    if (nUniqFirst == 1 || nUniqFirst == size) return 1.0;
  }
  // Build the (nClasses x nClasses) contingency matrix of the two labelings.
  auto nUniqClasses = MathT(nClasses);
  rmm::device_uvector<MathT> dContingencyMatrix(nUniqClasses * nUniqClasses, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    dContingencyMatrix.data(), 0, nUniqClasses * nUniqClasses * sizeof(MathT), stream));
  auto workspaceSz = getContingencyMatrixWorkspaceSize<T, MathT>(
    size, firstClusterArray, stream, lowerLabelRange, upperLabelRange);
  rmm::device_uvector<char> workspaceBuff(workspaceSz, stream);
  contingencyMatrix<T, MathT>(firstClusterArray,
                              secondClusterArray,
                              size,
                              dContingencyMatrix.data(),
                              stream,
                              workspaceBuff.data(),
                              workspaceSz,
                              lowerLabelRange,
                              upperLabelRange);
  // a/b hold row/column marginals; the scalars hold the three nC2 sums.
  rmm::device_uvector<MathT> a(nUniqClasses, stream);
  rmm::device_uvector<MathT> b(nUniqClasses, stream);
  rmm::device_scalar<MathT> d_aCTwoSum(stream);
  rmm::device_scalar<MathT> d_bCTwoSum(stream);
  rmm::device_scalar<MathT> d_nChooseTwoSum(stream);
  MathT h_aCTwoSum, h_bCTwoSum, h_nChooseTwoSum;
  RAFT_CUDA_TRY(cudaMemsetAsync(a.data(), 0, nUniqClasses * sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(b.data(), 0, nUniqClasses * sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_aCTwoSum.data(), 0, sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_bCTwoSum.data(), 0, sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_nChooseTwoSum.data(), 0, sizeof(MathT), stream));
  // calculating the sum of NijC2
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(d_nChooseTwoSum.data(),
                                                      nUniqClasses * nUniqClasses,
                                                      nCTwo<MathT>(),
                                                      stream,
                                                      dContingencyMatrix.data(),
                                                      dContingencyMatrix.data());
  // calculating the row-wise sums
  raft::linalg::reduce<MathT, MathT>(
    a.data(), dContingencyMatrix.data(), nUniqClasses, nUniqClasses, 0, true, true, stream);
  // calculating the column-wise sums
  raft::linalg::reduce<MathT, MathT>(
    b.data(), dContingencyMatrix.data(), nUniqClasses, nUniqClasses, 0, true, false, stream);
  // calculating the sum of number of unordered pairs for every element in a
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(
    d_aCTwoSum.data(), nUniqClasses, nCTwo<MathT>(), stream, a.data(), a.data());
  // calculating the sum of number of unordered pairs for every element of b
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(
    d_bCTwoSum.data(), nUniqClasses, nCTwo<MathT>(), stream, b.data(), b.data());
  // updating in the host memory
  raft::update_host(&h_nChooseTwoSum, d_nChooseTwoSum.data(), 1, stream);
  raft::update_host(&h_aCTwoSum, d_aCTwoSum.data(), 1, stream);
  raft::update_host(&h_bCTwoSum, d_bCTwoSum.data(), 1, stream);
  // update_host is asynchronous: wait for the copies to complete before the
  // host-side values are read below (the sibling rand_index/kl_divergence
  // helpers synchronize here too; previously this was missing).
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  // calculating the ARI
  auto nChooseTwo = double(size) * double(size - 1) / 2.0;
  auto expectedIndex = double(h_aCTwoSum) * double(h_bCTwoSum) / double(nChooseTwo);
  auto maxIndex = (double(h_bCTwoSum) + double(h_aCTwoSum)) / 2.0;
  auto index = double(h_nChooseTwoSum);
  // ARI = (index - expected) / (max - expected); 0 when the denominator vanishes.
  if (maxIndex - expectedIndex)
    return (index - expectedIndex) / (maxIndex - expectedIndex);
  else
    return 0;
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/kl_divergence.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file kl_divergence.cuh
* @brief The KL divergence tells us how well the probability distribution Q AKA candidatePDF
* approximates the probability distribution P AKA modelPDF.
*/
#pragma once
#include <math.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief the KL Diverence mapping function
*
* @tparam Type: Data type of the input
* @param modelPDF: the model probability density function of type DataT
* @param candidatePDF: the candidate probability density function of type DataT
*/
template <typename Type>
struct KLDOp {
  // Pointwise KL contribution: p * (log p - log q); by convention the
  // contribution is 0 wherever the model PDF p is 0.
  // NOTE(review): q == 0 with p != 0 yields log(0) = -inf and hence an
  // infinite contribution — callers presumably guarantee q > 0 wherever
  // p > 0; confirm.
  HDI Type operator()(Type modelPDF, Type candidatePDF)
  {
    if (modelPDF == 0.0)
      return 0;
    else
      return modelPDF * (log(modelPDF) - log(candidatePDF));
  }
};
/**
* @brief Function to calculate KL Divergence
* <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL
* Divergence</a>
*
* @tparam DataT: Data type of the input array
* @param modelPDF: the model array of probability density functions of type DataT
* @param candidatePDF: the candidate array of probability density functions of type DataT
* @param size: the size of the data points of type int
* @param stream: the cudaStream object
*/
template <typename DataT>
DataT kl_divergence(const DataT* modelPDF, const DataT* candidatePDF, int size, cudaStream_t stream)
{
  // Accumulate sum_i p_i * (log p_i - log q_i) on the device, then copy the
  // scalar result back to the host.
  rmm::device_scalar<DataT> d_result(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_result.data(), 0, sizeof(DataT), stream));
  raft::linalg::mapThenSumReduce<DataT, KLDOp<DataT>, size_t, 256, const DataT*>(
    d_result.data(), static_cast<size_t>(size), KLDOp<DataT>(), stream, modelPDF, candidatePDF);
  DataT h_result;
  raft::update_host(&h_result, d_result.data(), 1, stream);
  // The copy above is asynchronous; wait before reading the host value.
  raft::interruptible::synchronize(stream);
  return h_result;
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/histogram.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/stats/stats_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/seive.hpp>
#include <raft/util/vectorized.cuh>
#include <stdint.h>
// This file is a shameless amalgamation of independent works done by
// Lars Nyland and Andy Adinets
///@todo: add cub's histogram as another option
namespace raft {
namespace stats {
namespace detail {
/** Default mapper which just returns the value of the data itself */
template <typename DataT, typename IdxT>
struct IdentityBinner {
  // Bin id is the (int-truncated) data value itself; row/col are ignored.
  DI int operator()(DataT val, IdxT row, IdxT col) { return int(val); }
};
static const int ThreadsPerBlock = 256;
template <typename IdxT, int VecLen>
dim3 computeGridDim(IdxT nrows, IdxT ncols, const void* kernel)
{
  // One grid column (y) per histogram; cap the x-dimension at the number of
  // blocks the device can keep resident at full occupancy.
  int blocksPerSm = 0;
  RAFT_CUDA_TRY(
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, kernel, ThreadsPerBlock, 0));
  const int deviceBlocks = blocksPerSm * raft::getMultiProcessorCount();
  // Each thread consumes VecLen elements per pass.
  int xBlocks = raft::ceildiv<int>(VecLen ? nrows / VecLen : nrows, ThreadsPerBlock);
  // for cases when there aren't a lot of blocks for computing one histogram
  xBlocks = std::min(xBlocks, deviceBlocks);
  return dim3(xBlocks, ncols);
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, typename CoreOp>
DI void histCoreOp(const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner, CoreOp op, IdxT col)
{
  // Column-major base offset of this histogram's input column.
  IdxT offset = col * nrows;
  auto bdim = IdxT(blockDim.x);
  IdxT tid = threadIdx.x + bdim * blockIdx.x;
  tid *= VecLen;
  IdxT stride = bdim * gridDim.x * VecLen;
  // Round the trip count up to a multiple of the stride so that EVERY thread
  // executes the same number of iterations — this lets `op` contain
  // block-wide barriers (see the hash-table kernel). Out-of-range rows are
  // still passed to `op`, which must guard on row >= nrows.
  int nCeil = raft::alignTo<int>(nrows, stride);
  typedef raft::TxN_t<DataT, VecLen> VecType;
  VecType a;
  for (auto i = tid; i < nCeil; i += stride) {
    // Vectorized load of VecLen elements; tail iterations keep the previous
    // (stale) vector, which is harmless because `op` ignores rows >= nrows.
    if (i < nrows) { a.load(data, offset + i); }
#pragma unroll
    for (int j = 0; j < VecLen; ++j) {
      int binId = binner(a.val.data[j], i + j, col);
      op(binId, i + j, col);
    }
  }
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
RAFT_KERNEL gmemHistKernel(int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  // Histogram via atomics straight into global memory. On SM70+ the warp
  // first groups lanes that hit the same bin (match_any) so a single leader
  // lane issues one atomic per distinct bin instead of one per lane.
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    // Tail rows beyond nrows are fed in by histCoreOp; skip them.
    if (row >= nrows) return;
    auto binOffset = col * nbins;
#if __CUDA_ARCH__ < 700
    raft::myAtomicAdd(bins + binOffset + binId, 1);
#else
    auto amask = __activemask();
    auto mask = __match_any_sync(amask, binId);
    auto leader = __ffs(mask) - 1;
    // Lowest participating lane of each group adds the group's full count.
    if (raft::laneId() == leader) { raft::myAtomicAdd(bins + binOffset + binId, __popc(mask)); }
#endif  // __CUDA_ARCH__
  };
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, blockIdx.y);
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void gmemHist(int* bins,
              IdxT nbins,
              const DataT* data,
              IdxT nrows,
              IdxT ncols,
              BinnerOp binner,
              cudaStream_t stream)
{
  // Launcher for the global-memory-atomics variant: no shared memory, one
  // grid column per output histogram.
  const void* kernelPtr = (const void*)gmemHistKernel<DataT, BinnerOp, IdxT, VecLen>;
  const dim3 grid       = computeGridDim<IdxT, VecLen>(nrows, ncols, kernelPtr);
  gmemHistKernel<DataT, BinnerOp, IdxT, VecLen>
    <<<grid, ThreadsPerBlock, 0, stream>>>(bins, data, nrows, nbins, binner);
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, bool UseMatchAny>
RAFT_KERNEL smemHistKernel(int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  // One full-width shared-memory counter per bin (size set at launch).
  extern __shared__ unsigned sbins[];
  // Cooperatively zero the counters before use.
  for (auto i = threadIdx.x; i < nbins; i += blockDim.x) {
    sbins[i] = 0;
  }
  __syncthreads();
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    // Tail rows beyond nrows come from histCoreOp's padded loop; skip them.
    if (row >= nrows) return;
#if __CUDA_ARCH__ < 700
    raft::myAtomicAdd<unsigned int>(sbins + binId, 1);
#else
    // Optional warp aggregation (SM70+): one atomic per distinct bin id
    // per warp, issued by the lowest participating lane.
    if (UseMatchAny) {
      auto amask = __activemask();
      auto mask = __match_any_sync(amask, binId);
      auto leader = __ffs(mask) - 1;
      if (raft::laneId() == leader) {
        raft::myAtomicAdd<unsigned int>(sbins + binId, __popc(mask));
      }
    } else {
      raft::myAtomicAdd<unsigned int>(sbins + binId, 1);
    }
#endif  // __CUDA_ARCH__
  };
  IdxT col = blockIdx.y;
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  // All shared-memory updates must land before the flush below.
  __syncthreads();
  // Flush this block's partial histogram into the global bins.
  auto binOffset = col * nbins;
  for (auto i = threadIdx.x; i < nbins; i += blockDim.x) {
    auto val = sbins[i];
    if (val > 0) { raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binOffset + i, val); }
  }
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, bool UseMatchAny>
void smemHist(int* bins,
              IdxT nbins,
              const DataT* data,
              IdxT nrows,
              IdxT ncols,
              BinnerOp binner,
              cudaStream_t stream)
{
  // Launcher for the shared-memory variant: one unsigned counter per bin.
  const void* kernelPtr =
    (const void*)smemHistKernel<DataT, BinnerOp, IdxT, VecLen, UseMatchAny>;
  const dim3 grid   = computeGridDim<IdxT, VecLen>(nrows, ncols, kernelPtr);
  const size_t smem = nbins * sizeof(unsigned);
  smemHistKernel<DataT, BinnerOp, IdxT, VecLen, UseMatchAny>
    <<<grid, ThreadsPerBlock, smem, stream>>>(bins, data, nrows, nbins, binner);
}
template <unsigned _BIN_BITS>
struct BitsInfo {
  // Bits allotted to each packed bin counter.
  static unsigned const BIN_BITS = _BIN_BITS;
  // Bits in one shared-memory word.
  static unsigned const WORD_BITS = sizeof(unsigned) * 8;
  // Number of bin counters packed into one word.
  static unsigned const WORD_BINS = WORD_BITS / BIN_BITS;
  // Mask extracting a single (right-shifted) bin counter from a word.
  static unsigned const BIN_MASK = (1 << BIN_BITS) - 1;
};
template <unsigned BIN_BITS>
DI void incrementBin(unsigned* sbins, int* bins, int nbins, int binId)
{
  typedef BitsInfo<BIN_BITS> Bits;
  // Locate the shared word and the bit-field within it for this bin.
  auto iword = binId / Bits::WORD_BINS;
  auto ibin = binId % Bits::WORD_BINS;
  auto sh = ibin * Bits::BIN_BITS;
  auto old_word = atomicAdd(sbins + iword, unsigned(1 << sh));
  auto new_word = old_word + unsigned(1 << sh);
  // Non-zero field after the add means no wrap-around: done.
  if ((new_word >> sh & Bits::BIN_MASK) != 0) return;
  // overflow
  // This bin's field wrapped to zero: flush one full field's worth of
  // counts (BIN_MASK + 1) to the global bin.
  raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binId, Bits::BIN_MASK + 1);
  // The wrap carried a spurious +1 into the next-higher field of the same
  // word. Walk the higher fields: a field the carry also zeroed gets its
  // full count flushed and propagates the carry; the first non-zero field
  // absorbs the stray increment, which is undone with a -1 correction.
  for (int dbin = 1; ibin + dbin < Bits::WORD_BINS && binId + dbin < nbins; ++dbin) {
    auto sh1 = (ibin + dbin) * Bits::BIN_BITS;
    if ((new_word >> sh1 & Bits::BIN_MASK) == 0) {
      // overflow
      raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binId + dbin, Bits::BIN_MASK);
    } else {
      // correction
      raft::myAtomicAdd(bins + binId + dbin, -1);
      break;
    }
  }
}
template <>
DI void incrementBin<1>(unsigned* sbins, int* bins, int nbins, int binId)
{
  typedef BitsInfo<1> Bits;
  auto iword = binId / Bits::WORD_BITS;
  auto sh = binId % Bits::WORD_BITS;
  // With 1-bit counters an increment is an XOR toggle of the bin's bit.
  // A 1 -> 0 transition means two increments have accumulated since the last
  // flush, so credit 2 to the global bin; the residual 1-bit (if any) is
  // flushed by the caller's final unpack pass.
  auto old_word = atomicXor(sbins + iword, unsigned(1 << sh));
  if ((old_word >> sh & 1) != 0) raft::myAtomicAdd(bins + binId, 2);
}
template <typename DataT, typename BinnerOp, typename IdxT, int BIN_BITS, int VecLen>
RAFT_KERNEL smemBitsHistKernel(
  int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  // Shared memory holds BIN_BITS-wide counters packed into unsigned words,
  // trading counter range (overflow handled by incrementBin) for capacity.
  extern __shared__ unsigned sbins[];
  typedef BitsInfo<BIN_BITS> Bits;
  auto nwords = raft::ceildiv<int>(nbins, Bits::WORD_BINS);
  // Cooperatively zero the packed words.
  for (auto j = threadIdx.x; j < nwords; j += blockDim.x) {
    sbins[j] = 0;
  }
  __syncthreads();
  IdxT col = blockIdx.y;
  IdxT binOffset = col * nbins;
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    // Rows beyond nrows come from histCoreOp's padded loop; skip them.
    if (row >= nrows) return;
    // incrementBin spills overflowed fields straight to the global bins.
    incrementBin<Bits::BIN_BITS>(sbins, bins + binOffset, (int)nbins, binId);
  };
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  __syncthreads();
  // Unpack the residual shared-memory counts and flush them to global.
  for (auto j = threadIdx.x; j < (int)nbins; j += blockDim.x) {
    auto shift = j % Bits::WORD_BINS * Bits::BIN_BITS;
    int count = sbins[j / Bits::WORD_BINS] >> shift & Bits::BIN_MASK;
    if (count > 0) raft::myAtomicAdd(bins + binOffset + j, count);
  }
}
template <typename DataT, typename BinnerOp, typename IdxT, int BIN_BITS, int VecLen>
void smemBitsHist(int* bins,
                  IdxT nbins,
                  const DataT* data,
                  IdxT nrows,
                  IdxT ncols,
                  BinnerOp binner,
                  cudaStream_t stream)
{
  typedef BitsInfo<BIN_BITS> Bits;
  // Launcher for the packed-counter variant; each shared word packs
  // WORD_BITS / BIN_BITS bin counters.
  const void* kernelPtr =
    (const void*)smemBitsHistKernel<DataT, BinnerOp, IdxT, Bits::BIN_BITS, VecLen>;
  const dim3 grid = computeGridDim<IdxT, VecLen>(nrows, ncols, kernelPtr);
  const size_t smem =
    raft::ceildiv<size_t>(nbins, Bits::WORD_BITS / Bits::BIN_BITS) * sizeof(int);
  smemBitsHistKernel<DataT, BinnerOp, IdxT, Bits::BIN_BITS, VecLen>
    <<<grid, ThreadsPerBlock, smem, stream>>>(bins, data, nrows, nbins, binner);
}
#define INVALID_KEY -1
DI void clearHashTable(int2* ht, int hashSize)
{
  // Block-strided cooperative reset: every entry becomes (empty key, 0).
  for (unsigned idx = threadIdx.x; idx < unsigned(hashSize); idx += blockDim.x) {
    ht[idx] = {INVALID_KEY, 0};
  }
}
DI int findEntry(int2* ht, int hashSize, int binId, int threshold)
{
  // Linear probing from binId % hashSize: each probe either claims an empty
  // slot for binId via atomicCAS, or stops at the slot already owned by
  // binId. After `threshold` failed probes the search gives up and returns
  // a negative sentinel so the caller can flush the table and retry.
  int idx = binId % hashSize;
  int t;
  int count = 0;
  while ((t = atomicCAS(&(ht[idx].x), INVALID_KEY, binId)) != INVALID_KEY && t != binId) {
    ++count;
    if (count >= threshold) {
      idx = INVALID_KEY;
      break;
    }
    ++idx;
    // wrap around to the start of the table
    if (idx >= hashSize) { idx = 0; }
  }
  return idx;
}
DI void flushHashTable(int2* ht, int hashSize, int* bins, int nbins, int col)
{
  // Spill every occupied entry's count into the global bins of this column,
  // then reset the entry for reuse. Block-strided cooperative loop.
  const int base = col * nbins;
  for (unsigned idx = threadIdx.x; idx < unsigned(hashSize); idx += blockDim.x) {
    const int key   = ht[idx].x;
    const int count = ht[idx].y;
    if (key != INVALID_KEY && count > 0) { raft::myAtomicAdd(bins + base + key, count); }
    ht[idx] = {INVALID_KEY, 0};
  }
}
#undef INVALID_KEY
///@todo: honor VecLen template param
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
RAFT_KERNEL smemHashHistKernel(int* bins,
                               const DataT* data,
                               IdxT nrows,
                               IdxT nbins,
                               BinnerOp binner,
                               int hashSize,
                               int threshold)
{
  // Shared memory layout: hashSize int2 entries (key, count) followed by a
  // single int used as the block-wide "needs flush" flag.
  extern __shared__ int2 ht[];
  int* needFlush = (int*)&(ht[hashSize]);
  if (threadIdx.x == 0) { needFlush[0] = 0; }
  clearHashTable(ht, hashSize);
  __syncthreads();
  // NOTE: the __syncthreads() calls inside this op are safe only because
  // histCoreOp pads the trip count so every thread of the block executes
  // the same number of iterations.
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    bool iNeedFlush = false;
    if (row < nrows) {
      int hidx = findEntry(ht, hashSize, binId, threshold);
      if (hidx >= 0) {
        raft::myAtomicAdd(&(ht[hidx].y), 1);
      } else {
        // Probe limit hit: request a block-wide flush and retry afterwards.
        needFlush[0] = 1;
        iNeedFlush = true;
      }
    }
    __syncthreads();
    if (needFlush[0]) {
      // Flush + reset the table, then clear the flag before proceeding.
      flushHashTable(ht, hashSize, bins, nbins, col);
      __syncthreads();
      if (threadIdx.x == 0) { needFlush[0] = 0; }
      __syncthreads();
    }
    if (iNeedFlush) {
      int hidx = findEntry(ht, hashSize, binId, threshold);
      // all threads are bound to get one valid entry as all threads in this
      // block will make forward progress due to the __syncthreads call in the
      // subsequent iteration
      raft::myAtomicAdd(&(ht[hidx].y), 1);
    }
  };
  IdxT col = blockIdx.y;
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  __syncthreads();
  // Final flush of whatever is still resident in the hash table.
  flushHashTable(ht, hashSize, bins, nbins, col);
}
/**
 * @brief Pick the hash-table size for smemHashHistKernel.
 *
 * Returns the largest prime number of entries that fits in the device's
 * shared memory per block (each entry stores two ints: bin id + count). A
 * prime size spreads the `binId % hashSize` probe start points evenly.
 */
inline int computeHashTableSize()
{
  // generous upper bound for the one-time prime sieve (function-local static)
  static const unsigned maxBinsEverPossible = 256 * 1024;
  static raft::common::Seive primes(maxBinsEverPossible);
  unsigned smem = raft::getSharedMemPerBlock();
  // divide-by-2 because each hash entry holds two ints (id and count)
  auto binsPossible = smem / sizeof(unsigned) / 2;
  while (binsPossible > 1) {
    if (primes.isPrime(binsPossible)) { return static_cast<int>(binsPossible); }
    --binsPossible;
  }
  return 1;  // effectively unreachable on real hardware
}
/**
 * @brief Launch the shared-memory hash-table histogram kernel.
 *
 * Sizes the hash table to the available shared memory and reserves one extra
 * int for the kernel's block-wide flush flag. Currently always launches with
 * VecLen = 1 (see the @todo on smemHashHistKernel).
 */
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void smemHashHist(int* bins,
                  IdxT nbins,
                  const DataT* data,
                  IdxT nrows,
                  IdxT ncols,
                  BinnerOp binner,
                  cudaStream_t stream)
{
  // probe attempts a thread makes before forcing a block-wide flush
  static const int flushThreshold = 10;
  const int hashSize = computeHashTableSize();
  // hash entries plus the trailing needFlush flag
  const size_t smemSize = hashSize * sizeof(int2) + sizeof(int);
  auto grid = computeGridDim<IdxT, 1>(
    nrows, ncols, (const void*)smemHashHistKernel<DataT, BinnerOp, IdxT, 1>);
  smemHashHistKernel<DataT, BinnerOp, IdxT, 1><<<grid, ThreadsPerBlock, smemSize, stream>>>(
    bins, data, nrows, nbins, binner, hashSize, flushThreshold);
}
/**
 * @brief Dispatch the histogram computation to the implementation selected by
 * `type`, with a fixed per-thread vectorized load width (VecLen).
 *
 * Zeroes the output bins first (all implementations accumulate into them),
 * then instantiates exactly one backend kernel launcher. Checks for async
 * launch errors at the end via cudaGetLastError().
 */
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void histogramVecLen(HistType type,
                     int* bins,
                     IdxT nbins,
                     const DataT* data,
                     IdxT nrows,
                     IdxT ncols,
                     cudaStream_t stream,
                     BinnerOp binner)
{
  // all backends accumulate, so the output must start at zero
  RAFT_CUDA_TRY(cudaMemsetAsync(bins, 0, ncols * nbins * sizeof(int), stream));
  switch (type) {
    // global-memory atomics only (no shared-memory staging)
    case HistTypeGmem:
      gmemHist<DataT, BinnerOp, IdxT, VecLen>(bins, nbins, data, nrows, ncols, binner, stream);
      break;
    // dense 32-bit shared-memory counters; the boolean selects match_any use
    case HistTypeSmem:
      smemHist<DataT, BinnerOp, IdxT, VecLen, false>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemMatchAny:
      smemHist<DataT, BinnerOp, IdxT, VecLen, true>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    // packed shared-memory counters with N bits per bin (16 down to 1)
    case HistTypeSmemBits16:
      smemBitsHist<DataT, BinnerOp, IdxT, 16, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits8:
      smemBitsHist<DataT, BinnerOp, IdxT, 8, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits4:
      smemBitsHist<DataT, BinnerOp, IdxT, 4, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits2:
      smemBitsHist<DataT, BinnerOp, IdxT, 2, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits1:
      smemBitsHist<DataT, BinnerOp, IdxT, 1, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    // hash-table fallback for very large bin counts
    case HistTypeSmemHash:
      smemHashHist<DataT, BinnerOp, IdxT, VecLen>(bins, nbins, data, nrows, ncols, binner, stream);
      break;
    default: ASSERT(false, "histogram: Invalid type passed '%d'!", type);
  };
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Select the widest usable vectorized load and run the histogram.
 *
 * Chooses VecLen such that both the element size divides the vector width
 * (16/8/4/2 bytes) and each column's byte length is a multiple of it, then
 * forwards to histogramVecLen. Falls back to scalar loads otherwise.
 */
template <typename DataT, typename BinnerOp, typename IdxT>
void histogramImpl(HistType type,
                   int* bins,
                   IdxT nbins,
                   const DataT* data,
                   IdxT nrows,
                   IdxT ncols,
                   cudaStream_t stream,
                   BinnerOp binner)
{
  if (nrows <= 0) return;  // nothing to do; bins are left untouched
  const size_t bytes = nrows * sizeof(DataT);
  // try 16B, then 8B, 4B, 2B vector loads, else scalar
  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 16 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 8 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (4 % sizeof(DataT) == 0 && bytes % 4 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 4 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (2 % sizeof(DataT) == 0 && bytes % 2 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 2 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else {
    histogramVecLen<DataT, BinnerOp, IdxT, 1>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  }
}
/**
 * @brief Choose the best histogram backend for a given bin count.
 *
 * Prefers dense 32-bit shared-memory counters; if those do not fit in the
 * device's shared memory per block, tries progressively narrower packed
 * counters (16, 8, 4, 2, 1 bits per bin); otherwise falls back to global
 * memory atomics.
 */
template <typename IdxT>
HistType selectBestHistAlgo(IdxT nbins)
{
  const size_t smem = raft::getSharedMemPerBlock();
  if (nbins * sizeof(unsigned) <= smem) { return HistTypeSmem; }
  for (int bits = 16; bits >= 1; bits >>= 1) {
    // bytes for nbins counters of `bits` bits each, padded to word alignment
    auto nBytesForBins = raft::ceildiv<size_t>(bits * nbins, 8);
    if (raft::alignTo<size_t>(nBytesForBins, sizeof(unsigned)) <= smem) {
      // NOTE(review): relies on HistTypeSmemBits<N> enum values equalling N
      // -- verify against the HistType definition
      return static_cast<HistType>(bits);
    }
  }
  return HistTypeGmem;  // nothing fits in shared memory
}
/**
* @brief Perform histogram on the input data. It chooses the right load size
* based on the input data vector length. It also supports large-bin cases
* using a specialized smem-based hashing technique.
* @tparam DataT input data type
* @tparam IdxT data type used to compute indices
* @tparam BinnerOp takes the input data and computes its bin index
* @param type histogram implementation type to choose
* @param bins the output bins (length = ncols * nbins)
* @param nbins number of bins
* @param data input data (length = ncols * nrows)
* @param nrows data array length in each column (or batch)
* @param ncols number of columns (or batch size)
* @param stream cuda stream
* @param binner the operation that computes the bin index of the input data
*
* @note signature of BinnerOp is `int func(DataT, IdxT);`
*/
template <typename DataT, typename IdxT = int, typename BinnerOp = IdentityBinner<DataT, IdxT>>
void histogram(HistType type,
               int* bins,
               IdxT nbins,
               const DataT* data,
               IdxT nrows,
               IdxT ncols,
               cudaStream_t stream,
               BinnerOp binner = IdentityBinner<DataT, IdxT>())
{
  // resolve the automatic choice up front; everything else passes through
  const HistType resolvedType = (type == HistTypeAuto) ? selectBestHistAlgo(nbins) : type;
  histogramImpl<DataT, BinnerOp, IdxT>(
    resolvedType, bins, nbins, data, nrows, ncols, stream, binner);
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/weighted_mean.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/reduce.cuh>
#include <raft/stats/sum.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Compute the row-wise weighted mean of the input matrix with a
* vector of weights
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param mu the output mean vector
* @param data the input matrix
* @param weights weight of size D if along_row is true, else of size N
* @param D number of columns of data
* @param N number of rows of data
* @param row_major data input matrix is row-major or not
* @param along_rows whether to reduce along rows or columns
* @param stream cuda stream to launch work on
*/
template <typename Type, typename IdxType = int>
void weightedMean(Type* mu,
const Type* data,
const Type* weights,
IdxType D,
IdxType N,
bool row_major,
bool along_rows,
cudaStream_t stream)
{
// sum the weights & copy back to CPU
auto weight_size = along_rows ? D : N;
Type WS = 0;
raft::stats::sum(mu, weights, (IdxType)1, weight_size, false, stream);
raft::update_host(&WS, mu, 1, stream);
raft::linalg::reduce(
mu,
data,
D,
N,
(Type)0,
row_major,
along_rows,
stream,
false,
[weights] __device__(Type v, IdxType i) { return v * weights[i]; },
raft::add_op{},
raft::div_const_op<Type>(WS));
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/homogeneity_score.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file homogeneity_score.cuh
*
* @brief A clustering result satisfies homogeneity if all of its clusters
* contain only data points which are members of a single class.
*/
#pragma once
#include <raft/stats/entropy.cuh>
#include <raft/stats/mutual_info_score.cuh>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Function to calculate the homogeneity score between two clusters
* <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual
* information</a>
* @param truthClusterArray: the array of truth classes of type T
* @param predClusterArray: the array of predicted classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
*/
template <typename T>
double homogeneity_score(const T* truthClusterArray,
const T* predClusterArray,
int size,
T lowerLabelRange,
T upperLabelRange,
cudaStream_t stream)
{
if (size == 0) return 1.0;
double computedMI, computedEntropy;
computedMI = raft::stats::mutual_info_score(
truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream);
computedEntropy =
raft::stats::entropy(truthClusterArray, size, lowerLabelRange, upperLabelRange, stream);
double homogeneity;
if (computedEntropy) {
homogeneity = computedMI / computedEntropy;
} else
homogeneity = 1.0;
return homogeneity;
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/stddev.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/binary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace raft {
namespace stats {
namespace detail {
///@todo: ColPerBlk has been tested only for 32!
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL stddevKernelRowMajor(Type* std, const Type* data, IdxType D, IdxType N)
{
const int RowsPerBlkPerIter = TPB / ColsPerBlk;
IdxType thisColId = threadIdx.x % ColsPerBlk;
IdxType thisRowId = threadIdx.x / ColsPerBlk;
IdxType colId = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
IdxType rowId = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
Type thread_data = Type(0);
const IdxType stride = RowsPerBlkPerIter * gridDim.x;
for (IdxType i = rowId; i < N; i += stride) {
Type val = (colId < D) ? data[i * D + colId] : Type(0);
thread_data += val * val;
}
__shared__ Type sstd[ColsPerBlk];
if (threadIdx.x < ColsPerBlk) sstd[threadIdx.x] = Type(0);
__syncthreads();
raft::myAtomicAdd(sstd + thisColId, thread_data);
__syncthreads();
if (threadIdx.x < ColsPerBlk) raft::myAtomicAdd(std + colId, sstd[thisColId]);
}
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL stddevKernelColMajor(Type* std, const Type* data, const Type* mu, IdxType D, IdxType N)
{
typedef cub::BlockReduce<Type, TPB> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
Type thread_data = Type(0);
IdxType colStart = N * blockIdx.x;
Type m = mu[blockIdx.x];
for (IdxType i = threadIdx.x; i < N; i += TPB) {
IdxType idx = colStart + i;
Type diff = data[idx] - m;
thread_data += diff * diff;
}
Type acc = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) { std[blockIdx.x] = raft::sqrt(acc / N); }
}
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL varsKernelColMajor(Type* var, const Type* data, const Type* mu, IdxType D, IdxType N)
{
typedef cub::BlockReduce<Type, TPB> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
Type thread_data = Type(0);
IdxType colStart = N * blockIdx.x;
Type m = mu[blockIdx.x];
for (IdxType i = threadIdx.x; i < N; i += TPB) {
IdxType idx = colStart + i;
Type diff = data[idx] - m;
thread_data += diff * diff;
}
Type acc = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) { var[blockIdx.x] = acc / N; }
}
/**
* @brief Compute stddev of the input matrix
*
* Stddev operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param std the output stddev vector
* @param data the input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
* @param rowMajor whether the input data is row or col major
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int>
void stddev(Type* std,
const Type* data,
const Type* mu,
IdxType D,
IdxType N,
bool sample,
bool rowMajor,
cudaStream_t stream)
{
static const int TPB = 256;
if (rowMajor) {
static const int RowsPerThread = 4;
static const int ColsPerBlk = 32;
static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
RAFT_CUDA_TRY(cudaMemset(std, 0, sizeof(Type) * D));
stddevKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(std, data, D, N);
Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
raft::linalg::binaryOp(
std,
std,
mu,
D,
[ratio] __device__(Type a, Type b) { return raft::sqrt(a * ratio - b * b); },
stream);
} else {
stddevKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(std, data, mu, D, N);
}
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief Compute variance of the input matrix
*
* Variance operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param var the output stddev vector
* @param data the input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
* @param rowMajor whether the input data is row or col major
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int>
void vars(Type* var,
const Type* data,
const Type* mu,
IdxType D,
IdxType N,
bool sample,
bool rowMajor,
cudaStream_t stream)
{
static const int TPB = 256;
if (rowMajor) {
static const int RowsPerThread = 4;
static const int ColsPerBlk = 32;
static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
RAFT_CUDA_TRY(cudaMemset(var, 0, sizeof(Type) * D));
stddevKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(var, data, D, N);
Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
raft::linalg::binaryOp(
var, var, mu, D, [ratio] __device__(Type a, Type b) { return a * ratio - b * b; }, stream);
} else {
varsKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(var, data, mu, D, N);
}
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/mean_center.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/vectorized.cuh>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Center the input matrix wrt its mean
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @tparam TPB threads per block of the cuda kernel launched
* @param out the output mean-centered matrix
* @param data input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether to broadcast vector along rows or columns
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int, int TPB = 256>
void meanCenter(Type* out,
const Type* data,
const Type* mu,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
cudaStream_t stream)
{
raft::linalg::matrixVectorOp(
out, data, mu, D, N, rowMajor, bcastAlongRows, raft::sub_op{}, stream);
}
/**
* @brief Add the input matrix wrt its mean
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @tparam TPB threads per block of the cuda kernel launched
* @param out the output mean-added matrix
* @param data input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether to broadcast vector along rows or columns
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int, int TPB = 256>
void meanAdd(Type* out,
const Type* data,
const Type* mu,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
cudaStream_t stream)
{
raft::linalg::matrixVectorOp(
out, data, mu, D, N, rowMajor, bcastAlongRows, raft::add_op{}, stream);
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/entropy.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file entropy.cuh
* @brief Calculates the entropy for a labeling in nats.(ie, uses natural logarithm for the
* calculations)
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/linalg/divide.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief Lambda to calculate the entropy of a sample given its probability value
*
* @param p: the input to the functional mapping
* @param q: dummy param
*/
struct entropyOp {
HDI double operator()(double p, double q)
{
if (p)
return -1 * (p) * (log(p));
else
return 0.0;
}
};
/**
* @brief function to calculate the bincounts of number of samples in every label
*
* @tparam LabelT: type of the labels
* @param labels: the pointer to the array containing labels for every data sample
* @param binCountArray: pointer to the 1D array that contains the count of samples per cluster
* @param nRows: number of data samples
* @param lowerLabelRange
* @param upperLabelRange
* @param workspace: device buffer containing workspace memory
* @param stream: the cuda stream where to launch this kernel
*/
template <typename LabelT>
void countLabels(const LabelT* labels,
double* binCountArray,
int nRows,
LabelT lowerLabelRange,
LabelT upperLabelRange,
rmm::device_uvector<char>& workspace,
cudaStream_t stream)
{
int num_levels = upperLabelRange - lowerLabelRange + 2;
LabelT lower_level = lowerLabelRange;
LabelT upper_level = upperLabelRange + 1;
size_t temp_storage_bytes = 0;
RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
labels,
binCountArray,
num_levels,
lower_level,
upper_level,
nRows,
stream));
workspace.resize(temp_storage_bytes, stream);
RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
temp_storage_bytes,
labels,
binCountArray,
num_levels,
lower_level,
upper_level,
nRows,
stream));
}
/**
* @brief Function to calculate entropy
* <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a>
*
* @param clusterArray: the array of classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
* @return the entropy score
*/
template <typename T>
double entropy(const T* clusterArray,
const int size,
const T lowerLabelRange,
const T upperLabelRange,
cudaStream_t stream)
{
if (!size) return 1.0;
T numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
// declaring, allocating and initializing memory for bincount array and entropy values
rmm::device_uvector<double> prob(numUniqueClasses, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(prob.data(), 0, numUniqueClasses * sizeof(double), stream));
rmm::device_scalar<double> d_entropy(stream);
RAFT_CUDA_TRY(cudaMemsetAsync(d_entropy.data(), 0, sizeof(double), stream));
// workspace allocation
rmm::device_uvector<char> workspace(1, stream);
// calculating the bincounts and populating the prob array
countLabels(clusterArray, prob.data(), size, lowerLabelRange, upperLabelRange, workspace, stream);
// scalar dividing by size
raft::linalg::divideScalar<double>(
prob.data(), prob.data(), (double)size, numUniqueClasses, stream);
// calculating the aggregate entropy
raft::linalg::mapThenSumReduce<double, entropyOp>(
d_entropy.data(), numUniqueClasses, entropyOp(), stream, prob.data(), prob.data());
// updating in the host memory
double h_entropy;
raft::update_host(&h_entropy, d_entropy.data(), 1, stream);
raft::interruptible::synchronize(stream);
return h_entropy;
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/neighborhood_recall.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/math.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <cub/cub.cuh>
#include <cuda/atomic>
#include <optional>
namespace raft::stats::detail {
template <typename IndicesValueType,
typename DistanceValueType,
typename IndexType,
typename ScalarType>
RAFT_KERNEL neighborhood_recall(
raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
distances,
std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
ref_distances,
raft::device_scalar_view<ScalarType> recall_score,
DistanceValueType const eps)
{
auto constexpr kThreadsPerBlock = 32;
IndexType const row_idx = blockIdx.x;
auto const lane_idx = threadIdx.x % kThreadsPerBlock;
// Each warp stores a recall score computed across the columns per row
IndexType thread_recall_score = 0;
for (IndexType col_idx = lane_idx; col_idx < indices.extent(1); col_idx += kThreadsPerBlock) {
for (IndexType ref_col_idx = 0; ref_col_idx < ref_indices.extent(1); ref_col_idx++) {
if (indices(row_idx, col_idx) == ref_indices(row_idx, ref_col_idx)) {
thread_recall_score += 1;
break;
} else if (distances.has_value()) {
auto dist = distances.value()(row_idx, col_idx);
auto ref_dist = ref_distances.value()(row_idx, ref_col_idx);
DistanceValueType diff = raft::abs(dist - ref_dist);
DistanceValueType m = std::max(raft::abs(dist), raft::abs(ref_dist));
DistanceValueType ratio = diff > eps ? diff / m : diff;
if (ratio <= eps) {
thread_recall_score += 1;
break;
}
}
}
}
// Reduce across a warp for row score
typedef cub::BlockReduce<IndexType, kThreadsPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
ScalarType row_recall_score = BlockReduce(temp_storage).Sum(thread_recall_score);
// Reduce across all rows for global score
if (lane_idx == 0) {
cuda::atomic_ref<ScalarType, cuda::thread_scope_device> device_recall_score{
*recall_score.data_handle()};
std::size_t const total_count = indices.extent(0) * indices.extent(1);
device_recall_score.fetch_add(row_recall_score / total_count);
}
}
template <typename IndicesValueType,
typename DistanceValueType,
typename IndexType,
typename ScalarType>
void neighborhood_recall(
raft::resources const& res,
raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
distances,
std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
ref_distances,
raft::device_scalar_view<ScalarType> recall_score,
DistanceValueType const eps)
{
// One warp per row, launch a warp-width block per-row kernel
auto constexpr kThreadsPerBlock = 32;
auto const num_blocks = indices.extent(0);
neighborhood_recall<<<num_blocks, kThreadsPerBlock>>>(
indices, ref_indices, distances, ref_distances, recall_score, eps);
}
} // end namespace raft::stats::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/silhouette_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cub/cub.cuh>
#include <iostream>
#include <math.h>
#include <numeric>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief kernel that calculates the average intra-cluster distance for every sample data point and
* updates the cluster distance to max value
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param sampleToClusterSumOfDistances: the pointer to the 2D array that contains the sum of
* distances from every sample to every cluster (nRows x nLabels)
* @param binCountArray: pointer to the 1D array that contains the count of samples per cluster (1 x
* nLabels)
* @param d_aArray: the pointer to the array of average intra-cluster distances for every sample in
* device memory (1 x nRows)
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param nRows: number of data samples
* @param nLabels: number of Labels
* @param MAX_VAL: DataT specific upper limit
*/
template <typename DataT, typename LabelT>
RAFT_KERNEL populateAKernel(DataT* sampleToClusterSumOfDistances,
DataT* binCountArray,
DataT* d_aArray,
const LabelT* labels,
int nRows,
int nLabels,
const DataT MAX_VAL)
{
// getting the current index
int sampleIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (sampleIndex >= nRows) return;
// sampleDistanceVector is an array that stores that particular row of the distanceMatrix
DataT* sampleToClusterSumOfDistancesVector =
&sampleToClusterSumOfDistances[sampleIndex * nLabels];
LabelT sampleCluster = labels[sampleIndex];
int sampleClusterIndex = (int)sampleCluster;
if (binCountArray[sampleClusterIndex] - 1 <= 0) {
d_aArray[sampleIndex] = -1;
return;
}
else {
d_aArray[sampleIndex] = (sampleToClusterSumOfDistancesVector[sampleClusterIndex]) /
(binCountArray[sampleClusterIndex] - 1);
// modifying the sampleDistanceVector to give sample average distance
sampleToClusterSumOfDistancesVector[sampleClusterIndex] = MAX_VAL;
}
}
/**
* @brief function to calculate the bincounts of number of samples in every label
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param binCountArray: pointer to the 1D array that contains the count of samples per cluster (1 x
* nLabels)
* @param nRows: number of data samples
* @param nUniqueLabels: number of Labels
* @param workspace: device buffer containing workspace memory
* @param stream: the cuda stream where to launch this kernel
*/
template <typename DataT, typename LabelT>
void countLabels(const LabelT* labels,
                 DataT* binCountArray,
                 int nRows,
                 int nUniqueLabels,
                 rmm::device_uvector<char>& workspace,
                 cudaStream_t stream)
{
  // one histogram bin per label; HistogramEven needs num_levels = bins + 1
  int num_levels            = nUniqueLabels + 1;
  LabelT lower_level        = 0;
  LabelT upper_level        = nUniqueLabels;
  size_t temp_storage_bytes = 0;
  // (fix: removed an unused `countArray` device_uvector of nUniqueLabels ints
  // that was allocated here and never read or written)
  // first call with a null workspace only queries the temp-storage size
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
  workspace.resize(temp_storage_bytes, stream);
  // second call performs the actual per-label counting into binCountArray
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
}
/**
* @brief structure that defines the division Lambda for elementwise op
*/
template <typename DataT>
struct DivOp {
  // a: per-cluster distance sum; b: cluster bin count; c: unused (kept for
  // the matrixVectorOp's two-vector signature)
  HDI DataT operator()(DataT a, int b, int c)
  {
    // empty cluster: return the same sentinel the subsequent row-wise min
    // reduction is initialized with, so the cluster is never selected.
    // (fix: previously returned ULLONG_MAX cast to DataT, a smaller and
    // inconsistent sentinel for floating-point DataT)
    if (b == 0)
      return std::numeric_limits<DataT>::max();
    else
      return a / b;
  }
};
/**
* @brief structure that defines the elementwise operation to calculate silhouette score using
* params 'a' and 'b'
*/
template <typename DataT>
struct SilOp {
  // a: mean intra-cluster distance; b: mean distance to the nearest other
  // cluster; result is the per-sample silhouette (b - a) / max(a, b)
  HDI DataT operator()(DataT a, DataT b)
  {
    // degenerate cases: both zero, equal scores, or 'a' flagged invalid (-1
    // marks a singleton cluster) all yield a zero silhouette
    if ((a == 0 && b == 0) || a == b || a == -1) return 0;
    // normalize by the larger of the two averages
    const DataT denom = (a > b) ? a : b;
    return (b - a) / denom;
  }
};
/**
* @brief main function that returns the average silhouette score for a given set of data and its
* clusterings
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param X_in: pointer to the input Data samples array (nRows x nCols)
* @param nRows: number of data samples
* @param nCols: number of features
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param nLabels: number of Labels
* @param silhouette_scorePerSample: pointer to the array that is optionally taken in as input and
* is populated with the silhouette score for every sample (1 x nRows)
* @param stream: the cuda stream where to launch this kernel
* @param metric: the numerical value that maps to the type of distance metric to be used in the
* calculations
*/
template <typename DataT, typename LabelT>
DataT silhouette_score(
  raft::resources const& handle,
  const DataT* X_in,
  int nRows,
  int nCols,
  const LabelT* labels,
  int nLabels,
  DataT* silhouette_scorePerSample,
  cudaStream_t stream,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  // the silhouette needs at least 2 clusters and at most nRows - 1
  ASSERT(nLabels >= 2 && nLabels <= (nRows - 1),
         "silhouette Score not defined for the given number of labels!");
  // compute the distance matrix
  // NOTE(review): nRows * nRows is an int product and overflows for
  // nRows > ~46340 -- confirm expected input sizes
  rmm::device_uvector<DataT> distanceMatrix(nRows * nRows, stream);
  rmm::device_uvector<char> workspace(1, stream);
  raft::distance::pairwise_distance(
    handle, X_in, X_in, distanceMatrix.data(), nRows, nRows, nCols, metric);
  // deciding on the array of silhouette scores for each dataPoint: use the
  // caller-provided buffer when given, otherwise allocate a scratch one
  rmm::device_uvector<DataT> silhouette_scoreSamples(0, stream);
  DataT* perSampleSilScore = nullptr;
  if (silhouette_scorePerSample == nullptr) {
    silhouette_scoreSamples.resize(nRows, stream);
    perSampleSilScore = silhouette_scoreSamples.data();
  } else {
    perSampleSilScore = silhouette_scorePerSample;
  }
  RAFT_CUDA_TRY(cudaMemsetAsync(perSampleSilScore, 0, nRows * sizeof(DataT), stream));
  // getting the sample count per cluster (stored as DataT for later division)
  rmm::device_uvector<DataT> binCountArray(nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(binCountArray.data(), 0, nLabels * sizeof(DataT), stream));
  countLabels(labels, binCountArray.data(), nRows, nLabels, workspace, stream);
  // calculating the sample-cluster-distance-sum-array: for every sample, the
  // sum of its distances to the members of each cluster (nRows x nLabels)
  rmm::device_uvector<DataT> sampleToClusterSumOfDistances(nRows * nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    sampleToClusterSumOfDistances.data(), 0, nRows * nLabels * sizeof(DataT), stream));
  raft::linalg::reduce_cols_by_key(distanceMatrix.data(),
                                   labels,
                                   sampleToClusterSumOfDistances.data(),
                                   nRows,
                                   nRows,
                                   nLabels,
                                   stream);
  // creating the a array and b array:
  // a[i] = mean intra-cluster distance of sample i,
  // b[i] = mean distance to the nearest other cluster
  rmm::device_uvector<DataT> d_aArray(nRows, stream);
  rmm::device_uvector<DataT> d_bArray(nRows, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_aArray.data(), 0, nRows * sizeof(DataT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_bArray.data(), 0, nRows * sizeof(DataT), stream));
  // kernel that populates the d_aArray
  // kernel configuration
  dim3 numThreadsPerBlock(32, 1, 1);
  dim3 numBlocks(raft::ceildiv<int>(nRows, numThreadsPerBlock.x), 1, 1);
  // calling the kernel
  populateAKernel<<<numBlocks, numThreadsPerBlock, 0, stream>>>(
    sampleToClusterSumOfDistances.data(),
    binCountArray.data(),
    d_aArray.data(),
    labels,
    nRows,
    nLabels,
    std::numeric_limits<DataT>::max());
  // elementwise dividing by bincounts: per-sample average distance to each cluster
  rmm::device_uvector<DataT> averageDistanceBetweenSampleAndCluster(nRows * nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    averageDistanceBetweenSampleAndCluster.data(), 0, nRows * nLabels * sizeof(DataT), stream));
  raft::linalg::matrixVectorOp(averageDistanceBetweenSampleAndCluster.data(),
                               sampleToClusterSumOfDistances.data(),
                               binCountArray.data(),
                               binCountArray.data(),
                               nLabels,
                               nRows,
                               true,
                               true,
                               DivOp<DataT>(),
                               stream);
  // calculating row-wise minimum -> nearest-other-cluster average distance.
  // populateAKernel overwrote each sample's own-cluster sum with DataT max,
  // so (even after division by the bin count) that entry stays far larger
  // than any real average and drops out of this minimum.
  raft::linalg::reduce<DataT, DataT, int, raft::identity_op, raft::min_op>(
    d_bArray.data(),
    averageDistanceBetweenSampleAndCluster.data(),
    nLabels,
    nRows,
    std::numeric_limits<DataT>::max(),
    true,
    true,
    stream,
    false,
    raft::identity_op{},
    raft::min_op{});
  // calculating the silhouette score per sample using the d_aArray and d_bArray
  raft::linalg::binaryOp<DataT, SilOp<DataT>>(
    perSampleSilScore, d_aArray.data(), d_bArray.data(), nRows, SilOp<DataT>(), stream);
  // calculating the sum of all the silhouette score
  rmm::device_scalar<DataT> d_avgSilhouetteScore(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_avgSilhouetteScore.data(), 0, sizeof(DataT), stream));
  // NOTE(review): the explicit <double, ...> template argument fixes the input
  // type to double, so this call presumably only compiles when DataT == double
  // -- confirm, or consider <DataT, raft::identity_op> instead
  raft::linalg::mapThenSumReduce<double, raft::identity_op>(d_avgSilhouetteScore.data(),
                                                            nRows,
                                                            raft::identity_op(),
                                                            stream,
                                                            perSampleSilScore,
                                                            perSampleSilScore);
  DataT avgSilhouetteScore = d_avgSilhouetteScore.value(stream);
  resource::sync_stream(handle, stream);
  // average over all samples
  avgSilhouetteScore /= nRows;
  return avgSilhouetteScore;
}
}; // namespace detail
}; // namespace stats
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/dispersion.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <memory>
#include <raft/core/interruptible.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
template <typename DataT, typename IdxT, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL weightedMeanKernel(DataT* mu, const DataT* data, const IdxT* counts, IdxT D, IdxT N)
{
  // Accumulates counts-weighted column sums of `data` (N x D, row-major) into
  // `mu` (length D) via shared-memory partials plus global atomics.
  // Launch layout: blockIdx.x tiles rows, blockIdx.y tiles columns in groups
  // of ColsPerBlk; `mu` must be zero-initialized by the caller.
  constexpr int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxT thisColId = threadIdx.x % ColsPerBlk;  // column within this block's tile
  IdxT thisRowId = threadIdx.x / ColsPerBlk;  // row within this block's tile
  IdxT colId = thisColId + ((IdxT)blockIdx.y * ColsPerBlk);
  IdxT rowId = thisRowId + ((IdxT)blockIdx.x * RowsPerBlkPerIter);
  DataT thread_data = DataT(0);
  const IdxT stride = RowsPerBlkPerIter * gridDim.x;
  __shared__ DataT smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = DataT(0);
  // grid-stride over rows: accumulate data * count for this thread's column
  for (IdxT i = rowId; i < N; i += stride) {
    thread_data += (colId < D) ? data[i * D + colId] * (DataT)counts[i] : DataT(0);
  }
  __syncthreads();  // make the zero-init of smu visible to all threads
  raft::myAtomicAdd(smu + thisColId, thread_data);
  __syncthreads();  // all partials folded into smu before the global update
  // colId < D guards the trailing y-block when D % ColsPerBlk != 0
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
template <typename DataT, typename IdxT, int TPB>
RAFT_KERNEL dispersionKernel(DataT* result,
                             const DataT* clusters,
                             const IdxT* clusterSizes,
                             const DataT* mu,
                             IdxT dim,
                             IdxT nClusters)
{
  // Accumulates sum over all (cluster, dim) pairs of
  // clusterSizes[row] * (clusters[row][col] - mu[col])^2 into *result.
  // *result must be zero-initialized by the caller; one atomicAdd per block.
  IdxT tid = threadIdx.x + blockIdx.x * blockDim.x;
  IdxT len = dim * nClusters;
  IdxT stride = blockDim.x * gridDim.x;
  DataT sum = DataT(0);
  // grid-stride over the flattened (nClusters x dim) centroid matrix
  for (; tid < len; tid += stride) {
    IdxT col = tid % dim;
    IdxT row = tid / dim;
    DataT diff = clusters[tid] - mu[col];
    sum += diff * diff * DataT(clusterSizes[row]);
  }
  typedef cub::BlockReduce<DataT, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __syncthreads();
  // block-wide sum; only thread 0 holds the valid result afterwards
  auto acc = BlockReduce(temp_storage).Sum(sum);
  __syncthreads();
  if (threadIdx.x == 0) raft::myAtomicAdd(result, acc);
}
/**
* @brief Compute cluster dispersion metric. This is very useful for
* automatically finding the 'k' (in kmeans) that improves this metric.
* @tparam DataT data type
* @tparam IdxT index type
* @tparam TPB threads block for kernels launched
* @param centroids the cluster centroids. This is assumed to be row-major
* and of dimension (nClusters x dim)
* @param clusterSizes number of points in the dataset which belong to each
* cluster. This is of length nClusters
* @param globalCentroid compute the global weighted centroid of all cluster
* centroids. This is of length dim. Pass a nullptr if this is not needed
* @param nClusters number of clusters
* @param nPoints number of points in the dataset
* @param dim dataset dimensionality
* @param stream cuda stream
* @return the cluster dispersion value
*/
template <typename DataT, typename IdxT = int, int TPB = 256>
DataT dispersion(const DataT* centroids,
                 const IdxT* clusterSizes,
                 DataT* globalCentroid,
                 IdxT nClusters,
                 IdxT nPoints,
                 IdxT dim,
                 cudaStream_t stream)
{
  static const int RowsPerThread = 4;
  static const int ColsPerBlk = 32;
  static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
  // NOTE(review): grid.x is sized from nPoints although weightedMeanKernel
  // only iterates over nClusters rows; harmless (the kernel bounds-checks),
  // but ceildiv(nClusters, RowsPerBlk) would launch fewer idle blocks -- confirm
  dim3 grid(raft::ceildiv(nPoints, (IdxT)RowsPerBlk), raft::ceildiv(dim, (IdxT)ColsPerBlk));
  rmm::device_uvector<DataT> mean(0, stream);
  rmm::device_uvector<DataT> result(1, stream);
  DataT* mu = globalCentroid;
  // allocate a scratch centroid buffer only if the caller doesn't want it back
  if (globalCentroid == nullptr) {
    mean.resize(dim, stream);
    mu = mean.data();
  }
  // both buffers are accumulated into with atomics, so zero them first
  RAFT_CUDA_TRY(cudaMemsetAsync(mu, 0, sizeof(DataT) * dim, stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(result.data(), 0, sizeof(DataT), stream));
  // weighted sum of centroids (weights = cluster sizes)
  weightedMeanKernel<DataT, IdxT, TPB, ColsPerBlk>
    <<<grid, TPB, 0, stream>>>(mu, centroids, clusterSizes, dim, nClusters);
  RAFT_CUDA_TRY(cudaGetLastError());
  // divide by the total point count -> global weighted centroid
  DataT ratio = DataT(1) / DataT(nPoints);
  raft::linalg::scalarMultiply(mu, mu, ratio, dim, stream);
  // finally, compute the dispersion
  constexpr int ItemsPerThread = 4;
  int nblks = raft::ceildiv<int>(dim * nClusters, TPB * ItemsPerThread);
  dispersionKernel<DataT, IdxT, TPB>
    <<<nblks, TPB, 0, stream>>>(result.data(), centroids, clusterSizes, mu, dim, nClusters);
  RAFT_CUDA_TRY(cudaGetLastError());
  DataT h_result;
  raft::update_host(&h_result, result.data(), 1, stream);
  raft::interruptible::synchronize(stream);
  // dispersion = sqrt of the size-weighted squared centroid-to-mean distances
  return sqrt(h_result);
}
} // end namespace detail
} // end namespace stats
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/sum.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace raft {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL sumKernelRowMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  // Column sums of `data` (N x D, row-major) into `mu` (length D) via
  // shared-memory partials and global atomics; `mu` must be zeroed by caller.
  const int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxType thisColId = threadIdx.x % ColsPerBlk;
  IdxType thisRowId = threadIdx.x / ColsPerBlk;
  IdxType colId = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
  IdxType rowId = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
  Type thread_data = Type(0);
  const IdxType stride = RowsPerBlkPerIter * gridDim.x;
  // grid-stride over rows; out-of-range columns contribute 0
  for (IdxType i = rowId; i < N; i += stride)
    thread_data += (colId < D) ? data[i * D + colId] : Type(0);
  __shared__ Type smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = Type(0);
  __syncthreads();
  raft::myAtomicAdd(smu + thisColId, thread_data);
  __syncthreads();
  // fix: guard colId < D -- without it, when D % ColsPerBlk != 0 the trailing
  // blockIdx.y tile performed atomicAdds past the end of mu (out-of-bounds
  // global write, even though the added partial was 0)
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL sumKernelColMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  // One block per column (launched with gridDim.x == D): each block reduces
  // its column of the col-major N x D matrix and writes the sum to
  // mu[blockIdx.x]. No atomics needed since columns don't overlap.
  typedef cub::BlockReduce<Type, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  Type thread_data = Type(0);
  IdxType colStart = N * blockIdx.x;  // offset of this column's first element
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    IdxType idx = colStart + i;
    thread_data += data[idx];
  }
  Type acc = BlockReduce(temp_storage).Sum(thread_data);
  // only thread 0 holds the valid block-wide sum after cub's reduction
  if (threadIdx.x == 0) { mu[blockIdx.x] = acc; }
}
template <typename Type, typename IdxType = int>
void sum(Type* output, const Type* input, IdxType D, IdxType N, bool rowMajor, cudaStream_t stream)
{
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk = 32;
    static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // fix: zero the accumulator on the same stream as the kernel. The previous
    // synchronous cudaMemset ran on the default stream and could race with
    // (or needlessly serialize against) work enqueued on `stream`.
    RAFT_CUDA_TRY(cudaMemsetAsync(output, 0, sizeof(Type) * D, stream));
    sumKernelRowMajor<Type, IdxType, TPB, ColsPerBlk>
      <<<grid, TPB, 0, stream>>>(output, input, D, N);
  } else {
    // col-major path writes (not accumulates) per column, so no zeroing needed
    sumKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(output, input, D, N);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/mutual_info_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file mutual_info_score.cuh
* @brief The Mutual Information is a measure of the similarity between two labels of
* the same data.This metric is independent of the absolute values of the labels:
* a permutation of the class or cluster label values won't change the
* score value in any way.
* This metric is furthermore symmetric.This can be useful to
* measure the agreement of two independent label assignments strategies
* on the same dataset when the real ground truth is not known.
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/core/interruptible.hpp>
#include <raft/linalg/reduce.cuh>
#include <raft/stats/contingency_matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace stats {
namespace detail {
/**
* @brief kernel to calculate the mutual info score
* @param dContingencyMatrix: the contingency matrix corresponding to the two clusters
* @param a: the row wise sum of the contingency matrix, which is also the bin counts of first
* cluster array
* @param b: the column wise sum of the contingency matrix, which is also the bin counts of second
* cluster array
* @param numUniqueClasses: number of unique classes
* @param size: the size of array a and b (size of the contingency matrix is (size x size))
* @param d_MI: pointer to the device memory that stores the aggregate mutual information
*/
template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y>
RAFT_KERNEL mutual_info_kernel(const int* dContingencyMatrix,
                               const int* a,
                               const int* b,
                               int numUniqueClasses,
                               int size,
                               double* d_MI)
{
  // indices of the contingency-matrix cell handled by the current thread
  int j = threadIdx.x + blockIdx.x * blockDim.x;
  int i = threadIdx.y + blockIdx.y * blockDim.y;
  // thread-local contribution to the mutual information
  double localMI = 0.0;
  if (i < numUniqueClasses && j < numUniqueClasses) {
    int nij = dContingencyMatrix[i * numUniqueClasses + j];
    // fix: promote the marginals to double before multiplying -- the previous
    // int product a[i] * b[j] could overflow for large counts, both in the
    // zero test and inside log()
    double ab = double(a[i]) * double(b[j]);
    if (ab != 0 && nij != 0) {
      // n_ij * log(N * n_ij / (a_i * b_j)), written as a difference of logs
      localMI += double(nij) * (log(double(size) * double(nij)) - log(ab));
    }
  }
  // specialize blockReduce for a 2D block of threads
  typedef cub::BlockReduce<double, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
    BlockReduce;
  // Allocate shared memory for blockReduce
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // summing up thread-local counts specific to a block (all threads must call)
  localMI = BlockReduce(temp_storage).Sum(localMI);
  __syncthreads();
  // executed once per block
  if (threadIdx.x == 0 && threadIdx.y == 0) { raft::myAtomicAdd(d_MI, localMI); }
}
/**
* @brief Function to calculate the mutual information between two clusters
* <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a>
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
*/
template <typename T>
double mutual_info_score(const T* firstClusterArray,
                         const T* secondClusterArray,
                         int size,
                         T lowerLabelRange,
                         T upperLabelRange,
                         cudaStream_t stream)
{
  // labels are assumed to lie in [lowerLabelRange, upperLabelRange]
  int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
  // declaring, allocating and initializing memory for the contingency matrix
  rmm::device_uvector<int> dContingencyMatrix(numUniqueClasses * numUniqueClasses, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    dContingencyMatrix.data(), 0, numUniqueClasses * numUniqueClasses * sizeof(int), stream));
  // workspace allocation
  size_t workspaceSz = raft::stats::getContingencyMatrixWorkspaceSize(
    size, firstClusterArray, stream, lowerLabelRange, upperLabelRange);
  rmm::device_uvector<char> pWorkspace(workspaceSz, stream);
  // calculating the contingency matrix
  raft::stats::contingencyMatrix(firstClusterArray,
                                 secondClusterArray,
                                 (int)size,
                                 (int*)dContingencyMatrix.data(),
                                 stream,
                                 (void*)pWorkspace.data(),
                                 workspaceSz,
                                 lowerLabelRange,
                                 upperLabelRange);
  // creating device buffers for all the parameters involved in ARI calculation
  // device variables
  rmm::device_uvector<int> a(numUniqueClasses, stream);
  rmm::device_uvector<int> b(numUniqueClasses, stream);
  rmm::device_scalar<double> d_MI(stream);
  // host variables
  double h_MI;
  // initializing device memory
  RAFT_CUDA_TRY(cudaMemsetAsync(a.data(), 0, numUniqueClasses * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(b.data(), 0, numUniqueClasses * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_MI.data(), 0, sizeof(double), stream));
  // calculating the row-wise sums (marginals of the first clustering)
  raft::linalg::reduce<int, int, int>(
    a.data(), dContingencyMatrix.data(), numUniqueClasses, numUniqueClasses, 0, true, true, stream);
  // calculating the column-wise sums (marginals of the second clustering)
  raft::linalg::reduce<int, int, int>(b.data(),
                                      dContingencyMatrix.data(),
                                      numUniqueClasses,
                                      numUniqueClasses,
                                      0,
                                      true,
                                      false,
                                      stream);
  // kernel configuration: one thread per contingency-matrix cell
  static const int BLOCK_DIM_Y = 16, BLOCK_DIM_X = 16;
  dim3 numThreadsPerBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
  dim3 numBlocks(raft::ceildiv<int>(numUniqueClasses, numThreadsPerBlock.x),
                 raft::ceildiv<int>(numUniqueClasses, numThreadsPerBlock.y));
  // calling the kernel
  mutual_info_kernel<T, BLOCK_DIM_X, BLOCK_DIM_Y><<<numBlocks, numThreadsPerBlock, 0, stream>>>(
    dContingencyMatrix.data(), a.data(), b.data(), numUniqueClasses, size, d_MI.data());
  // updating in the host memory
  h_MI = d_MI.value(stream);
  raft::interruptible::synchronize(stream);
  // normalize the accumulated sum by the number of samples
  return h_MI / size;
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/minmax.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <limits>
namespace raft {
namespace stats {
namespace detail {
// TODO: replace with `std::bitcast` once we adopt C++20 or libcu++ adds it
// Reinterprets the object representation of `from` as a value of type `To`
// (both types must have the same size). memcpy is the well-defined way to do
// this pre-C++20; compilers optimize it away.
template <class To, class From>
constexpr To bit_cast(const From& from) noexcept
{
  static_assert(sizeof(To) == sizeof(From));
  To result{};
  memcpy(&result, &from, sizeof(To));
  return result;
}
// Maps a floating-point type to the equally-sized integer type used by the
// order-preserving encode()/decode() atomics below. Only float and double
// are supported (primary template is intentionally empty).
template <typename T>
struct encode_traits {};
template <>
struct encode_traits<float> {
  using E = int;
};
template <>
struct encode_traits<double> {
  using E = long long;
};
// Order-preserving bijection from float bits to int: non-negative floats keep
// their IEEE bit pattern (which already compares like the value), negative
// floats are bitwise-complemented with the sign bit set, so plain integer
// comparison -- and hence integer atomicMin/Max -- matches float ordering.
HDI int encode(float val)
{
  int i = detail::bit_cast<int>(val);
  return i >= 0 ? i : (1 << 31) | ~i;
}
// 64-bit variant of the above, for double
HDI long long encode(double val)
{
  std::int64_t i = detail::bit_cast<std::int64_t>(val);
  return i >= 0 ? i : (1ULL << 63) | ~i;
}
// Inverse of encode(): recovers the original float from its order-preserving
// integer encoding.
HDI float decode(int val)
{
  if (val < 0) val = (1 << 31) | ~val;
  return detail::bit_cast<float>(val);
}
// 64-bit variant of the above, for double
HDI double decode(long long val)
{
  if (val < 0) val = (1ULL << 63) | ~val;
  return detail::bit_cast<double>(val);
}
// atomicMax on the order-preserving integer encoding stored at *address;
// returns the decoded previous value. *address must already hold an encoded
// value (see minmaxInitKernel / minmaxKernel).
template <typename T, typename E>
DI T atomicMaxBits(T* address, T val)
{
  E old = atomicMax((E*)address, encode(val));
  return decode(old);
}
// atomicMin counterpart of atomicMaxBits
template <typename T, typename E>
DI T atomicMinBits(T* address, T val)
{
  E old = atomicMin((E*)address, encode(val));
  return decode(old);
}
template <typename T, typename E>
RAFT_KERNEL decodeKernel(T* globalmin, T* globalmax, int ncols)
{
  // one thread per column: convert the encoded integer min/max results back
  // to floating point, in place
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < ncols) {
    globalmin[tid] = decode(*(E*)&globalmin[tid]);
    globalmax[tid] = decode(*(E*)&globalmax[tid]);
  }
}
///@todo: implement a proper "fill" kernel
template <typename T, typename E>
RAFT_KERNEL minmaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val)
{
  // one thread per column: seed each column with encoded sentinels
  // (+init_val for the running min, -init_val for the running max) so the
  // subsequent encoded atomicMin/Max updates work correctly
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= ncols) return;
  *(E*)&globalmin[tid] = encode(init_val);
  *(E*)&globalmax[tid] = encode(-init_val);
}
template <typename T, typename E>
RAFT_KERNEL minmaxKernel(const T* data,
                         const unsigned int* rowids,
                         const unsigned int* colids,
                         int nrows,
                         int ncols,
                         int row_stride,
                         T* g_min,
                         T* g_max,
                         T* sampledcols,
                         T init_min_val,
                         int batch_ncols,
                         int num_batches)
{
  // Column-wise min/max of a col-major matrix, processed in column batches so
  // the shared-memory footprint (2 * batch_ncols values of T) fits per block.
  // g_min/g_max must hold encoded sentinels (see minmaxInitKernel); results
  // stay encoded and are converted back by decodeKernel.
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  extern __shared__ char shmem[];
  T* s_min = (T*)shmem;
  T* s_max = (T*)(shmem + sizeof(T) * batch_ncols);
  // the last batch may cover fewer than batch_ncols columns
  int last_batch_ncols = ncols % batch_ncols;
  if (last_batch_ncols == 0) { last_batch_ncols = batch_ncols; }
  int orig_batch_ncols = batch_ncols;
  for (int batch_id = 0; batch_id < num_batches; batch_id++) {
    if (batch_id == num_batches - 1) { batch_ncols = last_batch_ncols; }
    // reset this batch's shared-memory sentinels
    for (int i = threadIdx.x; i < batch_ncols; i += blockDim.x) {
      *(E*)&s_min[i] = encode(init_min_val);
      *(E*)&s_max[i] = encode(-init_min_val);
    }
    __syncthreads();
    // grid-stride over all (row, column-in-batch) elements of this batch
    for (int i = tid; i < nrows * batch_ncols; i += blockDim.x * gridDim.x) {
      int col = (batch_id * orig_batch_ncols) + (i / nrows);
      int row = i % nrows;
      if (colids != nullptr) { col = colids[col]; }
      if (rowids != nullptr) { row = rowids[row]; }
      int index = row + col * row_stride;
      T coldata = data[index];
      // NaNs are skipped so they never poison the min/max
      if (!isnan(coldata)) {
        // Min max values are saved in shared memory and global memory as per the shuffled colids.
        atomicMinBits<T, E>(&s_min[(int)(i / nrows)], coldata);
        atomicMaxBits<T, E>(&s_max[(int)(i / nrows)], coldata);
      }
      // NOTE(review): this index mixes a column-granularity batch offset with
      // the flat element index i; it looks like it should be
      // (batch_id * orig_batch_ncols * nrows + i) -- confirm against callers
      // that pass a non-null sampledcols
      if (sampledcols != nullptr) { sampledcols[batch_id * orig_batch_ncols + i] = coldata; }
    }
    __syncthreads();
    // finally, perform global mem atomics
    for (int j = threadIdx.x; j < batch_ncols; j += blockDim.x) {
      atomicMinBits<T, E>(&g_min[batch_id * orig_batch_ncols + j], decode(*(E*)&s_min[j]));
      atomicMaxBits<T, E>(&g_max[batch_id * orig_batch_ncols + j], decode(*(E*)&s_max[j]));
    }
    __syncthreads();
  }
}
/**
* @brief Computes min/max across every column of the input matrix, as well as
* optionally allow to subsample based on the given row/col ID mapping vectors
*
* @tparam T the data type
* @tparam TPB number of threads per block
* @param data input data
* @param rowids actual row ID mappings. It is of length nrows. If you want to
* skip this index lookup entirely, pass nullptr
* @param colids actual col ID mappings. It is of length ncols. If you want to
* skip this index lookup entirely, pass nullptr
* @param nrows number of rows of data to be worked upon. The actual rows of the
* input "data" can be bigger than this!
* @param ncols number of cols of data to be worked upon. The actual cols of the
* input "data" can be bigger than this!
* @param row_stride stride (in number of elements) between 2 adjacent columns
* @param globalmin final col-wise global minimum (size = ncols)
* @param globalmax final col-wise global maximum (size = ncols)
* @param sampledcols output sampled data. Pass nullptr if you don't need this
* @param stream cuda stream
* @note This method makes the following assumptions:
* 1. input and output matrices are assumed to be col-major
* 2. ncols is small enough to fit the whole of min/max values across all cols
* in shared memory
*/
template <typename T, int TPB = 512>
void minmax(const T* data,
            const unsigned* rowids,
            const unsigned* colids,
            int nrows,
            int ncols,
            int row_stride,
            T* globalmin,
            T* globalmax,
            T* sampledcols,
            cudaStream_t stream)
{
  using E = typename encode_traits<T>::E;
  int nblks = raft::ceildiv(ncols, TPB);
  T init_val = std::numeric_limits<T>::max();
  // seed the outputs with encoded +init_val / -init_val sentinels
  minmaxInitKernel<T, E><<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  nblks = raft::ceildiv(nrows * ncols, TPB);
  nblks = min(nblks, 65536);
  // Compute the batch_ncols, in [1, ncols] range, that meet the available
  // shared memory constraints (one min and one max value of T per column).
  auto smemPerBlk = raft::getSharedMemPerBlock();
  int batch_ncols = min(ncols, (int)(smemPerBlk / (sizeof(T) * 2)));
  int num_batches = raft::ceildiv(ncols, batch_ncols);
  // fix: smemSize is computed once from the batched column count; the previous
  // initialization from `ncols` was a dead store, always overwritten here
  size_t smemSize = sizeof(T) * 2 * batch_ncols;
  minmaxKernel<T, E><<<nblks, TPB, smemSize, stream>>>(data,
                                                       rowids,
                                                       colids,
                                                       nrows,
                                                       ncols,
                                                       row_stride,
                                                       globalmin,
                                                       globalmax,
                                                       sampledcols,
                                                       init_val,
                                                       batch_ncols,
                                                       num_batches);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // convert the encoded integer results back to T
  decodeKernel<T, E><<<nblks, TPB, 0, stream>>>(globalmin, globalmax, ncols);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace stats
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/scores.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <raft/distance/distance.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/power.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#define N_THREADS 512
namespace raft {
namespace stats {
namespace detail {
/**
* Calculates the "Coefficient of Determination" (R-Squared) score
* normalizing the sum of squared errors by the total sum of squares.
*
* This score indicates the proportionate amount of variation in an
* expected response variable is explained by the independent variables
* in a linear regression model. The larger the R-squared value, the
* more variability is explained by the linear regression model.
*
* @param y: Array of ground-truth response variables
* @param y_hat: Array of predicted response variables
* @param n: Number of elements in y and y_hat
* @param stream: cuda stream
* @return: The R-squared value.
*/
template <typename math_t>
math_t r2_score(math_t* y, math_t* y_hat, int n, cudaStream_t stream)
{
  // y_bar = mean of the ground-truth responses
  rmm::device_scalar<math_t> y_bar(stream);
  raft::stats::mean(y_bar.data(), y, 1, n, false, false, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // sse_arr[i] = (y[i] - y_hat[i])^2 -- squared-error terms
  rmm::device_uvector<math_t> sse_arr(n, stream);
  raft::linalg::eltwiseSub(sse_arr.data(), y, y_hat, n, stream);
  raft::linalg::powerScalar(sse_arr.data(), sse_arr.data(), math_t(2.0), n, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // ssto_arr[i] = (y[i] - y_bar)^2 -- total-sum-of-squares terms
  rmm::device_uvector<math_t> ssto_arr(n, stream);
  raft::linalg::subtractDevScalar(ssto_arr.data(), y, y_bar.data(), n, stream);
  raft::linalg::powerScalar(ssto_arr.data(), ssto_arr.data(), math_t(2.0), n, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  thrust::device_ptr<math_t> d_sse = thrust::device_pointer_cast(sse_arr.data());
  thrust::device_ptr<math_t> d_ssto = thrust::device_pointer_cast(ssto_arr.data());
  // both reductions return to the host, synchronizing on `stream`
  math_t sse = thrust::reduce(thrust::cuda::par.on(stream), d_sse, d_sse + n);
  math_t ssto = thrust::reduce(thrust::cuda::par.on(stream), d_ssto, d_ssto + n);
  // NOTE(review): if all y are identical, ssto == 0 and this divides by zero
  // (inf/nan result) -- confirm callers exclude constant targets
  return 1.0 - sse / ssto;
}
/**
* @brief Compute accuracy of predictions. Useful for classification.
* @tparam math_t: data type for predictions (e.g., int for classification)
* @param[in] predictions: array of predictions (GPU pointer).
* @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
* @param[in] n: number of elements in each of predictions, ref_predictions.
* @param[in] stream: cuda stream.
* @return: Accuracy score in [0, 1]; higher is better.
*/
template <typename math_t>
float accuracy_score(const math_t* predictions,
                     const math_t* ref_predictions,
                     int n,
                     cudaStream_t stream)
{
  unsigned long long correctly_predicted = 0ULL;
  rmm::device_uvector<math_t> diffs_array(n, stream);
  // TODO could write a kernel instead
  // diffs_array[i] = predictions[i] - ref_predictions[i]; a match is exactly 0
  // (exact equality -- assumes integer-like class labels, per the doc above)
  raft::linalg::eltwiseSub(diffs_array.data(), predictions, ref_predictions, n, stream);
  RAFT_CUDA_TRY(cudaGetLastError());
  // count the exact zeros; the result returns to the host, syncing `stream`
  correctly_predicted =
    thrust::count(thrust::cuda::par.on(stream), diffs_array.data(), diffs_array.data() + n, 0);
  float accuracy = correctly_predicted * 1.0f / n;
  return accuracy;
}
template <typename T>
RAFT_KERNEL reg_metrics_kernel(
  const T* predictions, const T* ref_predictions, int n, double* abs_diffs, double* tmp_sums)
{
  // Accumulates the absolute-difference sum (tmp_sums[0]) and squared-
  // difference sum (tmp_sums[1]) over all n elements, and writes each |diff|
  // to abs_diffs for the caller's median computation. tmp_sums must be
  // zero-initialized by the caller.
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  __shared__ double shmem[2]; // {abs_difference_sum, squared difference sum}
  for (int i = threadIdx.x; i < 2; i += blockDim.x) {
    shmem[i] = 0;
  }
  __syncthreads(); // shared accumulators zeroed before any atomics touch them
  // grid-stride over all elements
  for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
    double diff = predictions[i] - ref_predictions[i];
    double abs_diff = abs(diff);
    raft::myAtomicAdd(&shmem[0], abs_diff);
    raft::myAtomicAdd(&shmem[1], diff * diff);
    // update absolute difference in global memory for subsequent abs. median computation
    abs_diffs[i] = abs_diff;
  }
  __syncthreads(); // all block-local contributions folded into shmem
  // Update tmp_sum w/ total abs_difference_sum and squared difference sum.
  for (int i = threadIdx.x; i < 2; i += blockDim.x) {
    raft::myAtomicAdd(&tmp_sums[i], shmem[i]);
  }
}
/**
 * @brief Compute regression metrics mean absolute error, mean squared error, median absolute error
 * @tparam T: data type for predictions (e.g., float or double for regression).
 * @param[in] predictions: array of predictions (GPU pointer).
 * @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
 * @param[in] n: number of elements in each of predictions, ref_predictions. Should be > 0.
 * @param[in] stream: cuda stream.
 * @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
 * ref_predictions[i]|) / n.
 * @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
 * ref_predictions[i])^2) / n.
 * @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
 * ref_predictions[i]| for i in [0, n).
 */
template <typename T>
void regression_metrics(const T* predictions,
                        const T* ref_predictions,
                        int n,
                        cudaStream_t stream,
                        double& mean_abs_error,
                        double& mean_squared_error,
                        double& median_abs_error)
{
  std::vector<double> mean_errors(2);
  std::vector<double> h_sorted_abs_diffs(n);
  int thread_cnt = 256;
  int block_cnt  = raft::ceildiv(n, thread_cnt);

  // NOTE: rmm::device_uvector takes a number of *elements*, not bytes.
  // The previous code sized these buffers with `n * sizeof(double)`,
  // over-allocating by a factor of sizeof(double).
  rmm::device_uvector<double> abs_diffs_array(n, stream);
  rmm::device_uvector<double> sorted_abs_diffs(n, stream);
  rmm::device_uvector<double> tmp_sums(2, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(tmp_sums.data(), 0, 2 * sizeof(double), stream));

  // Accumulate sum(|diff|) and sum(diff^2) into tmp_sums and per-element
  // |diff| into abs_diffs_array for the median computation below.
  reg_metrics_kernel<T><<<block_cnt, thread_cnt, 0, stream>>>(
    predictions, ref_predictions, n, abs_diffs_array.data(), tmp_sums.data());
  RAFT_CUDA_TRY(cudaGetLastError());
  raft::update_host(&mean_errors[0], tmp_sums.data(), 2, stream);
  raft::interruptible::synchronize(stream);

  mean_abs_error     = mean_errors[0] / n;
  mean_squared_error = mean_errors[1] / n;

  // Compute median error: sort the absolute differences and pick the middle.
  // The first CUB call (null temp storage) only queries the workspace size.
  char* temp_storage        = nullptr;
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys((void*)temp_storage,
                                               temp_storage_bytes,
                                               abs_diffs_array.data(),
                                               sorted_abs_diffs.data(),
                                               n,
                                               0,
                                               8 * sizeof(double),
                                               stream));
  rmm::device_uvector<char> temp_storage_v(temp_storage_bytes, stream);
  temp_storage = temp_storage_v.data();
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys((void*)temp_storage,
                                               temp_storage_bytes,
                                               abs_diffs_array.data(),
                                               sorted_abs_diffs.data(),
                                               n,
                                               0,
                                               8 * sizeof(double),
                                               stream));
  raft::update_host(h_sorted_abs_diffs.data(), sorted_abs_diffs.data(), n, stream);
  raft::interruptible::synchronize(stream);

  // Median: middle element for odd n, mean of the two middle elements for even n.
  int middle = n / 2;
  if (n % 2 == 1) {
    median_abs_error = h_sorted_abs_diffs[middle];
  } else {
    median_abs_error = (h_sorted_abs_diffs[middle] + h_sorted_abs_diffs[middle - 1]) / 2;
  }
}
} // namespace detail
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/meanvar.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/reduce.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft::stats::detail {
/**
 * Accumulator for a streaming/parallel mean-and-variance computation.
 * Instances form a commutative monoid under operator+ (see the combine step
 * below), which makes them safe to reduce in any order.
 */
template <typename T>
class mean_var {
 private:
  T w;  // total weight; each lifted value contributes weight 1, so this is the sample count
  T m;  // running (weighted) mean of the accumulated values
  T s;  // sum of squared deviations from the mean (the "M2" term used by var())

 public:
  /** Monoidal neutral. */
  HDI mean_var() : w(0.0), m(0.0), s(0.0) {}

  /** Lift a single value. */
  HDI explicit mean_var(T x) : w(1.0), m(x), s(0.0) {}

  /**
   * Monoidal binary op: combine means and vars of two sets.
   * (associative and commutative)
   */
  friend HDI auto operator+(mean_var<T> a, mean_var<T> const& b) -> mean_var<T>
  {
    a += b;
    return a;
  }

  /**
   * Combine means and vars of two sets.
   *
   * Similar to:
   * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
   */
  HDI auto operator+=(mean_var<T> const& b) & -> mean_var<T>&
  {
    mean_var<T>& a(*this);
    T cw = a.w + b.w;
    // Both sides empty: nothing to combine (also avoids division by zero below).
    if (cw == 0) return a;
    T aw_frac = a.w / cw;
    T bw_frac = b.w / cw;
    a.w = cw;
    T d = a.m - b.m;
    // cw * (d*aw_frac) * (d*bw_frac) == d^2 * a.w * b.w / cw -- the standard
    // cross-term of the parallel variance combination, factored to limit overflow.
    a.s += b.s + cw * (d * aw_frac) * (d * bw_frac);
    a.m = a.m * aw_frac + b.m * bw_frac;
    return a;
  }

  /** Get the computed mean. */
  HDI auto mean() const -> T { return m; }

  /**
   * @brief Get the computed variance.
   *
   * @param [in] sample whether to produce sample variance (divide by `N - 1` instead of `N`).
   * @return variance
   */
  HDI auto var(bool sample) const -> T { return s / max(T(1.0), sample ? w - T(1.0) : w); }

  // Field-wise copy from a volatile location (used when the accumulator lives
  // in lock-protected global memory).
  HDI void load(volatile mean_var<T>* address)
  {
    this->m = address->m;
    this->s = address->s;
    this->w = address->w;
  }

  // Field-wise copy to a volatile location; counterpart of load().
  HDI void store(volatile mean_var<T>* address)
  {
    address->m = this->m;
    address->s = this->s;
    address->w = this->w;
  }
};
/*
NB: current implementation here is not optimal, especially the rowmajor version;
leaving this for further work (perhaps, as a more generic "linewiseReduce").
Vectorized loads/stores could speed things up a lot.
*/
/**
* meanvar kernel - row-major version
*
* Assumptions:
*
* 1. blockDim.x == WarpSize
* 2. Dimension X goes along columns (D)
* 3. Dimension Y goes along rows (N)
*
*
* @tparam T element type
* @tparam I indexing type
* @tparam BlockSize must be equal to blockDim.x * blockDim.y * blockDim.z
* @param data input data
* @param mvs meanvars -- output
* @param locks guards for updating meanvars
* @param len total length of input data (N * D)
* @param D number of columns in the input data.
*/
template <typename T, typename I, int BlockSize>
RAFT_KERNEL __launch_bounds__(BlockSize)
  meanvar_kernel_rowmajor(const T* data, volatile mean_var<T>* mvs, int* locks, I len, I D)
{
  // read the data: each thread accumulates its column's elements with a
  // y-dimension grid-stride loop over rows.
  const I col = threadIdx.x + blockDim.x * blockIdx.x;
  mean_var<T> thread_data;
  if (col < D) {
    const I step = D * blockDim.y * gridDim.y;
    for (I i = col + D * (threadIdx.y + blockDim.y * blockIdx.y); i < len; i += step) {
      thread_data += mean_var<T>(data[i]);
    }
  }
  // aggregate within block: tree-reduce over the y-dimension so that each
  // thread of row y == 0 holds the block-wide accumulator for its column.
  if (blockDim.y > 1) {
    __shared__ uint8_t shm_bytes[BlockSize * sizeof(mean_var<T>)];
    auto shm = (mean_var<T>*)shm_bytes;
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    shm[tid] = thread_data;
    for (int bs = BlockSize >> 1; bs >= blockDim.x; bs = bs >> 1) {
      __syncthreads();
      if (tid < bs) { shm[tid] += shm[tid + bs]; }
    }
    thread_data = shm[tid];
  }
  // aggregate across blocks: one spin-lock per blockIdx.x (i.e. per group of
  // columns); all blocks sharing the same columns serialize their updates of
  // the global accumulators.
  if (threadIdx.y == 0) {
    int* lock = locks + blockIdx.x;
    // lane 0 acquires the lock on behalf of the whole warp
    if (threadIdx.x == 0 && col < D) {
      while (atomicCAS(lock, 0, 1) == 1) {
        __threadfence();
      }
    }
    __syncthreads();
    if (col < D) {
      // fences order the accumulator reads/writes relative to the lock
      __threadfence();
      mean_var<T> global_data;
      global_data.load(mvs + col);
      global_data += thread_data;
      global_data.store(mvs + col);
      __threadfence();
    }
    __syncthreads();
    // release: streaming store of 0 makes the unlock visible without caching
    if (threadIdx.x == 0 && col < D) { __stwt(lock, 0); }
  }
}
// Column-major variant: one block fully reduces one column and writes its
// mean/variance directly to the outputs (no global accumulators needed).
template <typename T, typename I, int BlockSize>
RAFT_KERNEL __launch_bounds__(BlockSize)
  meanvar_kernel_colmajor(T* mean, T* var, const T* data, I D, I N, bool sample)
{
  using BlockReduce = cub::BlockReduce<mean_var<T>, BlockSize>;
  __shared__ typename BlockReduce::TempStorage reduce_storage;

  // blockIdx.x selects the column; columns are contiguous in memory.
  const T* col = data + N * blockIdx.x;

  // Each thread folds a strided slice of the column into a local accumulator.
  mean_var<T> partial;
  for (I row = threadIdx.x; row < N; row += BlockSize) {
    partial += mean_var<T>(col[row]);
  }

  // Combine all per-thread accumulators; result is valid in thread 0 only.
  mean_var<T> total = BlockReduce(reduce_storage).Sum(partial);
  if (threadIdx.x == 0) {
    mean[blockIdx.x] = total.mean();
    var[blockIdx.x]  = total.var(sample);
  }
}
// Unpacks the per-column accumulators produced by the row-major kernel into
// the final mean/variance output arrays (one thread per column).
template <typename T, typename I>
RAFT_KERNEL meanvar_kernel_fill(T* mean, T* var, const mean_var<T>* aggr, I D, bool sample)
{
  const I col = threadIdx.x + blockDim.x * blockIdx.x;
  if (col < D) {
    mean_var<T> acc = aggr[col];
    mean[col] = acc.mean();
    var[col]  = acc.var(sample);
  }
}
/**
 * Host entry point: compute per-column mean and variance of an [N, D] matrix.
 * Row-major input uses a two-pass scheme (lock-guarded global accumulators,
 * then an unpack kernel); column-major input reduces each column in a single
 * block. Checks the stream for errors before returning.
 */
template <typename T, typename I = int, int BlockSize = 256>
void meanvar(
  T* mean, T* var, const T* data, I D, I N, bool sample, bool rowMajor, cudaStream_t stream)
{
  if (rowMajor) {
    static_assert(BlockSize >= WarpSize, "Block size must be not smaller than the warp size.");
    // x-dimension (one warp wide) spans columns, y-dimension spans rows.
    const dim3 bs(WarpSize, BlockSize / WarpSize, 1);
    dim3 gs(raft::ceildiv<decltype(bs.x)>(D, bs.x), raft::ceildiv<decltype(bs.y)>(N, bs.y), 1);

    // Don't create more blocks than necessary to occupy the GPU
    int occupancy;
    RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &occupancy, meanvar_kernel_rowmajor<T, I, BlockSize>, BlockSize, 0));
    gs.y =
      std::min(gs.y, raft::ceildiv<decltype(gs.y)>(occupancy * getMultiProcessorCount(), gs.x));

    // Global memory: one mean_var<T> for each column
    // one lock per all blocks working on the same set of columns
    rmm::device_buffer buf(sizeof(mean_var<T>) * D + sizeof(int) * gs.x, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(buf.data(), 0, buf.size(), stream));
    mean_var<T>* mvs = static_cast<mean_var<T>*>(buf.data());
    // locks live immediately after the D accumulators in the same buffer
    int* locks = static_cast<int*>(static_cast<void*>(mvs + D));

    const uint64_t len = uint64_t(D) * uint64_t(N);
    ASSERT(len <= uint64_t(std::numeric_limits<I>::max()), "N * D does not fit the indexing type");
    // Pass 1: accumulate partial mean/var per column; pass 2: unpack to outputs.
    meanvar_kernel_rowmajor<T, I, BlockSize><<<gs, bs, 0, stream>>>(data, mvs, locks, len, D);
    meanvar_kernel_fill<T, I>
      <<<raft::ceildiv<I>(D, BlockSize), BlockSize, 0, stream>>>(mean, var, mvs, D, sample);
  } else {
    // Column-major: one block per column writes mean/var directly.
    meanvar_kernel_colmajor<T, I, BlockSize>
      <<<D, BlockSize, 0, stream>>>(mean, var, data, D, N, sample);
  }
  RAFT_CHECK_CUDA(stream);
}
}; // namespace raft::stats::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats/detail | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/batched/information_criterion.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/unary_op.cuh>
#include <raft/stats/stats_types.hpp>
#include <cmath>
namespace raft {
namespace stats {
namespace batched {
namespace detail {
/**
* Compute the given type of information criterion
*
* @note: it is safe to do the computation in-place (i.e give same pointer
* as input and output)
*
* @param[out] d_ic Information criterion to be returned for each
* series (device)
* @param[in] d_loglikelihood Log-likelihood for each series (device)
* @param[in] ic_type Type of criterion to compute. See IC_Type
* @param[in] n_params Number of parameters in the model
* @param[in] batch_size Number of series in the batch
* @param[in] n_samples Number of samples in each series
* @param[in] stream CUDA stream
*/
template <typename ScalarT, typename IdxT>
void information_criterion(ScalarT* d_ic,
                           const ScalarT* d_loglikelihood,
                           IC_Type ic_type,
                           IdxT n_params,
                           IdxT batch_size,
                           IdxT n_samples,
                           cudaStream_t stream)
{
  // Model-complexity penalty term, shared by the whole batch.
  const ScalarT k = static_cast<ScalarT>(n_params);   // number of parameters
  const ScalarT m = static_cast<ScalarT>(n_samples);  // number of samples
  ScalarT penalty{};
  switch (ic_type) {
    case AIC: penalty = ScalarT(2.0) * k; break;
    case AICc:
      // AIC plus the small-sample correction term 2k(k+1)/(m-k-1)
      penalty = ScalarT(2.0) * (k + (k * (k + ScalarT(1.0))) / (m - k - ScalarT(1.0)));
      break;
    case BIC: penalty = std::log(m) * k; break;
  }

  /* IC = penalty - 2 * log-likelihood, evaluated element-wise over the batch */
  raft::linalg::unaryOp(
    d_ic,
    d_loglikelihood,
    batch_size,
    [=] __device__(ScalarT loglike) { return penalty - ScalarT(2.0) * loglike; },
    stream);
}
} // namespace detail
} // namespace batched
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/stats/detail | rapidsai_public_repos/raft/cpp/include/raft/stats/detail/batched/silhouette_score.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../silhouette_score.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
namespace raft {
namespace stats {
namespace batched {
namespace detail {
/**
* This kernel initializes matrix b (n_rows * n_labels)
* For each label that the corresponding row is not a part of is initialized as 0
* If the corresponding row is the only sample in its label, again 0
* Only if the there are > 1 samples in the label, row is initialized to max
*/
template <typename value_t, typename value_idx, typename label_idx>
RAFT_KERNEL fill_b_kernel(value_t* b,
                          const label_idx* y,
                          value_idx n_rows,
                          label_idx n_labels,
                          const value_idx* cluster_counts)
{
  // One thread per (sample, label) cell of the n_rows x n_labels matrix b.
  value_idx row = threadIdx.x + blockIdx.x * blockDim.x;
  label_idx col = threadIdx.y + blockIdx.y * blockDim.y;
  if (row >= n_rows || col >= n_labels) { return; }

  auto own_cluster = y[row];
  value_t init     = 0;
  // The sample's own cluster and empty clusters must not win the later
  // min-reduction, so they start at +max -- except when the sample is the
  // only member of its cluster, in which case b is defined to be 0.
  if ((own_cluster == col || cluster_counts[col] == 0) && cluster_counts[own_cluster] != 1) {
    init = std::numeric_limits<value_t>::max();
  }
  b[row * n_labels + col] = init;
}
/**
* This kernel does an elementwise sweep of chunked pairwise distance matrix
* By knowing the offsets of the chunked pairwise distance matrix in the
* global pairwise distance matrix, we are able to calculate
* intermediate values of a and b for the rows and columns present in the
* current chunked pairwise distance matrix.
*/
template <typename value_t, typename value_idx, typename label_idx>
RAFT_KERNEL compute_chunked_a_b_kernel(value_t* a,
                                       value_t* b,
                                       value_idx row_offset,
                                       value_idx col_offset,
                                       const label_idx* y,
                                       label_idx n_labels,
                                       const value_idx* cluster_counts,
                                       const value_t* distances,
                                       value_idx dist_rows,
                                       value_idx dist_cols)
{
  // One thread per element of the chunked distance matrix.
  value_idx row_id = threadIdx.x + blockIdx.x * blockDim.x;
  value_idx col_id = threadIdx.y + blockIdx.y * blockDim.y;
  // these are global offsets of current element
  // in the full pairwise distance matrix
  value_idx pw_row_id = row_id + row_offset;
  value_idx pw_col_id = col_id + col_offset;
  // skip out-of-bounds threads and self-distances (the diagonal)
  if (row_id >= dist_rows || col_id >= dist_cols || pw_row_id == pw_col_id) { return; }
  auto row_cluster = y[pw_row_id];
  // a singleton cluster has silhouette 0 by definition; nothing to accumulate
  if (cluster_counts[row_cluster] == 1) { return; }
  auto col_cluster = y[pw_col_id];
  auto col_cluster_counts = cluster_counts[col_cluster];
  if (col_cluster == row_cluster) {
    // same cluster: contribute to a (mean intra-cluster distance); the mean
    // excludes the sample itself, hence the (count - 1) divisor
    atomicAdd(&a[pw_row_id], distances[row_id * dist_cols + col_id] / (col_cluster_counts - 1));
  } else {
    // other cluster: contribute to b (mean distance to that cluster)
    atomicAdd(&b[pw_row_id * n_labels + col_cluster],
              distances[row_id * dist_cols + col_id] / col_cluster_counts);
  }
}
/** Builds a device-side histogram of label occurrences: counts[c] = #samples in cluster c. */
template <typename value_idx, typename label_idx>
rmm::device_uvector<value_idx> get_cluster_counts(raft::resources const& handle,
                                                  const label_idx* y,
                                                  value_idx& n_rows,
                                                  label_idx& n_labels)
{
  auto stream = resource::get_cuda_stream(handle);
  rmm::device_uvector<value_idx> counts(n_labels, stream);
  rmm::device_uvector<char> scratch(1, stream);
  raft::stats::detail::countLabels(y, counts.data(), n_rows, n_labels, scratch, stream);
  return counts;
}
/**
 * Computes the dense (n_left_rows x n_right_rows) distance matrix between a
 * chunk of left rows and a chunk of right rows.
 * NOTE(review): the `stream` parameter is accepted but not used here --
 * pairwise_distance runs on the handle's stream; confirm this is intended.
 */
template <typename value_t, typename value_idx>
rmm::device_uvector<value_t> get_pairwise_distance(raft::resources const& handle,
                                                   const value_t* left_begin,
                                                   const value_t* right_begin,
                                                   value_idx& n_left_rows,
                                                   value_idx& n_right_rows,
                                                   value_idx& n_cols,
                                                   raft::distance::DistanceType metric,
                                                   cudaStream_t stream)
{
  // One distance per (left, right) row pair, row-major layout.
  rmm::device_uvector<value_t> dists(n_left_rows * n_right_rows, stream);
  raft::distance::pairwise_distance(
    handle, left_begin, right_begin, dists.data(), n_left_rows, n_right_rows, n_cols, metric);
  return dists;
}
/** Launches compute_chunked_a_b_kernel over one chunk of the pairwise distance matrix. */
template <typename value_t, typename value_idx, typename label_idx>
void compute_chunked_a_b(raft::resources const& handle,
                         value_t* a,
                         value_t* b,
                         value_idx& row_offset,
                         value_idx& col_offset,
                         const label_idx* y,
                         label_idx& n_labels,
                         const value_idx* cluster_counts,
                         const value_t* distances,
                         value_idx& dist_rows,
                         value_idx& dist_cols,
                         cudaStream_t stream)
{
  // 2D launch: x spans chunk rows, y spans chunk columns, capped at 32x32 threads.
  dim3 blk(std::min(dist_rows, 32), std::min(dist_cols, 32));
  dim3 grd(raft::ceildiv(dist_rows, (value_idx)blk.x), raft::ceildiv(dist_cols, (value_idx)blk.y));
  detail::compute_chunked_a_b_kernel<<<grd, blk, 0, stream>>>(
    a, b, row_offset, col_offset, y, n_labels, cluster_counts, distances, dist_rows, dist_cols);
}
/**
 * Computes the mean silhouette score of a dataset by sweeping the full
 * pairwise distance matrix in chunk x chunk tiles, distributing tiles over
 * the handle's stream pool. `a` holds the mean intra-cluster distance per
 * sample, `b` the per-(sample, cluster) mean distances; the score combines
 * them via SilOp after a row-wise min over b.
 */
template <typename value_t, typename value_idx, typename label_idx>
value_t silhouette_score(
  raft::resources const& handle,
  const value_t* X,
  value_idx n_rows,
  value_idx n_cols,
  const label_idx* y,
  label_idx n_labels,
  value_t* scores,
  value_idx chunk,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  ASSERT(n_labels >= 2 && n_labels <= (n_rows - 1),
         "silhouette Score not defined for the given number of labels!");

  rmm::device_uvector<value_idx> cluster_counts = get_cluster_counts(handle, y, n_rows, n_labels);

  auto stream = resource::get_cuda_stream(handle);
  auto policy = resource::get_thrust_policy(handle);

  auto b_size = n_rows * n_labels;

  value_t *a_ptr, *b_ptr;
  rmm::device_uvector<value_t> a(0, stream);
  rmm::device_uvector<value_t> b(b_size, stream);
  b_ptr = b.data();

  // since a and silhouette score per sample are same size, reusing
  if (scores == nullptr || scores == NULL) {
    a.resize(n_rows, stream);
    a_ptr = a.data();
  } else {
    a_ptr = scores;
  }

  // a starts at 0 (it is accumulated into with atomics); b is pre-filled so
  // that the own/empty clusters cannot win the later min-reduction.
  thrust::fill(policy, a_ptr, a_ptr + n_rows, 0);

  dim3 block_size(std::min(n_rows, 32), std::min(n_labels, 32));
  dim3 grid_size(raft::ceildiv(n_rows, (value_idx)block_size.x),
                 raft::ceildiv(n_labels, (label_idx)block_size.y));
  detail::fill_b_kernel<<<grid_size, block_size, 0, stream>>>(
    b_ptr, y, n_rows, n_labels, cluster_counts.data());

  // Make the pool streams wait for the setup work above before tiles launch.
  resource::wait_stream_pool_on_stream(handle);

  auto n_iters = 0;

  // Tile the (n_rows x n_rows) pairwise distance matrix in chunk x chunk
  // pieces; each tile accumulates its contributions into a and b.
  for (value_idx i = 0; i < n_rows; i += chunk) {
    for (value_idx j = 0; j < n_rows; j += chunk) {
      ++n_iters;

      // NOTE(review): the stream-pool key `i + chunk * j` looks like a
      // round-robin selector; confirm it was not meant to be `n_iters`.
      auto chunk_stream = resource::get_next_usable_stream(handle, i + chunk * j);

      const auto* left_begin  = X + (i * n_cols);
      const auto* right_begin = X + (j * n_cols);

      // Last tiles in each dimension may be smaller than `chunk`.
      auto n_left_rows  = (i + chunk) < n_rows ? chunk : (n_rows - i);
      auto n_right_rows = (j + chunk) < n_rows ? chunk : (n_rows - j);

      rmm::device_uvector<value_t> distances = get_pairwise_distance(
        handle, left_begin, right_begin, n_left_rows, n_right_rows, n_cols, metric, chunk_stream);

      compute_chunked_a_b(handle,
                          a_ptr,
                          b_ptr,
                          i,
                          j,
                          y,
                          n_labels,
                          cluster_counts.data(),
                          distances.data(),
                          n_left_rows,
                          n_right_rows,
                          chunk_stream);
    }
  }

  // All tile kernels must finish before the reductions below.
  resource::sync_stream_pool(handle);

  // calculating row-wise minimum in b
  // this prim only supports int indices for now
  raft::linalg::reduce<value_t, value_t, value_idx, raft::identity_op, raft::min_op>(
    b_ptr,
    b_ptr,
    n_labels,
    n_rows,
    std::numeric_limits<value_t>::max(),
    true,
    true,
    stream,
    false,
    raft::identity_op(),
    raft::min_op());

  // calculating the silhouette score per sample (result overwrites a_ptr)
  raft::linalg::binaryOp<value_t, raft::stats::detail::SilOp<value_t>, value_t, value_idx>(
    a_ptr, a_ptr, b_ptr, n_rows, raft::stats::detail::SilOp<value_t>(), stream);

  // mean over all samples
  return thrust::reduce(policy, a_ptr, a_ptr + n_rows, value_t(0)) / n_rows;
}
} // namespace detail
} // namespace batched
} // namespace stats
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/map_reduce.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MAP_REDUCE_H
#define __MAP_REDUCE_H
#pragma once
#include "detail/map_then_reduce.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
namespace raft::linalg {
/**
 * @brief CUDA version of map and then generic reduction operation
 * @tparam InType data-type of the input array
 * @tparam MapOp the device-lambda performing the actual map operation
 * @tparam ReduceLambda the device-lambda performing the actual reduction
 * @tparam IdxType integer type used for addressing
 * @tparam TPB threads-per-block in the final kernel launched
 * @tparam OutType data-type of the reduced output (defaults to InType)
 * @tparam Args types of additional input arrays
 * @param out the output reduced value (assumed to be a device pointer)
 * @param len number of elements in the input array
 * @param neutral The neutral element of the reduction operation. For example:
 * 0 for sum, 1 for multiply, +Inf for Min, -Inf for Max
 * @param map the device-lambda
 * @param op the reduction device lambda
 * @param stream cuda-stream where to launch this kernel
 * @param in the input array
 * @param args additional input arrays
 */
template <typename InType,
          typename MapOp,
          typename ReduceLambda,
          typename IdxType = std::uint32_t,
          int TPB          = 256,
          typename OutType = InType,
          typename... Args>
void mapReduce(OutType* out,
               size_t len,
               OutType neutral,
               MapOp map,
               ReduceLambda op,
               cudaStream_t stream,
               const InType* in,
               Args... args)
{
  // Thin forwarder to the fused map+reduce implementation.
  detail::mapThenReduceImpl<InType, OutType, IdxType, MapOp, ReduceLambda, TPB, Args...>(
    out, len, neutral, map, op, stream, in, args...);
}
/**
* @defgroup map_reduce Map-Reduce ops
* @{
*/
/**
 * @brief CUDA version of map and then generic reduction operation
 * @tparam InValueType the data-type of the input
 * @tparam MapOp the device-lambda performing the actual map operation
 * @tparam ReduceLambda the device-lambda performing the actual reduction
 * @tparam IndexType the index type
 * @tparam OutValueType the data-type of the output
 * @tparam ScalarIdxType index type of scalar
 * @tparam Args additional parameters
 * @param[in] handle raft::resources
 * @param[in] in the input of type raft::device_vector_view
 * @param[in] neutral The neutral element of the reduction operation. For example:
 * 0 for sum, 1 for multiply, +Inf for Min, -Inf for Max
 * @param[out] out the output reduced value assumed to be a raft::device_scalar_view
 * @param[in] map the fused device-lambda
 * @param[in] op the fused reduction device lambda
 * @param[in] args additional input arrays
 */
template <typename InValueType,
          typename MapOp,
          typename ReduceLambda,
          typename IndexType,
          typename OutValueType,
          typename ScalarIdxType,
          typename... Args>
void map_reduce(raft::resources const& handle,
                raft::device_vector_view<const InValueType, IndexType> in,
                raft::device_scalar_view<OutValueType, ScalarIdxType> out,
                OutValueType neutral,
                MapOp map,
                ReduceLambda op,
                Args... args)
{
  // mdspan adapter: unwraps the views and forwards to the raw-pointer
  // overload with a fixed 256 threads-per-block.
  mapReduce<InValueType, MapOp, ReduceLambda, IndexType, 256, OutValueType, Args...>(
    out.data_handle(),
    in.extent(0),
    neutral,
    map,
    op,
    resource::get_cuda_stream(handle),
    in.data_handle(),
    args...);
}
/** @} */ // end of map_reduce
} // end namespace raft::linalg
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/ternary_op.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __TERNARY_OP_H
#define __TERNARY_OP_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/map.cuh>
namespace raft {
namespace linalg {
/**
 * @brief perform element-wise ternary operation on the input arrays
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam Lambda the device-lambda performing the actual operation
 * @tparam out_t data-type of the output array
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads-per-block; unused here, kept for backward compatibility
 * (the underlying map implementation chooses its own launch configuration)
 * @param out the output array
 * @param in1 the first input array
 * @param in2 the second input array
 * @param in3 the third input array
 * @param len number of elements in the input array
 * @param op the device-lambda
 * @param stream cuda stream where to launch work
 */
template <typename math_t, typename Lambda, typename out_t, typename IdxType = int, int TPB = 256>
void ternaryOp(out_t* out,
               const math_t* in1,
               const math_t* in2,
               const math_t* in3,
               IdxType len,
               Lambda op,
               cudaStream_t stream)
{
  // Delegates to the generic variadic element-wise map kernel.
  return detail::map<false>(stream, out, len, op, in1, in2, in3);
}
/**
* @defgroup ternary_op Element-Wise Ternary Operation
* @{
*/
/**
 * @brief perform element-wise ternary operation on the input arrays
 * @tparam InType Input Type raft::device_mdspan
 * @tparam Lambda the device-lambda performing the actual operation
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle raft::resources
 * @param[in] in1 First input
 * @param[in] in2 Second input
 * @param[in] in3 Third input
 * @param[out] out Output
 * @param[in] op the device-lambda
 * @note Lambda must be a functor with the following signature:
 * `OutType func(const InType& val1, const InType& val2, const InType& val3);`
 */
template <typename InType,
          typename Lambda,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void ternary_op(
  raft::resources const& handle, InType in1, InType in2, InType in3, OutType out, Lambda op)
{
  // mdspan adapter: forwards directly to the variadic raft::linalg::map.
  return map(handle, in1, in2, in3, out, op);
}
/** @} */ // end of group ternary_op
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/unary_op.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __UNARY_OP_H
#define __UNARY_OP_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/map.cuh>
namespace raft {
namespace linalg {
/**
 * @brief perform element-wise unary operation in the input array
 * @tparam InType input data-type
 * @tparam Lambda Device lambda performing the actual operation, with the signature
 * `OutType func(const InType& val);`
 * @tparam OutType output data-type
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads-per-block; unused here, kept for backward compatibility
 * (the underlying map implementation chooses its own launch configuration)
 * @param[out] out Output array [on device], dim = [len]
 * @param[in] in Input array [on device], dim = [len]
 * @param[in] len Number of elements in the input array
 * @param[in] op Device lambda
 * @param[in] stream cuda stream where to launch work
 */
template <typename InType,
          typename Lambda,
          typename IdxType = int,
          typename OutType = InType,
          int TPB          = 256>
void unaryOp(OutType* out, const InType* in, IdxType len, Lambda op, cudaStream_t stream)
{
  // Delegates to the generic element-wise map kernel.
  return detail::map<false>(stream, out, len, op, in);
}
/**
 * @brief Perform an element-wise unary operation into the output array
 *
 * Compared to `unaryOp()`, this method does not do any reads from any inputs
 *
 * @tparam OutType output data-type
 * @tparam Lambda Device lambda performing the actual operation, with the signature
 * `void func(OutType* outLocationOffset, IdxType idx);`
 * where outLocationOffset will be out + idx.
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads-per-block; unused here, kept for backward compatibility
 *
 * @param[out] out Output array [on device], dim = [len]
 * @param[in] len Number of elements in the input array
 * @param[in] op Device lambda
 * @param[in] stream cuda stream where to launch work
 */
template <typename OutType, typename Lambda, typename IdxType = int, int TPB = 256>
void writeOnlyUnaryOp(OutType* out, IdxType len, Lambda op, cudaStream_t stream)
{
  // Adapt the caller's write-by-pointer lambda to the value-returning form
  // used by detail::map; map<true> supplies the element index (no input reads).
  return detail::map<true>(stream, out, len, [op] __device__(IdxType offset) {
    OutType r;
    op(&r, offset);
    return r;
  });
}
/**
* @defgroup unary_op Element-Wise Unary Operations
* @{
*/
/**
 * @brief Perform an element-wise unary operation into the output array
 * @tparam InType Input Type raft::device_mdspan
 * @tparam Lambda Device lambda performing the actual operation, with the signature
 * `out_value_t func(const in_value_t& val);`
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle The raft handle
 * @param[in] in Input
 * @param[out] out Output
 * @param[in] op Device lambda
 */
template <typename InType,
          typename Lambda,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void unary_op(raft::resources const& handle, InType in, OutType out, Lambda op)
{
  // mdspan adapter: forwards directly to raft::linalg::map.
  return map(handle, in, out, op);
}
/**
 * @brief Perform an element-wise unary operation on the input index into the output array
 *
 * @note This operation is deprecated. Please use map_offset in `raft/linalg/map.cuh` instead.
 *
 * @tparam OutType Output Type raft::device_mdspan
 * @tparam Lambda Device lambda performing the actual operation, with the signature
 * `void func(out_value_t* out_location, index_t idx);`
 * @param[in] handle The raft handle
 * @param[out] out Output
 * @param[in] op Device lambda
 */
template <typename OutType,
          typename Lambda,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void write_only_unary_op(const raft::resources& handle, OutType out, Lambda op)
{
  // mdspan adapter: unwraps the view and forwards to the raw-pointer overload.
  return writeOnlyUnaryOp(out.data_handle(), out.size(), op, resource::get_cuda_stream(handle));
}
/** @} */ // end of group unary_op
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/lanczos.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
* DISCLAIMER: this file is deprecated: use lanczos.cuh instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the sparse solvers version instead.")
#include <raft/sparse/solver/lanczos.cuh>
namespace raft::linalg {
// Backwards-compatibility aliases only: these solvers now live in
// raft::sparse::solver (raft/sparse/solver/lanczos.cuh); this header is
// deprecated per the #pragma message above.
using raft::sparse::solver::computeLargestEigenvectors;
using raft::sparse::solver::computeSmallestEigenvectors;
} // namespace raft::linalg
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/axpy.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __AXPY_H
#define __AXPY_H
#pragma once
#include "detail/axpy.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
namespace raft::linalg {
/**
* @brief the wrapper of cublas axpy function
* It computes the following equation: y = alpha * x + y
*
* @tparam T the element type
 * @tparam DevicePointerMode whether the scalar pointer alpha points to device memory
* @param [in] handle raft handle
* @param [in] n number of elements in x and y
* @param [in] alpha host or device scalar
* @param [in] x vector of length n
* @param [in] incx stride between consecutive elements of x
* @param [inout] y vector of length n
* @param [in] incy stride between consecutive elements of y
* @param [in] stream
*/
template <typename T, bool DevicePointerMode = false>
void axpy(raft::resources const& handle,
          const int n,
          const T* alpha,
          const T* x,
          const int incx,
          T* y,
          const int incy,
          cudaStream_t stream)
{
  // Pure delegation to the cuBLAS-backed implementation (see detail/axpy.cuh);
  // DevicePointerMode selects whether `alpha` is read from device or host memory.
  detail::axpy<T, DevicePointerMode>(handle, n, alpha, x, incx, y, incy, stream);
}
/**
* @defgroup axpy axpy routine
* @{
*/
/**
* @brief axpy function
* It computes the following equation: y = alpha * x + y
*
* @param [in] handle raft::resources
* @param [in] alpha raft::device_scalar_view
* @param [in] x Input vector
* @param [inout] y Output vector
*/
template <typename ElementType,
          typename IndexType,
          typename InLayoutPolicy,
          typename OutLayoutPolicy,
          typename ScalarIdxType>
void axpy(raft::resources const& handle,
          raft::device_scalar_view<const ElementType, ScalarIdxType> alpha,
          raft::device_vector_view<const ElementType, IndexType, InLayoutPolicy> x,
          raft::device_vector_view<ElementType, IndexType, OutLayoutPolicy> y)
{
  // y = alpha * x + y. The two vectors must have the same length; their
  // strides may differ and are forwarded independently.
  RAFT_EXPECTS(y.size() == x.size(), "Size mismatch between Output and Input");
  auto stream = resource::get_cuda_stream(handle);
  // `alpha` resides in device memory, hence DevicePointerMode = true.
  axpy<ElementType, true>(handle,
                          y.size(),
                          alpha.data_handle(),
                          x.data_handle(),
                          x.stride(0),
                          y.data_handle(),
                          y.stride(0),
                          stream);
}
/**
* @brief axpy function
* It computes the following equation: y = alpha * x + y
* @param [in] handle raft::resources
* @param [in] alpha raft::device_scalar_view
* @param [in] x Input vector
* @param [inout] y Output vector
*/
template <typename ElementType,
          typename IndexType,
          typename InLayoutPolicy,
          typename OutLayoutPolicy,
          typename ScalarIdxType>
void axpy(raft::resources const& handle,
          raft::host_scalar_view<const ElementType, ScalarIdxType> alpha,
          raft::device_vector_view<const ElementType, IndexType, InLayoutPolicy> x,
          raft::device_vector_view<ElementType, IndexType, OutLayoutPolicy> y)
{
  // y = alpha * x + y. The two vectors must have the same length; their
  // strides may differ and are forwarded independently.
  RAFT_EXPECTS(y.size() == x.size(), "Size mismatch between Output and Input");
  auto stream = resource::get_cuda_stream(handle);
  // `alpha` resides in host memory, hence DevicePointerMode = false.
  axpy<ElementType, false>(handle,
                           y.size(),
                           alpha.data_handle(),
                           x.data_handle(),
                           x.stride(0),
                           y.data_handle(),
                           y.stride(0),
                           stream);
}
/** @} */ // end of group axpy
} // namespace raft::linalg
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/subtract.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SUBTRACT_H
#define __SUBTRACT_H
#pragma once
#include "detail/subtract.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @brief Elementwise scalar subtraction operation on the input buffer
*
* @tparam InT input data-type. Also the data-type upon which the math ops
* will be performed
* @tparam OutT output data-type
* @tparam IdxType Integer type used to for addressing
*
* @param out the output buffer
* @param in the input buffer
* @param scalar the scalar used in the operations
* @param len number of elements in the input buffer
* @param stream cuda stream where to launch work
*/
template <typename InT, typename OutT = InT, typename IdxType = int>
void subtractScalar(OutT* out, const InT* in, InT scalar, IdxType len, cudaStream_t stream)
{
  // Element-wise scalar subtraction (presumably out[i] = in[i] - scalar — see
  // detail/subtract.cuh for the kernel); scalar is passed by value from host.
  detail::subtractScalar(out, in, scalar, len, stream);
}
/**
* @brief Elementwise subtraction operation on the input buffers
* @tparam InT input data-type. Also the data-type upon which the math ops
* will be performed
* @tparam OutT output data-type
* @tparam IdxType Integer type used to for addressing
*
* @param out the output buffer
* @param in1 the first input buffer
* @param in2 the second input buffer
* @param len number of elements in the input buffers
* @param stream cuda stream where to launch work
*/
template <typename InT, typename OutT = InT, typename IdxType = int>
void subtract(OutT* out, const InT* in1, const InT* in2, IdxType len, cudaStream_t stream)
{
  // Element-wise difference of the two buffers (presumably out[i] = in1[i] - in2[i];
  // see detail/subtract.cuh). `out` may alias an input for in-place use — TODO confirm.
  detail::subtract(out, in1, in2, len, stream);
}
/** Subtract single value pointed by singleScalarDev parameter in device memory from inDev[i] and
* write result to outDev[i]
* @tparam math_t data-type upon which the math operation will be performed
* @tparam IdxType Integer type used to for addressing
* @param outDev the output buffer
* @param inDev the input buffer
* @param singleScalarDev pointer to the scalar located in device memory
* @param len number of elements in the input and output buffer
* @param stream cuda stream
* @remark block size has not been tuned
*/
template <typename math_t, typename IdxType = int, int TPB = 256>
void subtractDevScalar(math_t* outDev,
                       const math_t* inDev,
                       const math_t* singleScalarDev,
                       IdxType len,
                       cudaStream_t stream)
{
  // Per the contract above: outDev[i] = inDev[i] - *singleScalarDev, with the
  // scalar dereferenced on the device (no host copy of the scalar is needed).
  detail::subtractDevScalar(outDev, inDev, singleScalarDev, len, stream);
}
/**
* @defgroup sub Subtraction Arithmetic
* @{
*/
/**
* @brief Elementwise subtraction operation on the input buffers
* @tparam InType Input Type raft::device_mdspan
* @tparam OutType Output Type raft::device_mdspan
* @param handle raft::resources
* @param[in] in1 First Input
* @param[in] in2 Second Input
* @param[out] out Output
*/
template <typename InType,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void subtract(raft::resources const& handle, InType in1, InType in2, OutType out)
{
  using in_value_t  = typename InType::value_type;
  using out_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in1), "Input 1 must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in2), "Input 2 must be contiguous");
  RAFT_EXPECTS(out.size() == in1.size() && in1.size() == in2.size(),
               "Size mismatch between Output and Inputs");
  auto stream  = resource::get_cuda_stream(handle);
  const auto n = out.size();
  // Dispatch on the narrowest index type that can address every element.
  if (n <= std::numeric_limits<std::uint32_t>::max()) {
    subtract<in_value_t, out_value_t, std::uint32_t>(out.data_handle(),
                                                     in1.data_handle(),
                                                     in2.data_handle(),
                                                     static_cast<std::uint32_t>(n),
                                                     stream);
  } else {
    subtract<in_value_t, out_value_t, std::uint64_t>(out.data_handle(),
                                                     in1.data_handle(),
                                                     in2.data_handle(),
                                                     static_cast<std::uint64_t>(n),
                                                     stream);
  }
}
/**
* @brief Elementwise subtraction of device scalar to input
* @tparam InType Input Type raft::device_mdspan
* @tparam OutType Output Type raft::device_mdspan
* @tparam ScalarIdxType Index Type of scalar
* @param[in] handle raft::resources
* @param[in] in Input
* @param[out] out Output
* @param[in] scalar raft::device_scalar_view
*/
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void subtract_scalar(
  raft::resources const& handle,
  InType in,
  OutType out,
  raft::device_scalar_view<const typename InType::element_type, ScalarIdxType> scalar)
{
  using in_value_t = typename InType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  // BUG FIX: subtractDevScalar is declared <typename math_t, typename IdxType, int TPB>.
  // The previous code instantiated it as <in_value_t, out_value_t, std::uint32_t>,
  // putting the output value type into the IdxType slot and a *type* into the
  // non-type TPB slot — ill-formed on any instantiation. The device-scalar kernel
  // uses a single value type for input and output, so only the index type is
  // selected here (the output element type must therefore match the input's).
  if (out.size() <= std::numeric_limits<std::uint32_t>::max()) {
    subtractDevScalar<in_value_t, std::uint32_t>(out.data_handle(),
                                                 in.data_handle(),
                                                 scalar.data_handle(),
                                                 static_cast<std::uint32_t>(out.size()),
                                                 resource::get_cuda_stream(handle));
  } else {
    subtractDevScalar<in_value_t, std::uint64_t>(out.data_handle(),
                                                 in.data_handle(),
                                                 scalar.data_handle(),
                                                 static_cast<std::uint64_t>(out.size()),
                                                 resource::get_cuda_stream(handle));
  }
}
/**
* @brief Elementwise subtraction of host scalar to input
* @tparam InType Input Type raft::device_mdspan
* @tparam OutType Output Type raft::device_mdspan
* @tparam ScalarIdxType Index Type of scalar
* @param[in] handle raft::resources
* @param[in] in Input
* @param[out] out Output
* @param[in] scalar raft::host_scalar_view
*/
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void subtract_scalar(
  raft::resources const& handle,
  InType in,
  OutType out,
  raft::host_scalar_view<const typename InType::element_type, ScalarIdxType> scalar)
{
  using in_value_t  = typename InType::value_type;
  using out_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  auto stream  = resource::get_cuda_stream(handle);
  const auto n = out.size();
  // The scalar lives on the host, so it is dereferenced here and forwarded by value.
  if (n <= std::numeric_limits<std::uint32_t>::max()) {
    subtractScalar<in_value_t, out_value_t, std::uint32_t>(out.data_handle(),
                                                           in.data_handle(),
                                                           *scalar.data_handle(),
                                                           static_cast<std::uint32_t>(n),
                                                           stream);
  } else {
    subtractScalar<in_value_t, out_value_t, std::uint64_t>(out.data_handle(),
                                                           in.data_handle(),
                                                           *scalar.data_handle(),
                                                           static_cast<std::uint64_t>(n),
                                                           stream);
  }
}
/** @} */ // end of group subtract
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/reduce.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __REDUCE_H
#define __REDUCE_H
#pragma once
#include "detail/reduce.cuh"
#include "linalg_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @brief Compute reduction of the input matrix along the requested dimension
*
* @tparam InType the data type of the input
* @tparam OutType the data type of the output (as well as the data type for
* which reduction is performed)
* @tparam IdxType data type of the indices of the array
* @tparam MainLambda Unary lambda applied while acculumation (eg: L1 or L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*MainLambda)(InType, IdxType);</pre>
* @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*ReduceLambda)(OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param dots the output reduction vector
* @param data the input matrix
* @param D number of columns
* @param N number of rows
* @param init initial value to use for the reduction
* @param rowMajor input matrix is row-major or not
* @param alongRows whether to reduce along rows or columns
* @param stream cuda stream where to launch work
* @param inplace reduction result added inplace or overwrites old values?
* @param main_op elementwise operation to apply before reduction
* @param reduce_op binary reduction operation
* @param final_op elementwise operation to apply before storing results
*/
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void reduce(OutType* dots,
            const InType* data,
            IdxType D,
            IdxType N,
            OutType init,
            bool rowMajor,
            bool alongRows,
            cudaStream_t stream,
            bool inplace = false,
            MainLambda main_op = raft::identity_op(),
            ReduceLambda reduce_op = raft::add_op(),
            FinalLambda final_op = raft::identity_op())
{
  // Pure delegation: all dispatch (layout x reduction direction) happens in
  // detail::reduce; defaults give a plain sum with identity pre/post transforms.
  detail::reduce<InType, OutType, IdxType>(
    dots, data, D, N, init, rowMajor, alongRows, stream, inplace, main_op, reduce_op, final_op);
}
/**
* @defgroup reduction Reduction Along Requested Dimension
* @{
*/
/**
* @brief Compute reduction of the input matrix along the requested dimension
* This API computes a reduction of a matrix whose underlying storage
* is either row-major or column-major, while allowing the choose the
* dimension for reduction. Depending upon the dimension chosen for
* reduction, the memory accesses may be coalesced or strided.
*
* @tparam InElementType the input data-type of underlying raft::matrix_view
* @tparam LayoutPolicy The layout of Input/Output (row or col major)
* @tparam OutElementType the output data-type of underlying raft::matrix_view and reduction
* @tparam IndexType Integer type used to for addressing
* @tparam MainLambda Unary lambda applied while acculumation (eg: L1 or L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*MainLambda)(InType, IdxType);</pre>
* @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*ReduceLambda)(OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param[in] handle raft::resources
* @param[in] data Input of type raft::device_matrix_view
* @param[out] dots Output of type raft::device_matrix_view
* @param[in] init initial value to use for the reduction
* @param[in] apply whether to reduce along rows or along columns (using raft::linalg::Apply)
* @param[in] main_op fused elementwise operation to apply before reduction
* @param[in] reduce_op fused binary reduction operation
* @param[in] final_op fused elementwise operation to apply before storing results
* @param[in] inplace reduction result added inplace or overwrites old values?
*/
template <typename InElementType,
          typename LayoutPolicy,
          typename OutElementType = InElementType,
          typename IdxType = std::uint32_t,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void reduce(raft::resources const& handle,
            raft::device_matrix_view<const InElementType, IdxType, LayoutPolicy> data,
            raft::device_vector_view<OutElementType, IdxType> dots,
            OutElementType init,
            Apply apply,
            bool inplace = false,
            MainLambda main_op = raft::identity_op(),
            ReduceLambda reduce_op = raft::add_op(),
            FinalLambda final_op = raft::identity_op())
{
  RAFT_EXPECTS(raft::is_row_or_column_major(data), "Input must be contiguous");
  constexpr bool is_row_major   = std::is_same_v<LayoutPolicy, raft::row_major>;
  const bool reduce_along_rows  = (apply == Apply::ALONG_ROWS);
  // Reducing along rows collapses the rows, yielding one value per column
  // (and vice versa), so the output length must match the opposite extent.
  if (reduce_along_rows) {
    RAFT_EXPECTS(static_cast<IdxType>(dots.size()) == data.extent(1),
                 "Output should be equal to number of columns in Input");
  } else {
    RAFT_EXPECTS(static_cast<IdxType>(dots.size()) == data.extent(0),
                 "Output should be equal to number of rows in Input");
  }
  reduce(dots.data_handle(),
         data.data_handle(),
         data.extent(1),
         data.extent(0),
         init,
         is_row_major,
         reduce_along_rows,
         resource::get_cuda_stream(handle),
         inplace,
         main_op,
         reduce_op,
         final_op);
}
/** @} */ // end of group reduction
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/eig.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EIG_H
#define __EIG_H
#pragma once
#include "detail/eig.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
namespace raft {
namespace linalg {
/**
* @brief eig decomp with divide and conquer method for the column-major
* symmetric matrices
* @param handle raft handle
* @param in the input buffer (symmetric matrix that has real eig values and
* vectors.
* @param n_rows: number of rows of the input
* @param n_cols: number of cols of the input
* @param eig_vectors: eigenvectors
* @param eig_vals: eigen values
* @param stream cuda stream
*/
template <typename math_t>
void eigDC(raft::resources const& handle,
           const math_t* in,
           std::size_t n_rows,
           std::size_t n_cols,
           math_t* eig_vectors,
           math_t* eig_vals,
           cudaStream_t stream)
{
  // Pure delegation to the detail divide-and-conquer eigensolver
  // (presumably cuSOLVER syevd-based — see detail/eig.cuh).
  detail::eigDC(handle, in, n_rows, n_cols, eig_vectors, eig_vals, stream);
}
using detail::COPY_INPUT;
using detail::EigVecMemUsage;
using detail::OVERWRITE_INPUT;
/**
* @brief eig sel decomp with divide and conquer method for the column-major
* symmetric matrices
* @param handle raft handle
* @param in the input buffer (symmetric matrix that has real eig values and
* vectors.
* @param n_rows: number of rows of the input
* @param n_cols: number of cols of the input
* @param n_eig_vals: number of eigenvectors to be generated
* @param eig_vectors: eigenvectors
* @param eig_vals: eigen values
* @param memUsage: the memory selection for eig vector output
* @param stream cuda stream
*/
template <typename math_t>
void eigSelDC(raft::resources const& handle,
              math_t* in,
              std::size_t n_rows,
              std::size_t n_cols,
              std::size_t n_eig_vals,
              math_t* eig_vectors,
              math_t* eig_vals,
              EigVecMemUsage memUsage,
              cudaStream_t stream)
{
  // Selective variant: computes only n_eig_vals eigenpairs. Note `in` is
  // non-const — with OVERWRITE_INPUT the input buffer may be clobbered.
  detail::eigSelDC(handle, in, n_rows, n_cols, n_eig_vals, eig_vectors, eig_vals, memUsage, stream);
}
/**
* @brief overloaded function for eig decomp with Jacobi method for the
* column-major symmetric matrices (in parameter)
* @param handle: raft handle
* @param in: input matrix
* @param n_rows: number of rows of the input
* @param n_cols: number of cols of the input
* @param eig_vectors: eigenvectors
* @param eig_vals: eigen values
* @param stream: stream on which this function will be run
* @param tol: error tolerance for the jacobi method. Algorithm stops when the
* error is below tol
* @param sweeps: number of sweeps in the Jacobi algorithm. The more the better
* accuracy.
*/
template <typename math_t>
void eigJacobi(raft::resources const& handle,
               const math_t* in,
               std::size_t n_rows,
               std::size_t n_cols,
               math_t* eig_vectors,
               math_t* eig_vals,
               cudaStream_t stream,
               math_t tol = 1.e-7,
               int sweeps = 15)
{
  // Jacobi-iteration eigensolver; stops when the residual drops below `tol`
  // or after `sweeps` sweeps (see detail/eig.cuh for the implementation).
  detail::eigJacobi(handle, in, n_rows, n_cols, eig_vectors, eig_vals, stream, tol, sweeps);
}
/**
* @defgroup eig Eigen Decomposition Methods
* @{
*/
/**
* @brief eig decomp with divide and conquer method for the column-major
* symmetric matrices
* @tparam ValueType the data-type of input and output
 * @tparam IndexType Integer type used for addressing
* @param handle raft::resources
* @param[in] in input raft::device_matrix_view (symmetric matrix that has real eig values and
* vectors)
* @param[out] eig_vectors: eigenvectors output of type raft::device_matrix_view
* @param[out] eig_vals: eigen values output of type raft::device_vector_view
*/
template <typename ValueType, typename IndexType>
void eig_dc(raft::resources const& handle,
            raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
            raft::device_matrix_view<ValueType, IndexType, raft::col_major> eig_vectors,
            raft::device_vector_view<ValueType, IndexType> eig_vals)
{
  // Shape checks: the eigenvector matrix mirrors the input, and there is one
  // eigenvalue per input column.
  RAFT_EXPECTS(in.size() == eig_vectors.size(), "Size mismatch between Input and Eigen Vectors");
  RAFT_EXPECTS(eig_vals.size() == in.extent(1), "Size mismatch between Input and Eigen Values");
  auto stream = resource::get_cuda_stream(handle);
  eigDC(handle,
        in.data_handle(),
        in.extent(0),
        in.extent(1),
        eig_vectors.data_handle(),
        eig_vals.data_handle(),
        stream);
}
/**
* @brief eig decomp to select top-n eigen values with divide and conquer method
* for the column-major symmetric matrices
* @tparam ValueType the data-type of input and output
 * @tparam IndexType Integer type used for addressing
* @param[in] handle raft::resources
* @param[in] in input raft::device_matrix_view (symmetric matrix that has real eig values and
* vectors)
* @param[out] eig_vectors: eigenvectors output of type raft::device_matrix_view
* @param[out] eig_vals: eigen values output of type raft::device_vector_view
* @param[in] n_eig_vals: number of eigenvectors to be generated
* @param[in] memUsage: the memory selection for eig vector output
*/
template <typename ValueType, typename IndexType>
void eig_dc_selective(raft::resources const& handle,
                      raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
                      raft::device_matrix_view<ValueType, IndexType, raft::col_major> eig_vectors,
                      raft::device_vector_view<ValueType, IndexType> eig_vals,
                      std::size_t n_eig_vals,
                      EigVecMemUsage memUsage)
{
  // Output must hold exactly n_eig_vals eigenpairs (vectors are n_rows long).
  RAFT_EXPECTS(eig_vectors.size() == n_eig_vals * in.extent(0),
               "Size mismatch between Input and Eigen Vectors");
  RAFT_EXPECTS(eig_vals.size() == n_eig_vals, "Size mismatch between Input and Eigen Values");
  auto stream = resource::get_cuda_stream(handle);
  // const_cast matches the legacy eigSelDC signature, which takes a mutable
  // input (it may be overwritten depending on memUsage).
  raft::linalg::eigSelDC(handle,
                         const_cast<ValueType*>(in.data_handle()),
                         in.extent(0),
                         in.extent(1),
                         n_eig_vals,
                         eig_vectors.data_handle(),
                         eig_vals.data_handle(),
                         memUsage,
                         stream);
}
/**
* @brief overloaded function for eig decomp with Jacobi method for the
* column-major symmetric matrices (in parameter)
* @tparam ValueType the data-type of input and output
 * @tparam IndexType Integer type used for addressing
* @param handle raft::resources
* @param[in] in input raft::device_matrix_view (symmetric matrix that has real eig values and
* vectors)
* @param[out] eig_vectors: eigenvectors output of type raft::device_matrix_view
* @param[out] eig_vals: eigen values output of type raft::device_vector_view
* @param[in] tol: error tolerance for the jacobi method. Algorithm stops when the
Frobenius norm of the absolute error is below tol
* @param[in] sweeps: number of sweeps in the Jacobi algorithm. The more the better
* accuracy.
*/
template <typename ValueType, typename IndexType>
void eig_jacobi(raft::resources const& handle,
                raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
                raft::device_matrix_view<ValueType, IndexType, raft::col_major> eig_vectors,
                raft::device_vector_view<ValueType, IndexType> eig_vals,
                ValueType tol = 1.e-7,
                int sweeps = 15)
{
  // Shape checks: the eigenvector matrix mirrors the input, and there is one
  // eigenvalue per input column.
  RAFT_EXPECTS(in.size() == eig_vectors.size(), "Size mismatch between Input and Eigen Vectors");
  RAFT_EXPECTS(eig_vals.size() == in.extent(1), "Size mismatch between Input and Eigen Values");
  auto stream = resource::get_cuda_stream(handle);
  eigJacobi(handle,
            in.data_handle(),
            in.extent(0),
            in.extent(1),
            eig_vectors.data_handle(),
            eig_vals.data_handle(),
            stream,
            tol,
            sweeps);
}
/** @} */ // end of eig
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/reduce_cols_by_key.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __REDUCE_COLS_BY_KEY
#define __REDUCE_COLS_BY_KEY
#pragma once
#include "detail/reduce_cols_by_key.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
/**
* @brief Computes the sum-reduction of matrix columns for each given key
* @tparam T the input data type (as well as the output reduced matrix)
 * @tparam KeyIteratorT iterator (or pointer) type over the keys
* @tparam IdxType indexing arithmetic type
* @param data the input data (dim = nrows x ncols). This is assumed to be in
* row-major layout
* @param keys keys array (len = ncols). It is assumed that each key in this
* array is between [0, nkeys). In case this is not true, the caller is expected
* to have called make_monotonic primitive to prepare such a contiguous and
* monotonically increasing keys array.
* @param out the output reduced matrix along columns (dim = nrows x nkeys).
* This will be assumed to be in row-major layout
* @param nrows number of rows in the input data
* @param ncols number of columns in the input data
* @param nkeys number of unique keys in the keys array
* @param stream cuda stream to launch the kernel onto
* @param reset_sums Whether to reset the output sums to zero before reducing
*/
template <typename T, typename KeyIteratorT, typename IdxType = int>
void reduce_cols_by_key(const T* data,
                        const KeyIteratorT keys,
                        T* out,
                        IdxType nrows,
                        IdxType ncols,
                        IdxType nkeys,
                        cudaStream_t stream,
                        bool reset_sums = true)
{
  // Pure delegation; per the contract above, keys[j] must lie in [0, nkeys)
  // and both matrices are row-major.
  detail::reduce_cols_by_key(data, keys, out, nrows, ncols, nkeys, stream, reset_sums);
}
/**
* @defgroup reduce_cols_by_key Reduce Across Columns by Key
* @{
*/
/**
* @brief Computes the sum-reduction of matrix columns for each given key
* TODO: Support generic reduction lambdas https://github.com/rapidsai/raft/issues/860
* @tparam ElementType the input data type (as well as the output reduced matrix)
* @tparam KeyType data type of the keys
* @tparam IndexType indexing arithmetic type
* @param[in] handle raft::resources
* @param[in] data the input data (dim = nrows x ncols). This is assumed to be in
* row-major layout of type raft::device_matrix_view
* @param[in] keys keys raft::device_vector_view (len = ncols). It is assumed that each key in this
* array is between [0, nkeys). In case this is not true, the caller is expected
* to have called make_monotonic primitive to prepare such a contiguous and
* monotonically increasing keys array.
* @param[out] out the output reduced raft::device_matrix_view along columns (dim = nrows x nkeys).
* This will be assumed to be in row-major layout
* @param[in] nkeys Number of unique keys in the keys array. By default, inferred from the number of
* columns of out
* @param[in] reset_sums Whether to reset the output sums to zero before reducing
*/
template <typename ElementType, typename KeyType = ElementType, typename IndexType = std::uint32_t>
void reduce_cols_by_key(
  raft::resources const& handle,
  raft::device_matrix_view<const ElementType, IndexType, raft::row_major> data,
  raft::device_vector_view<const KeyType, IndexType> keys,
  raft::device_matrix_view<ElementType, IndexType, raft::row_major> out,
  IndexType nkeys = 0,
  bool reset_sums = true)
{
  // nkeys == 0 (the default) means "infer the key count from the output
  // width"; otherwise the output must have exactly nkeys columns.
  if (nkeys > 0) {
    RAFT_EXPECTS(out.extent(1) == nkeys, "Output doesn't have nkeys columns");
  } else {
    nkeys = out.extent(1);
  }
  RAFT_EXPECTS(out.extent(0) == data.extent(0),
               "Output doesn't have the same number of rows as input");
  RAFT_EXPECTS(keys.extent(0) == data.extent(1), "Keys is not of size ncols");
  auto stream = resource::get_cuda_stream(handle);
  reduce_cols_by_key(data.data_handle(),
                     keys.data_handle(),
                     out.data_handle(),
                     data.extent(0),
                     data.extent(1),
                     nkeys,
                     stream,
                     reset_sums);
}
/** @} */ // end of group reduce_cols_by_key
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/eltwise.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ELTWISE_H
#define __ELTWISE_H
#pragma once
#include "detail/eltwise.cuh"
namespace raft {
namespace linalg {
/**
* @defgroup ScalarOps Scalar operations on the input buffer
* @tparam InType data-type upon which the math operation will be performed
* @tparam IdxType Integer type used for addressing
* @param out the output buffer
* @param in the input buffer
* @param scalar the scalar used in the operations
* @param len number of elements in the input buffer
* @param stream cuda stream where to launch work
* @{
*/
// out[i] = in[i] + scalar for all `len` elements; launched asynchronously on `stream`.
// Thin host-side wrapper that forwards unchanged to the detail implementation.
template <typename InType, typename IdxType, typename OutType = InType>
void scalarAdd(OutType* out, const InType* in, InType scalar, IdxType len, cudaStream_t stream)
{
detail::scalarAdd(out, in, scalar, len, stream);
}
// out[i] = in[i] * scalar for all `len` elements; launched asynchronously on `stream`.
// Thin host-side wrapper that forwards unchanged to the detail implementation.
template <typename InType, typename IdxType, typename OutType = InType>
void scalarMultiply(OutType* out, const InType* in, InType scalar, IdxType len, cudaStream_t stream)
{
detail::scalarMultiply(out, in, scalar, len, stream);
}
/** @} */
/**
* @defgroup BinaryOps Element-wise binary operations on the input buffers
* @tparam InType data-type upon which the math operation will be performed
* @tparam IdxType Integer type used for addressing
* @param out the output buffer
* @param in1 the first input buffer
* @param in2 the second input buffer
* @param len number of elements in the input buffers
* @param stream cuda stream where to launch work
* @{
*/
// Element-wise out[i] = in1[i] + in2[i]; asynchronous on `stream`.
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseAdd(
OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
detail::eltwiseAdd(out, in1, in2, len, stream);
}
// Element-wise out[i] = in1[i] - in2[i]; asynchronous on `stream`.
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseSub(
OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
detail::eltwiseSub(out, in1, in2, len, stream);
}
// Element-wise out[i] = in1[i] * in2[i]; asynchronous on `stream`.
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseMultiply(
OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
detail::eltwiseMultiply(out, in1, in2, len, stream);
}
// Element-wise out[i] = in1[i] / in2[i]; asynchronous on `stream`.
// No zero guard here — see eltwiseDivideCheckZero for the guarded variant.
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseDivide(
OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
detail::eltwiseDivide(out, in1, in2, len, stream);
}
// Element-wise division variant that guards against zeros in `in2`
// (exact zero handling is defined by detail::eltwiseDivideCheckZero).
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseDivideCheckZero(
OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
detail::eltwiseDivideCheckZero(out, in1, in2, len, stream);
}
/** @} */
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/matrix_vector_op.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MATRIX_VECTOR_OP_H
#define __MATRIX_VECTOR_OP_H
#pragma once
#include "detail/matrix_vector_op.cuh"
#include "linalg_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @brief Operations for all the columns or rows with a given vector.
* Caution : Threads process multiple elements to speed up processing. These
* are loaded in a single read thanks to type promotion. Faster processing
* would thus only be enabled when addresses are optimally aligned for it.
* Note : the function will also check that the size of the window of accesses
* is a multiple of the number of elements processed by a thread in order to
* enable faster processing
* @tparam MatT the matrix type
* @tparam Lambda a device function which represents a binary operator
* @tparam VecT the input vector type
* @tparam IdxType Integer type used for addressing
* @param out the output matrix (passing out = matrix makes it in-place)
* @param matrix the input matrix
* @param vec the vector
* @param D number of columns of matrix
* @param N number of rows of matrix
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether the broadcast of vector needs to happen along
* the rows of the matrix or columns
* @param op the mathematical operation
* @param stream cuda stream where to launch work
*/
template <typename MatT, typename Lambda, typename VecT, typename IdxType = int>
void matrixVectorOp(MatT* out,
const MatT* matrix,
const VecT* vec,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
Lambda op,
cudaStream_t stream)
{
// Thin wrapper: forward all arguments unchanged to the detail implementation,
// which performs the vectorized-load kernel launch described above.
detail::matrixVectorOp(out, matrix, vec, D, N, rowMajor, bcastAlongRows, op, stream);
}
/**
* @brief Operations for all the columns or rows with the given vectors.
* Caution : Threads process multiple elements to speed up processing. These
* are loaded in a single read thanks to type promotion. Faster processing
* would thus only be enabled when addresses are optimally aligned for it.
* Note : the function will also check that the size of the window of accesses
* is a multiple of the number of elements processed by a thread in order to
* enable faster processing
* @tparam MatT the matrix type
* @tparam Lambda a device function which represents a binary operator
* @tparam Vec1T the first input vector type
* @tparam Vec2T the second input vector type
* @tparam IdxType Integer type used for addressing
* @param out the output matrix (passing out = matrix makes it in-place)
* @param matrix the input matrix
* @param vec1 the first vector
* @param vec2 the second vector
* @param D number of columns of matrix
* @param N number of rows of matrix
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether the broadcast of vector needs to happen along
* the rows of the matrix or columns
* @param op the mathematical operation
* @param stream cuda stream where to launch work
*/
template <typename MatT, typename Lambda, typename Vec1T, typename Vec2T, typename IdxType = int>
void matrixVectorOp(MatT* out,
const MatT* matrix,
const Vec1T* vec1,
const Vec2T* vec2,
IdxType D,
IdxType N,
bool rowMajor,
bool bcastAlongRows,
Lambda op,
cudaStream_t stream)
{
// Thin wrapper for the two-vector (ternary op) variant: forwards unchanged
// to the detail implementation.
detail::matrixVectorOp(out, matrix, vec1, vec2, D, N, rowMajor, bcastAlongRows, op, stream);
}
/**
* @defgroup matrix_vector_op Matrix Vector Operations
* @{
*/
/**
* @brief Operations for all the columns or rows with a given vector.
* Caution : Threads process multiple elements to speed up processing. These
* are loaded in a single read thanks to type promotion. Faster processing
* would thus only be enabled when addresses are optimally aligned for it.
* Note : the function will also check that the size of the window of accesses
* is a multiple of the number of elements processed by a thread in order to
* enable faster processing
* @tparam MatValueType the data-type of the input matrix
* @tparam VecValueType the data-type of the input vector
* @tparam LayoutPolicy the layout of input and output (raft::row_major or raft::col_major)
* @tparam Lambda a device function which represents a binary operator
* @tparam IndexType Integer type used for addressing
* @param[in] handle raft::resources
* @param[in] matrix input raft::matrix_view
* @param[in] vec vector raft::vector_view
* @param[out] out output raft::matrix_view
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
* @param[in] op the mathematical operation
*/
template <typename MatValueType,
          typename VecValueType,
          typename LayoutPolicy,
          typename Lambda,
          typename IndexType>
void matrix_vector_op(raft::resources const& handle,
                      raft::device_matrix_view<const MatValueType, IndexType, LayoutPolicy> matrix,
                      raft::device_vector_view<const VecValueType, IndexType> vec,
                      raft::device_matrix_view<MatValueType, IndexType, LayoutPolicy> out,
                      Apply apply,
                      Lambda op)
{
  // Bug fix: the two contiguity checks previously had their error messages
  // swapped (`matrix` was reported as "Output" and `out` as "Input"). Each
  // view is now checked against the message that names it, matching the
  // two-vector overload of this function.
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(matrix), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == matrix.size(), "Size mismatch between Output and Input");
  // Both views share LayoutPolicy, so row-majorness is a compile-time fact.
  auto constexpr rowMajor = std::is_same_v<typename decltype(out)::layout_type, raft::row_major>;
  auto bcastAlongRows     = apply == Apply::ALONG_ROWS;
  // The vector must span the broadcast dimension: columns when broadcasting
  // along the rows, rows otherwise.
  if (bcastAlongRows) {
    RAFT_EXPECTS(out.extent(1) == static_cast<IndexType>(vec.size()),
                 "Size mismatch between matrix and vector");
  } else {
    RAFT_EXPECTS(out.extent(0) == static_cast<IndexType>(vec.size()),
                 "Size mismatch between matrix and vector");
  }
  // Dispatch to the raw-pointer API on the handle's CUDA stream.
  matrixVectorOp(out.data_handle(),
                 matrix.data_handle(),
                 vec.data_handle(),
                 out.extent(1),
                 out.extent(0),
                 rowMajor,
                 bcastAlongRows,
                 op,
                 resource::get_cuda_stream(handle));
}
/**
* @brief Operations for all the columns or rows with the given vectors.
* Caution : Threads process multiple elements to speed up processing. These
* are loaded in a single read thanks to type promotion. Faster processing
* would thus only be enabled when addresses are optimally aligned for it.
* Note : the function will also check that the size of the window of accesses
* is a multiple of the number of elements processed by a thread in order to
* enable faster processing
* @tparam MatValueType the data-type of the input and output matrices
* @tparam Vec1ValueType the data-type of the first input vector
* @tparam Vec2ValueType the data-type of the second input vector
* @tparam LayoutPolicy the layout of input and output (raft::row_major or raft::col_major)
* @tparam Lambda a device function which represents a binary operator
* @tparam IndexType Integer type used for addressing
* @param handle raft::resources
* @param matrix input raft::matrix_view
* @param vec1 the first vector raft::vector_view
* @param vec2 the second vector raft::vector_view
* @param out output raft::matrix_view
* @param apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
* @param op the mathematical operation
*/
template <typename MatValueType,
          typename Vec1ValueType,
          typename Vec2ValueType,
          typename LayoutPolicy,
          typename Lambda,
          typename IndexType>
void matrix_vector_op(raft::resources const& handle,
                      raft::device_matrix_view<const MatValueType, IndexType, LayoutPolicy> matrix,
                      raft::device_vector_view<const Vec1ValueType, IndexType> vec1,
                      raft::device_vector_view<const Vec2ValueType, IndexType> vec2,
                      raft::device_matrix_view<MatValueType, IndexType, LayoutPolicy> out,
                      Apply apply,
                      Lambda op)
{
  // Both matrix views must be contiguous and of identical total size.
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(matrix), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == matrix.size(), "Size mismatch between Output and Input");
  // Layout is fixed by LayoutPolicy, so row-majorness is a compile-time constant.
  constexpr bool is_row_major_layout = std::is_same_v<LayoutPolicy, raft::row_major>;
  const bool along_rows              = (apply == Apply::ALONG_ROWS);
  // Both vectors must span the broadcast dimension: columns when broadcasting
  // along the rows, rows otherwise.
  const IndexType bcast_dim = along_rows ? out.extent(1) : out.extent(0);
  RAFT_EXPECTS(bcast_dim == static_cast<IndexType>(vec1.size()),
               "Size mismatch between matrix and vector");
  RAFT_EXPECTS(bcast_dim == static_cast<IndexType>(vec2.size()),
               "Size mismatch between matrix and vector");
  // Dispatch to the raw-pointer API on the handle's CUDA stream.
  matrixVectorOp(out.data_handle(),
                 matrix.data_handle(),
                 vec1.data_handle(),
                 vec2.data_handle(),
                 out.extent(1),
                 out.extent(0),
                 is_row_major_layout,
                 along_rows,
                 op,
                 resource::get_cuda_stream(handle));
}
/** @} */ // end of group matrix_vector_op
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/cublas_macros.h | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use core/cublas_macros.hpp instead.
*/
#pragma once
#include <raft/core/cublas_macros.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/matrix_vector.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/linalg_types.hpp>
#include <raft/matrix/detail/math.cuh>
#include <raft/util/input_validation.hpp>
namespace raft::linalg {
/**
* @defgroup matrix_vector Matrix-Vector Operations
* @{
*/
/**
* @brief multiply each row or column of matrix with vector, skipping zeros in vector
* @param [in] handle: raft handle for managing library resources
* @param[inout] data: input matrix, results are in-place
* @param[in] vec: input vector
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
*/
template <typename math_t, typename idx_t, typename layout_t>
void binary_mult_skip_zero(raft::resources const& handle,
raft::device_matrix_view<math_t, idx_t, layout_t> data,
raft::device_vector_view<const math_t, idx_t> vec,
Apply apply)
{
// Layout is determined at runtime from the mdspan.
bool row_major = raft::is_row_major(data);
auto bcast_along_rows = apply == Apply::ALONG_ROWS;
// The vector must span the broadcast dimension: columns when broadcasting
// along the rows, rows otherwise.
idx_t vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
RAFT_EXPECTS(
vec.extent(0) == vec_size,
"If `bcast_along_rows==true`, vector size must equal number of columns in the matrix."
"If `bcast_along_rows==false`, vector size must equal number of rows in the matrix.");
// In-place: `data` is both input and output of the detail kernel.
matrix::detail::matrixVectorBinaryMultSkipZero(data.data_handle(),
vec.data_handle(),
data.extent(0),
data.extent(1),
row_major,
bcast_along_rows,
resource::get_cuda_stream(handle));
}
/**
* @brief divide each row or column of matrix with vector
* @param[in] handle: raft handle for managing library resources
* @param[inout] data: input matrix, results are in-place
* @param[in] vec: input vector
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
*/
template <typename math_t, typename idx_t, typename layout_t>
void binary_div(raft::resources const& handle,
raft::device_matrix_view<math_t, idx_t, layout_t> data,
raft::device_vector_view<const math_t, idx_t> vec,
Apply apply)
{
// Layout is determined at runtime from the mdspan.
bool row_major = raft::is_row_major(data);
auto bcast_along_rows = apply == Apply::ALONG_ROWS;
// The vector must span the broadcast dimension: columns when broadcasting
// along the rows, rows otherwise.
idx_t vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
RAFT_EXPECTS(
vec.extent(0) == vec_size,
"If `bcast_along_rows==true`, vector size must equal number of columns in the matrix."
"If `bcast_along_rows==false`, vector size must equal number of rows in the matrix.");
// In-place: `data` is both input and output of the detail kernel.
matrix::detail::matrixVectorBinaryDiv(data.data_handle(),
vec.data_handle(),
data.extent(0),
data.extent(1),
row_major,
bcast_along_rows,
resource::get_cuda_stream(handle));
}
/**
* @brief divide each row or column of matrix with vector, skipping zeros in vector
* @param[in] handle: raft handle for managing library resources
* @param[inout] data: input matrix, results are in-place
* @param[in] vec: input vector
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
* @param[in] return_zero: result is zero if true and vector value is below threshold, original
* value if false
*/
template <typename math_t, typename idx_t, typename layout_t>
void binary_div_skip_zero(raft::resources const& handle,
raft::device_matrix_view<math_t, idx_t, layout_t> data,
raft::device_vector_view<const math_t, idx_t> vec,
Apply apply,
bool return_zero = false)
{
// Layout is determined at runtime from the mdspan.
bool row_major = raft::is_row_major(data);
auto bcast_along_rows = apply == Apply::ALONG_ROWS;
// The vector must span the broadcast dimension: columns when broadcasting
// along the rows, rows otherwise.
idx_t vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
RAFT_EXPECTS(
vec.extent(0) == vec_size,
"If `bcast_along_rows==true`, vector size must equal number of columns in the matrix."
"If `bcast_along_rows==false`, vector size must equal number of rows in the matrix.");
// In-place; `return_zero` selects what the kernel writes when the vector
// value is below its threshold (see detail implementation for the threshold).
matrix::detail::matrixVectorBinaryDivSkipZero(data.data_handle(),
vec.data_handle(),
data.extent(0),
data.extent(1),
row_major,
bcast_along_rows,
resource::get_cuda_stream(handle),
return_zero);
}
/**
* @brief add each row or column of matrix with vector
* @param[in] handle: raft handle for managing library resources
* @param[inout] data: input matrix, results are in-place
* @param[in] vec: input vector
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
*/
template <typename math_t, typename idx_t, typename layout_t>
void binary_add(raft::resources const& handle,
                raft::device_matrix_view<math_t, idx_t, layout_t> data,
                raft::device_vector_view<const math_t, idx_t> vec,
                Apply apply)
{
  // Layout is determined at runtime from the mdspan.
  const bool is_row_major = raft::is_row_major(data);
  const bool along_rows   = (apply == Apply::ALONG_ROWS);
  // The vector must span the broadcast dimension: columns when broadcasting
  // along the rows, rows otherwise.
  const idx_t expected_size = along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(
    vec.extent(0) == expected_size,
    "If `bcast_along_rows==true`, vector size must equal number of columns in the matrix."
    "If `bcast_along_rows==false`, vector size must equal number of rows in the matrix.");
  // In-place: `data` is both input and output of the detail kernel.
  matrix::detail::matrixVectorBinaryAdd(data.data_handle(),
                                        vec.data_handle(),
                                        data.extent(0),
                                        data.extent(1),
                                        is_row_major,
                                        along_rows,
                                        resource::get_cuda_stream(handle));
}
/**
* @brief subtract each row or column of matrix with vector
* @param[in] handle: raft handle for managing library resources
* @param[inout] data: input matrix, results are in-place
* @param[in] vec: input vector
* @param[in] apply whether the broadcast of vector needs to happen along
* the rows of the matrix or columns using enum class raft::linalg::Apply
*/
template <typename math_t, typename idx_t, typename layout_t>
void binary_sub(raft::resources const& handle,
raft::device_matrix_view<math_t, idx_t, layout_t> data,
raft::device_vector_view<const math_t, idx_t> vec,
Apply apply)
{
// Layout is determined at runtime from the mdspan.
bool row_major = raft::is_row_major(data);
auto bcast_along_rows = apply == Apply::ALONG_ROWS;
// The vector must span the broadcast dimension: columns when broadcasting
// along the rows, rows otherwise.
idx_t vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
RAFT_EXPECTS(
vec.extent(0) == vec_size,
"If `bcast_along_rows==true`, vector size must equal number of columns in the matrix."
"If `bcast_along_rows==false`, vector size must equal number of rows in the matrix.");
// In-place: `data` is both input and output of the detail kernel.
matrix::detail::matrixVectorBinarySub(data.data_handle(),
vec.data_handle(),
data.extent(0),
data.extent(1),
row_major,
bcast_along_rows,
resource::get_cuda_stream(handle));
}
/** @} */ // end of matrix_vector
} // namespace raft::linalg | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/cusolver_macros.h | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use core/cusolver_macros.hpp instead.
*/
#pragma once
#include <raft/core/cusolver_macros.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/map.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MAP_H
#define __MAP_H
#pragma once
#include "detail/map.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace raft::linalg {
/**
* @brief CUDA version of map
*
* Note: This call is deprecated, please use `map` from the same file.
*
* @tparam InType data-type upon which the math operation will be performed
* @tparam MapOp the device-lambda performing the actual operation
* @tparam TPB threads-per-block in the final kernel launched
* @tparam Args additional parameters
* @tparam OutType data-type in which the result will be stored
* @param out the output of the map operation (assumed to be a device pointer)
* @param len number of elements in the input array
* @param map the device-lambda
* @param stream cuda-stream where to launch this kernel
* @param in the input array
* @param args additional input arrays
*/
template <typename InType,
typename MapOp,
typename IdxType = std::uint32_t,
int TPB = 256,
typename OutType = InType,
typename... Args>
[[deprecated("Use function `map` from the same file")]] void map_k(
OutType* out, IdxType len, MapOp map, cudaStream_t stream, const InType* in, Args... args)
{
// Legacy stream-based entry point. The <false> template flag means `map`
// receives only the input values, not the flat element offset.
return detail::map<false>(stream, out, len, map, in, args...);
}
/**
* @defgroup map Mapping ops
* @{
*/
/**
* @brief Map a function over zero or more input mdspans of the same size.
*
* The algorithm applied on `k` inputs can be described in a following pseudo-code:
* @code
* for (auto i: [0 ... out.size()]) {
* out[i] = f(in_0[i], in_1[i], ..., in_k[i])
* }
* @endcode
*
* _Performance note_: when possible, this function loads the argument arrays and stores the output
* array using vectorized cuda load/store instructions. The size of the vectorization depends on the
* size of the largest input/output element type and on the alignment of all pointers.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/device_mdarray.hpp>
* #include <raft/core/resources.hpp>
* #include <raft/core/operators.hpp>
* #include <raft/linalg/map.cuh>
*
* auto input = raft::make_device_vector<int>(res, n);
* ... fill input ..
* auto squares = raft::make_device_vector<int>(res, n);
* raft::linalg::map(res, squares.view(), raft::sq_op{}, input.view());
* @endcode
*
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
* @tparam InTypes data-types of the inputs (device_mdspan)
*
* @param[in] res raft::resources
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (InTypes::value_type xs...) -> OutType::value_type
* @param[in] ins the inputs (each of the same size as the output) (device_mdspan)
*/
template <typename OutType,
typename Func,
typename... InTypes,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InTypes...>>
void map(const raft::resources& res, OutType out, Func f, InTypes... ins)
{
// <false>: f receives only the input values, not the flat element offset
// (contrast with map_offset below, which passes <true>).
return detail::map<false>(res, out, f, ins...);
}
/**
* @brief Map a function over one mdspan.
*
* @tparam InType1 data-type of the input (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (InType1::value_type x) -> OutType::value_type
*/
template <typename InType1,
typename OutType,
typename Func,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InType1>>
void map(const raft::resources& res, InType1 in1, OutType out, Func f)
{
// Fixed-arity convenience overload: note the input precedes the output here,
// unlike the variadic overload above.
return detail::map<false>(res, out, f, in1);
}
/**
* @brief Map a function over two mdspans.
*
* @tparam InType1 data-type of the input (device_mdspan)
* @tparam InType2 data-type of the input (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input (the same size as the output) (device_mdspan)
* @param[in] in2 the input (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (InType1::value_type x1, InType2::value_type x2) -> OutType::value_type
*/
template <typename InType1,
typename InType2,
typename OutType,
typename Func,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InType1, InType2>>
void map(const raft::resources& res, InType1 in1, InType2 in2, OutType out, Func f)
{
// Two-input convenience overload; inputs precede the output in the signature.
return detail::map<false>(res, out, f, in1, in2);
}
/**
* @brief Map a function over three mdspans.
*
* @tparam InType1 data-type of the input 1 (device_mdspan)
* @tparam InType2 data-type of the input 2 (device_mdspan)
* @tparam InType3 data-type of the input 3 (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input 1 (the same size as the output) (device_mdspan)
* @param[in] in2 the input 2 (the same size as the output) (device_mdspan)
* @param[in] in3 the input 3 (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (InType1::value_type x1, InType2::value_type x2, InType3::value_type x3) -> OutType::value_type
*/
template <typename InType1,
typename InType2,
typename InType3,
typename OutType,
typename Func,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InType1, InType2, InType3>>
void map(const raft::resources& res, InType1 in1, InType2 in2, InType3 in3, OutType out, Func f)
{
// Three-input convenience overload; inputs precede the output in the signature.
return detail::map<false>(res, out, f, in1, in2, in3);
}
/**
* @brief Map a function over zero-based flat index (element offset) and zero or more inputs.
*
* The algorithm applied on `k` inputs can be described in a following pseudo-code:
* @code
* for (auto i: [0 ... out.size()]) {
* out[i] = f(i, in_0[i], in_1[i], ..., in_k[i])
* }
* @endcode
*
* _Performance note_: when possible, this function loads the argument arrays and stores the output
* array using vectorized cuda load/store instructions. The size of the vectorization depends on the
* size of the largest input/output element type and on the alignment of all pointers.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/device_mdarray.hpp>
* #include <raft/core/resources.hpp>
* #include <raft/core/operators.hpp>
* #include <raft/linalg/map.cuh>
*
* auto squares = raft::make_device_vector<int>(res, n);
* raft::linalg::map_offset(res, squares.view(), raft::sq_op{});
* @endcode
*
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
* @tparam InTypes data-types of the inputs (device_mdspan)
*
* @param[in] res raft::resources
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (auto offset, InTypes::value_type xs...) -> OutType::value_type
* @param[in] ins the inputs (each of the same size as the output) (device_mdspan)
*/
template <typename OutType,
typename Func,
typename... InTypes,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InTypes...>>
void map_offset(const raft::resources& res, OutType out, Func f, InTypes... ins)
{
// <true>: f additionally receives the zero-based flat element offset as its
// first argument (contrast with map above, which passes <false>).
return detail::map<true>(res, out, f, ins...);
}
/**
* @brief Map a function over zero-based flat index (element offset) and one mdspan.
*
* @tparam InType1 data-type of the input (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (auto offset, InType1::value_type x) -> OutType::value_type
*/
template <typename InType1,
typename OutType,
typename Func,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InType1>>
void map_offset(const raft::resources& res, InType1 in1, OutType out, Func f)
{
// One-input offset variant; the input precedes the output in the signature.
return detail::map<true>(res, out, f, in1);
}
/**
* @brief Map a function over zero-based flat index (element offset) and two mdspans.
*
* @tparam InType1 data-type of the input (device_mdspan)
* @tparam InType2 data-type of the input (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input (the same size as the output) (device_mdspan)
* @param[in] in2 the input (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (auto offset, InType1::value_type x1, InType2::value_type x2) -> OutType::value_type
*/
template <typename InType1,
typename InType2,
typename OutType,
typename Func,
typename = raft::enable_if_output_device_mdspan<OutType>,
typename = raft::enable_if_input_device_mdspan<InType1, InType2>>
void map_offset(const raft::resources& res, InType1 in1, InType2 in2, OutType out, Func f)
{
// Two-input offset variant; inputs precede the output in the signature.
return detail::map<true>(res, out, f, in1, in2);
}
/**
* @brief Map a function over zero-based flat index (element offset) and three mdspans.
*
* @tparam InType1 data-type of the input 1 (device_mdspan)
* @tparam InType2 data-type of the input 2 (device_mdspan)
* @tparam InType3 data-type of the input 3 (device_mdspan)
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
*
* @param[in] res raft::resources
* @param[in] in1 the input 1 (the same size as the output) (device_mdspan)
* @param[in] in2 the input 2 (the same size as the output) (device_mdspan)
* @param[in] in3 the input 3 (the same size as the output) (device_mdspan)
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda
* (auto offset, InType1::value_type x1, InType2::value_type x2, InType3::value_type x3)
* -> OutType::value_type
*/
template <typename InType1,
          typename InType2,
          typename InType3,
          typename OutType,
          typename Func,
          typename = raft::enable_if_output_device_mdspan<OutType>,
          typename = raft::enable_if_input_device_mdspan<InType1, InType2, InType3>>
void map_offset(
  const raft::resources& res, InType1 in1, InType2 in2, InType3 in3, OutType out, Func f)
{
  // map<true> passes the zero-based flat element offset as the first argument to f,
  // followed by the corresponding elements of in1, in2 and in3.
  return detail::map<true>(res, out, f, in1, in2, in3);
}
/** @} */ // end of map
} // namespace raft::linalg
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/strided_reduction.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __STRIDED_REDUCTION_H
#define __STRIDED_REDUCTION_H
#pragma once
#include "detail/strided_reduction.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <type_traits>
namespace raft {
namespace linalg {
/**
* @brief Compute reduction of the input matrix along the strided dimension
*
* @tparam InType the data type of the input
* @tparam OutType the data type of the output (as well as the data type for
* which reduction is performed)
* @tparam IdxType data type of the indices of the array
 * @tparam MainLambda Unary lambda applied during accumulation (eg: L1 or L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*MainLambda)(InType, IdxType);</pre>
* @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*ReduceLambda)(OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param dots the output reduction vector
* @param data the input matrix
* @param D leading dimension of data
* @param N second dimension data
* @param init initial value to use for the reduction
* @param main_op elementwise operation to apply before reduction
* @param reduce_op binary reduction operation
* @param final_op elementwise operation to apply before storing results
* @param inplace reduction result added inplace or overwrites old values?
* @param stream cuda stream where to launch work
*/
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void stridedReduction(OutType* dots,
                      const InType* data,
                      IdxType D,
                      IdxType N,
                      OutType init,
                      cudaStream_t stream,
                      bool inplace = false,
                      MainLambda main_op = raft::identity_op(),
                      ReduceLambda reduce_op = raft::add_op(),
                      FinalLambda final_op = raft::identity_op())
{
  // The detail implementation relies on atomic reductions that are only provided for a
  // fixed set of output types. Gate the instantiation on that set so instantiating this
  // template with another OutType still compiles (coalescedReduction supports arbitrary
  // types) and reports the problem at run time instead.
  constexpr bool is_atomic_reducible =
    std::is_same_v<OutType, float> || std::is_same_v<OutType, double> ||
    std::is_same_v<OutType, int> || std::is_same_v<OutType, long long> ||
    std::is_same_v<OutType, unsigned long long>;
  if constexpr (is_atomic_reducible) {
    detail::stridedReduction<InType, OutType, IdxType>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else {
    THROW("Unsupported type for stridedReduction: %s", typeid(OutType).name());
  }
}
/**
* @defgroup strided_reduction Strided Memory Access Reductions
* For reducing along rows for row-major and along columns for column-major
* @{
*/
/**
* @brief Compute reduction of the input matrix along the strided dimension
* This API is to be used when the desired reduction is NOT along the dimension
* of the memory layout. For example, a row-major matrix will be reduced
* along the rows whereas a column-major matrix will be reduced along
* the columns.
*
* @tparam InValueType the input data-type of underlying raft::matrix_view
* @tparam LayoutPolicy The layout of Input/Output (row or col major)
* @tparam OutValueType the output data-type of underlying raft::matrix_view and reduction
* @tparam IndexType Integer type used to for addressing
 * @tparam MainLambda Unary lambda applied during accumulation (eg: L1 or L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*MainLambda)(InType, IdxType);</pre>
* @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*ReduceLambda)(OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param[in] handle raft::resources
* @param[in] data Input of type raft::device_matrix_view
 * @param[out] dots Output vector of type raft::device_vector_view
* @param[in] init initial value to use for the reduction
* @param[in] main_op fused elementwise operation to apply before reduction
* @param[in] reduce_op fused binary reduction operation
* @param[in] final_op fused elementwise operation to apply before storing results
* @param[in] inplace reduction result added inplace or overwrites old values?
*/
template <typename InValueType,
          typename LayoutPolicy,
          typename OutValueType,
          typename IndexType,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void strided_reduction(raft::resources const& handle,
                       raft::device_matrix_view<const InValueType, IndexType, LayoutPolicy> data,
                       raft::device_vector_view<OutValueType, IndexType> dots,
                       OutValueType init,
                       bool inplace = false,
                       MainLambda main_op = raft::identity_op(),
                       ReduceLambda reduce_op = raft::add_op(),
                       FinalLambda final_op = raft::identity_op())
{
  // The strided (non-contiguous) dimension is reduced: for row-major input the output
  // has one entry per column, for col-major one entry per row. Note that a LayoutPolicy
  // other than row_major/col_major silently does nothing here.
  if constexpr (std::is_same_v<LayoutPolicy, raft::row_major>) {
    RAFT_EXPECTS(static_cast<IndexType>(dots.size()) == data.extent(1),
                 "Output should be equal to number of columns in Input");
    // D = number of columns (stride-1 dim), N = number of rows being reduced over.
    stridedReduction(dots.data_handle(),
                     data.data_handle(),
                     data.extent(1),
                     data.extent(0),
                     init,
                     resource::get_cuda_stream(handle),
                     inplace,
                     main_op,
                     reduce_op,
                     final_op);
  } else if constexpr (std::is_same_v<LayoutPolicy, raft::col_major>) {
    RAFT_EXPECTS(static_cast<IndexType>(dots.size()) == data.extent(0),
                 "Output should be equal to number of rows in Input");
    // D = number of rows (stride-1 dim), N = number of columns being reduced over.
    stridedReduction(dots.data_handle(),
                     data.data_handle(),
                     data.extent(0),
                     data.extent(1),
                     init,
                     resource::get_cuda_stream(handle),
                     inplace,
                     main_op,
                     reduce_op,
                     final_op);
  }
}
/** @} */ // end of group strided_reduction
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/gemv.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GEMV_H
#define __GEMV_H
#pragma once
#include "detail/gemv.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
 * @brief the wrapper of cublas gemv function
 * It computes the following equation: y = alpha .* op(A) * x + beta .* y
 *
 * @tparam math_t the element type
 * @tparam DevicePointerMode whether pointers alpha, beta point to device memory
 * @param [in] handle raft handle
 * @param [in] trans_a cublas transpose op for A
 * @param [in] m number of rows of A
 * @param [in] n number of columns of A
 * @param [in] alpha host or device scalar
 * @param [in] A column-major matrix of size [m, n]
 * @param [in] lda leading dimension of A
 * @param [in] x vector of length m if trans_a else n (cublas gemv convention)
 * @param [in] incx stride between consecutive elements of x
 * @param [in] beta host or device scalar
 * @param [inout] y vector of length n if trans_a else m
 * @param [in] incy stride between consecutive elements of y
 * @param [in] stream cuda stream on which the work is enqueued
 */
template <typename math_t, bool DevicePointerMode = false>
void gemv(raft::resources const& handle,
          const bool trans_a,
          const int m,
          const int n,
          const math_t* alpha,
          const math_t* A,
          const int lda,
          const math_t* x,
          const int incx,
          const math_t* beta,
          math_t* y,
          const int incy,
          cudaStream_t stream)
{
  detail::gemv<math_t, DevicePointerMode>(
    handle, trans_a, m, n, alpha, A, lda, x, incx, beta, y, incy, stream);
}
/**
 * @brief GEMV: y = alpha * op(A) * x + beta * y
 *
 * @tparam math_t the element type
 * @param handle raft handle
 * @param A column-major matrix of size n_rows x n_cols
 * @param n_rows number of rows of A
 * @param n_cols number of columns of A
 * @param x vector of length `trans_a ? n_rows : n_cols`
 * @param incx stride between consecutive elements of x
 * @param y vector of length `trans_a ? n_cols : n_rows`
 * @param incy stride between consecutive elements of y
 * @param trans_a whether op(A) is the transpose of A
 * @param alpha scalar scale of op(A) * x
 * @param beta scalar scale of y
 * @param stream cuda stream on which this function is run
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows,
          const int n_cols,
          const math_t* x,
          const int incx,
          math_t* y,
          const int incy,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  detail::gemv(handle, A, n_rows, n_cols, x, incx, y, incy, trans_a, alpha, beta, stream);
}
/**
* y = alpha * op(A) * x + beta * y
*
* where
*
* @param handle raft handle
* @param A is a column-major matrix of size n_rows_a * n_cols_a.
* op(A) is either the transpose operation (trans_a == true) or identity.
* @param n_rows_a number of rows in A
* @param n_cols_a number of cols in A
* @param x is a vector of size `trans_a ? n_rows_a : n_cols_a`.
* @param y is a vector of size `trans_a ? n_cols_a : n_rows_a`.
* @param trans_a whether to take transpose of a
* @param alpha is a scalar scale of Ax.
* @param beta is a scalar scale of y.
* @param stream stream on which this function is run
*/
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  // Thin forwarder to the pointer-based implementation (unit strides for x and y).
  detail::gemv(handle, A, n_rows_a, n_cols_a, x, y, trans_a, alpha, beta, stream);
}
/**
* y = op(A) * x
*
* where
*
* @param handle raft handle
* @param A is a column-major matrix of size n_rows_a * n_cols_a.
* op(A) is either the transpose operation (trans_a == true) or identity.
* @param n_rows_a number of rows in A
* @param n_cols_a number of cols in A
* @param x is a vector of size `trans_a ? n_rows_a : n_cols_a`.
* @param y is a vector of size `trans_a ? n_cols_a : n_rows_a`.
* @param trans_a whether to take transpose of a
* @param stream stream on which this function is run
*/
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          cudaStream_t stream)
{
  // Thin forwarder: computes y = op(A) * x (no alpha/beta scaling exposed here).
  detail::gemv(handle, A, n_rows_a, n_cols_a, x, y, trans_a, stream);
}
/**
* y = alpha * op(A) * x + beta * y
*
* where
* @param handle raft handle
* @param A is a column-major matrix of size n_rows_a * n_cols_a.
* op(A) is either the transpose operation (trans_a == true) or identity.
* @param n_rows_a number of rows in A
* @param n_cols_a number of cols in A
* @param lda is the leading dimension of A (number of rows); lda must be not smaller than n_rows_a.
* set it when you need to use only the first n_rows_a rows of the matrix A, which has
* (perhaps, due to padding) lda rows.
* @param x is a vector of size `trans_a ? n_rows_a : n_cols_a`.
* @param y is a vector of size `trans_a ? n_cols_a : n_rows_a`.
* @param trans_a whether to take transpose of a
* @param alpha is a scalar scale of Ax.
* @param beta is a scalar scale of y.
* @param stream stream on which this function is run
*/
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const int lda,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  // Thin forwarder; the explicit lda lets callers operate on the first n_rows_a rows
  // of a (possibly padded) allocation with lda >= n_rows_a.
  detail::gemv(handle, A, n_rows_a, n_cols_a, lda, x, y, trans_a, alpha, beta, stream);
}
/**
* y = op(A) * x
*
* where
* @param handle raft handle
* @param A is a column-major matrix of size n_rows_a * n_cols_a.
* op(A) is either the transpose operation (trans_a == true) or identity.
* @param n_rows_a number of rows in A
* @param n_cols_a number of cols in A
* @param lda is the leading dimension of A (number of rows); lda must be not smaller than n_rows_a.
* set it when you need to use only the first n_rows_a rows of the matrix A, which has
* (perhaps, due to padding) lda rows.
* @param x is a vector of size `trans_a ? n_rows_a : n_cols_a`.
* @param y is a vector of size `trans_a ? n_cols_a : n_rows_a`.
* @param trans_a whether to take transpose of a
* @param stream stream on which this function is run
*
*/
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const int lda,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          cudaStream_t stream)
{
  // Thin forwarder: y = op(A) * x with an explicit leading dimension (lda >= n_rows_a).
  detail::gemv(handle, A, n_rows_a, n_cols_a, lda, x, y, trans_a, stream);
}
/**
* @defgroup gemv Matrix-Vector Multiplication
* @{
*/
/**
* @brief GEMV function designed for raft::col_major layout for A
* It computes y = alpha * op(A) * x + beta * y, where length of y is number
* of rows in A while length of x is number of columns in A
* If layout for A is provided as raft::row_major, then a transpose of A
* is used in the computation, where length of y is number of columns in A
* while length of x is number of rows in A
* If alpha is not provided, it is assumed to be 1.0
* If beta is not provided, it is assumed to be 0.0
 * @tparam ValueType Data type of input/output matrices (float/double)
 * @tparam IndexType Type of index
 * @tparam LayoutPolicy layout of the matrix A (raft::row_major or raft::col_major)
 * @tparam ScalarIdxType Index type of the scalar views for alpha and beta
 * @tparam ScalarViewType Either raft::host_scalar_view or raft::device_scalar_view
 * @param[in] handle raft handle
 * @param[in] A input raft::device_matrix_view of size (M, N)
 * @param[in] x input raft::device_vector_view of length N if A is raft::col_major, else M
 * @param[out] y output raft::device_vector_view of length M if A is raft::col_major, else N
 * @param[in] alpha optional raft::host_scalar_view or raft::device_scalar_view, default 1.0
 * @param[in] beta optional raft::host_scalar_view or raft::device_scalar_view, default 0.0
*/
template <typename ValueType,
          typename IndexType,
          typename LayoutPolicy,
          typename ScalarIdxType = std::uint32_t,
          typename ScalarViewType = raft::host_scalar_view<ValueType, ScalarIdxType>,
          typename = std::enable_if_t<std::disjunction_v<
            std::is_same<ScalarViewType, raft::host_scalar_view<ValueType, ScalarIdxType>>,
            std::is_same<ScalarViewType, raft::device_scalar_view<ValueType, ScalarIdxType>>>>>
void gemv(raft::resources const& handle,
          raft::device_matrix_view<const ValueType, IndexType, LayoutPolicy> A,
          raft::device_vector_view<const ValueType, IndexType> x,
          raft::device_vector_view<ValueType, IndexType> y,
          std::optional<ScalarViewType> alpha = std::nullopt,
          std::optional<ScalarViewType> beta = std::nullopt)
{
  RAFT_EXPECTS(raft::is_row_or_column_major(A), "A is not contiguous");
  // A row-major A is handled by asking the cublas wrapper below to transpose the
  // column-major interpretation of the same buffer (trans = !is_A_col_major).
  constexpr auto is_A_col_major =
    std::is_same_v<typename decltype(A)::layout_type, raft::col_major>;
  if (is_A_col_major) {
    RAFT_EXPECTS(x.extent(0) == A.extent(1),
                 "Number of columns of A and length of x should be equal");
    RAFT_EXPECTS(y.extent(0) == A.extent(0), "Number of rows of A and length of y should be equal");
  } else {
    RAFT_EXPECTS(x.extent(0) == A.extent(0), "Number of rows of A and length of x should be equal");
    RAFT_EXPECTS(y.extent(0) == A.extent(1),
                 "Number of columns of A and length of y should be equal");
  }
  constexpr auto device_mode =
    std::is_same_v<ScalarViewType, raft::device_scalar_view<ValueType, ScalarIdxType>>;
  // Fallback scalars for omitted alpha (1) / beta (0), materialized in the memory
  // space matching ScalarViewType. They must outlive the gemv call below.
  // NOTE(review): both the device- and host-side fallbacks are created unconditionally
  // on every call, even when alpha and beta are both supplied — confirm this is
  // intentional; lazy creation would avoid two device allocations per call.
  ValueType alpha_value = 1;
  ValueType beta_value = 0;
  auto alpha_device = raft::make_device_scalar(handle, alpha_value);
  auto beta_device = raft::make_device_scalar(handle, beta_value);
  auto alpha_host = raft::make_host_scalar(alpha_value);
  auto beta_host = raft::make_host_scalar(beta_value);
  if constexpr (device_mode) {
    if (!alpha) { alpha = alpha_device.view(); }
    if (!beta) { beta = beta_device.view(); }
  } else {
    if (!alpha) { alpha = alpha_host.view(); }
    if (!beta) { beta = beta_host.view(); }
  }
  gemv<ValueType, device_mode>(handle,
                               !is_A_col_major,
                               A.extent(0),
                               A.extent(1),
                               alpha.value().data_handle(),
                               A.data_handle(),
                               A.extent(0),
                               x.data_handle(),
                               1,
                               beta.value().data_handle(),
                               y.data_handle(),
                               1,
                               resource::get_cuda_stream(handle));
}
/** @} */ // end of gemv
}; // namespace linalg
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/norm_types.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
namespace linalg {
/** Enum to tell how to compute a norm */
enum NormType : unsigned short {
  /** L0 (actually not a norm): sum((x_i != 0 ? 1 : 0)) */
  L0PseudoNorm = 0,
  /** L1 norm or Manhattan: sum(abs(x_i)) */
  L1Norm = 1,
  /** L2 norm or Euclidean: sqrt(sum(x_i^2)). Note that in some prims the square root is optional,
      in which case it can be specified using a boolean or a functor final_op */
  L2Norm = 2,
  /** Linf norm or Chebyshev: max(abs(x_i)) */
  // Explicit value for consistency with the other enumerators, so the on-wire/ABI value
  // stays stable even if entries are reordered or inserted later.
  LinfNorm = 3
};
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/multiply.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MULTIPLY_H
#define __MULTIPLY_H
#pragma once
#include "detail/multiply.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @defgroup ScalarOps Scalar operations on the input buffer
* @tparam out_t data-type upon which the math operation will be performed
* @tparam in_t input data-type
* @tparam IdxType Integer type used to for addressing
* @param out the output buffer
* @param in the input buffer
* @param scalar the scalar used in the operations
* @param len number of elements in the input buffer
* @param stream cuda stream where to launch work
* @{
*/
template <typename in_t, typename out_t = in_t, typename IdxType = int>
void multiplyScalar(out_t* out, const in_t* in, in_t scalar, IdxType len, cudaStream_t stream)
{
  // Element-wise out[i] = in[i] * scalar for i in [0, len); forwards to the detail kernel.
  detail::multiplyScalar(out, in, scalar, len, stream);
}
/** @} */
/**
* @defgroup multiply Multiplication Arithmetic
* @{
*/
/**
* @brief Element-wise multiplication of host scalar
* @tparam InType Input Type raft::device_mdspan
* @tparam OutType Output Type raft::device_mdspan
* @tparam ScalarIdxType Index Type of scalar
* @param[in] handle raft::resources
* @param[in] in the input buffer
* @param[out] out the output buffer
* @param[in] scalar the scalar used in the operations
* @{
*/
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void multiply_scalar(
  raft::resources const& handle,
  InType in,
  OutType out,
  raft::host_scalar_view<const typename InType::value_type, ScalarIdxType> scalar)
{
  using in_value_t  = typename InType::value_type;
  using out_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  // Dispatch on the narrowest index type that can address the buffer: use 32-bit
  // indexing when the element count fits, 64-bit otherwise.
  const auto n               = out.size();
  const auto stream          = resource::get_cuda_stream(handle);
  const in_value_t scalar_value = *scalar.data_handle();
  if (n <= std::numeric_limits<std::uint32_t>::max()) {
    multiplyScalar<in_value_t, out_value_t, std::uint32_t>(
      out.data_handle(), in.data_handle(), scalar_value, static_cast<std::uint32_t>(n), stream);
  } else {
    multiplyScalar<in_value_t, out_value_t, std::uint64_t>(
      out.data_handle(), in.data_handle(), scalar_value, static_cast<std::uint64_t>(n), stream);
  }
}
/** @} */ // end of group multiply
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/svd.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SVD_H
#define __SVD_H
#pragma once
#include "detail/svd.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <optional>
namespace raft {
namespace linalg {
/**
 * @brief singular value decomposition (SVD) on the column major
 * input matrix using the QR method
 * @tparam T the data type
 * @param handle: raft handle
 * @param in: input matrix (column major, n_rows x n_cols)
 * @param n_rows: number rows of input matrix
 * @param n_cols: number columns of input matrix
 * @param sing_vals: singular values of input matrix
 * @param left_sing_vecs: left singular vectors of input matrix
 * @param right_sing_vecs: right singular vectors of input matrix
 * @param trans_right: transpose right vectors or not
 * @param gen_left_vec: whether to generate the left singular vectors
 * @param gen_right_vec: whether to generate the right singular vectors
 * @param stream cuda stream
 */
template <typename T>
void svdQR(raft::resources const& handle,
           T* in,
           int n_rows,
           int n_cols,
           T* sing_vals,
           T* left_sing_vecs,
           T* right_sing_vecs,
           bool trans_right,
           bool gen_left_vec,
           bool gen_right_vec,
           cudaStream_t stream)
{
  detail::svdQR(handle,
                in,
                n_rows,
                n_cols,
                sing_vals,
                left_sing_vecs,
                right_sing_vecs,
                trans_right,
                gen_left_vec,
                gen_right_vec,
                stream);
}
/**
 * @brief singular value decomposition (SVD) on a column major input matrix
 * via Eigen decomposition (see the mdspan overload `svd_eig` below, which
 * documents that a square symmetric covariance matrix is constructed for the SVD)
 * @tparam math_t the data type
 * @tparam idx_t index type
 * @param handle: raft handle
 * @param in: input matrix (column major, n_rows x n_cols)
 * @param n_rows: number rows of input matrix
 * @param n_cols: number columns of input matrix
 * @param S: singular values of input matrix
 * @param U: left singular vectors output buffer
 * @param V: right singular vectors output buffer
 * @param gen_left_vec: whether to generate the left singular vectors
 * @param stream cuda stream
 */
template <typename math_t, typename idx_t>
void svdEig(raft::resources const& handle,
            math_t* in,
            idx_t n_rows,
            idx_t n_cols,
            math_t* S,
            math_t* U,
            math_t* V,
            bool gen_left_vec,
            cudaStream_t stream)
{
  detail::svdEig(handle, in, n_rows, n_cols, S, U, V, gen_left_vec, stream);
}
/**
* @brief on the column major input matrix using Jacobi method
* @param handle: raft handle
* @param in: input matrix
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param sing_vals: singular values of input matrix
* @param left_sing_vecs: left singular vectors of input matrix
* @param right_sing_vecs: right singular vectors of input matrix
* @param gen_left_vec: generate left eig vector. Not activated.
* @param gen_right_vec: generate right eig vector. Not activated.
* @param tol: error tolerance for the jacobi method. Algorithm stops when the
* error is below tol
* @param max_sweeps: number of sweeps in the Jacobi algorithm. The more the better
* accuracy.
* @param stream cuda stream
*/
template <typename math_t>
void svdJacobi(raft::resources const& handle,
               math_t* in,
               int n_rows,
               int n_cols,
               math_t* sing_vals,
               math_t* left_sing_vecs,
               math_t* right_sing_vecs,
               bool gen_left_vec,
               bool gen_right_vec,
               math_t tol,
               int max_sweeps,
               cudaStream_t stream)
{
  // Forward to the Jacobi-iteration based implementation (stops at tol or max_sweeps).
  detail::svdJacobi(handle,
                    in,
                    n_rows,
                    n_cols,
                    sing_vals,
                    left_sing_vecs,
                    right_sing_vecs,
                    gen_left_vec,
                    gen_right_vec,
                    tol,
                    max_sweeps,
                    stream);
}
/**
* @brief reconstruct a matrix use left and right singular vectors and
* singular values
* @param handle: raft handle
* @param U: left singular vectors of size n_rows x k
* @param S: square matrix with singular values on its diagonal, k x k
* @param V: right singular vectors of size n_cols x k
* @param out: reconstructed matrix to be returned
* @param n_rows: number rows of output matrix
* @param n_cols: number columns of output matrix
* @param k: number of singular values
* @param stream cuda stream
*/
template <typename math_t>
void svdReconstruction(raft::resources const& handle,
                       math_t* U,
                       math_t* S,
                       math_t* V,
                       math_t* out,
                       int n_rows,
                       int n_cols,
                       int k,
                       cudaStream_t stream)
{
  // Forward to the detail implementation: out is rebuilt from U, S and V.
  detail::svdReconstruction(handle, U, S, V, out, n_rows, n_cols, k, stream);
}
/**
 * @brief evaluate an SVD decomposition of A_d (given by U, S_vec and V) by
 * comparing the reconstruction against the input using an L2-norm criterion
 * @param handle: raft handle
 * @param A_d: input matrix
 * @param U: left singular vectors of size n_rows x k
 * @param S_vec: singular values as a vector
 * @param V: right singular vectors of size n_cols x k
 * @param n_rows: number rows of input matrix
 * @param n_cols: number columns of input matrix
 * @param k: number of singular values used in the reconstruction
 * @param tol: tolerance for the evaluation
 * @param stream cuda stream
 * @return the boolean verdict computed by detail::evaluateSVDByL2Norm
 */
template <typename math_t>
bool evaluateSVDByL2Norm(raft::resources const& handle,
                         math_t* A_d,
                         math_t* U,
                         math_t* S_vec,
                         math_t* V,
                         int n_rows,
                         int n_cols,
                         int k,
                         math_t tol,
                         cudaStream_t stream)
{
  return detail::evaluateSVDByL2Norm(handle, A_d, U, S_vec, V, n_rows, n_cols, k, tol, stream);
}
/**
* @defgroup svd Singular Value Decomposition
* @{
*/
/**
* @brief singular value decomposition (SVD) on a column major
* matrix using QR decomposition
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @param[in] handle raft::resources
* @param[in] in input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] sing_vals singular values raft::device_vector_view of shape (K)
* @param[out] U std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major and dimensions (m, n)
* @param[out] V std::optional right singular values of raft::device_matrix_view with
* layout raft::col_major and dimensions (n, n)
*/
template <typename ValueType, typename IndexType>
void svd_qr(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
  raft::device_vector_view<ValueType, IndexType> sing_vals,
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U = std::nullopt,
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> V = std::nullopt)
{
  // Resolve the optional output views into raw pointers for the legacy pointer API;
  // nullptr means "do not generate that set of singular vectors".
  ValueType* u_ptr = nullptr;
  ValueType* v_ptr = nullptr;
  if (U.has_value()) {
    auto u = U.value();
    RAFT_EXPECTS(u.extent(0) == in.extent(0) && u.extent(1) == in.extent(1),
                 "U should have dimensions m * n");
    u_ptr = u.data_handle();
  }
  if (V.has_value()) {
    auto v = V.value();
    RAFT_EXPECTS(v.extent(0) == in.extent(1) && v.extent(1) == in.extent(1),
                 "V should have dimensions n * n");
    v_ptr = v.data_handle();
  }
  svdQR(handle,
        const_cast<ValueType*>(in.data_handle()),
        in.extent(0),
        in.extent(1),
        sing_vals.data_handle(),
        u_ptr,
        v_ptr,
        false,  // do not transpose the right singular vectors
        U.has_value(),
        V.has_value(),
        resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `svd_qr` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `svd_qr`.
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void svd_qr(raft::resources const& handle,
            raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
            raft::device_vector_view<ValueType, IndexType> sing_vals,
            UType&& U_in = std::nullopt,
            VType&& V_in = std::nullopt)
{
  // Normalize the forwarded arguments (possibly std::nullopt) into the concrete
  // std::optional type the main overload expects, then dispatch to it.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> V =
    std::forward<VType>(V_in);
  svd_qr(handle, in, sing_vals, U, V);
}
/**
* @brief singular value decomposition (SVD) on a column major
* matrix using QR decomposition. Right singular vector matrix is transposed before returning
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @param[in] handle raft::resources
* @param[in] in input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] sing_vals singular values raft::device_vector_view of shape (K)
* @param[out] U std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major and dimensions (m, n)
* @param[out] V std::optional right singular values of raft::device_matrix_view with
* layout raft::col_major and dimensions (n, n)
*/
template <typename ValueType, typename IndexType>
void svd_qr_transpose_right_vec(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
  raft::device_vector_view<ValueType, IndexType> sing_vals,
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U = std::nullopt,
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> V = std::nullopt)
{
  // Resolve the optional output views into raw pointers for the legacy pointer API;
  // nullptr means "do not generate that set of singular vectors".
  ValueType* u_ptr = nullptr;
  ValueType* v_ptr = nullptr;
  if (U.has_value()) {
    auto u = U.value();
    RAFT_EXPECTS(u.extent(0) == in.extent(0) && u.extent(1) == in.extent(1),
                 "U should have dimensions m * n");
    u_ptr = u.data_handle();
  }
  if (V.has_value()) {
    auto v = V.value();
    RAFT_EXPECTS(v.extent(0) == in.extent(1) && v.extent(1) == in.extent(1),
                 "V should have dimensions n * n");
    v_ptr = v.data_handle();
  }
  svdQR(handle,
        const_cast<ValueType*>(in.data_handle()),
        in.extent(0),
        in.extent(1),
        sing_vals.data_handle(),
        u_ptr,
        v_ptr,
        true,  // transpose the right singular vectors before returning
        U.has_value(),
        V.has_value(),
        resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `svd_qr_transpose_right_vec` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `svd_qr_transpose_right_vec`.
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void svd_qr_transpose_right_vec(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
  raft::device_vector_view<ValueType, IndexType> sing_vals,
  UType&& U_in = std::nullopt,
  VType&& V_in = std::nullopt)
{
  // Normalize the forwarded arguments (possibly std::nullopt) into the concrete
  // std::optional type the main overload expects, then dispatch to it.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> V =
    std::forward<VType>(V_in);
  svd_qr_transpose_right_vec(handle, in, sing_vals, U, V);
}
/**
 * @brief Singular value decomposition (SVD) on a column-major matrix using
 * Eigen decomposition. A square symmetric covariance matrix is constructed
 * for the SVD.
 *
 * @param[in] handle raft::resources
 * @param[in] in input raft::device_matrix_view with layout raft::col_major of shape (M, N)
 * @param[out] S singular values raft::device_vector_view of shape (K)
 * @param[out] V right singular vectors, raft::device_matrix_view with layout
 * raft::col_major and dimensions (n, n)
 * @param[out] U optional left singular vectors, raft::device_matrix_view with
 * layout raft::col_major and dimensions (m, n)
 */
template <typename ValueType, typename IndexType>
void svd_eig(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
  raft::device_vector_view<ValueType, IndexType> S,
  raft::device_matrix_view<ValueType, IndexType, raft::col_major> V,
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U = std::nullopt)
{
  // Validate shapes before extracting raw pointers (U only when requested).
  if (U) {
    RAFT_EXPECTS(in.extent(0) == U.value().extent(0) && in.extent(1) == U.value().extent(1),
                 "U should have dimensions m * n");
  }
  RAFT_EXPECTS(in.extent(1) == V.extent(0) && in.extent(1) == V.extent(1),
               "V should have dimensions n * n");
  ValueType* u_ptr = U ? U.value().data_handle() : nullptr;
  // The detail API takes a mutable input pointer, hence the const_cast.
  svdEig(handle,
         const_cast<ValueType*>(in.data_handle()),
         in.extent(0),
         in.extent(1),
         S.data_handle(),
         u_ptr,
         V.data_handle(),
         U.has_value(),
         resource::get_cuda_stream(handle));
}
/**
 * @brief Overload of `svd_eig` to help the compiler find the above overload,
 * in case users pass in `std::nullopt` for the optional `U` argument.
 *
 * Please see above for documentation of `svd_eig`.
 */
template <typename ValueType, typename IndexType, typename UType>
void svd_eig(raft::resources const& handle,
             raft::device_matrix_view<const ValueType, IndexType, raft::col_major> in,
             raft::device_vector_view<ValueType, IndexType> S,
             raft::device_matrix_view<ValueType, IndexType, raft::col_major> V,
             UType&& U = std::nullopt)
{
  // Materialize the forwarded argument as the std::optional view type the
  // primary overload expects, then delegate.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> U_optional =
    std::forward<UType>(U);
  svd_eig(handle, in, S, V, U_optional);
}
/**
 * @brief reconstruct a matrix from its left and right singular vectors and
 * singular values, i.e. computes out = U * S * V
 * @param[in] handle raft::resources
 * @param[in] U left singular vectors, raft::device_matrix_view with layout
 * raft::col_major and dimensions (m, k)
 * @param[in] S square matrix with singular values on its diagonal of shape (k, k)
 * @param[in] V right singular vectors, raft::device_matrix_view with layout
 * raft::col_major and dimensions (k, n)
 * @param[out] out output raft::device_matrix_view with layout raft::col_major of shape (m, n)
 */
template <typename ValueType, typename IndexType>
void svd_reconstruction(raft::resources const& handle,
                        raft::device_matrix_view<const ValueType, IndexType, raft::col_major> U,
                        raft::device_matrix_view<const ValueType, IndexType, raft::col_major> S,
                        raft::device_matrix_view<const ValueType, IndexType, raft::col_major> V,
                        raft::device_matrix_view<ValueType, IndexType, raft::col_major> out)
{
  // Shape sanity checks: U is (m, k), S is (k, k), V is (k, n), out is (m, n).
  RAFT_EXPECTS(S.extent(0) == S.extent(1), "S should be a square matrix");
  RAFT_EXPECTS(S.extent(0) == U.extent(1),
               "Number of rows of S should be equal to number of columns in U");
  RAFT_EXPECTS(S.extent(1) == V.extent(0),
               "Number of columns of S should be equal to number of rows in V");
  RAFT_EXPECTS(out.extent(0) == U.extent(0) && out.extent(1) == V.extent(1),
               "Number of rows should be equal in out and U and number of columns should be equal "
               "in out and V");
  // The detail API takes mutable pointers, hence the const_casts; the views
  // are const, so the inputs are presumably treated read-only -- TODO confirm.
  svdReconstruction(handle,
                    const_cast<ValueType*>(U.data_handle()),
                    const_cast<ValueType*>(S.data_handle()),
                    const_cast<ValueType*>(V.data_handle()),
                    out.data_handle(),
                    out.extent(0),
                    out.extent(1),
                    S.extent(0),
                    resource::get_cuda_stream(handle));
}
/** @} */ // end of group svd
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/map_then_reduce.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MAP_THEN_REDUCE_H
#define __MAP_THEN_REDUCE_H
#pragma once
#include "detail/map_then_reduce.cuh"
namespace raft {
namespace linalg {
/**
 * @brief CUDA version of map and then sum reduction operation
 * @tparam InType input data-type upon which the map operation is performed
 * @tparam MapOp the device-lambda performing the actual operation
 * @tparam IdxType integer type used for addressing
 * @tparam TPB threads-per-block in the final kernel launched
 * @tparam Args types of the additional input arrays
 * @tparam OutType output data-type of the sum reduction
 * @param out the output sum-reduced value (assumed to be a device pointer)
 * @param len number of elements in the input array
 * @param map the device-lambda
 * @param stream cuda-stream where to launch this kernel
 * @param in the input array
 * @param args additional input arrays
 */
template <typename InType,
          typename MapOp,
          typename IdxType = std::uint32_t,
          int TPB = 256,
          typename... Args,
          typename OutType = InType>
void mapThenSumReduce(
  OutType* out, IdxType len, MapOp map, cudaStream_t stream, const InType* in, Args... args)
{
  // Sum reduction: neutral element is 0 and the sum_tag selects the sum path
  // in the detail implementation.
  detail::mapThenReduceImpl<InType, OutType, IdxType, MapOp, detail::sum_tag, TPB, Args...>(
    out, len, (OutType)0, map, detail::sum_tag(), stream, in, args...);
}
/**
 * @brief CUDA version of map and then generic reduction operation
 *
 * @deprecated Use `mapReduce` from `raft/linalg/map_reduce.cuh` instead.
 *
 * @tparam InType input data-type upon which the map operation is performed
 * @tparam MapOp the device-lambda performing the actual map operation
 * @tparam ReduceLambda the device-lambda performing the actual reduction
 * @tparam IdxType integer type used by the implementation for addressing
 * @tparam TPB threads-per-block in the final kernel launched
 * @tparam OutType output data-type of the reduction
 * @tparam Args types of the additional input arrays
 * @param out the output reduced value (assumed to be a device pointer)
 * @param len number of elements in the input array
 *            (NOTE(review): taken as size_t here while the implementation is
 *            templated on IdxType -- callers should ensure len fits in IdxType)
 * @param neutral The neutral element of the reduction operation. For example:
 * 0 for sum, 1 for multiply, +Inf for Min, -Inf for Max
 * @param map the device-lambda
 * @param op the reduction device lambda
 * @param stream cuda-stream where to launch this kernel
 * @param in the input array
 * @param args additional input arrays
 */
template <typename InType,
          typename MapOp,
          typename ReduceLambda,
          typename IdxType = std::uint32_t,
          int TPB = 256,
          typename OutType = InType,
          typename... Args>
[[deprecated("Use function `mapReduce` from `raft/linalg/map_reduce.cuh")]] void mapThenReduce(
  OutType* out,
  size_t len,
  OutType neutral,
  MapOp map,
  ReduceLambda op,
  cudaStream_t stream,
  const InType* in,
  Args... args)
{
  // Thin wrapper: forwards everything to the detail implementation.
  detail::mapThenReduceImpl<InType, OutType, IdxType, MapOp, ReduceLambda, TPB, Args...>(
    out, len, neutral, map, op, stream, in, args...);
}
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/contractions.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CONTRACTIONS_H
#define __CONTRACTIONS_H
#pragma once
#include "detail/contractions.cuh"
namespace raft {
namespace linalg {
/**
* @brief This is the central enum that should be used to configure the perf
* landscape of the Contraction kernel.
*
* Main goal of this Policy struct is to provide sufficient knobs to tune the
* perf of Contraction kernel, as and when we see matrices of different shapes.
*
* @tparam DataT the IO and math datatype
* @tparam _veclen number of k-elements loaded by each thread for every LDG call
* it makes. This should be configured based on the input 'k'
* value and the input data type. For eg: if DataT = float and
* k is multiples of 4, then setting this to 4 gives the best
* LDG pattern. Possible values are {1, 2, 4}.
* @tparam _kblk number of k-elements operated upon per main-loop iteration.
* Therefore total number of main-loop iterations will be
* `ceil(k/_kblk)`. This must be multiples of `_veclen`. Do note
* that bigger this value, the greater shared mem requirement.
* @tparam _rpt Defines the number of rows that a given thread accumulates on.
* This directly results in increased register pressure. This
* also is used to compute the number of m-elements worked upon
* by each thread block.
* @tparam _cpt Defines the number of cols that a given thread accumulates on.
* This directly results in increased register pressure. This
* also is used to compute the number of n-elements worked upon
* by each thread block.
* @tparam _tr Number of threads working on the same output column. This is
* used to compute the number of m-elements worked upon by each
* thread block. This also determines the number of threads per
* thread block
* @tparam _tc Number of threads working on the same output row. This is
* used to compute the number of m-elements worked upon by each
* thread block. This also determines the number of threads per
* thread block
*/
template <typename DataT, int _veclen, int _kblk, int _rpt, int _cpt, int _tr, int _tc>
struct KernelPolicy {
  // All quantities below are compile-time constants derived from the policy
  // knobs; see the struct-level documentation above for the meaning of each
  // template parameter.
  enum {
    /** number of elements along K worked upon per main loop iteration */
    Kblk = _kblk,
    /** number of elements loaded per LDG */
    Veclen = _veclen,
    /** number of rows a thread works on for accumulation */
    AccRowsPerTh = _rpt,
    /** number of cols a thread works on for accumulation */
    AccColsPerTh = _cpt,
    /** number of threads working the same output col */
    AccThRows = _tr,
    /** number of threads working the same output row */
    AccThCols = _tc,
    /** total threads per block */
    Nthreads = AccThRows * AccThCols,
    /** output tile size along rows */
    Mblk = AccRowsPerTh * AccThRows,
    /** output tile size along cols */
    Nblk = AccColsPerTh * AccThCols,
    /** number of threads loading a single row */
    LdgThRow = Kblk / Veclen,
    /** number of LDGs issued by a single thread for X */
    LdgPerThX = Mblk * LdgThRow / Nthreads,
    /** number of LDGs issued by a single thread for Y */
    LdgPerThY = Nblk * LdgThRow / Nthreads,
    /** number of rows of X covered per LDG */
    LdgRowsX = Mblk / LdgPerThX,
    /** number of rows of Y covered per LDG */
    LdgRowsY = Nblk / LdgPerThY,
    /** stride for accessing X/Y data in shared mem (padded by Veclen,
     * presumably to avoid shared-memory bank conflicts -- TODO confirm) */
    SmemStride = Kblk + Veclen,
    /** size of one page for storing X data */
    SmemPageX = SmemStride * Mblk,
    /** size of one page for storing Y data */
    SmemPageY = SmemStride * Nblk,
    /** size of one smem page */
    SmemPage = SmemPageX + SmemPageY,
    /** size (in B) for smem needed; the factor of 2 keeps two pages resident
     * (presumably for double-buffered loads) -- TODO confirm in the kernel */
    SmemSize = 2 * SmemPage * sizeof(DataT),
  };  // enum
};    // struct KernelPolicy
template <typename DataT, int _veclen, int _kblk, int _rpt, int _cpt, int _tr, int _tc>
struct ColKernelPolicy {
  // Column-major counterpart of KernelPolicy: the roles of the M and K
  // dimensions are swapped in the load and shared-memory layout below.
  enum {
    /** number of elements along K worked upon per main loop iteration */
    Kblk = _kblk,
    /** number of elements loaded per LDG */
    Veclen = _veclen,
    /** number of rows a thread works on for accumulation */
    AccRowsPerTh = _rpt,
    /** number of cols a thread works on for accumulation */
    AccColsPerTh = _cpt,
    /** number of threads working the same output col */
    AccThRows = _tr,
    /** number of threads working the same output row */
    AccThCols = _tc,
    /** total threads per block */
    Nthreads = AccThRows * AccThCols,
    /** output tile size along rows */
    Mblk = AccRowsPerTh * AccThRows,
    /** output tile size along cols */
    Nblk = AccColsPerTh * AccThCols,
    /** number of threads loading a single col */
    LdgThRow = Mblk / Veclen,
    /** number of LDGs issued by a single thread for X */
    LdgPerThX = Kblk * LdgThRow / Nthreads,
    /** number of LDGs issued by a single thread for Y */
    LdgPerThY = Kblk * LdgThRow / Nthreads,
    /** number of rows of X covered per LDG */
    LdgRowsX = Kblk / LdgPerThX,
    /** number of rows of Y covered per LDG */
    LdgRowsY = Kblk / LdgPerThY,
    /** stride for accessing X/Y data in shared mem (padded by Veclen,
     * presumably to avoid shared-memory bank conflicts -- TODO confirm) */
    SmemStride = Mblk + Veclen,
    /** size of one page for storing X data */
    SmemPageX = SmemStride * Kblk,
    /** size of one page for storing Y data */
    SmemPageY = SmemStride * Kblk,
    /** size of one smem page */
    SmemPage = SmemPageX + SmemPageY,
    /** size (in B) for smem needed */
    SmemSize = 2 * SmemPage * sizeof(DataT),
  };  // colMajor enum
  // The column-major layout requires square output tiles.
  static_assert(Mblk == Nblk, "Mblk should be equal to Nblk");
};
/**
 * @defgroup Policy4x4 16 elements per thread Policy with k-block = 32
 * @{
 */
// Primary template is intentionally empty; only the float/double
// specializations below provide usable policies.
template <typename DataT, int _veclen>
struct Policy4x4 {};
template <int _veclen>
struct Policy4x4<float, _veclen> {
  typedef KernelPolicy<float, _veclen, 32, 4, 4, 16, 16> Policy;
  typedef ColKernelPolicy<float, _veclen, 32, 4, 4, 16, 16> ColPolicy;
};
template <int _veclen>
struct Policy4x4<double, _veclen> {
  // double uses a smaller k-block (16 vs 32 for float), presumably to keep
  // shared-memory usage comparable -- TODO confirm against kernel usage.
  typedef KernelPolicy<double, _veclen, 16, 4, 4, 16, 16> Policy;
  typedef ColKernelPolicy<double, _veclen, 16, 4, 4, 16, 16> ColPolicy;
};
/** @} */
/**
 * A smaller k-block (8 instead of 32) with fewer threads per block (8x8 instead
 * of 16x16), which is faster for raft::distance::fusedL2NN on skinny matrices,
 * i.e., matrices with a small k dimension.
 */
// Primary template is intentionally empty; only the float/double
// specializations below provide usable policies.
template <typename DataT, int _veclen>
struct Policy4x4Skinny {};
template <int _veclen>
struct Policy4x4Skinny<float, _veclen> {
  typedef KernelPolicy<float, _veclen, 8, 4, 4, 8, 8> Policy;
  typedef ColKernelPolicy<float, _veclen, 8, 4, 4, 8, 8> ColPolicy;
};
template <int _veclen>
struct Policy4x4Skinny<double, _veclen> {
  // Same tile shape as the float specialization.
  typedef KernelPolicy<double, _veclen, 8, 4, 4, 8, 8> Policy;
  typedef ColKernelPolicy<double, _veclen, 8, 4, 4, 8, 8> ColPolicy;
};
/**
 * @defgroup Policy2x8 16 elements per thread Policy with k-block = 16
 * @{
 */
// Primary template is intentionally empty; only the float/double
// specializations below provide usable policies.
template <typename DataT, int _veclen = 1>
struct Policy2x8 {};
template <int _veclen>
struct Policy2x8<float, _veclen> {
  typedef KernelPolicy<float, _veclen, 16, 2, 8, 8, 32> Policy;
};
template <int _veclen>
struct Policy2x8<double, _veclen> {
  // This specialization is not actually used; it exists only to keep the
  // compiler happy when the template is instantiated with double.
  typedef KernelPolicy<double, _veclen, 32, 1, 2, 8, 32> Policy;
};
/** @} */
/**
* @brief Base class for gemm-like NT contractions
*
* This class does not provide any arithmetic operations, but only provides the
* memory-related operations of loading the `x` and `y` matrix blocks from the
* global memory into shared memory and then from shared into registers. Thus,
* this class acts as a basic building block for further composing gemm-like NT
* contractions on input matrices which are row-major (and so does the output)
*
* @tparam DataT IO and math data type
* @tparam IdxT indexing type
* @tparam Policy policy used to customize memory access behavior.
* See documentation for `KernelPolicy` to know more.
*/
using detail::Contractions_NT;
} // namespace linalg
} // namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/binary_op.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __BINARY_OP_H
#define __BINARY_OP_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/map.cuh>
namespace raft {
namespace linalg {
/**
 * @brief perform element-wise binary operation on the input arrays
 * @tparam InType input data-type
 * @tparam Lambda the device-lambda performing the actual operation
 * @tparam OutType output data-type
 * @tparam IdxType Integer type used for addressing
 * @tparam TPB threads-per-block in the final kernel launched.
 *         NOTE(review): no longer used by the implementation, which delegates
 *         to the generic `map` primitive; kept for backward compatibility.
 * @param out the output array
 * @param in1 the first input array
 * @param in2 the second input array
 * @param len number of elements in the input array
 * @param op the device-lambda
 * @param stream cuda stream where to launch work
 * @note Lambda must be a functor with the following signature:
 * `OutType func(const InType& val1, const InType& val2);`
 */
template <typename InType,
          typename Lambda,
          typename OutType = InType,
          typename IdxType = int,
          int TPB = 256>
void binaryOp(
  OutType* out, const InType* in1, const InType* in2, IdxType len, Lambda op, cudaStream_t stream)
{
  // Thin wrapper over the generic map primitive (the `false` template
  // argument's meaning is defined in detail/map.cuh).
  return detail::map<false>(stream, out, len, op, in1, in2);
}
/**
* @defgroup binary_op Element-Wise Binary Operation
* @{
*/
/**
 * @brief Element-wise binary transform over two device mdspans.
 * @tparam InType Input Type raft::device_mdspan
 * @tparam Lambda the device-lambda performing the actual operation
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle raft::resources
 * @param[in] in1 First input
 * @param[in] in2 Second input
 * @param[out] out Output
 * @param[in] op the device-lambda; must be a functor with the signature
 * `OutType func(const InType& val1, const InType& val2);`
 */
template <typename InType,
          typename Lambda,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void binary_op(raft::resources const& handle, InType in1, InType in2, OutType out, Lambda op)
{
  // binary_op is a thin alias over the variadic map primitive.
  map(handle, in1, in2, out, op);
}
/** @} */ // end of group binary_op
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/sqrt.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SQRT_H
#define __SQRT_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/unary_op.cuh>
namespace raft {
namespace linalg {
/**
* @defgroup ScalarOps Scalar operations on the input buffer
* @tparam math_t data-type upon which the math operation will be performed
* @tparam IdxType Integer type used to for addressing
* @param out the output buffer
* @param in the input buffer
* @param len number of elements in the input buffer
* @param stream cuda stream where to launch work
* @{
*/
template <typename in_t, typename out_t = in_t, typename IdxType = int>
void sqrt(out_t* out, const in_t* in, IdxType len, cudaStream_t stream)
{
  // Element-wise square root: out[i] = sqrt(in[i]) for i in [0, len).
  raft::sqrt_op sqrt_func{};
  raft::linalg::unaryOp(out, in, len, sqrt_func, stream);
}
/** @} */
/**
 * @defgroup sqrt Sqrt Arithmetic
 * @{
 */
/**
 * @brief Elementwise sqrt operation
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle raft::resources
 * @param[in] in Input
 * @param[out] out Output
 */
template <typename InType,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void sqrt(raft::resources const& handle, InType in, OutType out)
{
  using in_value_t  = typename InType::value_type;
  using out_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  // Messages fixed from a copy-pasted binary-op wrapper: this is a unary op
  // with a single input, so "Input 1"/"Inputs" were misleading.
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  // Use 32-bit indexing whenever the element count fits, falling back to
  // 64-bit indices only for very large inputs.
  if (out.size() <= std::numeric_limits<std::uint32_t>::max()) {
    sqrt<in_value_t, out_value_t, std::uint32_t>(out.data_handle(),
                                                 in.data_handle(),
                                                 static_cast<std::uint32_t>(out.size()),
                                                 resource::get_cuda_stream(handle));
  } else {
    sqrt<in_value_t, out_value_t, std::uint64_t>(out.data_handle(),
                                                 in.data_handle(),
                                                 static_cast<std::uint64_t>(out.size()),
                                                 resource::get_cuda_stream(handle));
  }
}
/** @} */  // end of group sqrt
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/mean_squared_error.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MSE_H
#define __MSE_H
#pragma once
#include "detail/mean_squared_error.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
namespace raft {
namespace linalg {
/**
 * @brief CUDA version mean squared error function mean((A-B)**2)
 * @tparam in_t input data-type
 * @tparam out_t output data-type
 * @tparam idx_t integer type used for addressing
 * @param out the output mean squared error value (assumed to be a device pointer)
 * @param A input array (assumed to be a device pointer)
 * @param B input array (assumed to be a device pointer)
 * @param len number of elements in the input arrays
 * @param weight weight to apply to every term in the mean squared error calculation
 * @param stream cuda-stream where to launch this kernel
 */
template <typename in_t, typename out_t, typename idx_t = size_t>
void meanSquaredError(
  out_t* out, const in_t* A, const in_t* B, idx_t len, in_t weight, cudaStream_t stream)
{
  // Thin pass-through to the detail implementation.
  detail::meanSquaredError(out, A, B, len, weight, stream);
}
/**
* @defgroup mean_squared_error Mean Squared Error
* @{
*/
/**
* @brief CUDA version mean squared error function mean((A-B)**2)
* @tparam InValueType Input data-type
* @tparam IndexType Input/Output index type
* @tparam OutValueType Output data-type
* @tparam TPB threads-per-block
* @param[in] handle raft::resources
* @param[in] A input raft::device_vector_view
* @param[in] B input raft::device_vector_view
* @param[out] out the output mean squared error value of type raft::device_scalar_view
* @param[in] weight weight to apply to every term in the mean squared error calculation
*/
template <typename InValueType, typename IndexType, typename OutValueType>
void mean_squared_error(raft::resources const& handle,
                        raft::device_vector_view<const InValueType, IndexType> A,
                        raft::device_vector_view<const InValueType, IndexType> B,
                        raft::device_scalar_view<OutValueType, IndexType> out,
                        OutValueType weight)
{
  // Both inputs must carry the same number of elements.
  RAFT_EXPECTS(A.size() == B.size(), "Size mismatch between inputs");
  auto stream = resource::get_cuda_stream(handle);
  meanSquaredError(
    out.data_handle(), A.data_handle(), B.data_handle(), A.extent(0), weight, stream);
}
/** @} */ // end of group mean_squared_error
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/dot.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DOT_H
#define __DOT_H
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace raft::linalg {
/**
* @defgroup dot BLAS dot routine
* @{
*/
/**
 * @brief Computes the dot product of two vectors.
 *
 * This overload writes the result to device memory (`device_scalar_view`);
 * see the `host_scalar_view` overload below for a host-side result.
 *
 * @param[in] handle raft::resources
 * @param[in] x First input vector
 * @param[in] y Second input vector
 * @param[out] out The output dot product between the x and y vectors.
 */
template <typename ElementType,
          typename IndexType,
          typename ScalarIndexType,
          typename LayoutPolicy1,
          typename LayoutPolicy2>
void dot(raft::resources const& handle,
         raft::device_vector_view<const ElementType, IndexType, LayoutPolicy1> x,
         raft::device_vector_view<const ElementType, IndexType, LayoutPolicy2> y,
         raft::device_scalar_view<ElementType, ScalarIndexType> out)
{
  RAFT_EXPECTS(x.size() == y.size(),
               "Size mismatch between x and y input vectors in raft::linalg::dot");
  // Strides are forwarded so strided (non-contiguous) vector views work too.
  // The cuBLAS pointer mode for the device-side result is presumably handled
  // inside detail::cublasdot -- see cublas_wrappers.hpp.
  RAFT_CUBLAS_TRY(detail::cublasdot(resource::get_cublas_handle(handle),
                                    x.size(),
                                    x.data_handle(),
                                    x.stride(0),
                                    y.data_handle(),
                                    y.stride(0),
                                    out.data_handle(),
                                    resource::get_cuda_stream(handle)));
}
/**
 * @brief Computes the dot product of two vectors.
 *
 * This overload writes the result to host memory (`host_scalar_view`).
 *
 * @param[in] handle raft::resources
 * @param[in] x First input vector
 * @param[in] y Second input vector
 * @param[out] out The output dot product between the x and y vectors.
 */
template <typename ElementType,
          typename IndexType,
          typename ScalarIndexType,
          typename LayoutPolicy1,
          typename LayoutPolicy2>
void dot(raft::resources const& handle,
         raft::device_vector_view<const ElementType, IndexType, LayoutPolicy1> x,
         raft::device_vector_view<const ElementType, IndexType, LayoutPolicy2> y,
         raft::host_scalar_view<ElementType, ScalarIndexType> out)
{
  RAFT_EXPECTS(x.size() == y.size(),
               "Size mismatch between x and y input vectors in raft::linalg::dot");
  // Strides are forwarded so strided (non-contiguous) vector views work too.
  RAFT_CUBLAS_TRY(detail::cublasdot(resource::get_cublas_handle(handle),
                                    x.size(),
                                    x.data_handle(),
                                    x.stride(0),
                                    y.data_handle(),
                                    y.stride(0),
                                    out.data_handle(),
                                    resource::get_cuda_stream(handle)));
}
/** @} */ // end of group dot
} // namespace raft::linalg
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/init.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __INIT_H
#define __INIT_H
#pragma once
#include <raft/linalg/map.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft::linalg {
/**
 * @brief Like Python range.
 *
 * Fills the output as out[i] = start + i for i in [0, end - start).
 *
 * \param [out] out device array, size [end-start]
 * \param [in] start of the range
 * \param [in] end of range (exclusive)
 * \param [in] stream cuda stream
 */
template <typename T>
void range(T* out, int start, int end, cudaStream_t stream)
{
  // Each index is offset by `start` and then cast to T; the composed op is
  // handed to the generic map primitive.
  auto shifted_cast = compose_op{cast_op<T>{}, add_const_op<int>{start}};
  detail::map<true>(stream, out, end - start, shifted_cast);
}
/**
 * @brief Like Python range.
 *
 * Fills the output as out[i] = i.
 *
 * \param [out] out device array, size [n]
 * \param [in] n length of the array
 * \param [in] stream cuda stream
 */
template <typename T, int TPB = 256>
void range(T* out, int n, cudaStream_t stream)
{
  // Each output element is simply its own index cast to T.
  detail::map<true>(stream, out, n, cast_op<T>{});
}
/**
 * @brief Zeros the output.
 *
 * \param [out] out device array, size [n]
 * \param [in] n length of the array
 * \param [in] stream cuda stream
 */
template <typename T>
void zero(T* out, int n, cudaStream_t stream)
{
  // Byte-wise zeroing via an asynchronous memset on the given stream.
  const auto num_bytes = sizeof(T) * n;
  RAFT_CUDA_TRY(cudaMemsetAsync(static_cast<void*>(out), 0, num_bytes, stream));
}
} // namespace raft::linalg
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/rsvd.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RSVD_H
#define __RSVD_H
#pragma once
#include "detail/rsvd.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
namespace raft {
namespace linalg {
/**
 * @brief randomized singular value decomposition (RSVD) on the column major
 * float type input matrix (Jacobi-based), by specifying no. of PCs and
 * upsamples directly
 * @param handle: raft handle
 * @param M: input matrix
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param S_vec: singular values of input matrix
 * @param U: left singular values of input matrix
 * @param V: right singular values of input matrix
 * @param k: no. of singular values to be computed
 * @param p: no. of upsamples
 * @param use_bbt: whether to use eigen decomposition in computation or not
 * @param gen_left_vec: whether the left vector needs to be generated or not
 * @param gen_right_vec: whether the right vector needs to be generated or not
 * @param use_jacobi: whether to use the jacobi solver for decomposition
 * @param tol: tolerance for Jacobi-based solvers
 * @param max_sweeps: maximum number of sweeps for Jacobi-based solvers
 * @param stream cuda stream
 */
template <typename math_t>
void rsvdFixedRank(raft::resources const& handle,
                   math_t* M,
                   int n_rows,
                   int n_cols,
                   math_t* S_vec,
                   math_t* U,
                   math_t* V,
                   int k,
                   int p,
                   bool use_bbt,
                   bool gen_left_vec,
                   bool gen_right_vec,
                   bool use_jacobi,
                   math_t tol,
                   int max_sweeps,
                   cudaStream_t stream)
{
  // Pure pass-through: all the work (and any validation) happens in the
  // detail implementation.
  detail::rsvdFixedRank(handle,
                        M,
                        n_rows,
                        n_cols,
                        S_vec,
                        U,
                        V,
                        k,
                        p,
                        use_bbt,
                        gen_left_vec,
                        gen_right_vec,
                        use_jacobi,
                        tol,
                        max_sweeps,
                        stream);
}
/**
* @brief randomized singular value decomposition (RSVD) on the column major
* float type input matrix (Jacobi-based), by specifying the PC and upsampling
* ratio
* @param handle: raft handle
* @param M: input matrix
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param S_vec: singular values of input matrix
* @param U: left singular values of input matrix
* @param V: right singular values of input matrix
* @param PC_perc: percentage of singular values to be computed
* @param UpS_perc: upsampling percentage
* @param use_bbt: whether use eigen decomposition in computation or not
* @param gen_left_vec: left vector needs to be generated or not?
* @param gen_right_vec: right vector needs to be generated or not?
* @param use_jacobi: whether to jacobi solver for decomposition
* @param tol: tolerance for Jacobi-based solvers
* @param max_sweeps: maximum number of sweeps for Jacobi-based solvers
* @param stream cuda stream
*/
template <typename math_t>
void rsvdPerc(raft::resources const& handle,
              math_t* M,
              int n_rows,
              int n_cols,
              math_t* S_vec,
              math_t* U,
              math_t* V,
              math_t PC_perc,
              math_t UpS_perc,
              bool use_bbt,
              bool gen_left_vec,
              bool gen_right_vec,
              bool use_jacobi,
              math_t tol,
              int max_sweeps,
              cudaStream_t stream)
{
  // Thin pass-through to the detail implementation, which derives the rank k
  // and the oversampling p from PC_perc / UpS_perc. No validation here.
  detail::rsvdPerc(handle,
                   M,
                   n_rows,
                   n_cols,
                   S_vec,
                   U,
                   V,
                   PC_perc,
                   UpS_perc,
                   use_bbt,
                   gen_left_vec,
                   gen_right_vec,
                   use_jacobi,
                   tol,
                   max_sweeps,
                   stream);
}
/**
* @defgroup rsvd Randomized Singular Value Decomposition
* @{
*/
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using QR decomposition, by specifying no. of PCs and
* upsamples directly
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] p no. of upsamples
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_fixed_rank(raft::resources const& handle,
                     raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
                     raft::device_vector_view<ValueType, IndexType> S_vec,
                     IndexType p,
                     UType&& U_in,
                     VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // QR-based solve: use_bbt = false, use_jacobi = false (tol/max_sweeps unused).
  rsvdFixedRank(handle,
                const_cast<ValueType*>(M.data_handle()),
                M.extent(0),
                M.extent(1),
                S_vec.data_handle(),
                u_ptr,
                v_ptr,
                S_vec.extent(0),
                p,
                false,
                opt_u.has_value(),
                opt_v.has_value(),
                false,
                static_cast<ValueType>(0),
                0,
                resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_fixed_rank` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_fixed_rank`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 4>>
void rsvd_fixed_rank(Args... args)
{
  // Disambiguating overload: append std::nullopt for both U and V so that
  // callers omitting the optional outputs still resolve to the primary overload.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_fixed_rank(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using symmetric Eigen decomposition, by specifying no. of PCs and
* upsamples directly. The rectangular input matrix is made square and symmetric using B @ B^T
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] p no. of upsamples
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_fixed_rank_symmetric(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
  raft::device_vector_view<ValueType, IndexType> S_vec,
  IndexType p,
  UType&& U_in,
  VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Symmetric eigen path: use_bbt = true, use_jacobi = false.
  rsvdFixedRank(handle,
                const_cast<ValueType*>(M.data_handle()),
                M.extent(0),
                M.extent(1),
                S_vec.data_handle(),
                u_ptr,
                v_ptr,
                S_vec.extent(0),
                p,
                true,
                opt_u.has_value(),
                opt_v.has_value(),
                false,
                static_cast<ValueType>(0),
                0,
                resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_fixed_rank_symmetric` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_fixed_rank_symmetric`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 4>>
void rsvd_fixed_rank_symmetric(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_fixed_rank_symmetric(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using Jacobi method, by specifying no. of PCs and
* upsamples directly
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] p no. of upsamples
* @param[in] tol tolerance for Jacobi-based solvers
* @param[in] max_sweeps maximum number of sweeps for Jacobi-based solvers
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_fixed_rank_jacobi(raft::resources const& handle,
                            raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
                            raft::device_vector_view<ValueType, IndexType> S_vec,
                            IndexType p,
                            ValueType tol,
                            int max_sweeps,
                            UType&& U_in,
                            VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Jacobi solve: use_bbt = false, use_jacobi = true; tol/max_sweeps are honored.
  rsvdFixedRank(handle,
                const_cast<ValueType*>(M.data_handle()),
                M.extent(0),
                M.extent(1),
                S_vec.data_handle(),
                u_ptr,
                v_ptr,
                S_vec.extent(0),
                p,
                false,
                opt_u.has_value(),
                opt_v.has_value(),
                true,
                tol,
                max_sweeps,
                resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_fixed_rank_jacobi` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_fixed_rank_jacobi`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 6>>
void rsvd_fixed_rank_jacobi(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_fixed_rank_jacobi(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using Jacobi method, by specifying no. of PCs and
* upsamples directly. The rectangular input matrix is made square and symmetric using B @ B^T
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] p no. of upsamples
* @param[in] tol tolerance for Jacobi-based solvers
* @param[in] max_sweeps maximum number of sweeps for Jacobi-based solvers
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_fixed_rank_symmetric_jacobi(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
  raft::device_vector_view<ValueType, IndexType> S_vec,
  IndexType p,
  ValueType tol,
  int max_sweeps,
  UType&& U_in,
  VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Symmetric + Jacobi: use_bbt = true, use_jacobi = true; tol/max_sweeps honored.
  rsvdFixedRank(handle,
                const_cast<ValueType*>(M.data_handle()),
                M.extent(0),
                M.extent(1),
                S_vec.data_handle(),
                u_ptr,
                v_ptr,
                S_vec.extent(0),
                p,
                true,
                opt_u.has_value(),
                opt_v.has_value(),
                true,
                tol,
                max_sweeps,
                resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_fixed_rank_symmetric_jacobi` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_fixed_rank_symmetric_jacobi`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 6>>
void rsvd_fixed_rank_symmetric_jacobi(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_fixed_rank_symmetric_jacobi(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using QR decomposition, by specifying the PC and upsampling
* ratio
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] PC_perc percentage of singular values to be computed
* @param[in] UpS_perc upsampling percentage
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_perc(raft::resources const& handle,
               raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
               raft::device_vector_view<ValueType, IndexType> S_vec,
               ValueType PC_perc,
               ValueType UpS_perc,
               UType&& U_in,
               VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // QR-based solve: use_bbt = false, use_jacobi = false (tol/max_sweeps unused).
  rsvdPerc(handle,
           const_cast<ValueType*>(M.data_handle()),
           M.extent(0),
           M.extent(1),
           S_vec.data_handle(),
           u_ptr,
           v_ptr,
           PC_perc,
           UpS_perc,
           false,
           opt_u.has_value(),
           opt_v.has_value(),
           false,
           static_cast<ValueType>(0),
           0,
           resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_perc` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_perc`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 5>>
void rsvd_perc(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_perc(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using symmetric Eigen decomposition, by specifying the PC and upsampling
* ratio. The rectangular input matrix is made square and symmetric using B @ B^T
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] PC_perc percentage of singular values to be computed
* @param[in] UpS_perc upsampling percentage
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_perc_symmetric(raft::resources const& handle,
                         raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
                         raft::device_vector_view<ValueType, IndexType> S_vec,
                         ValueType PC_perc,
                         ValueType UpS_perc,
                         UType&& U_in,
                         VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Symmetric eigen path: use_bbt = true, use_jacobi = false.
  rsvdPerc(handle,
           const_cast<ValueType*>(M.data_handle()),
           M.extent(0),
           M.extent(1),
           S_vec.data_handle(),
           u_ptr,
           v_ptr,
           PC_perc,
           UpS_perc,
           true,
           opt_u.has_value(),
           opt_v.has_value(),
           false,
           static_cast<ValueType>(0),
           0,
           resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_perc_symmetric` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_perc_symmetric`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 5>>
void rsvd_perc_symmetric(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_perc_symmetric(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using Jacobi method, by specifying the PC and upsampling
* ratio
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] PC_perc percentage of singular values to be computed
* @param[in] UpS_perc upsampling percentage
* @param[in] tol tolerance for Jacobi-based solvers
* @param[in] max_sweeps maximum number of sweeps for Jacobi-based solvers
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_perc_jacobi(raft::resources const& handle,
                      raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
                      raft::device_vector_view<ValueType, IndexType> S_vec,
                      ValueType PC_perc,
                      ValueType UpS_perc,
                      ValueType tol,
                      int max_sweeps,
                      UType&& U_in,
                      VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Jacobi solve: use_bbt = false, use_jacobi = true; tol/max_sweeps are honored.
  rsvdPerc(handle,
           const_cast<ValueType*>(M.data_handle()),
           M.extent(0),
           M.extent(1),
           S_vec.data_handle(),
           u_ptr,
           v_ptr,
           PC_perc,
           UpS_perc,
           false,
           opt_u.has_value(),
           opt_v.has_value(),
           true,
           tol,
           max_sweeps,
           resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_perc_jacobi` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_perc_jacobi`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 7>>
void rsvd_perc_jacobi(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_perc_jacobi(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) on a column major
* rectangular matrix using Jacobi method, by specifying the PC and upsampling
* ratio. The rectangular input matrix is made square and symmetric using B @ B^T
* @tparam ValueType value type of parameters
* @tparam IndexType index type of parameters
* @tparam UType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* U_in
* @tparam VType std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> @c
* V_in
* @param[in] handle raft::resources
* @param[in] M input raft::device_matrix_view with layout raft::col_major of shape (M, N)
* @param[out] S_vec singular values raft::device_vector_view of shape (K)
* @param[in] PC_perc percentage of singular values to be computed
* @param[in] UpS_perc upsampling percentage
* @param[in] tol tolerance for Jacobi-based solvers
* @param[in] max_sweeps maximum number of sweeps for Jacobi-based solvers
* @param[out] U_in std::optional left singular values of raft::device_matrix_view with layout
* raft::col_major
* @param[out] V_in std::optional right singular values of raft::device_matrix_view with layout
* raft::col_major
*/
template <typename ValueType, typename IndexType, typename UType, typename VType>
void rsvd_perc_symmetric_jacobi(
  raft::resources const& handle,
  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> M,
  raft::device_vector_view<ValueType, IndexType> S_vec,
  ValueType PC_perc,
  ValueType UpS_perc,
  ValueType tol,
  int max_sweeps,
  UType&& U_in,
  VType&& V_in)
{
  // Normalize the forwarded arguments (matrix views or std::nullopt) into optionals.
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_u =
    std::forward<UType>(U_in);
  std::optional<raft::device_matrix_view<ValueType, IndexType, raft::col_major>> opt_v =
    std::forward<VType>(V_in);

  // Validate requested outputs against the input shapes and grab raw pointers.
  ValueType* u_ptr = nullptr;
  if (opt_u.has_value()) {
    RAFT_EXPECTS(M.extent(0) == opt_u.value().extent(0),
                 "Number of rows in M should be equal to U");
    RAFT_EXPECTS(S_vec.extent(0) == opt_u.value().extent(1),
                 "Number of columns in U should be equal to length of S");
    u_ptr = opt_u.value().data_handle();
  }
  ValueType* v_ptr = nullptr;
  if (opt_v.has_value()) {
    RAFT_EXPECTS(M.extent(1) == opt_v.value().extent(1),
                 "Number of columns in M should be equal to V");
    RAFT_EXPECTS(S_vec.extent(0) == opt_v.value().extent(0),
                 "Number of rows in V should be equal to length of S");
    v_ptr = opt_v.value().data_handle();
  }

  // Symmetric + Jacobi: use_bbt = true, use_jacobi = true; tol/max_sweeps honored.
  rsvdPerc(handle,
           const_cast<ValueType*>(M.data_handle()),
           M.extent(0),
           M.extent(1),
           S_vec.data_handle(),
           u_ptr,
           v_ptr,
           PC_perc,
           UpS_perc,
           true,
           opt_u.has_value(),
           opt_v.has_value(),
           true,
           tol,
           max_sweeps,
           resource::get_cuda_stream(handle));
}
/**
* @brief Overload of `rsvd_perc_symmetric_jacobi` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for one or both of the optional arguments.
*
* Please see above for documentation of `rsvd_perc_symmetric_jacobi`.
*/
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 7>>
void rsvd_perc_symmetric_jacobi(Args... args)
{
  // Disambiguating overload: supply std::nullopt for the optional U/V outputs.
  // std::move on a by-value pack is equivalent to std::forward<Args> here.
  rsvd_perc_symmetric_jacobi(std::move(args)..., std::nullopt, std::nullopt);
}
/**
* @brief randomized singular value decomposition (RSVD) using cusolver
* @tparam math_t the data type
* @tparam idx_t index type
* @param[in] handle: raft handle
* @param[in] in: input matrix in col-major format.
* Warning: the content of this matrix is modified by the cuSOLVER routines.
* [dim = n_rows * n_cols]
* @param[out] S: array of singular values of input matrix. The rank k must be less than
* min(m,n). [dim = k]
* @param[out] U: optional left singular values of input matrix. Use std::nullopt to not
* generate it. [dim = n_rows * k]
* @param[out] V: optional right singular values of input matrix. Use std::nullopt to not
* generate it. [dim = k * n_cols]
* @param[in] p: Oversampling. The size of the subspace will be (k + p). (k+p) is less than
* min(m,n). (Recommended to be at least 2*k)
* @param[in] niters: Number of iteration of power method. (2 is recommended)
*/
template <typename math_t, typename idx_t>
void randomized_svd(const raft::resources& handle,
                    raft::device_matrix_view<const math_t, idx_t, raft::col_major> in,
                    raft::device_vector_view<math_t, idx_t> S,
                    std::optional<raft::device_matrix_view<math_t, idx_t, raft::col_major>> U,
                    std::optional<raft::device_matrix_view<math_t, idx_t, raft::col_major>> V,
                    std::size_t p,
                    std::size_t niters)
{
  // The requested rank is implied by the length of the singular-value vector.
  const auto rank      = S.extent(0);
  const bool want_left  = U.has_value();
  const bool want_right = V.has_value();

  math_t* left_ptr  = nullptr;
  math_t* right_ptr = nullptr;
  if (want_left) {
    RAFT_EXPECTS(in.extent(0) == U.value().extent(0) && rank == U.value().extent(1),
                 "U should have dimensions n_rows * k");
    left_ptr = U.value().data_handle();
  }
  if (want_right) {
    RAFT_EXPECTS(rank == V.value().extent(0) && in.extent(1) == V.value().extent(1),
                 "V should have dimensions k * n_cols");
    right_ptr = V.value().data_handle();
  }

  // Dispatch to the cuSOLVER-backed detail implementation.
  detail::randomized_svd(handle,
                         in.data_handle(),
                         in.extent(0),
                         in.extent(1),
                         rank,
                         p,
                         niters,
                         S.data_handle(),
                         left_ptr,
                         right_ptr,
                         want_left,
                         want_right);
}
/**
* @brief Overload of `randomized_svd` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for the optional arguments.
*
* Please see above for documentation of `randomized_svd`.
*/
template <typename math_t, typename idx_t, typename opt_u_vec_t, typename opt_v_vec_t>
void randomized_svd(const raft::resources& handle,
                    raft::device_matrix_view<const math_t, idx_t, raft::col_major> in,
                    raft::device_vector_view<math_t, idx_t> S,
                    opt_u_vec_t&& U,
                    opt_v_vec_t&& V,
                    std::size_t p,
                    std::size_t niters)
{
  // Coerce whatever was passed (a view, std::nullopt, or an optional) into the
  // std::optional type expected by the primary overload, then delegate.
  using opt_view_t = std::optional<raft::device_matrix_view<math_t, idx_t, raft::col_major>>;
  opt_view_t left  = std::forward<opt_u_vec_t>(U);
  opt_view_t right = std::forward<opt_v_vec_t>(V);
  randomized_svd(handle, in, S, left, right, p, niters);
}
/** @} */ // end of group rsvd
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/coalesced_reduction.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COALESCED_REDUCTION_H
#define __COALESCED_REDUCTION_H
#pragma once
#include "detail/coalesced_reduction.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
/**
* @brief Compute reduction of the input matrix along the leading dimension
*
* @tparam InType the data type of the input
* @tparam OutType the data type of the output (as well as the data type for
* which reduction is performed)
* @tparam IdxType data type of the indices of the array
 * @tparam MainLambda Unary lambda applied during accumulation (eg: L1 or L2 norm)
 * It must be a 'callable' supporting the following input and output:
 * <pre>OutType (*MainLambda)(InType, IdxType);</pre>
 * @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
 * It must be a 'callable' supporting the following input and output:
 * <pre>OutType (*ReduceLambda)(OutType, OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param dots the output reduction vector
* @param data the input matrix
* @param D leading dimension of data
* @param N second dimension data
* @param init initial value to use for the reduction
* @param main_op elementwise operation to apply before reduction
* @param reduce_op binary reduction operation
* @param final_op elementwise operation to apply before storing results
* @param inplace reduction result added inplace or overwrites old values?
* @param stream cuda stream where to launch work
*/
template <typename InType,
          typename OutType      = InType,
          typename IdxType      = int,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void coalescedReduction(OutType* dots,
                        const InType* data,
                        IdxType D,
                        IdxType N,
                        OutType init,
                        cudaStream_t stream,
                        bool inplace          = false,
                        MainLambda main_op    = raft::identity_op(),
                        ReduceLambda reduce_op = raft::add_op(),
                        FinalLambda final_op  = raft::identity_op())
{
  // Thin pass-through to the detail implementation; kernel/launch selection
  // happens there. All functors and flags are forwarded verbatim.
  detail::coalescedReduction<InType, OutType, IdxType>(
    dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
}
/**
* @defgroup coalesced_reduction Coalesced Memory Access Reductions
* For reducing along rows for col-major and along columns for row-major
* @{
*/
/**
* @brief Compute reduction of the input matrix along the leading dimension
* This API is to be used when the desired reduction is along the dimension
* of the memory layout. For example, a row-major matrix will be reduced
* along the columns whereas a column-major matrix will be reduced along
* the rows.
*
* @tparam InValueType the input data-type of underlying raft::matrix_view
* @tparam LayoutPolicy The layout of Input/Output (row or col major)
* @tparam OutValueType the output data-type of underlying raft::matrix_view and reduction
* @tparam IndexType Integer type used to for addressing
 * @tparam MainLambda Unary lambda applied during accumulation (eg: L1 or L2 norm)
 * It must be a 'callable' supporting the following input and output:
 * <pre>OutType (*MainLambda)(InType, IdxType);</pre>
 * @tparam ReduceLambda Binary lambda applied for reduction (eg: addition(+) for L2 norm)
 * It must be a 'callable' supporting the following input and output:
 * <pre>OutType (*ReduceLambda)(OutType, OutType);</pre>
* @tparam FinalLambda the final lambda applied before STG (eg: Sqrt for L2 norm)
* It must be a 'callable' supporting the following input and output:
* <pre>OutType (*FinalLambda)(OutType);</pre>
* @param handle raft::resources
* @param[in] data Input of type raft::device_matrix_view
* @param[out] dots Output of type raft::device_matrix_view
* @param[in] init initial value to use for the reduction
* @param[in] inplace reduction result added inplace or overwrites old values?
* @param[in] main_op fused elementwise operation to apply before reduction
* @param[in] reduce_op fused binary reduction operation
* @param[in] final_op fused elementwise operation to apply before storing results
*/
template <typename InValueType,
          typename LayoutPolicy,
          typename OutValueType,
          typename IdxType,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void coalesced_reduction(raft::resources const& handle,
                         raft::device_matrix_view<const InValueType, IdxType, LayoutPolicy> data,
                         raft::device_vector_view<OutValueType, IdxType> dots,
                         OutValueType init,
                         bool inplace          = false,
                         MainLambda main_op    = raft::identity_op(),
                         ReduceLambda reduce_op = raft::add_op(),
                         FinalLambda final_op  = raft::identity_op())
{
  auto stream = resource::get_cuda_stream(handle);
  // The reduction always runs along the contiguous (leading) dimension, so the
  // D/N arguments to the raw API depend on the compile-time layout.
  if constexpr (std::is_same_v<LayoutPolicy, raft::row_major>) {
    // Row-major: reduce along columns, producing one value per row.
    RAFT_EXPECTS(static_cast<IdxType>(dots.size()) == data.extent(0),
                 "Output should be equal to number of rows in Input");
    coalescedReduction(dots.data_handle(),
                       data.data_handle(),
                       data.extent(1),
                       data.extent(0),
                       init,
                       stream,
                       inplace,
                       main_op,
                       reduce_op,
                       final_op);
  } else if constexpr (std::is_same_v<LayoutPolicy, raft::col_major>) {
    // Col-major: reduce along rows, producing one value per column.
    RAFT_EXPECTS(static_cast<IdxType>(dots.size()) == data.extent(1),
                 "Output should be equal to number of columns in Input");
    coalescedReduction(dots.data_handle(),
                       data.data_handle(),
                       data.extent(0),
                       data.extent(1),
                       init,
                       stream,
                       inplace,
                       main_op,
                       reduce_op,
                       final_op);
  }
}
/** @} */ // end of group coalesced_reduction
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/norm.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __NORM_H
#define __NORM_H
#pragma once
#include "detail/norm.cuh"
#include "linalg_types.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/norm_types.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @brief Compute row-wise norm of the input matrix and perform fin_op lambda
*
* Row-wise norm is useful while computing pairwise distance matrix, for
* example.
* This is used in many clustering algos like knn, kmeans, dbscan, etc...
*
* @tparam Type the data type
* @tparam Lambda device final lambda
 * @tparam IdxType Integer type used for addressing
* @param dots the output vector of row-wise dot products
* @param data the input matrix
* @param D number of columns of data
* @param N number of rows of data
* @param type the type of norm to be applied
* @param rowMajor whether the input is row-major or not
* @param stream cuda stream where to launch work
* @param fin_op the final lambda op
*/
template <typename Type, typename IdxType = int, typename Lambda = raft::identity_op>
void rowNorm(Type* dots,
             const Type* data,
             IdxType D,
             IdxType N,
             NormType type,
             bool rowMajor,
             cudaStream_t stream,
             Lambda fin_op = raft::identity_op())
{
  // Thin forwarder: kernel selection and launch happen in detail::rowNormCaller.
  detail::rowNormCaller(dots, data, D, N, type, rowMajor, stream, fin_op);
}
/**
* @brief Compute column-wise norm of the input matrix and perform fin_op
* @tparam Type the data type
* @tparam Lambda device final lambda
 * @tparam IdxType Integer type used for addressing
* @param dots the output vector of column-wise dot products
* @param data the input matrix
* @param D number of columns of data
* @param N number of rows of data
* @param type the type of norm to be applied
* @param rowMajor whether the input is row-major or not
* @param stream cuda stream where to launch work
* @param fin_op the final lambda op
*/
template <typename Type, typename IdxType = int, typename Lambda = raft::identity_op>
void colNorm(Type* dots,
             const Type* data,
             IdxType D,
             IdxType N,
             NormType type,
             bool rowMajor,
             cudaStream_t stream,
             Lambda fin_op = raft::identity_op())
{
  // Thin forwarder: kernel selection and launch happen in detail::colNormCaller.
  detail::colNormCaller(dots, data, D, N, type, rowMajor, stream, fin_op);
}
/**
* @defgroup norm Row- or Col-norm computation
* @{
*/
/**
* @brief Compute norm of the input matrix and perform fin_op
* @tparam ElementType Input/Output data type
* @tparam LayoutPolicy the layout of input (raft::row_major or raft::col_major)
 * @tparam IndexType Integer type used for addressing
* @tparam Lambda device final lambda
* @param[in] handle raft::resources
* @param[in] in the input raft::device_matrix_view
* @param[out] out the output raft::device_vector_view
* @param[in] type the type of norm to be applied
* @param[in] apply Whether to apply the norm along rows (raft::linalg::Apply::ALONG_ROWS)
or along columns (raft::linalg::Apply::ALONG_COLUMNS)
* @param[in] fin_op the final lambda op
*/
template <typename ElementType,
          typename LayoutPolicy,
          typename IndexType,
          typename Lambda = raft::identity_op>
void norm(raft::resources const& handle,
          raft::device_matrix_view<const ElementType, IndexType, LayoutPolicy> in,
          raft::device_vector_view<ElementType, IndexType> out,
          NormType type,
          Apply apply,
          Lambda fin_op = raft::identity_op())
{
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  // Whether the data layout matches the legacy "row-major" flag of rowNorm/colNorm.
  constexpr bool is_row_major = std::is_same_v<LayoutPolicy, raft::row_major>;
  const auto stream           = resource::get_cuda_stream(handle);
  // Legacy API convention: D = number of columns, N = number of rows.
  const IndexType D = in.extent(1);
  const IndexType N = in.extent(0);
  if (apply == Apply::ALONG_ROWS) {
    // One norm per row.
    RAFT_EXPECTS(static_cast<IndexType>(out.size()) == N,
                 "Output should be equal to number of rows in Input");
    rowNorm(out.data_handle(), in.data_handle(), D, N, type, is_row_major, stream, fin_op);
  } else {
    // One norm per column.
    RAFT_EXPECTS(static_cast<IndexType>(out.size()) == D,
                 "Output should be equal to number of columns in Input");
    colNorm(out.data_handle(), in.data_handle(), D, N, type, is_row_major, stream, fin_op);
  }
}
/** @} */
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/gemm.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GEMM_H
#define __GEMM_H
#pragma once
#include "detail/gemm.hpp"
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: C = alpha .* opA(A) * opB(B) + beta .* C
*
* @tparam math_t the element type
* @tparam DevicePointerMode whether pointers alpha, beta point to device memory
* @param [in] handle raft handle
* @param [in] trans_a cublas transpose op for A
* @param [in] trans_b cublas transpose op for B
* @param [in] m number of rows of C
* @param [in] n number of columns of C
* @param [in] k number of rows of opB(B) / number of columns of opA(A)
* @param [in] alpha host or device scalar
* @param [in] A such a matrix that the shape of column-major opA(A) is [m, k]
* @param [in] lda leading dimension of A
 * @param [in] B such a matrix that the shape of column-major opB(B) is [k, n]
* @param [in] ldb leading dimension of B
* @param [in] beta host or device scalar
* @param [inout] C column-major matrix of size [m, n]
* @param [in] ldc leading dimension of C
* @param [in] stream
*/
template <typename math_t, bool DevicePointerMode = false>
void gemm(raft::resources const& handle,
          const bool trans_a,
          const bool trans_b,
          const int m,
          const int n,
          const int k,
          const math_t* alpha,
          const math_t* A,
          const int lda,
          const math_t* B,
          const int ldb,
          const math_t* beta,
          math_t* C,
          const int ldc,
          cudaStream_t stream)
{
  // Thin forwarder: the cuBLAS call (and pointer-mode handling) lives in detail::gemm.
  detail::gemm<math_t, DevicePointerMode>(
    handle, trans_a, trans_b, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, stream);
}
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam math_t the type of input/output matrices
* @param handle raft handle
* @param a input matrix
* @param n_rows_a number of rows of A
* @param n_cols_a number of columns of A
* @param b input matrix
* @param c output matrix
* @param n_rows_c number of rows of C
* @param n_cols_c number of columns of C
* @param trans_a cublas transpose op for A
* @param trans_b cublas transpose op for B
* @param alpha scalar
* @param beta scalar
* @param stream cuda stream
*/
template <typename math_t>
void gemm(raft::resources const& handle,
          const math_t* a,
          int n_rows_a,
          int n_cols_a,
          const math_t* b,
          math_t* c,
          int n_rows_c,
          int n_cols_c,
          cublasOperation_t trans_a,
          cublasOperation_t trans_b,
          math_t alpha,
          math_t beta,
          cudaStream_t stream)
{
  // Thin forwarder with host-side alpha/beta scalars; see detail::gemm.
  detail::gemm(
    handle, a, n_rows_a, n_cols_a, b, c, n_rows_c, n_cols_c, trans_a, trans_b, alpha, beta, stream);
}
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam math_t the type of input/output matrices
* @param handle raft handle
* @param a input matrix
* @param n_rows_a number of rows of A
* @param n_cols_a number of columns of A
* @param b input matrix
* @param c output matrix
* @param n_rows_c number of rows of C
* @param n_cols_c number of columns of C
* @param trans_a cublas transpose op for A
* @param trans_b cublas transpose op for B
* @param stream cuda stream
*/
template <typename math_t>
void gemm(raft::resources const& handle,
          const math_t* a,
          int n_rows_a,
          int n_cols_a,
          const math_t* b,
          math_t* c,
          int n_rows_c,
          int n_cols_c,
          cublasOperation_t trans_a,
          cublasOperation_t trans_b,
          cudaStream_t stream)
{
  // Thin forwarder without explicit alpha/beta; defaults are chosen in detail::gemm.
  detail::gemm(handle, a, n_rows_a, n_cols_a, b, c, n_rows_c, n_cols_c, trans_a, trans_b, stream);
}
/**
* @brief A wrapper for CUBLS GEMM function designed for handling all possible
* combinations of operand layouts.
* It computes the following equation: Z = alpha . X * Y + beta . Z
* @tparam T Data type of input/output matrices (float/double)
* @param handle raft handle
* @param z output matrix of size M rows x N columns
* @param x input matrix of size M rows x K columns
* @param y input matrix of size K rows x N columns
* @param _M number of rows of X and Z
* @param _N number of columns of Y and columns of Z
* @param _K number of columns of X and rows of Y
* @param isZColMajor Storage layout of Z. true = col major, false = row major
* @param isXColMajor Storage layout of X. true = col major, false = row major
* @param isYColMajor Storage layout of Y. true = col major, false = row major
* @param stream cuda stream
* @param alpha scalar
* @param beta scalar
*/
template <typename T>
void gemm(raft::resources const& handle,
          T* z,
          T* x,
          T* y,
          int _M,
          int _N,
          int _K,
          bool isZColMajor,
          bool isXColMajor,
          bool isYColMajor,
          cudaStream_t stream,
          T alpha = T(1.0),
          T beta  = T(0.0))
{
  // Forward with addresses of the (host-resident) alpha/beta scalars; layout
  // flags let detail::gemm pick the correct transpose combination.
  detail::gemm(
    handle, z, x, y, _M, _N, _K, isZColMajor, isXColMajor, isYColMajor, stream, &alpha, &beta);
}
/**
* @defgroup gemm Matrix-Matrix Multiplication
* @{
*/
/**
* @brief GEMM function designed for handling all possible
* combinations of operand layouts (raft::row_major or raft::col_major)
* with scalars alpha and beta on the host or device
* It computes the following equation: Z = alpha . X * Y + beta . Z
* If alpha is not provided, it is assumed to be 1.0
* If beta is not provided, it is assumed to be 0.0
* @tparam ValueType Data type of input/output matrices (float/double)
* @tparam IndexType Type of index
* @tparam LayoutPolicyX layout of X
* @tparam LayoutPolicyY layout of Y
* @tparam LayoutPolicyZ layout of Z
* @param[in] handle raft handle
* @param[in] x input raft::device_matrix_view of size M rows x K columns
* @param[in] y input raft::device_matrix_view of size K rows x N columns
* @param[out] z output raft::device_matrix_view of size M rows x N columns
* @param[in] alpha optional raft::host_scalar_view or raft::device_scalar_view, default 1.0
* @param[in] beta optional raft::host_scalar_view or raft::device_scalar_view, default 0.0
*/
template <typename ValueType,
          typename IndexType,
          typename LayoutPolicyX,
          typename LayoutPolicyY,
          typename LayoutPolicyZ,
          typename ScalarIdxType  = std::uint32_t,
          typename ScalarViewType = raft::host_scalar_view<ValueType, ScalarIdxType>,
          typename                = std::enable_if_t<std::disjunction_v<
            std::is_same<ScalarViewType, raft::host_scalar_view<ValueType, ScalarIdxType>>,
            std::is_same<ScalarViewType, raft::device_scalar_view<ValueType, ScalarIdxType>>>>>
void gemm(raft::resources const& handle,
          raft::device_matrix_view<ValueType, IndexType, LayoutPolicyX> x,
          raft::device_matrix_view<ValueType, IndexType, LayoutPolicyY> y,
          raft::device_matrix_view<ValueType, IndexType, LayoutPolicyZ> z,
          std::optional<ScalarViewType> alpha = std::nullopt,
          std::optional<ScalarViewType> beta  = std::nullopt)
{
  // Shape/layout validation: Z(M x N) = X(M x K) * Y(K x N), all views contiguous.
  RAFT_EXPECTS(raft::is_row_or_column_major(x), "X is not contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(y), "Y is not contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(z), "Z is not contiguous");

  RAFT_EXPECTS(x.extent(0) == z.extent(0), "Number of rows of X and Z should be equal");
  RAFT_EXPECTS(y.extent(1) == z.extent(1), "Number of columns of Y and Z should be equal");
  RAFT_EXPECTS(x.extent(1) == y.extent(0), "Number of columns of X and rows of Y should be equal");

  // Each operand's storage order is resolved at compile time from its view type.
  constexpr auto is_x_col_major =
    std::is_same_v<typename decltype(x)::layout_type, raft::col_major>;
  constexpr auto is_y_col_major =
    std::is_same_v<typename decltype(y)::layout_type, raft::col_major>;
  constexpr auto is_z_col_major =
    std::is_same_v<typename decltype(z)::layout_type, raft::col_major>;

  // Device pointer mode is selected by the scalar view type: device_scalar_view
  // means alpha/beta are read from device memory.
  constexpr auto device_mode =
    std::is_same_v<ScalarViewType, raft::device_scalar_view<ValueType, ScalarIdxType>>;

  // Default scalars alpha = 1, beta = 0. Both host and device fallback copies are
  // created unconditionally; only the pair matching the pointer mode is used.
  // NOTE(review): the device scalars are allocated and filled on the handle's
  // stream even when the caller supplies alpha/beta, and they are destroyed when
  // this function returns while the GEMM may still be in flight — presumably safe
  // with stream-ordered deallocation on the same stream, but worth confirming.
  ValueType alpha_value = 1;
  ValueType beta_value  = 0;

  auto alpha_device = raft::make_device_scalar(handle, alpha_value);
  auto beta_device  = raft::make_device_scalar(handle, beta_value);

  auto alpha_host = raft::make_host_scalar(alpha_value);
  auto beta_host  = raft::make_host_scalar(beta_value);

  if constexpr (device_mode) {
    if (!alpha) { alpha = alpha_device.view(); }
    if (!beta) { beta = beta_device.view(); }
  } else {
    if (!alpha) { alpha = alpha_host.view(); }
    if (!beta) { beta = beta_host.view(); }
  }

  detail::gemm<ValueType, device_mode>(handle,
                                       z.data_handle(),
                                       x.data_handle(),
                                       y.data_handle(),
                                       x.extent(0),
                                       y.extent(1),
                                       x.extent(1),
                                       is_z_col_major,
                                       is_x_col_major,
                                       is_y_col_major,
                                       resource::get_cuda_stream(handle),
                                       alpha.value().data_handle(),
                                       beta.value().data_handle());
}
/** @} */ // end of gemm
} // end namespace linalg
} // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/linalg_types.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft::linalg {
/**
 * @brief Enum for reduction/broadcast where an operation is to be performed along
 * a matrix's rows or columns
 *
 */
enum class Apply { ALONG_ROWS, ALONG_COLUMNS };

/**
 * @brief Enum for selecting which triangular part of a matrix an operation
 * refers to: the upper or the lower triangle
 *
 */
enum class FillMode { UPPER, LOWER };
} // end namespace raft::linalg | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/cholesky_r1_update.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CHOLESKY_R1_UPDATE_H
#define __CHOLESKY_R1_UPDATE_H
#pragma once
#include "detail/cholesky_r1_update.cuh"
#include <raft/core/resource/cublas_handle.hpp>
namespace raft {
namespace linalg {
/**
* @brief Rank 1 update of Cholesky decomposition.
* NOTE: The new mdspan-based API will not be provided for this function.
*
* This method is useful if an algorithm iteratively builds up matrix A, and
* the Cholesky decomposition of A is required at each step.
*
* On entry, L is the Cholesky decomposition of matrix A, where both A and L
* have size n-1 x n-1. We are interested in the Cholesky decomposition of a new
* matrix A', which we get by adding a row and column to A. In Python notation:
* - A'[0:n-1, 0:n-1] = A;
* - A'[:,n-1] = A[n-1,:] = A_new
*
* On entry, the new column A_new, is stored as the n-th column of L if uplo ==
* CUBLAS_FILL_MODE_UPPER, else A_new is stored as the n-th row of L.
*
* On exit L contains the Cholesky decomposition of A'. In practice the elements
* of A_new are overwritten with new row/column of the L matrix.
*
* The uplo parameter is used to select the matrix layout.
* If (uplo != CUBLAS_FILL_MODE_UPPER) then the input arg L stores the
* lower triangular matrix L, so that A = L * L.T. Otherwise the input arg L
* stores an upper triangular matrix U: A = U.T * U.
*
* On exit L will be updated to store the Cholesky decomposition of A'.
*
* If the matrix is not positive definite, or very ill conditioned then the new
* diagonal element of L would be NaN. In such a case an exception is thrown.
* The eps argument can be used to override this behavior: if eps >= 0 then
* the diagonal element is replaced by eps in case the diagonal is NaN or
* smaller than eps. Note: for an iterative solver it is probably better to
* stop early in case of error, rather than relying on the eps parameter.
*
* Examples:
*
* - Lower triangular factorization:
* @code{.cpp}
* // Initialize arrays
* int ld_L = n_rows;
* rmm::device_uvector<math_t> L(ld_L * n_rows, stream);
* raft::linalg::choleskyRank1Update(handle, L, n_rows, ld_L, nullptr,
* &n_bytes, CUBLAS_FILL_MODE_LOWER,
* stream);
* rmm::device_uvector<char> workspace(n_bytes, stream);
*
 *   for (n=1; n<=n_rows; n++) {
 *     // Calculate a new row/column of matrix A into A_new
 *     // ...
 *     // Copy new row to L[n-1,:]
 *     RAFT_CUBLAS_TRY(cublasCopy(resource::get_cublas_handle(handle), n - 1, A_new, 1,
 *                                L + n - 1, ld_L, stream));
 *     // Update Cholesky factorization
 *     raft::linalg::choleskyRank1Update(
 *         handle, L, n, ld_L, workspace, &n_bytes, CUBLAS_FILL_MODE_LOWER,
 *         stream);
 *   }
* Now L stores the Cholesky decomposition of A: A = L * L.T
* @endcode
*
* - Upper triangular factorization:
* @code{.cpp}
* // Initialize arrays
* int ld_U = n_rows;
* rmm::device_uvector<math_t> U(ld_U * n_rows, stream);
* raft::linalg::choleskyRank1Update(handle, L, n_rows, ld_U, nullptr,
* &n_bytes, CUBLAS_FILL_MODE_UPPER,
* stream);
* rmm::device_uvector<char> workspace(stream, n_bytes, stream);
*
* for (n=1; n<=n_rows; n++) {
* // Calculate a new row/column of matrix A into array A_new
* // ...
* // Copy new row to U[:,n-1] (column major layout)
* raft::copy(U + ld_U * (n-1), A_new, n-1, stream);
* //
* // Update Cholesky factorization
* raft::linalg::choleskyRank1Update(
* handle, U, n, ld_U, workspace, &n_bytes, CUBLAS_FILL_MODE_UPPER,
* stream);
* }
* // Now U stores the Cholesky decomposition of A: A = U.T * U
* @endcode
*
* @param handle RAFT handle (used to retrieve cuBLAS handles).
* @param L device array for to store the triangular matrix L, and the new
* column of A in column major format, size [n*n]
* @param n number of elements in the new row.
* @param ld stride of columns in L
 * @param workspace device pointer to workspace; shall be nullptr or an array
* of size [n_bytes].
* @param n_bytes size of workspace is returned here if workspace==nullptr.
* @param stream CUDA stream
* @param uplo indicates whether L is stored as an upper or lower triangular
* matrix (CUBLAS_FILL_MODE_UPPER or CUBLAS_FILL_MODE_LOWER)
* @param eps numerical parameter that can act as a regularizer for ill
 * conditioned systems. Negative values mean no regularization.
*/
template <typename math_t>
void choleskyRank1Update(raft::resources const& handle,
                         math_t* L,
                         int n,
                         int ld,
                         void* workspace,
                         int* n_bytes,
                         cublasFillMode_t uplo,
                         cudaStream_t stream,
                         math_t eps = -1)
{
  // Thin forwarder; the full contract (workspace-size query via nullptr
  // workspace, eps regularization, uplo layout) is documented above.
  detail::choleskyRank1Update(handle, L, n, ld, workspace, n_bytes, uplo, stream, eps);
}
}; // namespace linalg
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/normalize.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/normalize.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/norm_types.hpp>
namespace raft {
namespace linalg {
/**
 * @defgroup normalize Row normalization (divide rows by their norm)
* @{
*/
/**
* @brief Divide rows by their norm defined by main_op, reduce_op and fin_op
*
* @tparam ElementType Input/Output data type
* @tparam IndexType Integer type used to for addressing
* @tparam MainLambda Type of main_op
* @tparam ReduceLambda Type of reduce_op
* @tparam FinalLambda Type of fin_op
* @param[in] handle raft::resources
* @param[in] in the input raft::device_matrix_view
* @param[out] out the output raft::device_matrix_view
* @param[in] init Initialization value, i.e identity element for the reduction operation
* @param[in] main_op Operation to apply to the elements before reducing them (e.g square for L2)
* @param[in] reduce_op Operation to reduce a pair of elements (e.g sum for L2)
* @param[in] fin_op Operation to apply once to the reduction result to finalize the norm
* computation (e.g sqrt for L2)
* @param[in] eps If the norm is below eps, the row is considered zero and no division is applied
*/
template <typename ElementType,
          typename IndexType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
void row_normalize(raft::resources const& handle,
                   raft::device_matrix_view<const ElementType, IndexType, row_major> in,
                   raft::device_matrix_view<ElementType, IndexType, row_major> out,
                   ElementType init,
                   MainLambda main_op,
                   ReduceLambda reduce_op,
                   FinalLambda fin_op,
                   ElementType eps = ElementType(1e-8))
{
  // Validate layout and shape agreement before dispatching to the fused kernel.
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(in.extent(0) == out.extent(0),
               "The number of rows of the input and output should be equal");
  RAFT_EXPECTS(in.extent(1) == out.extent(1),
               "The number of columns of the input and output should be equal");

  const IndexType n_rows = in.extent(0);
  const IndexType n_cols = in.extent(1);
  // Rows are contiguous in row-major storage, so the coalesced kernel applies.
  detail::coalesced_normalize(out.data_handle(),
                              in.data_handle(),
                              n_cols,
                              n_rows,
                              init,
                              resource::get_cuda_stream(handle),
                              main_op,
                              reduce_op,
                              fin_op,
                              eps);
}
/**
* @brief Divide rows by their norm.
*
* @tparam ElementType Input/Output data type
* @tparam IndexType Integer type used to for addressing
* @param[in] handle raft::resources
* @param[in] in the input raft::device_matrix_view
* @param[out] out the output raft::device_matrix_view
* @param[in] norm_type the type of norm to be applied
* @param[in] eps If the norm is below eps, the row is considered zero and no division is applied
*/
template <typename ElementType, typename IndexType>
void row_normalize(raft::resources const& handle,
                   raft::device_matrix_view<const ElementType, IndexType, row_major> in,
                   raft::device_matrix_view<ElementType, IndexType, row_major> out,
                   NormType norm_type,
                   ElementType eps = ElementType(1e-8))
{
  // Map the requested norm onto the generic (main_op, reduce_op, fin_op) triple.
  const ElementType init{0};
  if (norm_type == L1Norm) {
    // L1: sum of absolute values, no final transform.
    row_normalize(handle, in, out, init, raft::abs_op(), raft::add_op(), raft::identity_op(), eps);
  } else if (norm_type == L2Norm) {
    // L2: sum of squares followed by a square root.
    row_normalize(handle, in, out, init, raft::sq_op(), raft::add_op(), raft::sqrt_op(), eps);
  } else if (norm_type == LinfNorm) {
    // L-infinity: maximum absolute value.
    row_normalize(handle, in, out, init, raft::abs_op(), raft::max_op(), raft::identity_op(), eps);
  } else {
    THROW("Unsupported norm type: %d", norm_type);
  }
}
/** @} */
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/qr.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __QR_H
#define __QR_H
#pragma once
#include "detail/qr.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
/**
* @brief compute QR decomp and return only Q matrix
* @param handle: raft handle
* @param M: input matrix
* @param Q: Q matrix to be returned (on GPU)
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param stream cuda stream
*/
template <typename math_t>
void qrGetQ(raft::resources const& handle,
            const math_t* M,
            math_t* Q,
            int n_rows,
            int n_cols,
            cudaStream_t stream)
{
  // Thin forwarder: the cuSOLVER-based factorization lives in detail::qrGetQ.
  detail::qrGetQ(handle, M, Q, n_rows, n_cols, stream);
}
/**
* @brief compute QR decomp and return both Q and R matrices
* @param handle: raft handle
* @param M: input matrix
* @param Q: Q matrix to be returned (on GPU)
* @param R: R matrix to be returned (on GPU)
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param stream cuda stream
*/
template <typename math_t>
void qrGetQR(raft::resources const& handle,
             math_t* M,
             math_t* Q,
             math_t* R,
             int n_rows,
             int n_cols,
             cudaStream_t stream)
{
  // Thin forwarder: the cuSOLVER-based factorization lives in detail::qrGetQR.
  // Note: M is taken as a mutable pointer (the detail layer may use it as scratch —
  // presumably; confirm against detail::qrGetQR before relying on M's contents).
  detail::qrGetQR(handle, M, Q, R, n_rows, n_cols, stream);
}
/**
* @defgroup qr QR Decomposition
* @{
*/
/**
* @brief Compute the QR decomposition of matrix M and return only the Q matrix.
* @param[in] handle raft::resources
* @param[in] M Input raft::device_matrix_view
* @param[out] Q Output raft::device_matrix_view
*/
template <typename ElementType, typename IndexType>
void qr_get_q(raft::resources const& handle,
              raft::device_matrix_view<const ElementType, IndexType, raft::col_major> M,
              raft::device_matrix_view<ElementType, IndexType, raft::col_major> Q)
{
  // Q must have exactly as many elements as M.
  RAFT_EXPECTS(Q.size() == M.size(), "Size mismatch between Output and Input");
  const auto n_rows = M.extent(0);
  const auto n_cols = M.extent(1);
  qrGetQ(
    handle, M.data_handle(), Q.data_handle(), n_rows, n_cols, resource::get_cuda_stream(handle));
}
/**
* @brief Compute the QR decomposition of matrix M and return both the Q and R matrices.
* @param[in] handle raft::resources
* @param[in] M Input raft::device_matrix_view
* @param[in] Q Output raft::device_matrix_view
* @param[out] R Output raft::device_matrix_view
*/
template <typename ElementType, typename IndexType>
void qr_get_qr(raft::resources const& handle,
               raft::device_matrix_view<const ElementType, IndexType, raft::col_major> M,
               raft::device_matrix_view<ElementType, IndexType, raft::col_major> Q,
               raft::device_matrix_view<ElementType, IndexType, raft::col_major> R)
{
  // Only Q's size is validated against M; R's extents are not checked here.
  RAFT_EXPECTS(Q.size() == M.size(), "Size mismatch between Output and Input");
  // NOTE(review): M.data_handle() yields a const pointer while qrGetQR's first
  // parameter is a mutable math_t* — verify the intended template deduction of
  // math_t actually instantiates here.
  qrGetQR(handle,
          M.data_handle(),
          Q.data_handle(),
          R.data_handle(),
          M.extent(0),
          M.extent(1),
          resource::get_cuda_stream(handle));
}
/** @} */
}; // namespace linalg
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/reduce_rows_by_key.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __REDUCE_ROWS_BY_KEY
#define __REDUCE_ROWS_BY_KEY
#pragma once
#include "detail/reduce_rows_by_key.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
/**
 Small helper function to convert keys between int and char representations.
 Transforms ncols*nrows reads of int into 2*nrows reads of int + ncols*nrows reads of chars.
**/
template <typename IteratorT1, typename IteratorT2>
void convert_array(IteratorT1 dst, IteratorT2 src, int n, cudaStream_t st)
{
  // Element-wise conversion of n items from src to dst on stream st.
  detail::convert_array(dst, src, n, st);
}
/**
* @brief Computes the weighted reduction of matrix rows for each given key
*
* @tparam DataIteratorT Random-access iterator type, for reading input matrix
* (may be a simple pointer type)
* @tparam KeysIteratorT Random-access iterator type, for reading input keys
* (may be a simple pointer type)
 * @tparam WeightT Type of the input weights
 * @tparam SumsT Type of the output sums
 * @tparam IdxT Index type
*
* @param[in] d_A Input data array (lda x nrows)
* @param[in] lda Real row size for input data, d_A
* @param[in] d_keys Keys for each row (1 x nrows)
* @param[in] d_weights Weights for each observation in d_A (1 x nrows)
* @param[out] d_keys_char Scratch memory for conversion of keys to char
* @param[in] nrows Number of rows in d_A and d_keys
* @param[in] ncols Number of data columns in d_A
* @param[in] nkeys Number of unique keys in d_keys
* @param[out] d_sums Row sums by key (ncols x d_keys)
* @param[in] stream CUDA stream
* @param[in] reset_sums Whether to reset the output sums to zero before reducing
*/
template <typename DataIteratorT,
          typename KeysIteratorT,
          typename WeightT,
          typename SumsT,
          typename IdxT>
void reduce_rows_by_key(const DataIteratorT d_A,
                        IdxT lda,
                        const KeysIteratorT d_keys,
                        const WeightT* d_weights,
                        char* d_keys_char,
                        IdxT nrows,
                        IdxT ncols,
                        IdxT nkeys,
                        SumsT* d_sums,
                        cudaStream_t stream,
                        bool reset_sums = true)
{
  // Thin forwarder to the detail implementation. d_weights may be nullptr for an
  // unweighted reduction (the no-weights overload passes a typed null pointer).
  detail::reduce_rows_by_key(
    d_A, lda, d_keys, d_weights, d_keys_char, nrows, ncols, nkeys, d_sums, stream, reset_sums);
}
/**
 * @brief Computes the (unweighted) reduction of matrix rows for each given key.
 * Forwards to the weighted overload with a null weight pointer.
 * @tparam DataIteratorT Random-access iterator type, for reading input matrix (may be a simple
 * pointer type)
 * @tparam KeysIteratorT Random-access iterator type, for reading input keys (may be a simple
 * pointer type)
 * @tparam SumsT Type of the output sums
 * @tparam IdxT Index type
 * @param[in] d_A Input data array (lda x nrows)
 * @param[in] lda Real row size for input data, d_A
 * @param[in] d_keys Keys for each row (1 x nrows)
 * @param[out] d_keys_char Scratch memory for conversion of keys to char
 * @param[in] nrows Number of rows in d_A and d_keys
 * @param[in] ncols Number of data columns in d_A
 * @param[in] nkeys Number of unique keys in d_keys
 * @param[out] d_sums Row sums by key (ncols x nkeys)
 * @param[in] stream CUDA stream
 * @param[in] reset_sums Whether to reset the output sums to zero before reducing
 */
template <typename DataIteratorT, typename KeysIteratorT, typename SumsT, typename IdxT>
void reduce_rows_by_key(const DataIteratorT d_A,
                        IdxT lda,
                        const KeysIteratorT d_keys,
                        char* d_keys_char,
                        IdxT nrows,
                        IdxT ncols,
                        IdxT nkeys,
                        SumsT* d_sums,
                        cudaStream_t stream,
                        bool reset_sums = true)
{
  // The weight type is irrelevant when no weights are given; reuse the data's
  // value type so the weighted overload can be called with a null pointer.
  typedef typename std::iterator_traits<DataIteratorT>::value_type DataType;
  reduce_rows_by_key(d_A,
                     lda,
                     d_keys,
                     static_cast<DataType*>(nullptr),
                     d_keys_char,
                     nrows,
                     ncols,
                     nkeys,
                     d_sums,
                     stream,
                     reset_sums);
}
/**
 * @defgroup reduce_rows_by_key Reduce Across Rows by Key
 * @{
 */
/**
 * @brief Computes the weighted sum-reduction of matrix rows for each given key
 * TODO: Support generic reduction lambdas https://github.com/rapidsai/raft/issues/860
 * @tparam ElementType data-type of input and output
 * @tparam KeyType data-type of keys
 * @tparam WeightType data-type of weights
 * @tparam IndexType index type
 * @param[in] handle raft::resources
 * @param[in] d_A Input raft::device_mdspan (ncols * nrows)
 * @param[in] d_keys Keys for each row raft::device_vector_view (1 x nrows)
 * @param[out] d_sums Row sums by key raft::device_matrix_view (ncols x n_unique_keys)
 * @param[in] n_unique_keys Number of unique keys in d_keys
 * @param[out] d_keys_char Scratch memory for conversion of keys to char, raft::device_vector_view
 * @param[in] d_weights Weights for each observation in d_A raft::device_vector_view optional (1
 * x nrows)
 * @param[in] reset_sums Whether to reset the output sums to zero before reducing
 */
template <typename ElementType, typename KeyType, typename WeightType, typename IndexType>
void reduce_rows_by_key(
  raft::resources const& handle,
  raft::device_matrix_view<const ElementType, IndexType, raft::row_major> d_A,
  raft::device_vector_view<const KeyType, IndexType> d_keys,
  raft::device_matrix_view<ElementType, IndexType, raft::row_major> d_sums,
  IndexType n_unique_keys,
  raft::device_vector_view<char, IndexType> d_keys_char,
  std::optional<raft::device_vector_view<const WeightType, IndexType>> d_weights = std::nullopt,
  bool reset_sums = true)
{
  // Bug fix: the original expression compared d_A.extent(0) with itself
  // (always true), so a wrongly-sized output was never rejected. The intended
  // check is that the output's column count matches the input's (extent(0)
  // here is ncols; it is also passed as `ncols` to the pointer API below).
  RAFT_EXPECTS(d_sums.extent(0) == d_A.extent(0) && d_sums.extent(1) == n_unique_keys,
               "Output is not of size ncols * n_unique_keys");
  RAFT_EXPECTS(d_keys.extent(0) == d_A.extent(1), "Keys is not of size nrows");
  if (d_weights) {
    RAFT_EXPECTS(d_weights.value().extent(0) == d_A.extent(1), "Weights is not of size nrows");
    reduce_rows_by_key(d_A.data_handle(),
                       d_A.extent(0),
                       d_keys.data_handle(),
                       d_weights.value().data_handle(),
                       d_keys_char.data_handle(),
                       d_A.extent(1),
                       d_A.extent(0),
                       n_unique_keys,
                       d_sums.data_handle(),
                       resource::get_cuda_stream(handle),
                       reset_sums);
  } else {
    reduce_rows_by_key(d_A.data_handle(),
                       d_A.extent(0),
                       d_keys.data_handle(),
                       d_keys_char.data_handle(),
                       d_A.extent(1),
                       d_A.extent(0),
                       n_unique_keys,
                       d_sums.data_handle(),
                       resource::get_cuda_stream(handle),
                       reset_sums);
  }
}
/** @} */ // end of group reduce_rows_by_key
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/power.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __POWER_H
#define __POWER_H
#pragma once
#include <raft/core/host_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
 * @defgroup ScalarOps Scalar operations on the input buffer
 * @tparam in_t Input data-type
 * @tparam out_t Output data-type
 * @param out the output buffer
 * @param in the input buffer
 * @param scalar the scalar used in the operations
 * @param len number of elements in the input buffer
 * @param stream cuda stream where to launch work
 * @{
 */
// Element-wise power with a constant exponent via raft::pow_const_op
// (presumably out[i] = in[i] ^ scalar — confirm against pow_const_op).
template <typename in_t, typename out_t = in_t, typename IdxType = int>
void powerScalar(out_t* out, const in_t* in, const in_t scalar, IdxType len, cudaStream_t stream)
{
  raft::linalg::unaryOp(out, in, len, raft::pow_const_op<in_t>(scalar), stream);
}
/** @} */
/**
 * @defgroup BinaryOps Element-wise binary operations on the input buffers
 * @tparam in_t Input data-type
 * @tparam out_t Output data-type
 * @tparam IdxType Integer type used to for addressing
 * @param out the output buffer
 * @param in1 the first input buffer
 * @param in2 the second input buffer
 * @param len number of elements in the input buffers
 * @param stream cuda stream where to launch work
 * @{
 */
// Element-wise binary power via raft::pow_op
// (presumably out[i] = in1[i] ^ in2[i] — confirm against pow_op).
template <typename in_t, typename out_t = in_t, typename IdxType = int>
void power(out_t* out, const in_t* in1, const in_t* in2, IdxType len, cudaStream_t stream)
{
  raft::linalg::binaryOp(out, in1, in2, len, raft::pow_op(), stream);
}
/** @} */
/**
* @defgroup power Power Arithmetic
* @{
*/
/**
 * @brief Elementwise power operation on the input buffers
 *
 * All three views must be contiguous and equally sized. Dispatches to
 * 32-bit indexing when the element count fits in uint32_t, otherwise to
 * 64-bit indexing (same dispatch pattern as the other element-wise ops
 * in this file).
 *
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle raft::resources
 * @param[in] in1 First Input
 * @param[in] in2 Second Input
 * @param[out] out Output
 */
template <typename InType,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void power(raft::resources const& handle, InType in1, InType in2, OutType out)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in1), "Input 1 must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in2), "Input 2 must be contiguous");
  RAFT_EXPECTS(out.size() == in1.size() && in1.size() == in2.size(),
               "Size mismatch between Output and Inputs");
  auto stream    = resource::get_cuda_stream(handle);
  const auto len = out.size();
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    power<src_value_t, dst_value_t, std::uint32_t>(out.data_handle(),
                                                   in1.data_handle(),
                                                   in2.data_handle(),
                                                   static_cast<std::uint32_t>(len),
                                                   stream);
  } else {
    power<src_value_t, dst_value_t, std::uint64_t>(out.data_handle(),
                                                   in1.data_handle(),
                                                   in2.data_handle(),
                                                   static_cast<std::uint64_t>(len),
                                                   stream);
  }
}
/**
 * @brief Elementwise power of host scalar to input
 *
 * Raises each input element to the host-supplied constant via powerScalar.
 *
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @tparam ScalarIdxType Index Type of scalar
 * @param[in] handle raft::resources
 * @param[in] in Input
 * @param[out] out Output
 * @param[in] scalar raft::host_scalar_view
 */
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void power_scalar(
  raft::resources const& handle,
  InType in,
  OutType out,
  const raft::host_scalar_view<const typename InType::value_type, ScalarIdxType> scalar)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  auto stream         = resource::get_cuda_stream(handle);
  const auto len      = out.size();
  // Host-side scalar view: safe to dereference here on the host.
  const auto exponent = *scalar.data_handle();
  // 32-/64-bit index dispatch, matching the other element-wise ops here.
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    powerScalar<src_value_t, dst_value_t, std::uint32_t>(
      out.data_handle(), in.data_handle(), exponent, static_cast<std::uint32_t>(len), stream);
  } else {
    powerScalar<src_value_t, dst_value_t, std::uint64_t>(
      out.data_handle(), in.data_handle(), exponent, static_cast<std::uint64_t>(len), stream);
  }
}
/** @} */ // end of group add
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/divide.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DIVIDE_H
#define __DIVIDE_H
#pragma once
#include "detail/divide.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
 * @defgroup ScalarOps Scalar operations on the input buffer
 * @tparam OutT output data-type upon which the math operation will be performed
 * @tparam InT input data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param out the output buffer
 * @param in the input buffer
 * @param scalar the scalar used in the operations
 * @param len number of elements in the input buffer
 * @param stream cuda stream where to launch work
 * @{
 */
// Element-wise out[i] = in[i] / scalar; forwards to the detail implementation.
template <typename InT, typename OutT = InT, typename IdxType = int>
void divideScalar(OutT* out, const InT* in, InT scalar, IdxType len, cudaStream_t stream)
{
  detail::divideScalar(out, in, scalar, len, stream);
}
/** @} */
/**
* @defgroup divide Division Arithmetic
* @{
*/
/**
 * @brief Elementwise division of input by host scalar
 *
 * out[i] = in[i] / *scalar, where the scalar lives in host memory.
 *
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @tparam ScalarIdxType Index Type of scalar
 * @param[in] handle raft::resources
 * @param[in] in Input
 * @param[out] out Output
 * @param[in] scalar raft::host_scalar_view
 */
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void divide_scalar(raft::resources const& handle,
                   InType in,
                   OutType out,
                   raft::host_scalar_view<const typename InType::value_type, ScalarIdxType> scalar)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  auto stream        = resource::get_cuda_stream(handle);
  const auto len     = out.size();
  // Host-side scalar view: safe to dereference here on the host.
  const auto divisor = *scalar.data_handle();
  // Prefer 32-bit indexing when the length permits, like the other ops here.
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    divideScalar<src_value_t, dst_value_t, std::uint32_t>(
      out.data_handle(), in.data_handle(), divisor, static_cast<std::uint32_t>(len), stream);
  } else {
    divideScalar<src_value_t, dst_value_t, std::uint64_t>(
      out.data_handle(), in.data_handle(), divisor, static_cast<std::uint64_t>(len), stream);
  }
}
/** @} */ // end of group add
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/transpose.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __TRANSPOSE_H
#define __TRANSPOSE_H
#pragma once
#include "detail/transpose.cuh"
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
/**
 * @brief Out-of-place transpose of a column major input matrix.
 * (Doc note: the previous "using Jacobi method" wording looked like a
 * copy-paste from an SVD doc comment; transposition involves no Jacobi
 * iterations — forwards straight to detail::transpose.)
 * @param handle: raft handle
 * @param in: input matrix (column major, n_rows x n_cols)
 * @param out: output. Transposed input matrix
 * @param n_rows: number rows of input matrix
 * @param n_cols: number columns of input matrix
 * @param stream: cuda stream
 */
template <typename math_t>
void transpose(raft::resources const& handle,
               math_t* in,
               math_t* out,
               int n_rows,
               int n_cols,
               cudaStream_t stream)
{
  detail::transpose(handle, in, out, n_rows, n_cols, stream);
}
/**
 * @brief In-place transpose of a square (n x n) column major matrix.
 * @param inout: input and output matrix (overwritten with its transpose)
 * @param n: number of rows and columns of input matrix
 * @param stream: cuda stream
 */
template <typename math_t>
void transpose(math_t* inout, int n, cudaStream_t stream)
{
  detail::transpose(inout, n, stream);
}
/**
* @defgroup transpose Matrix transpose
* @{
*/
/**
 * @brief Transpose a matrix. The output has same layout policy as the input.
 *
 * @tparam T Data type of input matrix element.
 * @tparam IndexType Index type of matrix extent.
 * @tparam LayoutPolicy Layout type of the input matrix. When layout is strided, it can
 *                      be a submatrix of a larger matrix. Arbitrary stride is not supported.
 * @tparam AccessorPolicy Accessor for the input and output, must be valid accessor on
 *                        device.
 *
 * @param[in] handle raft handle for managing expensive cuda resources.
 * @param[in] in     Input matrix.
 * @param[out] out   Output matrix, storage is pre-allocated by caller.
 */
template <typename T, typename IndexType, typename LayoutPolicy, typename AccessorPolicy>
auto transpose(raft::resources const& handle,
               raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> in,
               raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> out)
  -> std::enable_if_t<std::is_floating_point_v<T>, void>
{
  RAFT_EXPECTS(out.extent(0) == in.extent(1), "Invalid shape for transpose.");
  RAFT_EXPECTS(out.extent(1) == in.extent(0), "Invalid shape for transpose.");
  if constexpr (std::is_same_v<typename decltype(in)::layout_type, layout_c_contiguous>) {
    // Row-major (C-contiguous) input.
    detail::transpose_row_major_impl(handle, in, out);
  } else if constexpr (std::is_same_v<typename decltype(in)::layout_type, layout_f_contiguous>) {
    // Col-major (F-contiguous) input.
    // Fix: this branch was a runtime `else if` over a compile-time-constant
    // condition, which needlessly instantiated the strided fallback below for
    // contiguous layouts; `else if constexpr` discards it at compile time.
    detail::transpose_col_major_impl(handle, in, out);
  } else {
    // Strided layout: at least one stride must be 1 (a row- or col-major
    // submatrix); arbitrary strides are unsupported.
    RAFT_EXPECTS(in.stride(0) == 1 || in.stride(1) == 1, "Unsupported matrix layout.");
    if (in.stride(1) == 1) {
      // row-major submatrix
      detail::transpose_row_major_impl(handle, in, out);
    } else {
      // col-major submatrix
      detail::transpose_col_major_impl(handle, in, out);
    }
  }
}
/** @} */ // end of group transpose
}; // end namespace linalg
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/add.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ADD_H
#define __ADD_H
#pragma once
#include "detail/add.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/util/input_validation.hpp>
namespace raft {
namespace linalg {
/**
 * @ingroup arithmetic
 * @brief Elementwise scalar add operation on the input buffer
 *
 * out[i] = in[i] + scalar for each of the len elements.
 *
 * @tparam InT input data-type. Also the data-type upon which the math ops
 * will be performed
 * @tparam OutT output data-type
 * @tparam IdxType Integer type used to for addressing
 *
 * @param out the output buffer
 * @param in the input buffer
 * @param scalar the scalar used in the operations
 * @param len number of elements in the input buffer
 * @param stream cuda stream where to launch work
 */
template <typename InT, typename OutT = InT, typename IdxType = int>
void addScalar(OutT* out, const InT* in, const InT scalar, IdxType len, cudaStream_t stream)
{
  detail::addScalar(out, in, scalar, len, stream);
}
/**
 * @brief Elementwise add operation on the input buffers
 *
 * out[i] = in1[i] + in2[i] for each of the len elements.
 *
 * @tparam InT input data-type. Also the data-type upon which the math ops
 * will be performed
 * @tparam OutT output data-type
 * @tparam IdxType Integer type used to for addressing
 *
 * @param out the output buffer
 * @param in1 the first input buffer
 * @param in2 the second input buffer
 * @param len number of elements in the input buffers
 * @param stream cuda stream where to launch work
 */
template <typename InT, typename OutT = InT, typename IdxType = int>
void add(OutT* out, const InT* in1, const InT* in2, IdxType len, cudaStream_t stream)
{
  detail::add(out, in1, in2, len, stream);
}
/** Add the single value pointed to by singleScalarDev (resident in device
 * memory) to inDev[i] and write the result to outDev[i].
 * (Doc fix: the previous comment said "Subtract", but this function adds —
 * see the name and the detail::addDevScalar call.)
 * @tparam InT input data-type. Also the data-type upon which the math ops
 * will be performed
 * @tparam OutT output data-type
 * @tparam IdxType Integer type used to for addressing
 * @param outDev the output buffer
 * @param inDev the input buffer
 * @param singleScalarDev pointer to the scalar located in device memory
 * @param len number of elements in the input and output buffer
 * @param stream cuda stream
 */
template <typename InT, typename OutT = InT, typename IdxType = int>
void addDevScalar(
  OutT* outDev, const InT* inDev, const InT* singleScalarDev, IdxType len, cudaStream_t stream)
{
  detail::addDevScalar(outDev, inDev, singleScalarDev, len, stream);
}
/**
* @defgroup add_dense Addition Arithmetic
* @{
*/
/**
 * @brief Elementwise add operation
 *
 * Computes out[i] = in1[i] + in2[i] over equally-sized contiguous views.
 *
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @param[in] handle raft::resources
 * @param[in] in1 First Input
 * @param[in] in2 Second Input
 * @param[out] out Output
 */
template <typename InType,
          typename OutType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void add(raft::resources const& handle, InType in1, InType in2, OutType out)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in1), "Input 1 must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in2), "Input 2 must be contiguous");
  RAFT_EXPECTS(out.size() == in1.size() && in1.size() == in2.size(),
               "Size mismatch between Output and Inputs");
  auto stream    = resource::get_cuda_stream(handle);
  const auto len = out.size();
  // Use 32-bit indexing when the length fits, like the other ops here.
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    add<src_value_t, dst_value_t, std::uint32_t>(out.data_handle(),
                                                 in1.data_handle(),
                                                 in2.data_handle(),
                                                 static_cast<std::uint32_t>(len),
                                                 stream);
  } else {
    add<src_value_t, dst_value_t, std::uint64_t>(out.data_handle(),
                                                 in1.data_handle(),
                                                 in2.data_handle(),
                                                 static_cast<std::uint64_t>(len),
                                                 stream);
  }
}
/**
 * @brief Elementwise addition of device scalar to input
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @tparam ScalarIdxType Index Type of scalar
 * @param[in] handle raft::resources
 * @param[in] in Input
 * @param[out] out Output
 * @param[in] scalar raft::device_scalar_view
 */
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void add_scalar(raft::resources const& handle,
                InType in,
                OutType out,
                raft::device_scalar_view<const typename InType::value_type, ScalarIdxType> scalar)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  auto stream    = resource::get_cuda_stream(handle);
  const auto len = out.size();
  // The scalar resides in device memory: pass its pointer through untouched
  // (it must be dereferenced on the device, never here on the host).
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    addDevScalar<src_value_t, dst_value_t, std::uint32_t>(out.data_handle(),
                                                          in.data_handle(),
                                                          scalar.data_handle(),
                                                          static_cast<std::uint32_t>(len),
                                                          stream);
  } else {
    addDevScalar<src_value_t, dst_value_t, std::uint64_t>(out.data_handle(),
                                                          in.data_handle(),
                                                          scalar.data_handle(),
                                                          static_cast<std::uint64_t>(len),
                                                          stream);
  }
}
/**
 * @brief Elementwise addition of host scalar to input
 * @tparam InType Input Type raft::device_mdspan
 * @tparam OutType Output Type raft::device_mdspan
 * @tparam ScalarIdxType Index Type of scalar
 * @param[in] handle raft::resources
 * @param[in] in Input
 * @param[out] out Output
 * @param[in] scalar raft::host_scalar_view
 */
template <typename InType,
          typename OutType,
          typename ScalarIdxType,
          typename = raft::enable_if_input_device_mdspan<InType>,
          typename = raft::enable_if_output_device_mdspan<OutType>>
void add_scalar(raft::resources const& handle,
                const InType in,
                OutType out,
                raft::host_scalar_view<const typename InType::value_type, ScalarIdxType> scalar)
{
  using src_value_t = typename InType::value_type;
  using dst_value_t = typename OutType::value_type;
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
  RAFT_EXPECTS(out.size() == in.size(), "Size mismatch between Output and Input");
  auto stream       = resource::get_cuda_stream(handle);
  const auto len    = out.size();
  // Host-side scalar view: safe to dereference here on the host.
  const auto addend = *scalar.data_handle();
  if (len <= std::numeric_limits<std::uint32_t>::max()) {
    addScalar<src_value_t, dst_value_t, std::uint32_t>(
      out.data_handle(), in.data_handle(), addend, static_cast<std::uint32_t>(len), stream);
  } else {
    addScalar<src_value_t, dst_value_t, std::uint64_t>(
      out.data_handle(), in.data_handle(), addend, static_cast<std::uint64_t>(len), stream);
  }
}
/** @} */ // end of group add
}; // end namespace linalg
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/linalg/lstsq.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __LSTSQ_H
#define __LSTSQ_H
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/lstsq.cuh>
namespace raft {
namespace linalg {
/** Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S Vt` using default cuSOLVER routine.
 *
 * @param[in] handle raft handle
 * @param[inout] A input feature matrix.
 * Warning: the content of this matrix is modified by the cuSOLVER routines.
 * @param[in] n_rows number of rows in A
 * @param[in] n_cols number of columns in A
 * @param[in] b input target vector (passed const at this interface).
 * NOTE(review): the previous comment warned that b is modified, which this
 * const signature cannot do — confirm against detail::lstsqSvdQR.
 * @param[out] w output coefficient vector
 * @param[in] stream cuda stream for ordering operations
 */
template <typename math_t>
void lstsqSvdQR(raft::resources const& handle,
                math_t* A,
                const int n_rows,
                const int n_cols,
                const math_t* b,
                math_t* w,
                cudaStream_t stream)
{
  detail::lstsqSvdQR(handle, A, n_rows, n_cols, b, w, stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S V^T` using Jacobi iterations (cuSOLVER).
 *
 * @param[in] handle raft handle
 * @param[inout] A input feature matrix.
 * Warning: the content of this matrix is modified by the cuSOLVER routines.
 * @param[in] n_rows number of rows in A
 * @param[in] n_cols number of columns in A
 * @param[in] b input target vector (passed const at this interface).
 * NOTE(review): the previous comment warned that b is modified, which this
 * const signature cannot do — confirm against detail::lstsqSvdJacobi.
 * @param[out] w output coefficient vector
 * @param[in] stream cuda stream for ordering operations
 */
template <typename math_t>
void lstsqSvdJacobi(raft::resources const& handle,
                    math_t* A,
                    const int n_rows,
                    const int n_cols,
                    const math_t* b,
                    math_t* w,
                    cudaStream_t stream)
{
  detail::lstsqSvdJacobi(handle, A, n_rows, n_cols, b, w, stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * via eigenvalue decomposition of `A^T * A` (covariance matrix for dataset A).
 * (`w = (A^T A)^-1 A^T b`)
 *
 * @param[in] handle raft handle
 * @param[in] A input feature matrix (n_rows x n_cols); const at this interface
 * @param[in] n_rows number of rows in A
 * @param[in] n_cols number of columns in A
 * @param[in] b input target vector; const at this interface
 * @param[out] w output coefficient vector
 * @param[in] stream cuda stream for ordering operations
 */
template <typename math_t>
void lstsqEig(raft::resources const& handle,
              const math_t* A,
              const int n_rows,
              const int n_cols,
              const math_t* b,
              math_t* w,
              cudaStream_t stream)
{
  detail::lstsqEig(handle, A, n_rows, n_cols, b, w, stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * via QR decomposition of `A = QR`.
 * (triangular system of equations `Rw = Q^T b`)
 *
 * @param[in] handle raft handle
 * @param[inout] A input feature matrix.
 * Warning: the content of this matrix is modified by the cuSOLVER routines.
 * @param[in] n_rows number of rows in A
 * @param[in] n_cols number of columns in A
 * @param[inout] b input target vector.
 * Warning: the content of this vector is modified by the cuSOLVER routines.
 * @param[out] w output coefficient vector
 * @param[in] stream cuda stream for ordering operations
 */
template <typename math_t>
void lstsqQR(raft::resources const& handle,
             math_t* A,
             const int n_rows,
             const int n_cols,
             math_t* b,
             math_t* w,
             cudaStream_t stream)
{
  // Thin forwarding wrapper over the detail implementation.
  detail::lstsqQR(handle, A, n_rows, n_cols, b, w, stream);
}
/**
* @defgroup lstsq Least Squares Methods
* @{
*/
/**
 * @brief Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S Vt`.
 *
 * @tparam ValueType the data-type of input/output
 * @param[in] handle raft::resources
 * @param[inout] A input raft::device_matrix_view
 *               Warning: the content of this matrix is modified.
 * @param[inout] b input target raft::device_vector_view
 *               Warning: the content of this vector is modified.
 * @param[out] w output coefficient raft::device_vector_view
 */
template <typename ValueType, typename IndexType>
void lstsq_svd_qr(raft::resources const& handle,
                  raft::device_matrix_view<const ValueType, IndexType, raft::col_major> A,
                  raft::device_vector_view<const ValueType, IndexType> b,
                  raft::device_vector_view<ValueType, IndexType> w)
{
  // A is (n_rows x n_cols), column major.
  const auto n_rows = A.extent(0);
  const auto n_cols = A.extent(1);
  RAFT_EXPECTS(n_cols == w.size(), "Size mismatch between A and w");
  RAFT_EXPECTS(n_rows == b.size(), "Size mismatch between A and b");
  // The legacy pointer API takes mutable pointers (the solver overwrites its
  // inputs), hence the const_casts on views declared const.
  lstsqSvdQR(handle,
             const_cast<ValueType*>(A.data_handle()),
             n_rows,
             n_cols,
             const_cast<ValueType*>(b.data_handle()),
             w.data_handle(),
             resource::get_cuda_stream(handle));
}
/**
 * @brief Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S V^T` using Jacobi iterations.
 *
 * @tparam ValueType the data-type of input/output
 * @param[in] handle raft::resources
 * @param[inout] A input raft::device_matrix_view
 *               Warning: the content of this matrix is modified.
 * @param[inout] b input target raft::device_vector_view
 *               Warning: the content of this vector is modified.
 * @param[out] w output coefficient raft::device_vector_view
 */
template <typename ValueType, typename IndexType>
void lstsq_svd_jacobi(raft::resources const& handle,
                      raft::device_matrix_view<const ValueType, IndexType, raft::col_major> A,
                      raft::device_vector_view<const ValueType, IndexType> b,
                      raft::device_vector_view<ValueType, IndexType> w)
{
  // A is (n_rows x n_cols), column major.
  const auto n_rows = A.extent(0);
  const auto n_cols = A.extent(1);
  RAFT_EXPECTS(n_cols == w.size(), "Size mismatch between A and w");
  RAFT_EXPECTS(n_rows == b.size(), "Size mismatch between A and b");
  // const_cast needed: the legacy pointer API takes mutable pointers.
  lstsqSvdJacobi(handle,
                 const_cast<ValueType*>(A.data_handle()),
                 n_rows,
                 n_cols,
                 const_cast<ValueType*>(b.data_handle()),
                 w.data_handle(),
                 resource::get_cuda_stream(handle));
}
/**
 * @brief Solves the linear ordinary least squares problem `Aw = b`
 * via eigenvalue decomposition of `A^T * A` (covariance matrix for dataset A).
 * (`w = (A^T A)^-1 A^T b`)
 *
 * @tparam ValueType the data-type of input/output
 * @param[in] handle raft::resources
 * @param[inout] A input raft::device_matrix_view
 *               Warning: the content of this matrix is modified by the cuSOLVER routines.
 * @param[inout] b input target raft::device_vector_view
 *               Warning: the content of this vector is modified by the cuSOLVER routines.
 * @param[out] w output coefficient raft::device_vector_view
 */
template <typename ValueType, typename IndexType>
void lstsq_eig(raft::resources const& handle,
               raft::device_matrix_view<const ValueType, IndexType, raft::col_major> A,
               raft::device_vector_view<const ValueType, IndexType> b,
               raft::device_vector_view<ValueType, IndexType> w)
{
  // A is (n_rows x n_cols), column major.
  const auto n_rows = A.extent(0);
  const auto n_cols = A.extent(1);
  RAFT_EXPECTS(n_cols == w.size(), "Size mismatch between A and w");
  RAFT_EXPECTS(n_rows == b.size(), "Size mismatch between A and b");
  // const_cast needed: the legacy pointer API takes mutable pointers.
  lstsqEig(handle,
           const_cast<ValueType*>(A.data_handle()),
           n_rows,
           n_cols,
           const_cast<ValueType*>(b.data_handle()),
           w.data_handle(),
           resource::get_cuda_stream(handle));
}
/**
 * @brief Solves the linear ordinary least squares problem `Aw = b`
 * via QR decomposition of `A = QR`.
 * (triangular system of equations `Rw = Q^T b`)
 *
 * @tparam ValueType the data-type of input/output
 * @param[in] handle raft::resources
 * @param[inout] A input raft::device_matrix_view
 *               Warning: the content of this matrix is modified.
 * @param[inout] b input target raft::device_vector_view
 *               Warning: the content of this vector is modified.
 * @param[out] w output coefficient raft::device_vector_view
 */
template <typename ValueType, typename IndexType>
void lstsq_qr(raft::resources const& handle,
              raft::device_matrix_view<const ValueType, IndexType, raft::col_major> A,
              raft::device_vector_view<const ValueType, IndexType> b,
              raft::device_vector_view<ValueType, IndexType> w)
{
  // A is (n_rows x n_cols), column major.
  const auto n_rows = A.extent(0);
  const auto n_cols = A.extent(1);
  RAFT_EXPECTS(n_cols == w.size(), "Size mismatch between A and w");
  RAFT_EXPECTS(n_rows == b.size(), "Size mismatch between A and b");
  // const_cast needed: the legacy pointer API takes mutable pointers.
  lstsqQR(handle,
          const_cast<ValueType*>(A.data_handle()),
          n_rows,
          n_cols,
          const_cast<ValueType*>(b.data_handle()),
          w.data_handle(),
          resource::get_cuda_stream(handle));
}
/** @} */ // end of lstsq
}; // namespace linalg
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/lanczos.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// for cmath:
#define _USE_MATH_DEFINES
#include <cmath>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <vector>
#include <cuda.h>
#include <curand.h>
#include "cublas_wrappers.hpp"
#include <raft/core/resources.hpp>
#include <raft/spectral/detail/lapack.hpp>
#include <raft/spectral/detail/warn_dbg.hpp>
#include <raft/spectral/matrix_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
namespace detail {
// curandGeneratorNormalX
// Type-dispatch overload: single-precision normal generation.
inline curandStatus_t curandGenerateNormalX(
  curandGenerator_t generator, float* outputPtr, size_t n, float mean, float stddev)
{
  return curandGenerateNormal(generator, outputPtr, n, mean, stddev);
}
// Type-dispatch overload: double-precision normal generation.
inline curandStatus_t curandGenerateNormalX(
  curandGenerator_t generator, double* outputPtr, size_t n, double mean, double stddev)
{
  return curandGenerateNormalDouble(generator, outputPtr, n, mean, stddev);
}
// =========================================================
// Helper functions
// =========================================================
/**
* @brief Perform Lanczos iteration
* Lanczos iteration is performed on a shifted matrix A+shift*I.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param A Matrix.
* @param iter Pointer to current Lanczos iteration. On exit, the
* variable is set equal to the final Lanczos iteration.
* @param maxIter Maximum Lanczos iteration. This function will
* perform a maximum of maxIter-*iter iterations.
* @param shift Matrix shift.
* @param tol Convergence tolerance. Lanczos iteration will
* terminate when the residual norm (i.e. entry in beta_host) is
* less than tol.
* @param reorthogonalize Whether to reorthogonalize Lanczos
* vectors.
* @param alpha_host (Output, host memory, maxIter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Output, host memory, maxIter entries)
* Off-diagonal entries of Lanczos system.
* @param lanczosVecs_dev (Input/output, device memory,
* n*(maxIter+1) entries) Lanczos vectors. Vectors are stored as
* columns of a column-major matrix with dimensions
* n x (maxIter+1).
* @param work_dev (Output, device memory, maxIter entries)
* Workspace. Not needed if full reorthogonalization is disabled.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
int performLanczosIteration(raft::resources const& handle,
                            spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
                            index_type_t* iter,
                            index_type_t maxIter,
                            value_type_t shift,
                            value_type_t tol,
                            bool reorthogonalize,
                            value_type_t* __restrict__ alpha_host,
                            value_type_t* __restrict__ beta_host,
                            value_type_t* __restrict__ lanczosVecs_dev,
                            value_type_t* __restrict__ work_dev)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------

  // Useful variables
  constexpr value_type_t one    = 1;
  constexpr value_type_t negOne = -1;
  constexpr value_type_t zero   = 0;
  value_type_t alpha;

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  RAFT_EXPECTS(A != nullptr, "Null matrix pointer.");

  index_type_t n = A->nrows_;

  // NOTE(review): results of cublasdot/cublasnrm2 below are read from host
  // memory immediately after the call; this relies on the caller having set
  // CUBLAS_POINTER_MODE_HOST (done in computeSmallestEigenvectors) — confirm
  // for any new call sites.

  // -------------------------------------------------------
  // Compute second Lanczos vector
  // -------------------------------------------------------
  if (*iter <= 0) {
    *iter = 1;

    // Apply matrix
    // (copy the first vector into column 1 first so the SpMV can accumulate
    //  shift*v into the output column: col1 = A*col0 + shift*col1)
    if (shift != 0)
      RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + n,
                                    lanczosVecs_dev,
                                    n * sizeof(value_type_t),
                                    cudaMemcpyDeviceToDevice,
                                    stream));
    A->mv(1, lanczosVecs_dev, shift, lanczosVecs_dev + n);

    // Orthogonalize Lanczos vector: alpha_0 = <q_0, q_1>, then q_1 -= alpha_0*q_0
    RAFT_CUBLAS_TRY(cublasdot(
      cublas_h, n, lanczosVecs_dev, 1, lanczosVecs_dev + IDX(0, 1, n), 1, alpha_host, stream));

    alpha = -alpha_host[0];
    RAFT_CUBLAS_TRY(cublasaxpy(
      cublas_h, n, &alpha, lanczosVecs_dev, 1, lanczosVecs_dev + IDX(0, 1, n), 1, stream));
    RAFT_CUBLAS_TRY(cublasnrm2(cublas_h, n, lanczosVecs_dev + IDX(0, 1, n), 1, beta_host, stream));

    // Check if Lanczos has converged (residual norm below tolerance)
    if (beta_host[0] <= tol) return 0;

    // Normalize Lanczos vector
    alpha = 1 / beta_host[0];
    RAFT_CUBLAS_TRY(cublasscal(cublas_h, n, &alpha, lanczosVecs_dev + IDX(0, 1, n), 1, stream));
  }

  // -------------------------------------------------------
  // Compute remaining Lanczos vectors
  // -------------------------------------------------------

  while (*iter < maxIter) {
    ++(*iter);

    // Apply matrix: col_iter = A*col_{iter-1} + shift*col_iter
    if (shift != 0)
      RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + (*iter) * n,
                                    lanczosVecs_dev + (*iter - 1) * n,
                                    n * sizeof(value_type_t),
                                    cudaMemcpyDeviceToDevice,
                                    stream));
    A->mv(1, lanczosVecs_dev + IDX(0, *iter - 1, n), shift, lanczosVecs_dev + IDX(0, *iter, n));

    // Full reorthogonalization
    //   "Twice is enough" algorithm per Kahan and Parlett:
    //   project the new vector against ALL previous Lanczos vectors, twice.
    if (reorthogonalize) {
      // work = Q^T * q_new  (coefficients against previous vectors)
      RAFT_CUBLAS_TRY(cublasgemv(cublas_h,
                                 CUBLAS_OP_T,
                                 n,
                                 *iter,
                                 &one,
                                 lanczosVecs_dev,
                                 n,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 &zero,
                                 work_dev,
                                 1,
                                 stream));
      // q_new -= Q * work
      RAFT_CUBLAS_TRY(cublasgemv(cublas_h,
                                 CUBLAS_OP_N,
                                 n,
                                 *iter,
                                 &negOne,
                                 lanczosVecs_dev,
                                 n,
                                 work_dev,
                                 1,
                                 &one,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 stream));

      // Diagonal entry alpha_{iter-1} is the projection coefficient against
      // the previous Lanczos vector (computed by the first gemv).
      RAFT_CUDA_TRY(cudaMemcpyAsync(alpha_host + (*iter - 1),
                                    work_dev + (*iter - 1),
                                    sizeof(value_type_t),
                                    cudaMemcpyDeviceToHost,
                                    stream));

      // Second orthogonalization pass ("twice is enough")
      RAFT_CUBLAS_TRY(cublasgemv(cublas_h,
                                 CUBLAS_OP_T,
                                 n,
                                 *iter,
                                 &one,
                                 lanczosVecs_dev,
                                 n,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 &zero,
                                 work_dev,
                                 1,
                                 stream));
      RAFT_CUBLAS_TRY(cublasgemv(cublas_h,
                                 CUBLAS_OP_N,
                                 n,
                                 *iter,
                                 &negOne,
                                 lanczosVecs_dev,
                                 n,
                                 work_dev,
                                 1,
                                 &one,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 stream));
    }

    // Orthogonalization with 3-term recurrence relation
    // (only orthogonalize against the two most recent Lanczos vectors)
    else {
      RAFT_CUBLAS_TRY(cublasdot(cublas_h,
                                n,
                                lanczosVecs_dev + IDX(0, *iter - 1, n),
                                1,
                                lanczosVecs_dev + IDX(0, *iter, n),
                                1,
                                alpha_host + (*iter - 1),
                                stream));

      // q_new -= alpha_{iter-1} * q_{iter-1}
      auto alpha = -alpha_host[*iter - 1];
      RAFT_CUBLAS_TRY(cublasaxpy(cublas_h,
                                 n,
                                 &alpha,
                                 lanczosVecs_dev + IDX(0, *iter - 1, n),
                                 1,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 stream));

      // q_new -= beta_{iter-2} * q_{iter-2}
      alpha = -beta_host[*iter - 2];
      RAFT_CUBLAS_TRY(cublasaxpy(cublas_h,
                                 n,
                                 &alpha,
                                 lanczosVecs_dev + IDX(0, *iter - 2, n),
                                 1,
                                 lanczosVecs_dev + IDX(0, *iter, n),
                                 1,
                                 stream));
    }

    // Compute residual (norm of the not-yet-normalized new vector)
    RAFT_CUBLAS_TRY(cublasnrm2(
      cublas_h, n, lanczosVecs_dev + IDX(0, *iter, n), 1, beta_host + *iter - 1, stream));

    // Check if Lanczos has converged
    if (beta_host[*iter - 1] <= tol) break;

    // Normalize Lanczos vector
    alpha = 1 / beta_host[*iter - 1];
    RAFT_CUBLAS_TRY(cublasscal(cublas_h, n, &alpha, lanczosVecs_dev + IDX(0, *iter, n), 1, stream));
  }

  // Ensure all device work (including async copies into alpha_host) is
  // complete before the caller reads host-side outputs.
  resource::sync_stream(handle, stream);

  return 0;
}
/**
* @brief Find Householder transform for 3-dimensional system
* Given an input vector v=[x,y,z]', this function finds a
* Householder transform P such that P*v is a multiple of
* e_1=[1,0,0]'. The input vector v is overwritten with the
* Householder vector such that P=I-2*v*v'.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param v (Input/output, host memory, 3 entries) Input
* 3-dimensional vector. On exit, the vector is set to the
* Householder vector.
* @param Pv (Output, host memory, 1 entry) First entry of P*v
* (here v is the input vector). Either equal to ||v||_2 or
* -||v||_2.
* @param P (Output, host memory, 9 entries) Householder transform
* matrix. Matrix dimensions are 3 x 3.
*/
template <typename index_type_t, typename value_type_t>
static void findHouseholder3(value_type_t* v, value_type_t* Pv, value_type_t* P)
{
  // Norm of the input vector. The reflection target is -sign(v[0]) * e_1,
  // which avoids catastrophic cancellation in the subtraction below.
  const value_type_t norm = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  *Pv = (v[0] >= 0) ? -norm : norm;
  v[0] -= *Pv;

  // Normalize the Householder vector; degenerate input yields the zero vector
  // (P then degenerates to the identity).
  const value_type_t vnorm = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
  for (index_type_t k = 0; k < 3; ++k)
    v[k] = (vnorm != 0) ? v[k] / vnorm : 0;

  // Construct the Householder matrix P = I - 2*v*v'
  for (index_type_t j = 0; j < 3; ++j)
    for (index_type_t i = 0; i < 3; ++i)
      P[IDX(i, j, 3)] = (i == j ? 1 : 0) - 2 * v[i] * v[j];
}
/**
* @brief Apply 3-dimensional Householder transform to 4 x 4 matrix
* The Householder transform is pre-applied to the top three rows
* of the matrix and post-applied to the left three columns. The
* 4 x 4 matrix is intended to contain the bulge that is produced
* in the Francis QR algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param v (Input, host memory, 3 entries) Householder vector.
* @param A (Input/output, host memory, 16 entries) 4 x 4 matrix.
*/
template <typename index_type_t, typename value_type_t>
static void applyHouseholder3(const value_type_t* v, value_type_t* A)
{
  // Pre-apply P = I - 2*v*v' to the top three rows of each column:
  // A(0:2, j) -= 2 * v * (v' * A(0:2, j))
  for (index_type_t j = 0; j < 4; ++j) {
    value_type_t dot = 0;
    for (index_type_t i = 0; i < 3; ++i)
      dot += v[i] * A[IDX(i, j, 4)];
    for (index_type_t i = 0; i < 3; ++i)
      A[IDX(i, j, 4)] -= 2 * v[i] * dot;
  }

  // Post-apply P to the left three columns of each row:
  // A(i, 0:2) -= 2 * (A(i, 0:2) * v) * v'
  for (index_type_t i = 0; i < 4; ++i) {
    value_type_t dot = 0;
    for (index_type_t j = 0; j < 3; ++j)
      dot += A[IDX(i, j, 4)] * v[j];
    for (index_type_t j = 0; j < 3; ++j)
      A[IDX(i, j, 4)] -= 2 * dot * v[j];
  }
}
/**
* @brief Perform one step of Francis QR algorithm
* Equivalent to two steps of the classical QR algorithm on a
* tridiagonal matrix.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param n Matrix dimension.
* @param shift1 QR algorithm shift.
* @param shift2 QR algorithm shift.
* @param alpha (Input/output, host memory, n entries) Diagonal
* entries of tridiagonal matrix.
* @param beta (Input/output, host memory, n-1 entries)
* Off-diagonal entries of tridiagonal matrix.
* @param V (Input/output, host memory, n*n entries) Orthonormal
* transforms from previous steps of QR algorithm. Matrix
* dimensions are n x n. On exit, the orthonormal transform from
* this Francis QR step is post-applied to the matrix.
* @param work (Output, host memory, 3*n entries) Workspace.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int francisQRIteration(index_type_t n,
                              value_type_t shift1,
                              value_type_t shift2,
                              value_type_t* alpha,
                              value_type_t* beta,
                              value_type_t* V,
                              value_type_t* work)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------

  // Temporary storage of 4x4 bulge (the active window of the tridiagonal
  // matrix being chased down the diagonal) and Householder vector
  value_type_t bulge[16];

  // Householder vector
  value_type_t householder[3];
  // Householder matrix
  value_type_t householderMatrix[3 * 3];

  // Shifts are roots of the polynomial p(x)=x^2+b*x+c
  value_type_t b = -shift1 - shift2;
  value_type_t c = shift1 * shift2;

  // Loop indices
  index_type_t i, j, pos;

  // Temporary variable
  value_type_t temp;

  // -------------------------------------------------------
  // Implementation
  // -------------------------------------------------------

  // Compute initial Householder transform
  // (these three entries form the first column of p(T) for tridiagonal T)
  householder[0] = alpha[0] * alpha[0] + beta[0] * beta[0] + b * alpha[0] + c;
  householder[1] = beta[0] * (alpha[0] + alpha[1] + b);
  householder[2] = beta[0] * beta[1];
  findHouseholder3<index_type_t, value_type_t>(householder, &temp, householderMatrix);

  // Apply initial Householder transform to create bulge
  // (load the top-left 4x4 window of the tridiagonal matrix into `bulge`)
  memset(bulge, 0, 16 * sizeof(value_type_t));
  for (i = 0; i < 4; ++i)
    bulge[IDX(i, i, 4)] = alpha[i];
  for (i = 0; i < 3; ++i) {
    bulge[IDX(i + 1, i, 4)] = beta[i];
    bulge[IDX(i, i + 1, 4)] = beta[i];
  }
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  // Accumulate the transform into V: V(:, 0:2) = V(:, 0:2) * P
  Lapack<value_type_t>::gemm(false, false, n, 3, 3, 1, V, n, householderMatrix, 3, 0, work, n);
  memcpy(V, work, 3 * n * sizeof(value_type_t));

  // Chase bulge to bottom-right of matrix with Householder transforms
  for (pos = 0; pos < n - 4; ++pos) {
    // Move to next position: commit the finished diagonal entry, shift the
    // 4x4 window down-right by one, and load the next row/column of T.
    alpha[pos]     = bulge[IDX(0, 0, 4)];
    householder[0] = bulge[IDX(1, 0, 4)];
    householder[1] = bulge[IDX(2, 0, 4)];
    householder[2] = bulge[IDX(3, 0, 4)];
    for (j = 0; j < 3; ++j)
      for (i = 0; i < 3; ++i)
        bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
    bulge[IDX(3, 0, 4)] = 0;
    bulge[IDX(3, 1, 4)] = 0;
    bulge[IDX(3, 2, 4)] = beta[pos + 3];
    bulge[IDX(0, 3, 4)] = 0;
    bulge[IDX(1, 3, 4)] = 0;
    bulge[IDX(2, 3, 4)] = beta[pos + 3];
    bulge[IDX(3, 3, 4)] = alpha[pos + 4];

    // Apply Householder transform and accumulate it into V
    findHouseholder3<index_type_t, value_type_t>(householder, beta + pos, householderMatrix);
    applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
    Lapack<value_type_t>::gemm(
      false, false, n, 3, 3, 1, V + IDX(0, pos + 1, n), n, householderMatrix, 3, 0, work, n);
    memcpy(V + IDX(0, pos + 1, n), work, 3 * n * sizeof(value_type_t));
  }

  // Apply penultimate Householder transform
  //   Values in the last row and column are zero
  alpha[n - 4]   = bulge[IDX(0, 0, 4)];
  householder[0] = bulge[IDX(1, 0, 4)];
  householder[1] = bulge[IDX(2, 0, 4)];
  householder[2] = bulge[IDX(3, 0, 4)];
  for (j = 0; j < 3; ++j)
    for (i = 0; i < 3; ++i)
      bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
  bulge[IDX(3, 0, 4)] = 0;
  bulge[IDX(3, 1, 4)] = 0;
  bulge[IDX(3, 2, 4)] = 0;
  bulge[IDX(0, 3, 4)] = 0;
  bulge[IDX(1, 3, 4)] = 0;
  bulge[IDX(2, 3, 4)] = 0;
  bulge[IDX(3, 3, 4)] = 0;
  findHouseholder3<index_type_t, value_type_t>(householder, beta + n - 4, householderMatrix);
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  Lapack<value_type_t>::gemm(
    false, false, n, 3, 3, 1, V + IDX(0, n - 3, n), n, householderMatrix, 3, 0, work, n);
  memcpy(V + IDX(0, n - 3, n), work, 3 * n * sizeof(value_type_t));

  // Apply final Householder transform
  //   Values in the last two rows and columns are zero; only a 2x2 block of
  //   the 3x3 Householder matrix touches V.
  alpha[n - 3]   = bulge[IDX(0, 0, 4)];
  householder[0] = bulge[IDX(1, 0, 4)];
  householder[1] = bulge[IDX(2, 0, 4)];
  householder[2] = 0;
  for (j = 0; j < 3; ++j)
    for (i = 0; i < 3; ++i)
      bulge[IDX(i, j, 4)] = bulge[IDX(i + 1, j + 1, 4)];
  findHouseholder3<index_type_t, value_type_t>(householder, beta + n - 3, householderMatrix);
  applyHouseholder3<index_type_t, value_type_t>(householder, bulge);
  Lapack<value_type_t>::gemm(
    false, false, n, 2, 2, 1, V + IDX(0, n - 2, n), n, householderMatrix, 3, 0, work, n);
  memcpy(V + IDX(0, n - 2, n), work, 2 * n * sizeof(value_type_t));

  // Bulge has been eliminated; commit the trailing 2x2 of the window.
  alpha[n - 2] = bulge[IDX(0, 0, 4)];
  alpha[n - 1] = bulge[IDX(1, 1, 4)];
  beta[n - 2]  = bulge[IDX(1, 0, 4)];

  return 0;
}
/**
* @brief Perform implicit restart of Lanczos algorithm
* Shifts are Chebyshev nodes of unwanted region of matrix spectrum.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Matrix dimension.
* @param iter Current Lanczos iteration.
* @param iter_new Lanczos iteration after restart.
* @param shiftUpper Pointer (host memory) to upper bound for unwanted
* region. Value is ignored if less than *shiftLower. If a
* stronger upper bound has been found, the value is updated on
* exit.
* @param shiftLower Pointer (host memory) to lower bound for unwanted
* region. Value is ignored if greater than *shiftUpper. If a
* stronger lower bound has been found, the value is updated on
* exit.
* @param alpha_host (Input/output, host memory, iter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Input/output, host memory, iter entries)
* Off-diagonal entries of Lanczos system.
* @param V_host (Output, host memory, iter*iter entries)
* Orthonormal transform used to obtain restarted system. Matrix
* dimensions are iter x iter.
* @param work_host (Output, host memory, 4*iter entries)
* Workspace.
* @param lanczosVecs_dev (Input/output, device memory, n*(iter+1)
* entries) Lanczos vectors. Vectors are stored as columns of a
* column-major matrix with dimensions n x (iter+1).
* @param work_dev (Output, device memory, (n+iter)*iter entries)
* Workspace.
* @param smallest_eig specifies whether smallest (true) or largest
* (false) eigenvalues are to be calculated.
* @return error flag.
*/
template <typename index_type_t, typename value_type_t>
static int lanczosRestart(raft::resources const& handle,
                          index_type_t n,
                          index_type_t iter,
                          index_type_t iter_new,
                          value_type_t* shiftUpper,
                          value_type_t* shiftLower,
                          value_type_t* __restrict__ alpha_host,
                          value_type_t* __restrict__ beta_host,
                          value_type_t* __restrict__ V_host,
                          value_type_t* __restrict__ work_host,
                          value_type_t* __restrict__ lanczosVecs_dev,
                          value_type_t* __restrict__ work_dev,
                          bool smallest_eig)
{
  // -------------------------------------------------------
  // Variable declaration
  // -------------------------------------------------------

  // Useful constants
  constexpr value_type_t zero = 0;
  constexpr value_type_t one  = 1;

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // Loop index
  index_type_t i;

  // Number of implicit restart steps
  //   Assumed to be even since each call to Francis algorithm is
  //   equivalent to two calls of QR algorithm
  index_type_t restartSteps = iter - iter_new;

  // Ritz values from Lanczos method
  value_type_t* ritzVals_host = work_host + 3 * iter;
  // Shifts for implicit restart
  value_type_t* shifts_host;

  // Orthonormal matrix for similarity transform
  value_type_t* V_dev = work_dev + n * iter;

  // -------------------------------------------------------
  // Implementation
  // -------------------------------------------------------

  // Compute Ritz values (sterf destroys its inputs, so work on copies)
  memcpy(ritzVals_host, alpha_host, iter * sizeof(value_type_t));
  memcpy(work_host, beta_host, (iter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::sterf(iter, ritzVals_host, work_host);

  // Debug: Print largest eigenvalues
  // for (int i = iter-iter_new; i < iter; ++i)
  //  std::cout <<*(ritzVals_host+i)<< " ";
  // std::cout <<std::endl;

  // Initialize similarity transform with identity matrix
  memset(V_host, 0, iter * iter * sizeof(value_type_t));
  for (i = 0; i < iter; ++i)
    V_host[IDX(i, i, iter)] = 1;

  // Determine interval to suppress eigenvalues
  // (shiftLower > shiftUpper signals "interval not yet initialized";
  //  sterf returns Ritz values in ascending order)
  if (smallest_eig) {
    if (*shiftLower > *shiftUpper) {
      *shiftUpper = ritzVals_host[iter - 1];
      *shiftLower = ritzVals_host[iter_new];
    } else {
      *shiftUpper = std::max(*shiftUpper, ritzVals_host[iter - 1]);
      *shiftLower = std::min(*shiftLower, ritzVals_host[iter_new]);
    }
  } else {
    if (*shiftLower > *shiftUpper) {
      *shiftUpper = ritzVals_host[iter - iter_new - 1];
      *shiftLower = ritzVals_host[0];
    } else {
      *shiftUpper = std::max(*shiftUpper, ritzVals_host[iter - iter_new - 1]);
      *shiftLower = std::min(*shiftLower, ritzVals_host[0]);
    }
  }

  // Calculate Chebyshev nodes as shifts
  // (ritzVals_host storage is reused; Ritz values are no longer needed)
  shifts_host = ritzVals_host;
  for (i = 0; i < restartSteps; ++i) {
    shifts_host[i] = cos((i + 0.5) * static_cast<value_type_t>(M_PI) / restartSteps);
    shifts_host[i] *= 0.5 * ((*shiftUpper) - (*shiftLower));
    shifts_host[i] += 0.5 * ((*shiftUpper) + (*shiftLower));
  }

  // Apply Francis QR algorithm to implicitly restart Lanczos
  // (each Francis step consumes two shifts, hence i += 2)
  for (i = 0; i < restartSteps; i += 2)
    if (francisQRIteration(
          iter, shifts_host[i], shifts_host[i + 1], alpha_host, beta_host, V_host, work_host))
      WARNING("error in implicitly shifted QR algorithm");

  // Obtain new residual
  RAFT_CUDA_TRY(cudaMemcpyAsync(
    V_dev, V_host, iter * iter * sizeof(value_type_t), cudaMemcpyHostToDevice, stream));

  beta_host[iter - 1] = beta_host[iter - 1] * V_host[IDX(iter - 1, iter_new - 1, iter)];
  RAFT_CUBLAS_TRY(cublasgemv(cublas_h,
                             CUBLAS_OP_N,
                             n,
                             iter,
                             beta_host + iter_new - 1,
                             lanczosVecs_dev,
                             n,
                             V_dev + IDX(0, iter_new, iter),
                             1,
                             beta_host + iter - 1,
                             lanczosVecs_dev + IDX(0, iter, n),
                             1,
                             stream));

  // Obtain new Lanczos vectors: Q_new = Q * V(:, 0:iter_new-1)
  RAFT_CUBLAS_TRY(cublasgemm(cublas_h,
                             CUBLAS_OP_N,
                             CUBLAS_OP_N,
                             n,
                             iter_new,
                             iter,
                             &one,
                             lanczosVecs_dev,
                             n,
                             V_dev,
                             iter,
                             &zero,
                             work_dev,
                             n,
                             stream));

  RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev,
                                work_dev,
                                n * iter_new * sizeof(value_type_t),
                                cudaMemcpyDeviceToDevice,
                                stream));

  // Normalize residual to obtain new Lanczos vector
  RAFT_CUDA_TRY(cudaMemcpyAsync(lanczosVecs_dev + IDX(0, iter_new, n),
                                lanczosVecs_dev + IDX(0, iter, n),
                                n * sizeof(value_type_t),
                                cudaMemcpyDeviceToDevice,
                                stream));

  RAFT_CUBLAS_TRY(cublasnrm2(
    cublas_h, n, lanczosVecs_dev + IDX(0, iter_new, n), 1, beta_host + iter_new - 1, stream));

  auto h_beta = 1 / beta_host[iter_new - 1];
  RAFT_CUBLAS_TRY(
    cublasscal(cublas_h, n, &h_beta, lanczosVecs_dev + IDX(0, iter_new, n), 1, stream));

  return 0;
}
/**
* @brief Compute smallest eigenvectors of symmetric matrix
* Computes eigenvalues and eigenvectors that are least
* positive. If matrix is positive definite or positive
* semidefinite, the computed eigenvalues are smallest in
* magnitude.
* The largest eigenvalue is estimated by performing several
* Lanczos iterations. An implicitly restarted Lanczos method is
* then applied to A+s*I, where s is negative the largest
* eigenvalue.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param A Matrix.
* @param nEigVecs Number of eigenvectors to compute.
* @param maxIter Maximum number of Lanczos steps. Does not include
* Lanczos steps used to estimate largest eigenvalue.
* @param restartIter Maximum size of Lanczos system before
* performing an implicit restart. Should be at least 4.
* @param tol Convergence tolerance. Lanczos iteration will
* terminate when the residual norm is less than tol*theta, where
* theta is an estimate for the smallest unwanted eigenvalue
* (i.e. the (nEigVecs+1)th smallest eigenvalue).
* @param reorthogonalize Whether to reorthogonalize Lanczos
* vectors.
* @param effIter On exit, pointer to final size of Lanczos system.
* @param totalIter On exit, pointer to total number of Lanczos
* iterations performed. Does not include Lanczos steps used to
* estimate largest eigenvalue.
* @param shift On exit, pointer to matrix shift (estimate for
* largest eigenvalue).
* @param alpha_host (Output, host memory, restartIter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Output, host memory, restartIter entries)
* Off-diagonal entries of Lanczos system.
* @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
* entries) Lanczos vectors. Vectors are stored as columns of a
* column-major matrix with dimensions n x (restartIter+1).
* @param work_dev (Output, device memory,
* (n+restartIter)*restartIter entries) Workspace.
* @param eigVals_dev (Output, device memory, nEigVecs entries)
* Largest eigenvalues of matrix.
* @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
* Eigenvectors corresponding to smallest eigenvalues of
* matrix. Vectors are stored as columns of a column-major matrix
* with dimensions n x nEigVecs.
* @param seed random seed.
* @return error flag.
*/
template <typename index_type_t, typename value_type_t>
int computeSmallestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t* effIter,
  index_type_t* totalIter,
  value_type_t* shift,
  value_type_t* __restrict__ alpha_host,
  value_type_t* __restrict__ beta_host,
  value_type_t* __restrict__ lanczosVecs_dev,
  value_type_t* __restrict__ work_dev,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed)
{
  // Useful constants
  constexpr value_type_t one  = 1;
  constexpr value_type_t zero = 0;

  // Matrix dimension
  index_type_t n = A->nrows_;

  // Shift for implicit restart
  value_type_t shiftUpper;
  value_type_t shiftLower;

  // Lanczos iteration counters
  index_type_t maxIter_curr = restartIter;  // Maximum size of Lanczos system

  // Status flags
  int status;

  // Loop index
  index_type_t i;

  // Host memory
  value_type_t* Z_host;     // Eigenvectors in Lanczos basis
  value_type_t* work_host;  // Workspace

  // -------------------------------------------------------
  // Check that parameters are valid
  // -------------------------------------------------------
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // -------------------------------------------------------
  // Variable initialization
  // -------------------------------------------------------

  // Total number of Lanczos iterations
  *totalIter = 0;

  // Allocate host memory
  std::vector<value_type_t> Z_host_v(restartIter * restartIter);
  std::vector<value_type_t> work_host_v(4 * restartIter);

  Z_host    = Z_host_v.data();
  work_host = work_host_v.data();

  // Initialize cuBLAS (host pointer mode: performLanczosIteration reads
  // dot/nrm2 results from host memory right after the calls)
  RAFT_CUBLAS_TRY(cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // -------------------------------------------------------
  // Compute largest eigenvalue to determine shift
  // -------------------------------------------------------

  // Random number generator
  curandGenerator_t randGen;
  // Initialize random number generator
  // NOTE(review): curand* return statuses are not checked here — consider
  // checking them like the cuBLAS/CUDA calls.
  curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10);

  curandSetPseudoRandomGeneratorSeed(randGen, seed);

  // Initialize initial Lanczos vector
  // (n + n % 2: curandGenerateNormal requires an even sample count)
  curandGenerateNormalX(randGen, lanczosVecs_dev, n + n % 2, zero, one);
  value_type_t normQ1;
  RAFT_CUBLAS_TRY(cublasnrm2(cublas_h, n, lanczosVecs_dev, 1, &normQ1, stream));

  auto h_val = 1 / normQ1;
  RAFT_CUBLAS_TRY(cublasscal(cublas_h, n, &h_val, lanczosVecs_dev, 1, stream));

  // Obtain tridiagonal matrix with Lanczos
  *effIter = 0;
  *shift   = 0;
  status   = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 0.0,
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
  if (status) WARNING("error in Lanczos iteration");

  // Determine largest eigenvalue (sterf returns eigenvalues in ascending
  // order; the shift makes A + shift*I negative semidefinite-ish so the
  // smallest eigenvalues of A become the largest in magnitude)
  Lapack<value_type_t>::sterf(*effIter, alpha_host, beta_host);
  *shift = -alpha_host[*effIter - 1];

  // -------------------------------------------------------
  // Compute eigenvectors of shifted matrix
  // -------------------------------------------------------

  // Obtain tridiagonal matrix with Lanczos
  *effIter = 0;

  status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                               A,
                                                               effIter,
                                                               maxIter_curr,
                                                               *shift,
                                                               0,
                                                               reorthogonalize,
                                                               alpha_host,
                                                               beta_host,
                                                               lanczosVecs_dev,
                                                               work_dev);
  if (status) WARNING("error in Lanczos iteration");
  *totalIter += *effIter;

  // Apply Lanczos method until convergence
  // (shiftLower > shiftUpper marks the suppression interval as "unset" for
  //  lanczosRestart; first convergence test effectively uses tol itself)
  shiftLower = 1;
  shiftUpper = -1;
  while (*totalIter < maxIter && beta_host[*effIter - 1] > tol * shiftLower) {
    // Determine number of restart steps
    //   Number of steps must be even due to Francis algorithm
    index_type_t iter_new = nEigVecs + 1;
    if (restartIter - (maxIter - *totalIter) > nEigVecs + 1)
      iter_new = restartIter - (maxIter - *totalIter);
    if ((restartIter - iter_new) % 2) iter_new -= 1;
    if (iter_new == *effIter) break;

    // Implicit restart of Lanczos method
    status = lanczosRestart<index_type_t, value_type_t>(handle,
                                                        n,
                                                        *effIter,
                                                        iter_new,
                                                        &shiftUpper,
                                                        &shiftLower,
                                                        alpha_host,
                                                        beta_host,
                                                        Z_host,
                                                        work_host,
                                                        lanczosVecs_dev,
                                                        work_dev,
                                                        true);
    if (status) WARNING("error in Lanczos implicit restart");
    *effIter = iter_new;

    // Check for convergence
    if (beta_host[*effIter - 1] <= tol * fabs(shiftLower)) break;

    // Proceed with Lanczos method
    status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 tol * fabs(shiftLower),
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
    if (status) WARNING("error in Lanczos iteration");
    *totalIter += *effIter - iter_new;
  }

  // Warning if Lanczos has failed to converge
  if (beta_host[*effIter - 1] > tol * fabs(shiftLower)) {
    WARNING("implicitly restarted Lanczos failed to converge");
  }

  // Solve tridiagonal system (work on copies: steqr destroys its inputs)
  memcpy(work_host + 2 * (*effIter), alpha_host, (*effIter) * sizeof(value_type_t));
  memcpy(work_host + 3 * (*effIter), beta_host, (*effIter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::steqr('I',
                              *effIter,
                              work_host + 2 * (*effIter),
                              work_host + 3 * (*effIter),
                              Z_host,
                              *effIter,
                              work_host);

  // Obtain desired eigenvalues by applying shift
  for (i = 0; i < *effIter; ++i)
    work_host[i + 2 * (*effIter)] -= *shift;
  for (i = *effIter; i < nEigVecs; ++i)
    work_host[i + 2 * (*effIter)] = 0;

  // Copy results to device memory
  RAFT_CUDA_TRY(cudaMemcpyAsync(eigVals_dev,
                                work_host + 2 * (*effIter),
                                nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));

  RAFT_CUDA_TRY(cudaMemcpyAsync(work_dev,
                                Z_host,
                                (*effIter) * nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));
  RAFT_CHECK_CUDA(stream);

  // Convert eigenvectors from Lanczos basis to standard basis:
  // eigVecs = Q * Z(:, 0:nEigVecs-1)
  RAFT_CUBLAS_TRY(cublasgemm(cublas_h,
                             CUBLAS_OP_N,
                             CUBLAS_OP_N,
                             n,
                             nEigVecs,
                             *effIter,
                             &one,
                             lanczosVecs_dev,
                             n,
                             work_dev,
                             *effIter,
                             &zero,
                             eigVecs_dev,
                             n,
                             stream));

  // Clean up and exit
  curandDestroyGenerator(randGen);
  return 0;
}
template <typename index_type_t, typename value_type_t>
int computeSmallestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 1234567)
{
  // Problem size
  index_type_t n = A.nrows_;

  // Validate arguments (same checks, same order, as the pointer overload so
  // failure messages are consistent).
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  // Host buffers for the tridiagonal Lanczos system (diagonal / off-diagonal)
  std::vector<value_type_t> diag(restartIter);
  std::vector<value_type_t> offdiag(restartIter);

  // Device workspace: Lanczos vectors (n x (restartIter+1), column-major) and
  // scratch of (n + restartIter) * restartIter entries
  spectral::matrix::vector_t<value_type_t> lanczos_vecs(handle, n * (restartIter + 1));
  spectral::matrix::vector_t<value_type_t> scratch(handle, (n + restartIter) * restartIter);

  // Outputs of the worker that this convenience overload discards
  index_type_t eff_iter;
  value_type_t shift;

  // Delegate to the full pointer-based implementation.
  return computeSmallestEigenvectors(handle,
                                     &A,
                                     nEigVecs,
                                     maxIter,
                                     restartIter,
                                     tol,
                                     reorthogonalize,
                                     &eff_iter,
                                     &iter,
                                     &shift,
                                     diag.data(),
                                     offdiag.data(),
                                     lanczos_vecs.raw(),
                                     scratch.raw(),
                                     eigVals_dev,
                                     eigVecs_dev,
                                     seed);
}
/**
 * @brief Compute largest eigenvectors of symmetric matrix
 *    Computes the eigenvalues and eigenvectors that are most
 *    positive. If the matrix is positive definite or positive
 *    semidefinite, the computed eigenvalues are largest in
 *    magnitude.
* The largest eigenvalue is estimated by performing several
* Lanczos iterations. An implicitly restarted Lanczos method is
* then applied.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param A Matrix.
* @param nEigVecs Number of eigenvectors to compute.
* @param maxIter Maximum number of Lanczos steps.
* @param restartIter Maximum size of Lanczos system before
* performing an implicit restart. Should be at least 4.
* @param tol Convergence tolerance. Lanczos iteration will
* terminate when the residual norm is less than tol*theta, where
* theta is an estimate for the largest unwanted eigenvalue
* (i.e. the (nEigVecs+1)th largest eigenvalue).
* @param reorthogonalize Whether to reorthogonalize Lanczos
* vectors.
* @param effIter On exit, pointer to final size of Lanczos system.
* @param totalIter On exit, pointer to total number of Lanczos
* iterations performed.
* @param alpha_host (Output, host memory, restartIter entries)
* Diagonal entries of Lanczos system.
* @param beta_host (Output, host memory, restartIter entries)
* Off-diagonal entries of Lanczos system.
* @param lanczosVecs_dev (Output, device memory, n*(restartIter+1)
* entries) Lanczos vectors. Vectors are stored as columns of a
* column-major matrix with dimensions n x (restartIter+1).
* @param work_dev (Output, device memory,
* (n+restartIter)*restartIter entries) Workspace.
* @param eigVals_dev (Output, device memory, nEigVecs entries)
* Largest eigenvalues of matrix.
* @param eigVecs_dev (Output, device memory, n*nEigVecs entries)
* Eigenvectors corresponding to largest eigenvalues of
* matrix. Vectors are stored as columns of a column-major matrix
* with dimensions n x nEigVecs.
* @param seed random seed.
* @return error flag.
*/
template <typename index_type_t, typename value_type_t>
int computeLargestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const* A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t* effIter,
  index_type_t* totalIter,
  value_type_t* __restrict__ alpha_host,
  value_type_t* __restrict__ beta_host,
  value_type_t* __restrict__ lanczosVecs_dev,
  value_type_t* __restrict__ work_dev,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed)
{
  // Implicitly-restarted Lanczos driver for the nEigVecs largest eigenpairs.
  // Phases: (1) build a random unit starting vector, (2) run Lanczos to fill a
  // tridiagonal system of size <= restartIter, (3) implicitly restart until the
  // residual is small or maxIter is exhausted, (4) solve the small tridiagonal
  // eigenproblem on the host (steqr) and map the eigenvectors back to the
  // standard basis with a GEMM against the Lanczos vectors.

  // Useful constants
  constexpr value_type_t one  = 1;
  constexpr value_type_t zero = 0;

  // Matrix dimension
  index_type_t n = A->nrows_;

  // Lanczos iteration counters
  index_type_t maxIter_curr = restartIter;  // Maximum size of Lanczos system

  // Status flags
  int status;

  // Loop index
  index_type_t i;

  // Host memory
  value_type_t* Z_host;     // Eigenvectors in Lanczos basis
  value_type_t* work_host;  // Workspace

  // -------------------------------------------------------
  // Check that LAPACK is enabled
  // -------------------------------------------------------
  // Lapack<value_type_t>::check_lapack_enabled();

  // -------------------------------------------------------
  // Check that parameters are valid
  // -------------------------------------------------------
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  auto cublas_h = resource::get_cublas_handle(handle);
  auto stream   = resource::get_cuda_stream(handle);

  // -------------------------------------------------------
  // Variable initialization
  // -------------------------------------------------------

  // Total number of Lanczos iterations
  *totalIter = 0;

  // Allocate host memory: Z_host holds the restartIter x restartIter
  // eigenvector matrix of the tridiagonal system; work_host is partitioned
  // into 4 restartIter-sized segments (segments 2 and 3 receive copies of
  // alpha/beta for the steqr call below).
  std::vector<value_type_t> Z_host_v(restartIter * restartIter);
  std::vector<value_type_t> work_host_v(4 * restartIter);

  Z_host    = Z_host_v.data();
  work_host = work_host_v.data();

  // Initialize cuBLAS: scalars (alpha/beta, norms) are passed via host pointers.
  RAFT_CUBLAS_TRY(cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));

  // -------------------------------------------------------
  // Compute largest eigenvalue
  // -------------------------------------------------------

  // Random number generator
  curandGenerator_t randGen;
  // Initialize random number generator
  curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
  curandSetPseudoRandomGeneratorSeed(randGen, seed);

  // Initialize initial Lanczos vector. cuRAND normal generation requires an
  // even count, hence the n + n % 2 round-up; the extra element (if any) is
  // ignored by the subsequent nrm2/scal calls, which operate on n entries.
  curandGenerateNormalX(randGen, lanczosVecs_dev, n + n % 2, zero, one);
  value_type_t normQ1;
  RAFT_CUBLAS_TRY(cublasnrm2(cublas_h, n, lanczosVecs_dev, 1, &normQ1, stream));

  // Normalize the starting vector to unit length.
  auto h_val = 1 / normQ1;
  RAFT_CUBLAS_TRY(cublasscal(cublas_h, n, &h_val, lanczosVecs_dev, 1, stream));

  // Obtain tridiagonal matrix with Lanczos
  *effIter                = 0;
  value_type_t shift_val  = 0.0;
  value_type_t* shift     = &shift_val;
  status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                               A,
                                                               effIter,
                                                               maxIter_curr,
                                                               *shift,
                                                               0,
                                                               reorthogonalize,
                                                               alpha_host,
                                                               beta_host,
                                                               lanczosVecs_dev,
                                                               work_dev);
  // NOTE(review): a nonzero status only produces a warning; the function still
  // returns 0 at the end — confirm callers do not rely on the error flag.
  if (status) WARNING("error in Lanczos iteration");
  *totalIter += *effIter;

  // Apply Lanczos method until convergence.
  // shiftLower/shiftUpper presumably start as an inverted (empty) interval so
  // that lanczosRestart recomputes the shift bounds on its first call — TODO
  // confirm against lanczosRestart's contract.
  value_type_t shiftLower = 1;
  value_type_t shiftUpper = -1;
  // NOTE(review): this loop condition uses tol * shiftLower without fabs(),
  // unlike the inner convergence checks below — confirm intentional.
  while (*totalIter < maxIter && beta_host[*effIter - 1] > tol * shiftLower) {
    // Determine number of restart steps
    // Number of steps must be even due to Francis algorithm
    index_type_t iter_new = nEigVecs + 1;
    if (restartIter - (maxIter - *totalIter) > nEigVecs + 1)
      iter_new = restartIter - (maxIter - *totalIter);
    if ((restartIter - iter_new) % 2) iter_new -= 1;
    if (iter_new == *effIter) break;

    // Implicit restart of Lanczos method: shrink the Lanczos system from
    // *effIter down to iter_new columns while filtering unwanted spectrum.
    status = lanczosRestart<index_type_t, value_type_t>(handle,
                                                        n,
                                                        *effIter,
                                                        iter_new,
                                                        &shiftUpper,
                                                        &shiftLower,
                                                        alpha_host,
                                                        beta_host,
                                                        Z_host,
                                                        work_host,
                                                        lanczosVecs_dev,
                                                        work_dev,
                                                        false);
    if (status) WARNING("error in Lanczos implicit restart");
    *effIter = iter_new;

    // Check for convergence
    if (beta_host[*effIter - 1] <= tol * fabs(shiftLower)) break;

    // Proceed with Lanczos method: extend the restarted system back up to
    // maxIter_curr (= restartIter) columns.
    status = performLanczosIteration<index_type_t, value_type_t>(handle,
                                                                 A,
                                                                 effIter,
                                                                 maxIter_curr,
                                                                 *shift,
                                                                 tol * fabs(shiftLower),
                                                                 reorthogonalize,
                                                                 alpha_host,
                                                                 beta_host,
                                                                 lanczosVecs_dev,
                                                                 work_dev);
    if (status) WARNING("error in Lanczos iteration");
    // Only the newly-added columns count toward the iteration budget.
    *totalIter += *effIter - iter_new;
  }

  // Warning if Lanczos has failed to converge
  if (beta_host[*effIter - 1] > tol * fabs(shiftLower)) {
    WARNING("implicitly restarted Lanczos failed to converge");
  }

  // Clear Z_host before steqr fills the leading *effIter x *effIter block.
  for (int i = 0; i < restartIter; ++i) {
    for (int j = 0; j < restartIter; ++j)
      Z_host[i * restartIter + j] = 0;
  }

  // Solve tridiagonal system: copy the diagonal (alpha) and off-diagonal
  // (beta) into work_host segments 2 and 3, since steqr overwrites its inputs.
  memcpy(work_host + 2 * (*effIter), alpha_host, (*effIter) * sizeof(value_type_t));
  memcpy(work_host + 3 * (*effIter), beta_host, (*effIter - 1) * sizeof(value_type_t));
  Lapack<value_type_t>::steqr('I',
                              *effIter,
                              work_host + 2 * (*effIter),
                              work_host + 3 * (*effIter),
                              Z_host,
                              *effIter,
                              work_host);

  // note: We need to pick the top nEigVecs eigenvalues
  // but effItter can be larger than nEigVecs
  // hence we add an offset for that case, because we want to access top nEigVecs eigenpairs in the
  // matrix of size effIter. remember the array is sorted, so it is not needed for smallest
  // eigenvalues case because the first ones are the smallest ones
  index_type_t top_eigenparis_idx_offset = *effIter - nEigVecs;

  // Debug : print nEigVecs largest eigenvalues
  // for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
  //  std::cout <<*(work_host+(2*(*effIter)+i))<< " ";
  // std::cout <<std::endl;

  // Debug : print nEigVecs largest eigenvectors
  // for (int i = top_eigenparis_idx_offset; i < *effIter; ++i)
  //{
  //  for (int j = 0; j < *effIter; ++j)
  //    std::cout <<Z_host[i*(*effIter)+j]<< " ";
  //  std::cout <<std::endl;
  //}

  // Obtain desired eigenvalues by applying shift (shift is 0 in this largest-
  // eigenvalues path, but kept for symmetry with the smallest-eigenvalues code).
  for (i = 0; i < *effIter; ++i)
    work_host[i + 2 * (*effIter)] -= *shift;

  // Zero out the unwanted (smallest) eigenvalues below the offset.
  for (i = 0; i < top_eigenparis_idx_offset; ++i)
    work_host[i + 2 * (*effIter)] = 0;

  // Copy results to device memory
  // skip smallest eigenvalue if needed
  RAFT_CUDA_TRY(cudaMemcpyAsync(eigVals_dev,
                                work_host + 2 * (*effIter) + top_eigenparis_idx_offset,
                                nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));

  // skip smallest eigenvector if needed
  RAFT_CUDA_TRY(cudaMemcpyAsync(work_dev,
                                Z_host + (top_eigenparis_idx_offset * (*effIter)),
                                (*effIter) * nEigVecs * sizeof(value_type_t),
                                cudaMemcpyHostToDevice,
                                stream));
  RAFT_CHECK_CUDA(stream);

  // Convert eigenvectors from Lanczos basis to standard basis:
  // eigVecs (n x nEigVecs) = lanczosVecs (n x effIter) * Z (effIter x nEigVecs)
  RAFT_CUBLAS_TRY(cublasgemm(cublas_h,
                             CUBLAS_OP_N,
                             CUBLAS_OP_N,
                             n,
                             nEigVecs,
                             *effIter,
                             &one,
                             lanczosVecs_dev,
                             n,
                             work_dev,
                             *effIter,
                             &zero,
                             eigVecs_dev,
                             n,
                             stream));

  // Clean up and exit
  curandDestroyGenerator(randGen);
  return 0;
}
template <typename index_type_t, typename value_type_t>
int computeLargestEigenvectors(
  raft::resources const& handle,
  spectral::matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
  index_type_t nEigVecs,
  index_type_t maxIter,
  index_type_t restartIter,
  value_type_t tol,
  bool reorthogonalize,
  index_type_t& iter,
  value_type_t* __restrict__ eigVals_dev,
  value_type_t* __restrict__ eigVecs_dev,
  unsigned long long seed = 123456)
{
  // Convenience overload: allocates all scratch buffers internally and
  // forwards to the pointer-based implementation above.
  index_type_t n = A.nrows_;

  // Validate parameters up front (mirrors the checks in the full overload).
  RAFT_EXPECTS(nEigVecs > 0 && nEigVecs <= n, "Invalid number of eigenvectors.");
  RAFT_EXPECTS(restartIter > 0, "Invalid restartIter.");
  RAFT_EXPECTS(tol > 0, "Invalid tolerance.");
  RAFT_EXPECTS(maxIter >= nEigVecs, "Invalid maxIter.");
  RAFT_EXPECTS(restartIter >= nEigVecs, "Invalid restartIter.");

  // Host-side storage for the tridiagonal Lanczos system (diagonal and
  // off-diagonal entries).
  std::vector<value_type_t> diag_v(restartIter);
  std::vector<value_type_t> offdiag_v(restartIter);

  // Device workspaces: the Lanczos basis (n x (restartIter+1)) and general
  // scratch of (n + restartIter) * restartIter entries.
  spectral::matrix::vector_t<value_type_t> lanczos_basis(handle, n * (restartIter + 1));
  spectral::matrix::vector_t<value_type_t> scratch(handle, (n + restartIter) * restartIter);

  // Run the Lanczos method and return its status directly.
  index_type_t final_system_size;
  return computeLargestEigenvectors(handle,
                                    &A,
                                    nEigVecs,
                                    maxIter,
                                    restartIter,
                                    tol,
                                    reorthogonalize,
                                    &final_system_size,
                                    &iter,
                                    diag_v.data(),
                                    offdiag_v.data(),
                                    lanczos_basis.raw(),
                                    scratch.raw(),
                                    eigVals_dev,
                                    eigVecs_dev,
                                    seed);
}
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/axpy.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/core/resource/cublas_handle.hpp>
#include "cublas_wrappers.hpp"
#include <raft/core/resources.hpp>
namespace raft::linalg::detail {
template <typename T, bool DevicePointerMode = false>
void axpy(raft::resources const& handle,
          const int n,
          const T* alpha,
          const T* x,
          const int incx,
          T* y,
          const int incy,
          cudaStream_t stream)
{
  // Computes y = alpha * x + y via cuBLAS. The RAII guard puts the handle in
  // device-pointer mode for `alpha` when DevicePointerMode is set and restores
  // the previous mode when this scope ends.
  auto cublas_handle = resource::get_cublas_handle(handle);
  cublas_device_pointer_mode<DevicePointerMode> pointer_mode_guard(cublas_handle);
  RAFT_CUBLAS_TRY(cublasaxpy(cublas_handle, n, alpha, x, incx, y, incy, stream));
}
} // namespace raft::linalg::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/subtract.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
namespace detail {
template <typename InT, typename OutT = InT, typename IdxType = int>
void subtractScalar(OutT* out, const InT* in, InT scalar, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in[i] - scalar for i in [0, len).
  const auto subtract_const = raft::sub_const_op<InT>(scalar);
  raft::linalg::unaryOp(out, in, len, subtract_const, stream);
}
template <typename InT, typename OutT = InT, typename IdxType = int>
void subtract(OutT* out, const InT* in1, const InT* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in1[i] - in2[i] for i in [0, len).
  const auto elementwise_sub = raft::sub_op();
  raft::linalg::binaryOp(out, in1, in2, len, elementwise_sub, stream);
}
template <class math_t, typename IdxType>
RAFT_KERNEL subtract_dev_scalar_kernel(math_t* outDev,
                                       const math_t* inDev,
                                       const math_t* singleScalarDev,
                                       IdxType len)
{
  // Elementwise outDev[i] = inDev[i] - *singleScalarDev, where the scalar
  // resides in device memory. One element per thread; no shared memory used.
  // Keep the flat index in IdxType: the previous code computed the product in
  // IdxType but stored it in a plain `int`, truncating the index (and the
  // bounds check) whenever IdxType is wider than int and len exceeds INT_MAX.
  IdxType i = static_cast<IdxType>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < len) { outDev[i] = inDev[i] - *singleScalarDev; }
}
template <typename math_t, typename IdxType = int, int TPB = 256>
void subtractDevScalar(math_t* outDev,
                       const math_t* inDev,
                       const math_t* singleScalarDev,
                       IdxType len,
                       cudaStream_t stream)
{
  // cuBLAS offers no efficient "subtract a device-resident scalar from a
  // vector" primitive, so launch a custom one-thread-per-element kernel.
  // See https://stackoverflow.com/questions/14051064/add-scalar-to-vector-in-blas-cublas-cuda
  const IdxType n_blocks = raft::ceildiv(len, static_cast<IdxType>(TPB));
  subtract_dev_scalar_kernel<math_t>
    <<<n_blocks, TPB, 0, stream>>>(outDev, inDev, singleScalarDev, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/reduce.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/coalesced_reduction.cuh>
#include <raft/linalg/strided_reduction.cuh>
namespace raft {
namespace linalg {
namespace detail {
template <typename InType,
          typename OutType      = InType,
          typename IdxType      = int,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void reduce(OutType* dots,
            const InType* data,
            IdxType D,
            IdxType N,
            OutType init,
            bool rowMajor,
            bool alongRows,
            cudaStream_t stream,
            bool inplace       = false,
            MainLambda main_op = raft::identity_op(),
            ReduceLambda reduce_op = raft::add_op(),
            FinalLambda final_op   = raft::identity_op())
{
  // Dispatch to the coalesced or strided reduction depending on whether the
  // reduction direction follows the memory layout's contiguous dimension.
  // When it does (row-major along rows, or col-major along columns), each
  // output element reduces a contiguous line -> coalescedReduction;
  // otherwise the line is strided -> stridedReduction.
  // The (leading, second) extents passed down are (D, N) for row-major input
  // and (N, D) for column-major input, exactly as in the four-way branch this
  // replaces.
  const bool contiguous_lines = (rowMajor == alongRows);
  const IdxType leading_dim   = rowMajor ? D : N;
  const IdxType second_dim    = rowMajor ? N : D;
  if (contiguous_lines) {
    raft::linalg::coalescedReduction<InType, OutType, IdxType>(
      dots, data, leading_dim, second_dim, init, stream, inplace, main_op, reduce_op, final_op);
  } else {
    raft::linalg::stridedReduction<InType, OutType, IdxType>(
      dots, data, leading_dim, second_dim, init, stream, inplace, main_op, reduce_op, final_op);
  }
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/eig.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cusolver_wrappers.hpp"
#include <cuda_runtime_api.h>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/copy.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
namespace detail {
template <typename math_t>
void eigDC_legacy(raft::resources const& handle,
                  const math_t* in,
                  std::size_t n_rows,
                  std::size_t n_cols,
                  math_t* eig_vectors,
                  math_t* eig_vals,
                  cudaStream_t stream)
{
  // Symmetric eigendecomposition via cuSOLVER's divide-and-conquer syevd
  // (pre-CUDA-11.1 API). The input is copied into eig_vectors, which syevd
  // then overwrites in place with the eigenvectors; eigenvalues land in
  // eig_vals in ascending order per the syevd contract.
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);

  // Query the required device workspace size.
  int lwork;
  RAFT_CUSOLVER_TRY(cusolverDnsyevd_bufferSize(cusolverH,
                                               CUSOLVER_EIG_MODE_VECTOR,
                                               CUBLAS_FILL_MODE_UPPER,
                                               n_rows,
                                               in,
                                               n_cols,
                                               eig_vals,
                                               &lwork));

  rmm::device_uvector<math_t> d_work(lwork, stream);
  rmm::device_scalar<int> d_dev_info(stream);

  // syevd works in place, so stage the input into the output buffer first.
  raft::matrix::copy(handle,
                     make_device_matrix_view<const math_t>(in, n_rows, n_cols),
                     make_device_matrix_view<math_t>(eig_vectors, n_rows, n_cols));

  RAFT_CUSOLVER_TRY(cusolverDnsyevd(cusolverH,
                                    CUSOLVER_EIG_MODE_VECTOR,
                                    CUBLAS_FILL_MODE_UPPER,
                                    n_rows,
                                    eig_vectors,
                                    n_cols,
                                    eig_vals,
                                    d_work.data(),
                                    lwork,
                                    d_dev_info.data(),
                                    stream));
  RAFT_CUDA_TRY(cudaGetLastError());

  // d_dev_info.value() synchronizes the stream; nonzero means the solver
  // failed to converge.
  auto dev_info = d_dev_info.value(stream);
  ASSERT(dev_info == 0,
         "eig.cuh: eigensolver couldn't converge to a solution. "
         "This usually occurs when some of the features do not vary enough.");
}
template <typename math_t>
void eigDC(raft::resources const& handle,
           const math_t* in,
           std::size_t n_rows,
           std::size_t n_cols,
           math_t* eig_vectors,
           math_t* eig_vals,
           cudaStream_t stream)
{
  // Symmetric eigendecomposition via divide-and-conquer. On CUDA >= 11.1 this
  // uses the 64-bit generic-API cusolverDnXsyevd (separate host and device
  // workspaces); older toolkits fall back to the legacy syevd path above.
#if CUDART_VERSION < 11010
  eigDC_legacy(handle, in, n_rows, n_cols, eig_vectors, eig_vals, stream);
#else
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);

  // The X-API requires an (empty) params object.
  cusolverDnParams_t dn_params = nullptr;
  RAFT_CUSOLVER_TRY(cusolverDnCreateParams(&dn_params));

  // Query device- and host-side workspace sizes (in bytes).
  size_t workspaceDevice = 0;
  size_t workspaceHost   = 0;
  RAFT_CUSOLVER_TRY(cusolverDnxsyevd_bufferSize(cusolverH,
                                                dn_params,
                                                CUSOLVER_EIG_MODE_VECTOR,
                                                CUBLAS_FILL_MODE_UPPER,
                                                static_cast<int64_t>(n_rows),
                                                eig_vectors,
                                                static_cast<int64_t>(n_cols),
                                                eig_vals,
                                                &workspaceDevice,
                                                &workspaceHost,
                                                stream));

  // Byte sizes are rounded down to whole math_t elements here; presumably the
  // reported sizes are always multiples of sizeof(math_t) — TODO confirm.
  rmm::device_uvector<math_t> d_work(workspaceDevice / sizeof(math_t), stream);
  rmm::device_scalar<int> d_dev_info(stream);
  std::vector<math_t> h_work(workspaceHost / sizeof(math_t));

  // xsyevd works in place, so stage the input into the output buffer first.
  raft::matrix::copy(handle,
                     make_device_matrix_view<const math_t>(in, n_rows, n_cols),
                     make_device_matrix_view<math_t>(eig_vectors, n_rows, n_cols));

  RAFT_CUSOLVER_TRY(cusolverDnxsyevd(cusolverH,
                                     dn_params,
                                     CUSOLVER_EIG_MODE_VECTOR,
                                     CUBLAS_FILL_MODE_UPPER,
                                     static_cast<int64_t>(n_rows),
                                     eig_vectors,
                                     static_cast<int64_t>(n_cols),
                                     eig_vals,
                                     d_work.data(),
                                     workspaceDevice,
                                     h_work.data(),
                                     workspaceHost,
                                     d_dev_info.data(),
                                     stream));

  RAFT_CUDA_TRY(cudaGetLastError());
  RAFT_CUSOLVER_TRY(cusolverDnDestroyParams(dn_params));

  // d_dev_info.value() synchronizes the stream; nonzero means the solver
  // failed to converge.
  int dev_info = d_dev_info.value(stream);
  ASSERT(dev_info == 0,
         "eig.cuh: eigensolver couldn't converge to a solution. "
         "This usually occurs when some of the features do not vary enough.");
#endif
}
// Whether eigSelDC may overwrite its input matrix in place (OVERWRITE_INPUT)
// or must work on an internal copy, leaving the input intact (COPY_INPUT).
enum EigVecMemUsage { OVERWRITE_INPUT, COPY_INPUT };
/**
 * Computes the n_eig_vals largest eigenpairs of a symmetric matrix via
 * cuSOLVER's syevdx (index-range selection). Eigenvalues are returned in
 * eig_vals and the corresponding eigenvectors in eig_vectors
 * (n_rows x n_eig_vals, column-major).
 *
 * Bug fix: in COPY_INPUT mode the previous code copied the input into
 * eig_vectors and ran the solver on it, yet the final truncation read from
 * d_eig_vectors, which had been resized but never written — producing
 * uninitialized output. It also ran the full n_rows x n_cols factorization in
 * the caller's eig_vectors buffer, which only needs to hold
 * n_rows x n_eig_vals entries. COPY_INPUT now routes the copy and the solver
 * through the internal d_eig_vectors buffer, as the final truncation expects.
 */
template <typename math_t>
void eigSelDC(raft::resources const& handle,
              math_t* in,
              std::size_t n_rows,
              std::size_t n_cols,
              std::size_t n_eig_vals,
              math_t* eig_vectors,
              math_t* eig_vals,
              EigVecMemUsage memUsage,
              cudaStream_t stream)
{
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);

  // Query workspace size. The index range [n_cols - n_eig_vals + 1, n_cols]
  // (1-based, ascending order) selects the n_eig_vals largest eigenvalues.
  int lwork;
  int h_meig;
  RAFT_CUSOLVER_TRY(cusolverDnsyevdx_bufferSize(cusolverH,
                                                CUSOLVER_EIG_MODE_VECTOR,
                                                CUSOLVER_EIG_RANGE_I,
                                                CUBLAS_FILL_MODE_UPPER,
                                                static_cast<int64_t>(n_rows),
                                                in,
                                                static_cast<int64_t>(n_cols),
                                                math_t(0.0),
                                                math_t(0.0),
                                                static_cast<int64_t>(n_cols - n_eig_vals + 1),
                                                static_cast<int64_t>(n_cols),
                                                &h_meig,
                                                eig_vals,
                                                &lwork));

  rmm::device_uvector<math_t> d_work(lwork, stream);
  rmm::device_scalar<int> d_dev_info(stream);
  rmm::device_uvector<math_t> d_eig_vectors(0, stream);

  if (memUsage == OVERWRITE_INPUT) {
    // Factorize directly in the caller's input buffer; `in` is clobbered.
    RAFT_CUSOLVER_TRY(cusolverDnsyevdx(cusolverH,
                                       CUSOLVER_EIG_MODE_VECTOR,
                                       CUSOLVER_EIG_RANGE_I,
                                       CUBLAS_FILL_MODE_UPPER,
                                       static_cast<int64_t>(n_rows),
                                       in,
                                       static_cast<int64_t>(n_cols),
                                       math_t(0.0),
                                       math_t(0.0),
                                       static_cast<int64_t>(n_cols - n_eig_vals + 1),
                                       static_cast<int64_t>(n_cols),
                                       &h_meig,
                                       eig_vals,
                                       d_work.data(),
                                       lwork,
                                       d_dev_info.data(),
                                       stream));
  } else if (memUsage == COPY_INPUT) {
    // Work on an internal full-size copy so `in` is preserved and the
    // caller's eig_vectors (n_rows x n_eig_vals) is never over-written beyond
    // its extent.
    d_eig_vectors.resize(n_rows * n_cols, stream);
    raft::matrix::copy(handle,
                       make_device_matrix_view<const math_t>(in, n_rows, n_cols),
                       make_device_matrix_view(d_eig_vectors.data(), n_rows, n_cols));

    RAFT_CUSOLVER_TRY(cusolverDnsyevdx(cusolverH,
                                       CUSOLVER_EIG_MODE_VECTOR,
                                       CUSOLVER_EIG_RANGE_I,
                                       CUBLAS_FILL_MODE_UPPER,
                                       static_cast<int64_t>(n_rows),
                                       d_eig_vectors.data(),
                                       static_cast<int64_t>(n_cols),
                                       math_t(0.0),
                                       math_t(0.0),
                                       static_cast<int64_t>(n_cols - n_eig_vals + 1),
                                       static_cast<int64_t>(n_cols),
                                       &h_meig,
                                       eig_vals,
                                       d_work.data(),
                                       lwork,
                                       d_dev_info.data(),
                                       stream));
  }

  RAFT_CUDA_TRY(cudaGetLastError());

  // d_dev_info.value() synchronizes the stream; nonzero means failure.
  int dev_info = d_dev_info.value(stream);
  ASSERT(dev_info == 0,
         "eig.cuh: eigensolver couldn't converge to a solution. "
         "This usually occurs when some of the features do not vary enough.");

  // Extract the leading n_rows x n_eig_vals block (the selected eigenvectors,
  // which syevdx packs into the first h_meig columns) into the output buffer.
  if (memUsage == OVERWRITE_INPUT) {
    raft::matrix::trunc_zero_origin(
      handle,
      make_device_matrix_view<const math_t, size_t, col_major>(in, n_rows, n_eig_vals),
      make_device_matrix_view<math_t, size_t, col_major>(eig_vectors, n_rows, n_eig_vals));
  } else if (memUsage == COPY_INPUT) {
    raft::matrix::trunc_zero_origin(
      handle,
      make_device_matrix_view<const math_t, size_t, col_major>(
        d_eig_vectors.data(), n_rows, n_eig_vals),
      make_device_matrix_view<math_t, size_t, col_major>(eig_vectors, n_rows, n_eig_vals));
  }
}
template <typename math_t>
void eigJacobi(raft::resources const& handle,
               const math_t* in,
               std::size_t n_rows,
               std::size_t n_cols,
               math_t* eig_vectors,
               math_t* eig_vals,
               cudaStream_t stream,
               math_t tol    = 1.e-7,
               int sweeps    = 15)
{
  // Symmetric eigendecomposition via cuSOLVER's Jacobi method (syevj).
  // `tol` and `sweeps` bound the iteration; the input is copied into
  // eig_vectors, which syevj overwrites in place with the eigenvectors.
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);

  // Configure the Jacobi solver (tolerance and max sweep count).
  syevjInfo_t syevj_params = nullptr;
  RAFT_CUSOLVER_TRY(cusolverDnCreateSyevjInfo(&syevj_params));
  RAFT_CUSOLVER_TRY(cusolverDnXsyevjSetTolerance(syevj_params, tol));
  RAFT_CUSOLVER_TRY(cusolverDnXsyevjSetMaxSweeps(syevj_params, sweeps));

  // Query the required device workspace size.
  int lwork;
  RAFT_CUSOLVER_TRY(cusolverDnsyevj_bufferSize(cusolverH,
                                               CUSOLVER_EIG_MODE_VECTOR,
                                               CUBLAS_FILL_MODE_UPPER,
                                               static_cast<int64_t>(n_rows),
                                               eig_vectors,
                                               static_cast<int64_t>(n_cols),
                                               eig_vals,
                                               &lwork,
                                               syevj_params));

  rmm::device_uvector<math_t> d_work(lwork, stream);
  rmm::device_scalar<int> dev_info(stream);

  // syevj works in place, so stage the input into the output buffer first.
  raft::matrix::copy(handle,
                     make_device_matrix_view<const math_t>(in, n_rows, n_cols),
                     make_device_matrix_view(eig_vectors, n_rows, n_cols));

  RAFT_CUSOLVER_TRY(cusolverDnsyevj(cusolverH,
                                    CUSOLVER_EIG_MODE_VECTOR,
                                    CUBLAS_FILL_MODE_UPPER,
                                    static_cast<int64_t>(n_rows),
                                    eig_vectors,
                                    static_cast<int64_t>(n_cols),
                                    eig_vals,
                                    d_work.data(),
                                    lwork,
                                    dev_info.data(),
                                    syevj_params,
                                    stream));

  // NOTE(review): executed_sweeps is queried but not used (debug aid?), and
  // dev_info is never validated here, unlike eigDC/eigSelDC which ASSERT on
  // it — confirm whether non-convergence should be surfaced to the caller.
  int executed_sweeps;
  RAFT_CUSOLVER_TRY(cusolverDnXsyevjGetSweeps(cusolverH, syevj_params, &executed_sweeps));

  RAFT_CUDA_TRY(cudaGetLastError());
  RAFT_CUSOLVER_TRY(cusolverDnDestroySyevjInfo(syevj_params));
}
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/reduce_cols_by_key.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <limits>
#include <raft/util/cuda_utils.cuh>
#include <stdlib.h>
namespace raft {
namespace linalg {
namespace detail {
///@todo: support col-major
///@todo: specialize this to support shared-mem based atomics
template <typename T, typename KeyIteratorT, typename IdxType>
RAFT_KERNEL reduce_cols_by_key_direct_kernel(
const T* data, const KeyIteratorT keys, T* out, IdxType nrows, IdxType ncols, IdxType nkeys)
{
typedef typename std::iterator_traits<KeyIteratorT>::value_type KeyType;
IdxType idx = static_cast<IdxType>(blockIdx.x) * blockDim.x + threadIdx.x;
if (idx >= (nrows * ncols)) return;
///@todo: yikes! use fast-int-div
IdxType colId = idx % ncols;
IdxType rowId = idx / ncols;
KeyType key = keys[colId];
raft::myAtomicAdd(out + rowId * nkeys + key, data[idx]);
}
template <typename T, typename KeyIteratorT, typename IdxType>
RAFT_KERNEL reduce_cols_by_key_cached_kernel(
const T* data, const KeyIteratorT keys, T* out, IdxType nrows, IdxType ncols, IdxType nkeys)
{
typedef typename std::iterator_traits<KeyIteratorT>::value_type KeyType;
extern __shared__ char smem[];
T* out_cache = reinterpret_cast<T*>(smem);
// Initialize the shared memory accumulators to 0.
for (IdxType idx = threadIdx.x; idx < nrows * nkeys; idx += blockDim.x) {
out_cache[idx] = T{0};
}
__syncthreads();
// Accumulate in shared memory
for (IdxType idx = static_cast<IdxType>(blockIdx.x) * blockDim.x + threadIdx.x;
idx < nrows * ncols;
idx += blockDim.x * static_cast<IdxType>(gridDim.x)) {
IdxType colId = idx % ncols;
IdxType rowId = idx / ncols;
KeyType key = keys[colId];
raft::myAtomicAdd(out_cache + rowId * nkeys + key, data[idx]);
}
// Add the shared-memory accumulators to the global results.
__syncthreads();
for (IdxType idx = threadIdx.x; idx < nrows * nkeys; idx += blockDim.x) {
T val = out_cache[idx];
if (val != T{0}) { raft::myAtomicAdd(out + idx, val); }
}
}
/**
* @brief Computes the sum-reduction of matrix columns for each given key
* @tparam T the input data type (as well as the output reduced matrix)
 * @tparam KeyIteratorT random-access iterator type over the keys; its value_type is the key type
* @tparam IdxType indexing arithmetic type
* @param data the input data (dim = nrows x ncols). This is assumed to be in
* row-major layout
* @param keys keys array (len = ncols). It is assumed that each key in this
* array is between [0, nkeys). In case this is not true, the caller is expected
* to have called make_monotonic primitive to prepare such a contiguous and
* monotonically increasing keys array.
* @param out the output reduced matrix along columns (dim = nrows x nkeys).
* This will be assumed to be in row-major layout
* @param nrows number of rows in the input data
* @param ncols number of columns in the input data
* @param nkeys number of unique keys in the keys array
* @param stream cuda stream to launch the kernel onto
* @param reset_sums Whether to reset the output sums to zero before reducing
*/
template <typename T, typename KeyIteratorT, typename IdxType = int>
void reduce_cols_by_key(const T* data,
                        const KeyIteratorT keys,
                        T* out,
                        IdxType nrows,
                        IdxType ncols,
                        IdxType nkeys,
                        cudaStream_t stream,
                        bool reset_sums)
{
  // Both the input (nrows x ncols) and output (nrows x nkeys) extents must be
  // addressable with IdxType, since the kernels linearize them.
  RAFT_EXPECTS(static_cast<size_t>(nrows) * static_cast<size_t>(ncols) <=
                 static_cast<size_t>(std::numeric_limits<IdxType>::max()),
               "Index type too small to represent indices in the input array.");
  RAFT_EXPECTS(static_cast<size_t>(nrows) * static_cast<size_t>(nkeys) <=
                 static_cast<size_t>(std::numeric_limits<IdxType>::max()),
               "Index type too small to represent indices in the output array.");

  // Memset the output to zero to use atomics-based reduction.
  if (reset_sums) { RAFT_CUDA_TRY(cudaMemsetAsync(out, 0, sizeof(T) * nrows * nkeys, stream)); }

  constexpr int TPB           = 256;
  const IdxType n_elems       = nrows * ncols;
  const size_t cache_size     = static_cast<size_t>(nrows * nkeys) * sizeof(T);
  // The shared-memory-cached kernel wins only when its cache fits in shared
  // memory (48 KB) and the input is large enough to amortize the cache
  // init/flush; for small inputs the direct-atomics kernel is slightly faster.
  const bool use_cached_kernel = cache_size <= 49152ull && n_elems >= IdxType{8192};
  if (use_cached_kernel) {
    // Grid-stride kernel: cap the grid at 4 blocks per SM, but never launch
    // more blocks than there are elements to cover.
    const int target_nblks = 4 * raft::getMultiProcessorCount();
    const int max_nblks    = raft::ceildiv<IdxType>(n_elems, TPB);
    const int nblks        = std::min(target_nblks, max_nblks);
    reduce_cols_by_key_cached_kernel<<<nblks, TPB, cache_size, stream>>>(
      data, keys, out, nrows, ncols, nkeys);
  } else {
    // One thread per element, atomics straight into global memory.
    const int nblks = raft::ceildiv<IdxType>(n_elems, TPB);
    reduce_cols_by_key_direct_kernel<<<nblks, TPB, 0, stream>>>(
      data, keys, out, nrows, ncols, nkeys);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/eltwise.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/unary_op.cuh>
namespace raft {
namespace linalg {
namespace detail {
template <typename InType, typename IdxType, typename OutType = InType>
void scalarAdd(OutType* out, const InType* in, InType scalar, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in[i] + scalar for i in [0, len).
  const auto add_const = raft::add_const_op<InType>(scalar);
  raft::linalg::unaryOp(out, in, len, add_const, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void scalarMultiply(OutType* out, const InType* in, InType scalar, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in[i] * scalar for i in [0, len).
  const auto mul_const = raft::mul_const_op<InType>(scalar);
  raft::linalg::unaryOp(out, in, len, mul_const, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseAdd(
  OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in1[i] + in2[i] for i in [0, len).
  const auto op = raft::add_op();
  raft::linalg::binaryOp(out, in1, in2, len, op, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseSub(
  OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in1[i] - in2[i] for i in [0, len).
  const auto op = raft::sub_op();
  raft::linalg::binaryOp(out, in1, in2, len, op, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseMultiply(
  OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in1[i] * in2[i] for i in [0, len).
  const auto op = raft::mul_op();
  raft::linalg::binaryOp(out, in1, in2, len, op, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseDivide(
  OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise out[i] = in1[i] / in2[i] for i in [0, len).
  const auto op = raft::div_op();
  raft::linalg::binaryOp(out, in1, in2, len, op, stream);
}
template <typename InType, typename IdxType, typename OutType = InType>
void eltwiseDivideCheckZero(
  OutType* out, const InType* in1, const InType* in2, IdxType len, cudaStream_t stream)
{
  // Elementwise division that guards against a zero divisor
  // (raft::div_checkzero_op defines the zero-divisor result).
  const auto op = raft::div_checkzero_op();
  raft::linalg::binaryOp(out, in1, in2, len, op, stream);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/matrix_vector_op.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/linewise_op.cuh>
namespace raft {
namespace linalg {
namespace detail {
template <typename MatT, typename Lambda, typename VecT, typename IdxType = int, int TPB = 256>
void matrixVectorOp(MatT* out,
                    const MatT* matrix,
                    const VecT* vec,
                    IdxType D,
                    IdxType N,
                    bool rowMajor,
                    bool bcastAlongRows,
                    Lambda op,
                    cudaStream_t stream)
{
  // Applies `op` to each matrix element together with the broadcast vector
  // element, writing into `out`. Wrap the raw stream in a resources handle so
  // the mdspan-based linewise_op API can be reused.
  raft::resources res;
  resource::set_cuda_stream(res, stream);

  // The vector runs "along lines" when its broadcast direction matches the
  // layout's contiguous dimension; its length is N when broadcasting along
  // rows, D otherwise.
  const bool along_lines = rowMajor == bcastAlongRows;
  const IdxType vec_len  = bcastAlongRows ? N : D;
  auto vec_view          = make_device_vector_view<const VecT, IdxType>(vec, vec_len);

  if (rowMajor) {
    matrix::linewise_op<MatT, IdxType, row_major, Lambda>(
      res,
      make_device_matrix_view<const MatT, IdxType, row_major>(matrix, N, D),
      make_device_matrix_view<MatT, IdxType, row_major>(out, N, D),
      along_lines,
      op,
      vec_view);
  } else {
    matrix::linewise_op<MatT, IdxType, col_major, Lambda>(
      res,
      make_device_matrix_view<const MatT, IdxType, col_major>(matrix, N, D),
      make_device_matrix_view<MatT, IdxType, col_major>(out, N, D),
      along_lines,
      op,
      vec_view);
  }
}
/**
 * @brief Apply an element-wise operation between a matrix and two vectors
 * broadcast along the same direction, delegating to matrix::linewise_op.
 *
 * @param out            output device pointer, N rows by D columns
 * @param matrix         input device pointer, N rows by D columns
 * @param vec1           first broadcast vector (length N if bcastAlongRows else D)
 * @param vec2           second broadcast vector (same length as vec1)
 * @param D              number of columns
 * @param N              number of rows
 * @param rowMajor       memory layout of `matrix` and `out`
 * @param bcastAlongRows broadcast direction of the vectors
 * @param op             ternary device functor op(matrix_element, v1, v2)
 * @param stream         cuda stream to enqueue the work on
 */
template <typename MatT,
          typename Lambda,
          typename Vec1T,
          typename Vec2T,
          typename IdxType = int,
          int TPB          = 256>
void matrixVectorOp(MatT* out,
                    const MatT* matrix,
                    const Vec1T* vec1,
                    const Vec2T* vec2,
                    IdxType D,
                    IdxType N,
                    bool rowMajor,
                    bool bcastAlongRows,
                    Lambda op,
                    cudaStream_t stream)
{
  raft::resources handle;
  resource::set_cuda_stream(handle, stream);
  // The vectors run "along lines" exactly when the broadcast direction
  // coincides with the memory layout direction.
  const bool same_direction = (rowMajor == bcastAlongRows);
  const IdxType vec_len     = bcastAlongRows ? N : D;
  const auto v1             = make_device_vector_view<const Vec1T, IdxType>(vec1, vec_len);
  const auto v2             = make_device_vector_view<const Vec2T, IdxType>(vec2, vec_len);
  if (rowMajor) {
    matrix::linewise_op<MatT, IdxType, row_major, Lambda>(
      handle,
      make_device_matrix_view<const MatT, IdxType, row_major>(matrix, N, D),
      make_device_matrix_view<MatT, IdxType, row_major>(out, N, D),
      same_direction,
      op,
      v1,
      v2);
  } else {
    matrix::linewise_op<MatT, IdxType, col_major, Lambda>(
      handle,
      make_device_matrix_view<const MatT, IdxType, col_major>(matrix, N, D),
      make_device_matrix_view<MatT, IdxType, col_major>(out, N, D),
      same_direction,
      op,
      v1,
      v2);
  }
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/map.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/input_validation.hpp>
#include <raft/util/integer_utils.hpp>
#include <raft/util/pow2_utils.cuh>
#include <raft/util/vectorized.cuh>

#include <rmm/cuda_stream_view.hpp>

#include <thrust/tuple.h>

#include <cstdint>
#include <limits>
namespace raft::linalg::detail {
/**
 * @brief Invoke the user functor on one element, optionally prepending the
 * element's flat offset as the first argument (compile-time switch).
 */
template <bool PassOffset, typename OutT, typename IdxT, typename Func, typename... InTs>
__device__ __forceinline__ auto map_apply(Func f, const IdxT& offset, const InTs&... ins) -> OutT
{
  if constexpr (!PassOffset) {
    return f(ins...);
  } else {
    return f(offset, ins...);
  }
}
/**
 * @brief Vectorized main body of the map kernel: process one aligned chunk of
 * R consecutive elements starting at `offset`.
 *
 * Every input pointer is loaded as a TxN_t<_, R> wide value, the functor is
 * applied lane-by-lane, and the R results are written back with one vectorized
 * store. A chunk that would run past `len` is skipped entirely — the ragged
 * tail is handled scalar-wise by the caller (map_kernel).
 *
 * The trailing index_sequence only exists so the thrust::tuple of wide loads
 * can be expanded in lockstep with the in_ptrs parameter pack.
 */
template <int R,
          bool PassOffset,
          typename OutT,
          typename IdxT,
          typename Func,
          typename... InTs,
          size_t... Is>
__device__ __forceinline__ void map_kernel_mainloop(
  OutT* out_ptr, IdxT offset, IdxT len, Func f, const InTs*... in_ptrs, std::index_sequence<Is...>)
{
  TxN_t<OutT, R> wide;
  // One wide (R-element) register per input array.
  thrust::tuple<TxN_t<InTs, R>...> wide_args;
  if (offset + R <= len) {
    (thrust::get<Is>(wide_args).load(in_ptrs, offset), ...);
#pragma unroll
    for (int j = 0; j < R; ++j) {
      wide.val.data[j] = map_apply<PassOffset, OutT, IdxT, Func, InTs...>(
        f, offset + j, thrust::get<Is>(wide_args).val.data[j]...);
    }
    wide.store(out_ptr, offset);
  }
}
/**
 * @brief Element-wise map kernel.
 *
 * For R <= 1 this is a plain one-thread-per-element kernel. For R > 1 each
 * thread processes R consecutive elements with vectorized loads/stores; the
 * unaligned prefix (elements before the first sizeof(OutT)*R-aligned output
 * address) and the ragged suffix are handled scalar-wise by the first threads
 * of the grid. Expects a grid of at least ceil(len / R) threads.
 */
template <int R, bool PassOffset, typename OutT, typename IdxT, typename Func, typename... InTs>
RAFT_KERNEL map_kernel(OutT* out_ptr, IdxT len, Func f, const InTs*... in_ptrs)
{
  const IdxT tid = blockIdx.x * blockDim.x + threadIdx.x;
  if constexpr (R <= 1) {
    if (tid < len) {
      out_ptr[tid] = map_apply<PassOffset, OutT, IdxT, Func, InTs...>(f, tid, in_ptrs[tid]...);
    }
  } else {
    using align_bytes = Pow2<sizeof(OutT) * size_t(R)>;
    using align_elems = Pow2<R>;
    // how many elements to skip in order to do aligned vectorized store
    const IdxT skip_cnt_left = std::min<IdxT>(IdxT(align_bytes::roundUp(out_ptr) - out_ptr), len);
    // The main loop: process all aligned data
    map_kernel_mainloop<R, PassOffset, OutT, IdxT, Func, InTs...>(
      out_ptr, tid * R + skip_cnt_left, len, f, in_ptrs..., std::index_sequence_for<InTs...>());
    // The scalar prefix/suffix passes below reuse low thread ids; they fit in
    // one warp's worth of work since the skip counts are < R.
    static_assert(WarpSize >= R);
    // Processes the skipped elements on the left
    if (tid < skip_cnt_left) {
      out_ptr[tid] = map_apply<PassOffset, OutT, IdxT, Func, InTs...>(f, tid, in_ptrs[tid]...);
    }
    // Processes the skipped elements on the right
    const IdxT skip_cnt_right = align_elems::mod(len - skip_cnt_left);
    const IdxT remain_i       = len - skip_cnt_right + tid;
    if (remain_i < len) {
      out_ptr[remain_i] =
        map_apply<PassOffset, OutT, IdxT, Func, InTs...>(f, remain_i, in_ptrs[remain_i]...);
    }
  }
}
/**
 * @brief Compute the launch geometry for map_kernel and launch it on `stream`.
 *
 * The grid covers ceil(len / R) vectorized work items; the block size is a
 * power of two clamped to [WarpSize, 256].
 */
template <int R, bool PassOffset, typename OutT, typename IdxT, typename Func, typename... InTs>
void map_call(rmm::cuda_stream_view stream, OutT* out_ptr, IdxT len, Func f, const InTs*... in_ptrs)
{
  const IdxT len_vectorized = raft::div_rounding_up_safe<IdxT>(len, R);
  const int threads =
    std::max<int>(WarpSize, std::min<IdxT>(raft::bound_by_power_of_two<IdxT>(len_vectorized), 256));
  const IdxT blocks = raft::div_rounding_up_unsafe<IdxT>(len_vectorized, threads);
  map_kernel<R, PassOffset><<<blocks, threads, 0, stream>>>(out_ptr, len, f, in_ptrs...);
}
// Target width (in bytes) of a single vectorized load/store per thread.
constexpr int kCoalescedVectorSize = 16;
// Inputs at or below this element count skip vectorization entirely.
constexpr int kSmallInputThreshold = 1024;
/**
 * @brief Chooses a per-thread vectorization ratio for one pointer.
 *
 * `ratio` is how many elements of T fit into a kCoalescedVectorSize-byte
 * vector (1 when sizeof(T) does not divide it evenly); `align` is the number
 * of elements to skip before the pointer reaches a sizeof(T)*ratio alignment
 * boundary.
 */
struct ratio_selector {
  int ratio;  // elements per vectorized access
  int align;  // elements until the pointer is vector-aligned
  constexpr inline ratio_selector(int r, int a) : ratio(r), align(a) {}

  // Best-case ratio derived from the type alone (alignment assumed perfect).
  template <typename T>
  constexpr static auto ignoring_alignment() -> ratio_selector
  {
    constexpr bool T_evenly_fits_in_cache_line = (kCoalescedVectorSize % sizeof(T)) == 0;

    if constexpr (T_evenly_fits_in_cache_line) {
      return ratio_selector{size_t(kCoalescedVectorSize / sizeof(T)), 0};
    } else {
      return ratio_selector{1, 0};
    }
  }

  // Ratio + misalignment for a concrete runtime pointer.
  template <typename T>
  explicit ratio_selector(const T* ptr)
  {
    constexpr auto s = ignoring_alignment<T>();  // NOLINT
    if constexpr (s.ratio == 1) {
      align = 0;
    } else {
      align = int(Pow2<sizeof(T) * s.ratio>::roundUp(ptr) - ptr);
    }
    ratio = int(s.ratio);
  }
};
/**
 * @brief Combine two selectors: keep halving the smaller ratio until both
 * pointers have the same residual misalignment modulo the ratio, i.e. until a
 * single element-skip count can align both arrays at once.
 * Always terminates: any two alignments are congruent modulo 1.
 */
constexpr inline auto operator*(const ratio_selector& a, const ratio_selector& b) -> ratio_selector
{
  auto ratio = std::min<int>(a.ratio, b.ratio);
  while ((a.align % ratio) != (b.align % ratio)) {
    ratio >>= 1;
  }
  return ratio_selector{ratio, a.align % ratio};
}
/**
 * @brief Dispatch a runtime-selected vectorization ratio `r` to a compile-time
 * ratio: walk down R, R/2, ..., 1 and launch map_call with the first
 * compile-time ratio not exceeding `r`. Since r >= 1, the recursion always
 * bottoms out at R == 1.
 */
template <int R, bool PassOffset, typename OutT, typename IdxT, typename Func, typename... InTs>
void map_call_rt(
  int r, rmm::cuda_stream_view stream, OutT* out_ptr, IdxT len, Func f, const InTs*... in_ptrs)
{
  if (r >= R) { return map_call<R, PassOffset>(stream, out_ptr, len, f, in_ptrs...); }
  if constexpr (R > 1) {
    return map_call_rt<(R >> 1), PassOffset>(r, stream, out_ptr, len, f, in_ptrs...);
  }
}
/**
 * @brief Pointer-level entry point: pick the largest vectorization ratio
 * compatible with all value types (compile time) and all actual pointer
 * alignments (run time), then launch the kernel with that ratio.
 */
template <bool PassOffset, typename OutT, typename IdxT, typename Func, typename... InTs>
void map(rmm::cuda_stream_view stream, OutT* out_ptr, IdxT len, Func f, const InTs*... in_ptrs)
{
  // don't vectorize on small inputs
  if (len <= kSmallInputThreshold) {
    return map_call<1, PassOffset>(stream, out_ptr, len, f, in_ptrs...);
  }
  // Upper bound on the ratio from the types alone; the runtime value can only
  // be smaller (pointer misalignment).
  constexpr int kRatio =
    (ratio_selector::ignoring_alignment<OutT>() * ... * ratio_selector::ignoring_alignment<InTs>())
      .ratio;
  static_assert(kRatio > 0, "Unexpected zero vector size.");
  const int ratio = (ratio_selector(out_ptr) * ... * ratio_selector(in_ptrs)).ratio;
  return map_call_rt<kRatio, PassOffset>(ratio, stream, out_ptr, len, f, in_ptrs...);
}
/**
 * @brief Validate one input against the output: it must be contiguous
 * (row- or column-major) and have exactly the same number of elements.
 */
template <typename OutType,
          typename InType,
          typename = raft::enable_if_output_device_mdspan<OutType>,
          typename = raft::enable_if_input_device_mdspan<InType>>
void map_check_shape(OutType out, InType in)
{
  const bool contiguous = raft::is_row_or_column_major(in);
  const bool same_size  = (out.size() == in.size());
  RAFT_EXPECTS(contiguous && same_size,
               "All inputs must be contiguous and have the same size as the output");
}
/**
* @brief Map a function over a zero or more inputs and optionally a 0-based flat index
* (element offset).
*
* _Performance note_: when possible, this function loads the argument arrays and stores the output
* array using vectorized cuda load/store instructions. The size of the vectorization depends on the
* size of the largest input/output element type and on the alignment of all pointers.
*
* @tparam PassOffset whether to pass an offset as a first argument to Func
* @tparam OutType data-type of the result (device_mdspan)
* @tparam Func the device-lambda performing the actual operation
* @tparam InTypes data-types of the inputs (device_mdspan)
*
* @param[in] res raft::resources
* @param[out] out the output of the map operation (device_mdspan)
* @param[in] f device lambda of type
* ([auto offset], InTypes::value_type xs...) -> OutType::value_type
* @param[in] ins the inputs (each of the same size as the output) (device_mdspan)
*/
template <bool PassOffset,
          typename OutType,
          typename Func,
          typename... InTypes,
          typename = raft::enable_if_output_device_mdspan<OutType>,
          typename = raft::enable_if_input_device_mdspan<InTypes...>>
void map(const raft::resources& res, OutType out, Func f, InTypes... ins)
{
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
  // Each input must be contiguous and the same size as the output.
  (map_check_shape(out, ins), ...);
  // Prefer 32-bit indexing whenever the element count permits (cheaper index
  // arithmetic on device); fall back to 64-bit only for very large arrays.
  // std::uint32_t / std::numeric_limits come from <cstdint> / <limits>.
  const auto n = out.size();
  if (n <= std::numeric_limits<std::uint32_t>::max()) {
    map<PassOffset,
        typename OutType::value_type,
        std::uint32_t,
        Func,
        typename InTypes::value_type...>(resource::get_cuda_stream(res),
                                         out.data_handle(),
                                         std::uint32_t(n),
                                         f,
                                         ins.data_handle()...);
  } else {
    map<PassOffset,
        typename OutType::value_type,
        std::uint64_t,
        Func,
        typename InTypes::value_type...>(resource::get_cuda_stream(res),
                                         out.data_handle(),
                                         std::uint64_t(n),
                                         f,
                                         ins.data_handle()...);
  }
}
} // namespace raft::linalg::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/strided_reduction.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <type_traits>
namespace raft {
namespace linalg {
namespace detail {
// Kernel to perform reductions along the strided dimension
// of the matrix, i.e. reduce along columns for row major or reduce along rows
// for column major layout
/**
 * @brief Summation specialization of the strided reduction.
 *
 * Each thread accumulates main_op(data[col + row*D], row) over a strided
 * subset of rows for one column; the block then tree-reduces over threadIdx.y
 * in shared memory, and block partials are combined with atomicAdd into
 * `dots`. The shared-memory tree reduction assumes blockDim.y is a power of
 * two (the launcher uses 16).
 *
 * NOTE(review): D, N and the flat index `idx` are `int`, so this path assumes
 * D * N fits in 32 bits — confirm for very large matrices.
 */
template <typename Type, typename MainLambda>
RAFT_KERNEL stridedSummationKernel(
  Type* dots, const Type* data, int D, int N, Type init, MainLambda main_op)
{
  // Thread reduction
  Type thread_data = Type(init);
  int colStart     = blockIdx.x * blockDim.x + threadIdx.x;
  if (colStart < D) {
    int rowStart = blockIdx.y * blockDim.y + threadIdx.y;
    int stride   = blockDim.y * gridDim.y;
    for (int j = rowStart; j < N; j += stride) {
      int idx = colStart + j * D;
      thread_data += main_op(data[idx], j);
    }
  }
  // Block reduction
  extern __shared__ char tmp[];  // One element per thread in block
  Type* temp = (Type*)tmp;       // Cast to desired type
  int myidx  = threadIdx.x + blockDim.x * threadIdx.y;
  temp[myidx] = thread_data;
  __syncthreads();
  for (int j = blockDim.y / 2; j > 0; j /= 2) {
    if (threadIdx.y < j) temp[myidx] += temp[myidx + j * blockDim.x];
    __syncthreads();
  }
  // Grid reduction
  if ((colStart < D) && (threadIdx.y == 0)) raft::myAtomicAdd(dots + colStart, temp[myidx]);
}
// Kernel to perform reductions along the strided dimension
// of the matrix, i.e. reduce along columns for row major or reduce along rows
// for column major layout
// Kernel to perform generic reductions along the strided dimension
// of the matrix, i.e. reduce along columns for row major or reduce along rows
// for column major layout.
//
// Each thread folds a strided subset of rows into `thread_data` via
// reduce_op(acc, main_op(x, row)); the block then tree-reduces over
// threadIdx.y in shared memory (blockDim.y must be a power of two — the
// launcher uses 16), and block partials are merged with an atomic reduce.
//
// D and N are taken as IdxType (previously `int`) so that 64-bit index types
// requested by the caller are honored instead of being silently truncated;
// the thread/block coordinates are widened to IdxType before multiplication
// for the same reason.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda>
RAFT_KERNEL stridedReductionKernel(OutType* dots,
                                   const InType* data,
                                   IdxType D,
                                   IdxType N,
                                   OutType init,
                                   MainLambda main_op,
                                   ReduceLambda reduce_op)
{
  // Thread reduction
  OutType thread_data = init;
  IdxType colStart    = static_cast<IdxType>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (colStart < D) {
    IdxType rowStart = static_cast<IdxType>(blockIdx.y) * blockDim.y + threadIdx.y;
    IdxType stride   = static_cast<IdxType>(blockDim.y) * gridDim.y;
    for (IdxType j = rowStart; j < N; j += stride) {
      IdxType idx = colStart + j * D;
      thread_data = reduce_op(thread_data, main_op(data[idx], j));
    }
  }
  // Block reduction
  extern __shared__ char tmp[];  // One element per thread in block
  auto* temp    = (OutType*)tmp; // Cast to desired type
  IdxType myidx = threadIdx.x + ((IdxType)blockDim.x * (IdxType)threadIdx.y);
  temp[myidx]   = thread_data;
  __syncthreads();
  for (int j = blockDim.y / 2; j > 0; j /= 2) {
    if (threadIdx.y < j) temp[myidx] = reduce_op(temp[myidx], temp[myidx + j * blockDim.x]);
    __syncthreads();
  }
  // Grid reduction
  if ((colStart < D) && (threadIdx.y == 0))
    raft::myAtomicReduce(dots + colStart, temp[myidx], reduce_op);
}
/**
 * @brief Host launcher for reductions along the strided (column) dimension.
 *
 * @param dots      output, one reduced value per column (length D)
 * @param data      input matrix, N rows by D columns, column index strided by D
 * @param D         number of columns (length of `dots`)
 * @param N         number of rows
 * @param init      identity value of the reduction
 * @param stream    cuda stream to enqueue the work on
 * @param inplace   when true, accumulate into the existing `dots` contents
 *                  instead of resetting them to `init` first
 * @param main_op   per-element transform, receives (value, row index)
 * @param reduce_op binary reduction operator
 * @param final_op  applied to each output element after the reduction
 *
 * Implementation: the kernels accumulate with atomics, so `dots` is first
 * filled with `init` unless `inplace`. The add/same-type case dispatches to
 * the summation kernel; otherwise the generic kernel is used. A final unary
 * pass applies final_op when it is not the identity.
 * NOTE(review): the summation fast path passes D and N to a kernel taking
 * `int`, so 64-bit IdxType values would be truncated on that path — confirm
 * callers' sizes.
 */
template <typename InType,
          typename OutType      = InType,
          typename IdxType      = int,
          typename MainLambda   = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda  = raft::identity_op>
void stridedReduction(OutType* dots,
                      const InType* data,
                      IdxType D,
                      IdxType N,
                      OutType init,
                      cudaStream_t stream,
                      bool inplace           = false,
                      MainLambda main_op     = raft::identity_op(),
                      ReduceLambda reduce_op = raft::add_op(),
                      FinalLambda final_op   = raft::identity_op())
{
  ///@todo: this extra should go away once we have eliminated the need
  /// for atomics in stridedKernel (redesign for this is already underway)
  if (!inplace) raft::linalg::unaryOp(dots, dots, D, raft::const_op(init), stream);

  // Arbitrary numbers for now, probably need to tune
  const dim3 thrds(32, 16);
  IdxType elemsPerThread = raft::ceildiv(N, (IdxType)thrds.y);
  elemsPerThread         = (elemsPerThread > 8) ? 8 : elemsPerThread;
  const dim3 nblks(raft::ceildiv(D, (IdxType)thrds.x),
                   raft::ceildiv(N, (IdxType)thrds.y * elemsPerThread));
  const size_t shmemSize = sizeof(OutType) * thrds.x * thrds.y;

  ///@todo: this complication should go away once we have eliminated the need
  /// for atomics in stridedKernel (redesign for this is already underway)
  if constexpr (std::is_same<ReduceLambda, raft::add_op>::value &&
                std::is_same<InType, OutType>::value)
    stridedSummationKernel<InType>
      <<<nblks, thrds, shmemSize, stream>>>(dots, data, D, N, init, main_op);
  else
    stridedReductionKernel<InType, OutType, IdxType>
      <<<nblks, thrds, shmemSize, stream>>>(dots, data, D, N, init, main_op, reduce_op);

  ///@todo: this complication should go away once we have eliminated the need
  /// for atomics in stridedKernel (redesign for this is already underway)
  // Perform final op on output data
  if (!std::is_same<FinalLambda, raft::identity_op>::value)
    raft::linalg::unaryOp(dots, dots, D, final_op, stream);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/cusolver_wrappers.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusolverDn.h>
#include <cusolverSp.h>
#include <raft/core/cusolver_macros.hpp>
#include <raft/util/cudart_utils.hpp>
#include <type_traits>
namespace raft {
namespace linalg {
namespace detail {
/**
* @defgroup Getrf cusolver getrf operations
* @{
*/
// Type-dispatching wrappers around cusolverDn<S|D>getrf (dense LU
// factorization with partial pivoting). The stream is bound to the handle
// before dispatching; `Workspace` must be sized via the matching
// cusolverDngetrf_bufferSize overload below.
template <typename T>
cusolverStatus_t cusolverDngetrf(cusolverDnHandle_t handle,
                                 int m,  // NOLINT
                                 int n,
                                 T* A,
                                 int lda,
                                 T* Workspace,
                                 int* devIpiv,
                                 int* devInfo,
                                 cudaStream_t stream);
// float -> cusolverDnSgetrf
template <>
inline cusolverStatus_t cusolverDngetrf(cusolverDnHandle_t handle,  // NOLINT
                                        int m,
                                        int n,
                                        float* A,
                                        int lda,
                                        float* Workspace,
                                        int* devIpiv,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo);
}
// double -> cusolverDnDgetrf
template <>
inline cusolverStatus_t cusolverDngetrf(cusolverDnHandle_t handle,  // NOLINT
                                        int m,
                                        int n,
                                        double* A,
                                        int lda,
                                        double* Workspace,
                                        int* devIpiv,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo);
}
// Workspace-size queries for getrf (these do not bind a stream).
template <typename T>
cusolverStatus_t cusolverDngetrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  int m,
  int n,
  T* A,
  int lda,
  int* Lwork);
template <>
inline cusolverStatus_t cusolverDngetrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  int m,
  int n,
  float* A,
  int lda,
  int* Lwork)
{
  return cusolverDnSgetrf_bufferSize(handle, m, n, A, lda, Lwork);
}
template <>
inline cusolverStatus_t cusolverDngetrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  int m,
  int n,
  double* A,
  int lda,
  int* Lwork)
{
  return cusolverDnDgetrf_bufferSize(handle, m, n, A, lda, Lwork);
}
/**
* @defgroup Getrs cusolver getrs operations
* @{
*/
// Type-dispatching wrappers around cusolverDn<S|D>getrs: solve A * X = B (or
// transposed variants, per `trans`) using the factors/pivots from getrf.
// The stream is bound to the handle before dispatching.
template <typename T>
cusolverStatus_t cusolverDngetrs(cusolverDnHandle_t handle,  // NOLINT
                                 cublasOperation_t trans,
                                 int n,
                                 int nrhs,
                                 const T* A,
                                 int lda,
                                 const int* devIpiv,
                                 T* B,
                                 int ldb,
                                 int* devInfo,
                                 cudaStream_t stream);
template <>
inline cusolverStatus_t cusolverDngetrs(cusolverDnHandle_t handle,  // NOLINT
                                        cublasOperation_t trans,
                                        int n,
                                        int nrhs,
                                        const float* A,
                                        int lda,
                                        const int* devIpiv,
                                        float* B,
                                        int ldb,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo);
}
template <>
inline cusolverStatus_t cusolverDngetrs(cusolverDnHandle_t handle,  // NOLINT
                                        cublasOperation_t trans,
                                        int n,
                                        int nrhs,
                                        const double* A,
                                        int lda,
                                        const int* devIpiv,
                                        double* B,
                                        int ldb,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo);
}
/** @} */
/**
* @defgroup syevd cusolver syevd operations
* @{
*/
// Workspace-size queries for syevd (symmetric eigendecomposition).
template <typename T>
cusolverStatus_t cusolverDnsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const T* A,
  int lda,
  const T* W,
  int* lwork);
template <>
inline cusolverStatus_t cusolverDnsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const float* A,
  int lda,
  const float* W,
  int* lwork)
{
  return cusolverDnSsyevd_bufferSize(handle, jobz, uplo, n, A, lda, W, lwork);
}
template <>
inline cusolverStatus_t cusolverDnsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const double* A,
  int lda,
  const double* W,
  int* lwork)
{
  return cusolverDnDsyevd_bufferSize(handle, jobz, uplo, n, A, lda, W, lwork);
}
/** @} */

/**
 * @defgroup syevj cusolver syevj operations
 * @{
 */
// Jacobi-method symmetric eigensolver wrappers; the stream is bound to the
// handle before dispatching.
template <typename T>
cusolverStatus_t cusolverDnsyevj(cusolverDnHandle_t handle,  // NOLINT
                                 cusolverEigMode_t jobz,
                                 cublasFillMode_t uplo,
                                 int n,
                                 T* A,
                                 int lda,
                                 T* W,
                                 T* work,
                                 int lwork,
                                 int* info,
                                 syevjInfo_t params,
                                 cudaStream_t stream);
template <>
inline cusolverStatus_t cusolverDnsyevj(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  float* A,
  int lda,
  float* W,
  float* work,
  int lwork,
  int* info,
  syevjInfo_t params,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSsyevj(handle, jobz, uplo, n, A, lda, W, work, lwork, info, params);
}
template <>
inline cusolverStatus_t cusolverDnsyevj(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  double* A,
  int lda,
  double* W,
  double* work,
  int lwork,
  int* info,
  syevjInfo_t params,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDsyevj(handle, jobz, uplo, n, A, lda, W, work, lwork, info, params);
}
// Workspace-size queries for syevj.
template <typename T>
cusolverStatus_t cusolverDnsyevj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const T* A,
  int lda,
  const T* W,
  int* lwork,
  syevjInfo_t params);
template <>
inline cusolverStatus_t cusolverDnsyevj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const float* A,
  int lda,
  const float* W,
  int* lwork,
  syevjInfo_t params)
{
  return cusolverDnSsyevj_bufferSize(handle, jobz, uplo, n, A, lda, W, lwork, params);
}
template <>
inline cusolverStatus_t cusolverDnsyevj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cublasFillMode_t uplo,
  int n,
  const double* A,
  int lda,
  const double* W,
  int* lwork,
  syevjInfo_t params)
{
  return cusolverDnDsyevj_bufferSize(handle, jobz, uplo, n, A, lda, W, lwork, params);
}
/** @} */

/**
 * @defgroup syevd cusolver syevd operations
 * @{
 */
// Divide-and-conquer symmetric eigensolver wrappers; the stream is bound to
// the handle before dispatching.
template <typename T>
cusolverStatus_t cusolverDnsyevd(cusolverDnHandle_t handle,  // NOLINT
                                 cusolverEigMode_t jobz,
                                 cublasFillMode_t uplo,
                                 int n,
                                 T* A,
                                 int lda,
                                 T* W,
                                 T* work,
                                 int lwork,
                                 int* devInfo,
                                 cudaStream_t stream);
template <>
inline cusolverStatus_t cusolverDnsyevd(cusolverDnHandle_t handle,  // NOLINT
                                        cusolverEigMode_t jobz,
                                        cublasFillMode_t uplo,
                                        int n,
                                        float* A,
                                        int lda,
                                        float* W,
                                        float* work,
                                        int lwork,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSsyevd(handle, jobz, uplo, n, A, lda, W, work, lwork, devInfo);
}
template <>
inline cusolverStatus_t cusolverDnsyevd(cusolverDnHandle_t handle,  // NOLINT
                                        cusolverEigMode_t jobz,
                                        cublasFillMode_t uplo,
                                        int n,
                                        double* A,
                                        int lda,
                                        double* W,
                                        double* work,
                                        int lwork,
                                        int* devInfo,
                                        cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDsyevd(handle, jobz, uplo, n, A, lda, W, work, lwork, devInfo);
}
/** @} */
/**
* @defgroup syevdx cusolver syevdx operations
* @{
*/
// syevdx: symmetric eigensolver restricted to a value range [vl, vu] or index
// range [il, iu]; h_meig receives the number of eigenvalues found.
// Workspace-size queries first, then the stream-binding solver wrappers.
template <typename T>
cusolverStatus_t cusolverDnsyevdx_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  const T* A,
  int lda,
  T vl,
  T vu,
  int il,
  int iu,
  int* h_meig,
  const T* W,
  int* lwork);
template <>
inline cusolverStatus_t cusolverDnsyevdx_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  const float* A,
  int lda,
  float vl,
  float vu,
  int il,
  int iu,
  int* h_meig,
  const float* W,
  int* lwork)
{
  return cusolverDnSsyevdx_bufferSize(
    handle, jobz, range, uplo, n, A, lda, vl, vu, il, iu, h_meig, W, lwork);
}
template <>
inline cusolverStatus_t cusolverDnsyevdx_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  const double* A,
  int lda,
  double vl,
  double vu,
  int il,
  int iu,
  int* h_meig,
  const double* W,
  int* lwork)
{
  return cusolverDnDsyevdx_bufferSize(
    handle, jobz, range, uplo, n, A, lda, vl, vu, il, iu, h_meig, W, lwork);
}
template <typename T>
cusolverStatus_t cusolverDnsyevdx(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  T* A,
  int lda,
  T vl,
  T vu,
  int il,
  int iu,
  int* h_meig,
  T* W,
  T* work,
  int lwork,
  int* devInfo,
  cudaStream_t stream);
template <>
inline cusolverStatus_t cusolverDnsyevdx(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  float* A,
  int lda,
  float vl,
  float vu,
  int il,
  int iu,
  int* h_meig,
  float* W,
  float* work,
  int lwork,
  int* devInfo,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSsyevdx(
    handle, jobz, range, uplo, n, A, lda, vl, vu, il, iu, h_meig, W, work, lwork, devInfo);
}
template <>
inline cusolverStatus_t cusolverDnsyevdx(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  cusolverEigRange_t range,
  cublasFillMode_t uplo,
  int n,
  double* A,
  int lda,
  double vl,
  double vu,
  int il,
  int iu,
  int* h_meig,
  double* W,
  double* work,
  int lwork,
  int* devInfo,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDsyevdx(
    handle, jobz, range, uplo, n, A, lda, vl, vu, il, iu, h_meig, W, work, lwork, devInfo);
}
/** @} */
/**
* @defgroup svd cusolver svd operations
* @{
*/
// Workspace-size query for gesvd, dispatching on T at runtime.
// NOTE(review): any T other than float silently takes the double path —
// confirm that only float/double instantiations exist.
template <typename T>
cusolverStatus_t cusolverDngesvd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  int m,
  int n,
  int* lwork)
{
  if (std::is_same<std::decay_t<T>, float>::value) {
    return cusolverDnSgesvd_bufferSize(handle, m, n, lwork);
  } else {
    return cusolverDnDgesvd_bufferSize(handle, m, n, lwork);
  }
}
// Full SVD wrappers around cusolverDn<S|D>gesvd; the stream is bound to the
// handle before dispatching. `work` must be sized via
// cusolverDngesvd_bufferSize above.
template <typename T>
cusolverStatus_t cusolverDngesvd(  // NOLINT
  cusolverDnHandle_t handle,
  signed char jobu,
  signed char jobvt,
  int m,
  int n,
  T* A,
  int lda,
  T* S,
  T* U,
  int ldu,
  T* VT,
  int ldvt,
  T* work,
  int lwork,
  T* rwork,
  int* devInfo,
  cudaStream_t stream);
template <>
inline cusolverStatus_t cusolverDngesvd(  // NOLINT
  cusolverDnHandle_t handle,
  signed char jobu,
  signed char jobvt,
  int m,
  int n,
  float* A,
  int lda,
  float* S,
  float* U,
  int ldu,
  float* VT,
  int ldvt,
  float* work,
  int lwork,
  float* rwork,
  int* devInfo,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSgesvd(
    handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo);
}
template <>
inline cusolverStatus_t cusolverDngesvd(  // NOLINT
  cusolverDnHandle_t handle,
  signed char jobu,
  signed char jobvt,
  int m,
  int n,
  double* A,
  int lda,
  double* S,
  double* U,
  int ldu,
  double* VT,
  int ldvt,
  double* work,
  int lwork,
  double* rwork,
  int* devInfo,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDgesvd(
    handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo);
}
// Jacobi-method SVD (gesvdj) wrappers: workspace-size queries first, then the
// stream-binding solver wrappers.
template <typename T>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  const T* A,
  int lda,
  const T* S,
  const T* U,
  int ldu,
  const T* V,
  int ldv,
  int* lwork,
  gesvdjInfo_t params);
template <>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  const float* A,
  int lda,
  const float* S,
  const float* U,
  int ldu,
  const float* V,
  int ldv,
  int* lwork,
  gesvdjInfo_t params)
{
  return cusolverDnSgesvdj_bufferSize(
    handle, jobz, econ, m, n, A, lda, S, U, ldu, V, ldv, lwork, params);
}
template <>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  const double* A,
  int lda,
  const double* S,
  const double* U,
  int ldu,
  const double* V,
  int ldv,
  int* lwork,
  gesvdjInfo_t params)
{
  return cusolverDnDgesvdj_bufferSize(
    handle, jobz, econ, m, n, A, lda, S, U, ldu, V, ldv, lwork, params);
}
template <typename T>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  T* A,
  int lda,
  T* S,
  T* U,
  int ldu,
  T* V,
  int ldv,
  T* work,
  int lwork,
  int* info,
  gesvdjInfo_t params,
  cudaStream_t stream);
template <>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  float* A,
  int lda,
  float* S,
  float* U,
  int ldu,
  float* V,
  int ldv,
  float* work,
  int lwork,
  int* info,
  gesvdjInfo_t params,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSgesvdj(
    handle, jobz, econ, m, n, A, lda, S, U, ldu, V, ldv, work, lwork, info, params);
}
template <>
inline cusolverStatus_t CUSOLVERAPI cusolverDngesvdj(  // NOLINT
  cusolverDnHandle_t handle,
  cusolverEigMode_t jobz,
  int econ,
  int m,
  int n,
  double* A,
  int lda,
  double* S,
  double* U,
  int ldu,
  double* V,
  int ldv,
  double* work,
  int lwork,
  int* info,
  gesvdjInfo_t params,
  cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDgesvdj(
    handle, jobz, econ, m, n, A, lda, S, U, ldu, V, ldv, work, lwork, info, params);
}
#if CUDART_VERSION >= 11010
// Workspace-size query for the randomized SVD (cusolverDnXgesvdr,
// CUDA >= 11.1). Rejects non-floating-point T, binds the stream, creates a
// temporary cusolverDnParams_t for the call and destroys it before returning
// the query's status.
template <typename T>
cusolverStatus_t cusolverDnxgesvdr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle,
  signed char jobu,
  signed char jobv,
  int64_t m,
  int64_t n,
  int64_t k,
  int64_t p,
  int64_t niters,
  const T* a,
  int64_t lda,
  const T* Srand,
  const T* Urand,
  int64_t ldUrand,
  const T* Vrand,
  int64_t ldVrand,
  size_t* workspaceInBytesOnDevice,
  size_t* workspaceInBytesOnHost,
  cudaStream_t stream)
{
  RAFT_EXPECTS(std::is_floating_point_v<T>, "Unsupported data type");
  // Only float/double are supported; everything else was rejected above.
  cudaDataType dataType = std::is_same_v<T, float> ? CUDA_R_32F : CUDA_R_64F;
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  cusolverDnParams_t dn_params = nullptr;
  RAFT_CUSOLVER_TRY(cusolverDnCreateParams(&dn_params));
  auto result = cusolverDnXgesvdr_bufferSize(handle,
                                             dn_params,
                                             jobu,
                                             jobv,
                                             m,
                                             n,
                                             k,
                                             p,
                                             niters,
                                             dataType,
                                             a,
                                             lda,
                                             dataType,
                                             Srand,
                                             dataType,
                                             Urand,
                                             ldUrand,
                                             dataType,
                                             Vrand,
                                             ldVrand,
                                             dataType,
                                             workspaceInBytesOnDevice,
                                             workspaceInBytesOnHost);
  RAFT_CUSOLVER_TRY(cusolverDnDestroyParams(dn_params));
  return result;
}
// Randomized SVD via cusolverDnXgesvdr (CUDA >= 11.1). Binds `stream` to the
// handle, creates a temporary cusolverDnParams_t for the call and destroys it
// before returning the call's status. Workspaces must be sized with
// cusolverDnxgesvdr_bufferSize above.
template <typename T>
cusolverStatus_t cusolverDnxgesvdr(  // NOLINT
  cusolverDnHandle_t handle,
  signed char jobu,
  signed char jobv,
  int64_t m,
  int64_t n,
  int64_t k,
  int64_t p,
  int64_t niters,
  T* a,
  int64_t lda,
  T* Srand,
  T* Urand,
  int64_t ldUrand,
  T* Vrand,
  int64_t ldVrand,
  void* bufferOnDevice,
  size_t workspaceInBytesOnDevice,
  void* bufferOnHost,
  size_t workspaceInBytesOnHost,
  int* d_info,
  cudaStream_t stream)
{
  // Same precondition as cusolverDnxgesvdr_bufferSize: only float/double map
  // to a cudaDataType below; reject anything else instead of silently
  // treating it as CUDA_R_64F.
  RAFT_EXPECTS(std::is_floating_point_v<T>, "Unsupported data type");
  cudaDataType dataType = std::is_same_v<T, float> ? CUDA_R_32F : CUDA_R_64F;
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  cusolverDnParams_t dn_params = nullptr;
  RAFT_CUSOLVER_TRY(cusolverDnCreateParams(&dn_params));
  auto result = cusolverDnXgesvdr(handle,
                                  dn_params,
                                  jobu,
                                  jobv,
                                  m,
                                  n,
                                  k,
                                  p,
                                  niters,
                                  dataType,
                                  a,
                                  lda,
                                  dataType,
                                  Srand,
                                  dataType,
                                  Urand,
                                  ldUrand,
                                  dataType,
                                  Vrand,
                                  ldVrand,
                                  dataType,
                                  bufferOnDevice,
                                  workspaceInBytesOnDevice,
                                  bufferOnHost,
                                  workspaceInBytesOnHost,
                                  d_info);
  RAFT_CUSOLVER_TRY(cusolverDnDestroyParams(dn_params));
  return result;
}
#endif // CUDART_VERSION >= 11010
/** @} */
/**
* @defgroup potrf cusolver potrf operations
* @{
*/
template <typename T>
cusolverStatus_t cusolverDnpotrf_bufferSize( // NOLINT
cusolverDnHandle_t handle,
cublasFillMode_t uplo,
int n,
T* A,
int lda,
int* Lwork);
template <>
inline cusolverStatus_t cusolverDnpotrf_bufferSize( // NOLINT
cusolverDnHandle_t handle,
cublasFillMode_t uplo,
int n,
float* A,
int lda,
int* Lwork)
{
return cusolverDnSpotrf_bufferSize(handle, uplo, n, A, lda, Lwork);
}
template <>
inline cusolverStatus_t cusolverDnpotrf_bufferSize( // NOLINT
cusolverDnHandle_t handle,
cublasFillMode_t uplo,
int n,
double* A,
int lda,
int* Lwork)
{
return cusolverDnDpotrf_bufferSize(handle, uplo, n, A, lda, Lwork);
}
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>potrf (Cholesky
 * factorization). Binds `stream` to the cusolver handle before dispatching.
 */
template <typename T>
inline cusolverStatus_t cusolverDnpotrf(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, T* A, int lda, T* Workspace, int Lwork, int* devInfo,
  cudaStream_t stream);
// float: set stream, then dispatch to cusolverDnSpotrf
template <>
inline cusolverStatus_t cusolverDnpotrf(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, float* A, int lda, float* Workspace, int Lwork,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSpotrf(handle, uplo, n, A, lda, Workspace, Lwork, devInfo);
}
// double: set stream, then dispatch to cusolverDnDpotrf
template <>
inline cusolverStatus_t cusolverDnpotrf(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, double* A, int lda, double* Workspace, int Lwork,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDpotrf(handle, uplo, n, A, lda, Workspace, Lwork, devInfo);
}
/** @} */
/**
* @defgroup potrs cusolver potrs operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>potrs (solve using a
 * prior Cholesky factorization). Binds `stream` to the handle before the call.
 */
template <typename T>
cusolverStatus_t cusolverDnpotrs(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, int nrhs, const T* A, int lda, T* B, int ldb,
  int* devInfo, cudaStream_t stream);
// float: set stream, then dispatch to cusolverDnSpotrs
template <>
inline cusolverStatus_t cusolverDnpotrs(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda, B, ldb, devInfo);
}
// double: set stream, then dispatch to cusolverDnDpotrs
template <>
inline cusolverStatus_t cusolverDnpotrs(cusolverDnHandle_t handle,  // NOLINT
  cublasFillMode_t uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDpotrs(handle, uplo, n, nrhs, A, lda, B, ldb, devInfo);
}
/** @} */
/**
* @defgroup geqrf cusolver geqrf operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>geqrf (QR
 * factorization). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cusolverStatus_t cusolverDngeqrf(cusolverDnHandle_t handle,  // NOLINT
  int m, int n, T* A, int lda, T* TAU, T* Workspace, int Lwork, int* devInfo,
  cudaStream_t stream);
// float: set stream, then dispatch to cusolverDnSgeqrf
template <>
inline cusolverStatus_t cusolverDngeqrf(cusolverDnHandle_t handle,  // NOLINT
  int m, int n, float* A, int lda, float* TAU, float* Workspace, int Lwork,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo);
}
// double: set stream, then dispatch to cusolverDnDgeqrf
template <>
inline cusolverStatus_t cusolverDngeqrf(cusolverDnHandle_t handle,  // NOLINT
  int m, int n, double* A, int lda, double* TAU, double* Workspace, int Lwork,
  int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo);
}
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>geqrf_bufferSize:
 * queries the workspace size required by geqrf.
 */
template <typename T>
cusolverStatus_t cusolverDngeqrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, T* A, int lda, int* Lwork);
// float: dispatches to cusolverDnSgeqrf_bufferSize
template <>
inline cusolverStatus_t cusolverDngeqrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, float* A, int lda, int* Lwork)
{
  return cusolverDnSgeqrf_bufferSize(handle, m, n, A, lda, Lwork);
}
// double: dispatches to cusolverDnDgeqrf_bufferSize
template <>
inline cusolverStatus_t cusolverDngeqrf_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, double* A, int lda, int* Lwork)
{
  return cusolverDnDgeqrf_bufferSize(handle, m, n, A, lda, Lwork);
}
/** @} */
/**
* @defgroup orgqr cusolver orgqr operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>orgqr (form the
 * explicit Q matrix from a geqrf result). Binds `stream` to the handle first.
 */
template <typename T>
cusolverStatus_t cusolverDnorgqr(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, T* A, int lda, const T* tau, T* work,
  int lwork, int* devInfo, cudaStream_t stream);
// float: set stream, then dispatch to cusolverDnSorgqr
template <>
inline cusolverStatus_t cusolverDnorgqr(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, float* A, int lda, const float* tau,
  float* work, int lwork, int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSorgqr(handle, m, n, k, A, lda, tau, work, lwork, devInfo);
}
// double: set stream, then dispatch to cusolverDnDorgqr
template <>
inline cusolverStatus_t cusolverDnorgqr(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, double* A, int lda, const double* tau,
  double* work, int lwork, int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDorgqr(handle, m, n, k, A, lda, tau, work, lwork, devInfo);
}
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>orgqr_bufferSize:
 * queries the workspace size required by orgqr.
 */
template <typename T>
cusolverStatus_t cusolverDnorgqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, const T* A, int lda, const T* TAU,
  int* lwork);
// float: dispatches to cusolverDnSorgqr_bufferSize
template <>
inline cusolverStatus_t cusolverDnorgqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, const float* A, int lda,
  const float* TAU, int* lwork)
{
  return cusolverDnSorgqr_bufferSize(handle, m, n, k, A, lda, TAU, lwork);
}
// double: dispatches to cusolverDnDorgqr_bufferSize
template <>
inline cusolverStatus_t cusolverDnorgqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, int m, int n, int k, const double* A, int lda,
  const double* TAU, int* lwork)
{
  return cusolverDnDorgqr_bufferSize(handle, m, n, k, A, lda, TAU, lwork);
}
/** @} */
/**
* @defgroup ormqr cusolver ormqr operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>ormqr (multiply a
 * matrix by the Q factor from geqrf). Binds `stream` to the handle first.
 */
template <typename T>
cusolverStatus_t cusolverDnormqr(cusolverDnHandle_t handle,  // NOLINT
  cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const T* A,
  int lda, const T* tau, T* C, int ldc, T* work, int lwork, int* devInfo,
  cudaStream_t stream);
// float: set stream, then dispatch to cusolverDnSormqr
template <>
inline cusolverStatus_t cusolverDnormqr(  // NOLINT
  cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, int m,
  int n, int k, const float* A, int lda, const float* tau, float* C, int ldc,
  float* work, int lwork, int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnSormqr(handle, side, trans, m, n, k, A, lda, tau, C, ldc, work, lwork, devInfo);
}
// double: set stream, then dispatch to cusolverDnDormqr
template <>
inline cusolverStatus_t cusolverDnormqr(  // NOLINT
  cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, int m,
  int n, int k, const double* A, int lda, const double* tau, double* C, int ldc,
  double* work, int lwork, int* devInfo, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnDormqr(handle, side, trans, m, n, k, A, lda, tau, C, ldc, work, lwork, devInfo);
}
/**
 * @brief Type-overloaded wrapper around cusolverDn<S|D>ormqr_bufferSize:
 * queries the workspace size required by ormqr.
 */
template <typename T>
cusolverStatus_t cusolverDnormqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, int m,
  int n, int k, const T* A, int lda, const T* tau, const T* C, int ldc, int* lwork);
// float: dispatches to cusolverDnSormqr_bufferSize
template <>
inline cusolverStatus_t cusolverDnormqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, int m,
  int n, int k, const float* A, int lda, const float* tau, const float* C, int ldc,
  int* lwork)
{
  return cusolverDnSormqr_bufferSize(handle, side, trans, m, n, k, A, lda, tau, C, ldc, lwork);
}
// double: dispatches to cusolverDnDormqr_bufferSize
template <>
inline cusolverStatus_t cusolverDnormqr_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, int m,
  int n, int k, const double* A, int lda, const double* tau, const double* C, int ldc,
  int* lwork)
{
  return cusolverDnDormqr_bufferSize(handle, side, trans, m, n, k, A, lda, tau, C, ldc, lwork);
}
/** @} */
/**
* @defgroup csrqrBatched cusolver batched
* @{
*/
/**
 * @brief Type-overloaded wrapper around cusolverSp<S|D>csrqrBufferInfoBatched:
 * queries internal-data and workspace byte sizes for the batched sparse QR solver.
 */
template <typename T>
cusolverStatus_t cusolverSpcsrqrBufferInfoBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const T* csrValA, const int* csrRowPtrA, const int* csrColIndA, int batchSize,
  csrqrInfo_t info, size_t* internalDataInBytes, size_t* workspaceInBytes);
// float: dispatches to cusolverSpScsrqrBufferInfoBatched
template <>
inline cusolverStatus_t cusolverSpcsrqrBufferInfoBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, int batchSize,
  csrqrInfo_t info, size_t* internalDataInBytes, size_t* workspaceInBytes)
{
  return cusolverSpScsrqrBufferInfoBatched(handle, m, n, nnzA, descrA, csrValA,
    csrRowPtrA, csrColIndA, batchSize, info, internalDataInBytes, workspaceInBytes);
}
// double: dispatches to cusolverSpDcsrqrBufferInfoBatched
template <>
inline cusolverStatus_t cusolverSpcsrqrBufferInfoBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, int batchSize,
  csrqrInfo_t info, size_t* internalDataInBytes, size_t* workspaceInBytes)
{
  return cusolverSpDcsrqrBufferInfoBatched(handle, m, n, nnzA, descrA, csrValA,
    csrRowPtrA, csrColIndA, batchSize, info, internalDataInBytes, workspaceInBytes);
}
/**
 * @brief Type-overloaded wrapper around cusolverSp<S|D>csrqrsvBatched (batched
 * sparse QR solve). Binds `stream` to the cusolverSp handle before the call.
 */
template <typename T>
cusolverStatus_t cusolverSpcsrqrsvBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const T* csrValA, const int* csrRowPtrA, const int* csrColIndA, const T* b, T* x,
  int batchSize, csrqrInfo_t info, void* pBuffer, cudaStream_t stream);
// float: set stream, then dispatch to cusolverSpScsrqrsvBatched
template <>
inline cusolverStatus_t cusolverSpcsrqrsvBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* b,
  float* x, int batchSize, csrqrInfo_t info, void* pBuffer, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverSpSetStream(handle, stream));
  return cusolverSpScsrqrsvBatched(
    handle, m, n, nnzA, descrA, csrValA, csrRowPtrA, csrColIndA, b, x, batchSize, info, pBuffer);
}
// double: set stream, then dispatch to cusolverSpDcsrqrsvBatched
template <>
inline cusolverStatus_t cusolverSpcsrqrsvBatched(  // NOLINT
  cusolverSpHandle_t handle, int m, int n, int nnzA, const cusparseMatDescr_t descrA,
  const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* b,
  double* x, int batchSize, csrqrInfo_t info, void* pBuffer, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverSpSetStream(handle, stream));
  return cusolverSpDcsrqrsvBatched(
    handle, m, n, nnzA, descrA, csrValA, csrRowPtrA, csrColIndA, b, x, batchSize, info, pBuffer);
}
/** @} */
#if CUDART_VERSION >= 11010
/**
* @defgroup DnXsyevd cusolver DnXsyevd operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around the 64-bit generic-API
 * cusolverDnXsyevd_bufferSize (workspace query for the symmetric eigensolver),
 * passing the CUDA data type matching T. Binds `stream` to the handle first.
 */
template <typename T>
cusolverStatus_t cusolverDnxsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, const T* A, int64_t lda, const T* W,
  size_t* workspaceInBytesOnDevice, size_t* workspaceInBytesOnHost, cudaStream_t stream);
// float: all value types passed as CUDA_R_32F
template <>
inline cusolverStatus_t cusolverDnxsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, const float* A, int64_t lda, const float* W,
  size_t* workspaceInBytesOnDevice, size_t* workspaceInBytesOnHost, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnXsyevd_bufferSize(handle, params, jobz, uplo, n, CUDA_R_32F, A, lda,
    CUDA_R_32F, W, CUDA_R_32F, workspaceInBytesOnDevice, workspaceInBytesOnHost);
}
// double: all value types passed as CUDA_R_64F
template <>
inline cusolverStatus_t cusolverDnxsyevd_bufferSize(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, const double* A, int64_t lda, const double* W,
  size_t* workspaceInBytesOnDevice, size_t* workspaceInBytesOnHost, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnXsyevd_bufferSize(handle, params, jobz, uplo, n, CUDA_R_64F, A, lda,
    CUDA_R_64F, W, CUDA_R_64F, workspaceInBytesOnDevice, workspaceInBytesOnHost);
}
/**
 * @brief Type-overloaded wrapper around the 64-bit generic-API
 * cusolverDnXsyevd (symmetric eigensolver), passing the CUDA data type
 * matching T. Binds `stream` to the handle before dispatching.
 */
template <typename T>
cusolverStatus_t cusolverDnxsyevd(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, T* A, int64_t lda, T* W, T* bufferOnDevice,
  size_t workspaceInBytesOnDevice, T* bufferOnHost, size_t workspaceInBytesOnHost,
  int* info, cudaStream_t stream);
// float: all value types passed as CUDA_R_32F
template <>
inline cusolverStatus_t cusolverDnxsyevd(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, float* A, int64_t lda, float* W,
  float* bufferOnDevice, size_t workspaceInBytesOnDevice, float* bufferOnHost,
  size_t workspaceInBytesOnHost, int* info, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnXsyevd(handle, params, jobz, uplo, n, CUDA_R_32F, A, lda, CUDA_R_32F,
    W, CUDA_R_32F, bufferOnDevice, workspaceInBytesOnDevice, bufferOnHost,
    workspaceInBytesOnHost, info);
}
// double: all value types passed as CUDA_R_64F
template <>
inline cusolverStatus_t cusolverDnxsyevd(  // NOLINT
  cusolverDnHandle_t handle, cusolverDnParams_t params, cusolverEigMode_t jobz,
  cublasFillMode_t uplo, int64_t n, double* A, int64_t lda, double* W,
  double* bufferOnDevice, size_t workspaceInBytesOnDevice, double* bufferOnHost,
  size_t workspaceInBytesOnHost, int* info, cudaStream_t stream)
{
  RAFT_CUSOLVER_TRY(cusolverDnSetStream(handle, stream));
  return cusolverDnXsyevd(handle, params, jobz, uplo, n, CUDA_R_64F, A, lda, CUDA_R_64F,
    W, CUDA_R_64F, bufferOnDevice, workspaceInBytesOnDevice, bufferOnHost,
    workspaceInBytesOnHost, info);
}
/** @} */
#endif
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/multiply.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Multiply every element of `in` by `scalar`, writing to `out`.
 *
 * Implemented as a unary transform over the `len`-element input, enqueued on
 * `stream` via raft::linalg::unaryOp.
 */
template <typename math_t, typename IdxType = int>
void multiplyScalar(
  math_t* out, const math_t* in, const math_t scalar, IdxType len, cudaStream_t stream)
{
  // Bind the constant factor into a multiply-by-constant functor up front.
  const auto scale_by_constant = raft::mul_const_op<math_t>{scalar};
  raft::linalg::unaryOp(out, in, len, scale_by_constant, stream);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/cublas_wrappers.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/core/cublas_macros.hpp>
#include <raft/core/error.hpp>
#include <cstdint>
#include <cublas_v2.h>
namespace raft {
namespace linalg {
namespace detail {
/**
* Assuming the default CUBLAS_POINTER_MODE_HOST, change it to host or device mode
* temporary for the lifetime of this object.
*/
template <bool DevicePointerMode = false>
class cublas_device_pointer_mode {
 public:
  // On construction: switch the handle to CUBLAS_POINTER_MODE_DEVICE.
  // Compile-time no-op when DevicePointerMode == false.
  explicit cublas_device_pointer_mode(cublasHandle_t handle) : handle_(handle)
  {
    if constexpr (DevicePointerMode) {
      RAFT_CUBLAS_TRY(cublasSetPointerMode(handle_, CUBLAS_POINTER_MODE_DEVICE));
    }
  }
  // Non-assignable and non-heap-allocatable: intended as a scoped guard.
  // NOTE(review): copy/move *constructors* are not deleted; a copy would restore
  // the pointer mode twice on destruction — confirm whether that is intended.
  auto operator=(const cublas_device_pointer_mode&) -> cublas_device_pointer_mode& = delete;
  auto operator=(cublas_device_pointer_mode&&) -> cublas_device_pointer_mode& = delete;
  static auto operator new(std::size_t) -> void* = delete;
  static auto operator new[](std::size_t) -> void* = delete;
  // On destruction: restore the default CUBLAS_POINTER_MODE_HOST using the
  // non-throwing macro (destructors must not throw).
  ~cublas_device_pointer_mode()
  {
    if constexpr (DevicePointerMode) {
      RAFT_CUBLAS_TRY_NO_THROW(cublasSetPointerMode(handle_, CUBLAS_POINTER_MODE_HOST));
    }
  }

 private:
  cublasHandle_t handle_ = nullptr;
};
/**
* @defgroup Axpy cublas ax+y operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>axpy (y = alpha*x + y).
 * Binds `stream` to the cuBLAS handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasaxpy(cublasHandle_t handle, int n, const T* alpha, const T* x,
  int incx, T* y, int incy, cudaStream_t stream);
// float: set stream, then dispatch to cublasSaxpy
template <>
inline cublasStatus_t cublasaxpy(cublasHandle_t handle, int n, const float* alpha,
  const float* x, int incx, float* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSaxpy(handle, n, alpha, x, incx, y, incy);
}
// double: set stream, then dispatch to cublasDaxpy
template <>
inline cublasStatus_t cublasaxpy(cublasHandle_t handle, int n, const double* alpha,
  const double* x, int incx, double* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDaxpy(handle, n, alpha, x, incx, y, incy);
}
/** @} */
/**
* @defgroup cublas swap operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>swap (exchange vectors x
 * and y). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasSwap(
  cublasHandle_t handle, int n, T* x, int incx, T* y, int incy, cudaStream_t stream);
// float: set stream, then dispatch to cublasSswap
template <>
inline cublasStatus_t cublasSwap(
  cublasHandle_t handle, int n, float* x, int incx, float* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSswap(handle, n, x, incx, y, incy);
}
// double: set stream, then dispatch to cublasDswap
template <>
inline cublasStatus_t cublasSwap(
  cublasHandle_t handle, int n, double* x, int incx, double* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDswap(handle, n, x, incx, y, incy);
}
/** @} */
/**
* @defgroup cublas copy operations
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>copy (copy x into y).
 * Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasCopy(
  cublasHandle_t handle, int n, const T* x, int incx, T* y, int incy, cudaStream_t stream);
// float: set stream, then dispatch to cublasScopy
template <>
inline cublasStatus_t cublasCopy(
  cublasHandle_t handle, int n, const float* x, int incx, float* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasScopy(handle, n, x, incx, y, incy);
}
// double: set stream, then dispatch to cublasDcopy
template <>
inline cublasStatus_t cublasCopy(
  cublasHandle_t handle, int n, const double* x, int incx, double* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDcopy(handle, n, x, incx, y, incy);
}
/** @} */
/**
* @defgroup gemv cublas gemv calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>gemv (matrix-vector
 * multiply; column-major). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t transA, int m, int n,
  const T* alfa, const T* A, int lda, const T* x, int incx, const T* beta, T* y,
  int incy, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgemv
template <>
inline cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t transA, int m,
  int n, const float* alfa, const float* A, int lda, const float* x, int incx,
  const float* beta, float* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgemv(handle, transA, m, n, alfa, A, lda, x, incx, beta, y, incy);
}
// double: set stream, then dispatch to cublasDgemv
template <>
inline cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t transA, int m,
  int n, const double* alfa, const double* A, int lda, const double* x, int incx,
  const double* beta, double* y, int incy, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgemv(handle, transA, m, n, alfa, A, lda, x, incx, beta, y, incy);
}
/** @} */
/**
* @defgroup ger cublas a(x*y.T) + A calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>ger (rank-1 update
 * A += alpha * x * y^T). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasger(cublasHandle_t handle, int m, int n, const T* alpha,
  const T* x, int incx, const T* y, int incy, T* A, int lda, cudaStream_t stream);
// float: set stream, then dispatch to cublasSger
template <>
inline cublasStatus_t cublasger(cublasHandle_t handle, int m, int n, const float* alpha,
  const float* x, int incx, const float* y, int incy, float* A, int lda,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda);
}
// double: set stream, then dispatch to cublasDger
template <>
inline cublasStatus_t cublasger(cublasHandle_t handle, int m, int n, const double* alpha,
  const double* x, int incx, const double* y, int incy, double* A, int lda,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda);
}
/** @} */
/**
* @defgroup gemm cublas gemm calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>gemm (general
 * matrix-matrix multiply; column-major). Binds `stream` to the handle first.
 */
template <typename T>
cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, int k, const T* alfa, const T* A, int lda,
  const T* B, int ldb, const T* beta, T* C, int ldc, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgemm
template <>
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, int k, const float* alfa, const float* A,
  int lda, const float* B, int ldb, const float* beta, float* C, int ldc,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgemm(handle, transA, transB, m, n, k, alfa, A, lda, B, ldb, beta, C, ldc);
}
// double: set stream, then dispatch to cublasDgemm
template <>
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, int k, const double* alfa, const double* A,
  int lda, const double* B, int ldb, const double* beta, double* C, int ldc,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgemm(handle, transA, transB, m, n, k, alfa, A, lda, B, ldb, beta, C, ldc);
}
/** @} */
/**
* @defgroup gemmbatched cublas gemmbatched calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>gemmBatched (batched GEMM
 * over arrays of matrix pointers). Binds `stream` to the handle first.
 */
template <typename T>
cublasStatus_t cublasgemmBatched(cublasHandle_t handle,  // NOLINT
  cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k,
  const T* alpha,
  const T* const Aarray[],  // NOLINT
  int lda,
  const T* const Barray[],  // NOLINT
  int ldb, const T* beta,
  T* Carray[],  // NOLINT
  int ldc, int batchCount, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgemmBatched
template <>
inline cublasStatus_t cublasgemmBatched(  // NOLINT
  cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m,
  int n, int k, const float* alpha,
  const float* const Aarray[],  // NOLINT
  int lda,
  const float* const Barray[],  // NOLINT
  int ldb, const float* beta,
  float* Carray[],  // NOLINT
  int ldc, int batchCount, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgemmBatched(handle, transa, transb, m, n, k, alpha, Aarray, lda,
    Barray, ldb, beta, Carray, ldc, batchCount);
}
// double: set stream, then dispatch to cublasDgemmBatched
template <>
inline cublasStatus_t cublasgemmBatched(  // NOLINT
  cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m,
  int n, int k, const double* alpha,
  const double* const Aarray[],  // NOLINT
  int lda,
  const double* const Barray[],  // NOLINT
  int ldb, const double* beta,
  double* Carray[],  // NOLINT
  int ldc, int batchCount, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgemmBatched(handle, transa, transb, m, n, k, alpha, Aarray, lda,
    Barray, ldb, beta, Carray, ldc, batchCount);
}
/** @} */
/**
* @defgroup gemmbatched cublas gemmbatched calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>gemmStridedBatched (batched
 * GEMM over contiguous matrices separated by fixed element strides).
 * Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasgemmStridedBatched(  // NOLINT
  cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m,
  int n, int k, const T* alpha, const T* const Aarray, int lda, int64_t strideA,
  const T* const Barray, int ldb, int64_t strideB, const T* beta, T* Carray, int ldc,
  int64_t strideC, int batchCount, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgemmStridedBatched
template <>
inline cublasStatus_t cublasgemmStridedBatched(  // NOLINT
  cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m,
  int n, int k, const float* alpha, const float* const Aarray, int lda,
  int64_t strideA, const float* const Barray, int ldb, int64_t strideB,
  const float* beta, float* Carray, int ldc, int64_t strideC, int batchCount,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgemmStridedBatched(handle, transa, transb, m, n, k, alpha, Aarray, lda,
    strideA, Barray, ldb, strideB, beta, Carray, ldc, strideC, batchCount);
}
// double: set stream, then dispatch to cublasDgemmStridedBatched
template <>
inline cublasStatus_t cublasgemmStridedBatched(  // NOLINT
  cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m,
  int n, int k, const double* alpha, const double* const Aarray, int lda,
  int64_t strideA, const double* const Barray, int ldb, int64_t strideB,
  const double* beta, double* Carray, int ldc, int64_t strideC, int batchCount,
  cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgemmStridedBatched(handle, transa, transb, m, n, k, alpha, Aarray, lda,
    strideA, Barray, ldb, strideB, beta, Carray, ldc, strideC, batchCount);
}
/** @} */
/**
* @defgroup solverbatched cublas getrf/gettribatched calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>getrfBatched (batched LU
 * factorization with pivoting written to P, statuses to info).
 * Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasgetrfBatched(cublasHandle_t handle,  // NOLINT
  int n,
  T* const A[],  // NOLINT
  int lda, int* P, int* info, int batchSize, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgetrfBatched
template <>
inline cublasStatus_t cublasgetrfBatched(cublasHandle_t handle,  // NOLINT
  int n,
  float* const A[],  // NOLINT
  int lda, int* P, int* info, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgetrfBatched(handle, n, A, lda, P, info, batchSize);
}
// double: set stream, then dispatch to cublasDgetrfBatched
template <>
inline cublasStatus_t cublasgetrfBatched(cublasHandle_t handle,  // NOLINT
  int n,
  double* const A[],  // NOLINT
  int lda, int* P, int* info, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgetrfBatched(handle, n, A, lda, P, info, batchSize);
}
/**
 * @brief Type-overloaded wrapper around cublas<S|D>getriBatched (batched
 * matrix inversion from getrfBatched output; result written to C).
 * Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasgetriBatched(cublasHandle_t handle,  // NOLINT
  int n,
  const T* const A[],  // NOLINT
  int lda, const int* P,
  T* const C[],  // NOLINT
  int ldc, int* info, int batchSize, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgetriBatched
template <>
inline cublasStatus_t cublasgetriBatched(  // NOLINT
  cublasHandle_t handle, int n,
  const float* const A[],  // NOLINT
  int lda, const int* P,
  float* const C[],  // NOLINT
  int ldc, int* info, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgetriBatched(handle, n, A, lda, P, C, ldc, info, batchSize);
}
// double: set stream, then dispatch to cublasDgetriBatched
template <>
inline cublasStatus_t cublasgetriBatched(  // NOLINT
  cublasHandle_t handle, int n,
  const double* const A[],  // NOLINT
  int lda, const int* P,
  double* const C[],  // NOLINT
  int ldc, int* info, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgetriBatched(handle, n, A, lda, P, C, ldc, info, batchSize);
}
/** @} */
/**
* @defgroup gelsbatched cublas gelsbatched calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>gelsBatched (batched
 * least-squares solve). Binds `stream` to the handle before dispatching.
 *
 * The primary template is declaration-only (each supported type has an
 * explicit specialization below), so it intentionally carries no `inline`,
 * matching the other wrapper declarations in this file.
 */
template <typename T>
cublasStatus_t cublasgelsBatched(cublasHandle_t handle,  // NOLINT
  cublasOperation_t trans, int m, int n, int nrhs,
  T* Aarray[],  // NOLINT
  int lda,
  T* Carray[],  // NOLINT
  int ldc, int* info, int* devInfoArray, int batchSize, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgelsBatched
template <>
inline cublasStatus_t cublasgelsBatched(cublasHandle_t handle,  // NOLINT
  cublasOperation_t trans, int m, int n, int nrhs,
  float* Aarray[],  // NOLINT
  int lda,
  float* Carray[],  // NOLINT
  int ldc, int* info, int* devInfoArray, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgelsBatched(
    handle, trans, m, n, nrhs, Aarray, lda, Carray, ldc, info, devInfoArray, batchSize);
}
// double: set stream, then dispatch to cublasDgelsBatched
template <>
inline cublasStatus_t cublasgelsBatched(cublasHandle_t handle,  // NOLINT
  cublasOperation_t trans, int m, int n, int nrhs,
  double* Aarray[],  // NOLINT
  int lda,
  double* Carray[],  // NOLINT
  int ldc, int* info, int* devInfoArray, int batchSize, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgelsBatched(
    handle, trans, m, n, nrhs, Aarray, lda, Carray, ldc, info, devInfoArray, batchSize);
}
/** @} */
/**
* @defgroup geam cublas geam calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>geam (matrix-matrix
 * addition/transposition: C = alfa*op(A) + beta*op(B)). Binds `stream` first.
 */
template <typename T>
cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, const T* alfa, const T* A, int lda,
  const T* beta, const T* B, int ldb, T* C, int ldc, cudaStream_t stream);
// float: set stream, then dispatch to cublasSgeam
template <>
inline cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, const float* alfa, const float* A, int lda,
  const float* beta, const float* B, int ldb, float* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSgeam(handle, transA, transB, m, n, alfa, A, lda, beta, B, ldb, C, ldc);
}
// double: set stream, then dispatch to cublasDgeam
template <>
inline cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transA,
  cublasOperation_t transB, int m, int n, const double* alfa, const double* A, int lda,
  const double* beta, const double* B, int ldb, double* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDgeam(handle, transA, transB, m, n, alfa, A, lda, beta, B, ldb, C, ldc);
}
/** @} */
/**
* @defgroup symm cublas symm calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>symm (symmetric
 * matrix-matrix multiply). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublassymm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, int m, int n, const T* alpha, const T* A, int lda, const T* B,
  int ldb, const T* beta, T* C, int ldc, cudaStream_t stream);
// float: set stream, then dispatch to cublasSsymm
template <>
inline cublasStatus_t cublassymm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, int m, int n, const float* alpha, const float* A, int lda,
  const float* B, int ldb, const float* beta, float* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc);
}
// double: set stream, then dispatch to cublasDsymm
template <>
inline cublasStatus_t cublassymm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, int m, int n, const double* alpha, const double* A, int lda,
  const double* B, int ldb, const double* beta, double* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc);
}
/** @} */
/**
* @defgroup syrk cublas syrk calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>syrk (symmetric rank-k
 * update). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublassyrk(cublasHandle_t handle, cublasFillMode_t uplo,
  cublasOperation_t trans, int n, int k, const T* alpha, const T* A, int lda,
  const T* beta, T* C, int ldc, cudaStream_t stream);
// float: set stream, then dispatch to cublasSsyrk
template <>
inline cublasStatus_t cublassyrk(cublasHandle_t handle, cublasFillMode_t uplo,
  cublasOperation_t trans, int n, int k, const float* alpha, const float* A, int lda,
  const float* beta, float* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}
// double: set stream, then dispatch to cublasDsyrk
template <>
inline cublasStatus_t cublassyrk(cublasHandle_t handle, cublasFillMode_t uplo,
  cublasOperation_t trans, int n, int k, const double* alpha, const double* A, int lda,
  const double* beta, double* C, int ldc, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}
/** @} */
/**
* @defgroup nrm2 cublas nrm2 calls
* @{
*/
/**
 * @brief Type-overloaded wrapper around cublas<S|D>nrm2 (Euclidean norm of x,
 * written to `result`). Binds `stream` to the handle before dispatching.
 */
template <typename T>
cublasStatus_t cublasnrm2(
  cublasHandle_t handle, int n, const T* x, int incx, T* result, cudaStream_t stream);
// float: set stream, then dispatch to cublasSnrm2
template <>
inline cublasStatus_t cublasnrm2(
  cublasHandle_t handle, int n, const float* x, int incx, float* result, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSnrm2(handle, n, x, incx, result);
}
// double: set stream, then dispatch to cublasDnrm2
template <>
inline cublasStatus_t cublasnrm2(
  cublasHandle_t handle, int n, const double* x, int incx, double* result, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDnrm2(handle, n, x, incx, result);
}
/** @} */
/**
 * @brief Type-overloaded wrapper around cublas<S|D>trsm (triangular solve with
 * multiple right-hand sides, solved in place in B). Binds `stream` first.
 */
template <typename T>
cublasStatus_t cublastrsm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int m, int n,
  const T* alpha, const T* A, int lda, T* B, int ldb, cudaStream_t stream);
// float: set stream, then dispatch to cublasStrsm
template <>
inline cublasStatus_t cublastrsm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int m, int n,
  const float* alpha, const float* A, int lda, float* B, int ldb, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
// double: set stream, then dispatch to cublasDtrsm
template <>
inline cublasStatus_t cublastrsm(cublasHandle_t handle, cublasSideMode_t side,
  cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int m, int n,
  const double* alpha, const double* A, int lda, double* B, int ldb, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
/**
* @defgroup dot cublas dot calls
* @{
*/
// Type-dispatched wrapper over the cuBLAS dot product. Both specializations go
// through cublasDotEx with the data type also used as the compute/result type
// (CUDA_R_32F for float, CUDA_R_64F for double), so no mixed-precision
// accumulation is requested. Binds `stream` to the handle before the call.
template <typename T>
cublasStatus_t cublasdot(cublasHandle_t handle,
                         int n,
                         const T* x,
                         int incx,
                         const T* y,
                         int incy,
                         T* result,
                         cudaStream_t stream);
// float specialization: all four type slots of cublasDotEx set to CUDA_R_32F
template <>
inline cublasStatus_t cublasdot(cublasHandle_t handle,
                                int n,
                                const float* x,
                                int incx,
                                const float* y,
                                int incy,
                                float* result,
                                cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDotEx(
    handle, n, x, CUDA_R_32F, incx, y, CUDA_R_32F, incy, result, CUDA_R_32F, CUDA_R_32F);
}
// double specialization: all four type slots of cublasDotEx set to CUDA_R_64F
template <>
inline cublasStatus_t cublasdot(cublasHandle_t handle,
                                int n,
                                const double* x,
                                int incx,
                                const double* y,
                                int incy,
                                double* result,
                                cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDotEx(
    handle, n, x, CUDA_R_64F, incx, y, CUDA_R_64F, incy, result, CUDA_R_64F, CUDA_R_64F);
}
/** @} */
/**
* @defgroup setpointermode cublas set pointer mode method
* @{
*/
// no T dependency...
// template <typename T>
// cublasStatus_t cublassetpointermode( // NOLINT
// cublasHandle_t handle,
// cublasPointerMode_t mode,
// cudaStream_t stream);
// template<>
// Sets the handle's pointer mode (host vs. device scalars for alpha/beta/result)
// after binding `stream` to the handle. Not templated: the operation has no
// data-type dependency (see the commented-out template sketch above).
inline cublasStatus_t cublassetpointermode(cublasHandle_t handle,
                                           cublasPointerMode_t mode,
                                           cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSetPointerMode(handle, mode);
}
/** @} */
/**
* @defgroup scal cublas dot calls
* @{
*/
// Type-dispatched wrapper over cuBLAS *scal: x = alpha * x (strided).
// Binds `stream` to the handle before the call; `alpha` may be a host or device
// pointer depending on the handle's current pointer mode.
template <typename T>
cublasStatus_t cublasscal(
  cublasHandle_t handle, int n, const T* alpha, T* x, int incx, cudaStream_t stream);
// float specialization -> cublasSscal
template <>
inline cublasStatus_t cublasscal(
  cublasHandle_t handle, int n, const float* alpha, float* x, int incx, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasSscal(handle, n, alpha, x, incx);
}
// double specialization -> cublasDscal
template <>
inline cublasStatus_t cublasscal(
  cublasHandle_t handle, int n, const double* alpha, double* x, int incx, cudaStream_t stream)
{
  RAFT_CUBLAS_TRY(cublasSetStream(handle, stream));
  return cublasDscal(handle, n, alpha, x, incx);
}
/** @} */
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/gemv.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/core/resource/cublas_handle.hpp>
#include "cublas_wrappers.hpp"
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief y = alpha * op(A) * x + beta * y (cuBLAS GEMV), primary overload.
 *
 * All other gemv overloads in this namespace should funnel into this one.
 *
 * @tparam math_t element type (float or double, per cublasgemv specializations)
 * @tparam DevicePointerMode when true, `alpha`/`beta` are device pointers; the
 *         RAII guard below switches the handle's pointer mode for the call and
 *         restores it on scope exit
 * @param handle  raft resources holding the cuBLAS handle
 * @param trans_a true -> CUBLAS_OP_T, false -> CUBLAS_OP_N
 * @param m       rows of A (cuBLAS column-major convention)
 * @param n       cols of A
 * @param alpha   scalar multiplier for op(A) * x
 * @param A       column-major m x n matrix, leading dimension `lda`
 * @param x       input vector with stride `incx`
 * @param beta    scalar multiplier for the prior contents of y
 * @param y       output vector with stride `incy`
 * @param stream  CUDA stream bound to the cuBLAS handle for this call
 */
template <typename math_t, bool DevicePointerMode = false>
void gemv(raft::resources const& handle,
          const bool trans_a,
          const int m,
          const int n,
          const math_t* alpha,
          const math_t* A,
          const int lda,
          const math_t* x,
          const int incx,
          const math_t* beta,
          math_t* y,
          const int incy,
          cudaStream_t stream)
{
  cublasHandle_t cublas_h = resource::get_cublas_handle(handle);
  // Scoped pointer-mode guard: lives until the end of this function so the
  // cublasgemv call below sees the requested host/device scalar mode.
  detail::cublas_device_pointer_mode<DevicePointerMode> pmode(cublas_h);
  RAFT_CUBLAS_TRY(detail::cublasgemv(cublas_h,
                                     trans_a ? CUBLAS_OP_T : CUBLAS_OP_N,
                                     m,
                                     n,
                                     alpha,
                                     A,
                                     lda,
                                     x,
                                     incx,
                                     beta,
                                     y,
                                     incy,
                                     stream));
}
/**
 * @brief y = alpha * op(A) * x + beta * y with host-side scalars, assuming a
 *        tightly packed column-major A (leading dimension == n_rows).
 *
 * Forwards to the primary overload with lda = n_rows.
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows,
          const int n_cols,
          const math_t* x,
          const int incx,
          math_t* y,
          const int incy,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  gemv(handle, trans_a, n_rows, n_cols, &alpha, A, n_rows, x, incx, &beta, y, incy, stream);
}
/**
 * @brief y = alpha * op(A) * x + beta * y with unit strides (incx = incy = 1)
 *        and a tightly packed column-major A.
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  gemv(handle, A, n_rows_a, n_cols_a, x, 1, y, 1, trans_a, alpha, beta, stream);
}
/**
 * @brief y = op(A) * x: unit strides, alpha fixed to 1 and beta fixed to 0.
 *
 * Convenience overload for the plain matrix-vector product; forwards to the
 * alpha/beta overload with the identity scalars.
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          cudaStream_t stream)
{
  gemv(handle, A, n_rows_a, n_cols_a, x, 1, y, 1, trans_a, math_t(1), math_t(0), stream);
}
/**
 * @brief y = alpha * op(A) * x + beta * y with an explicit leading dimension.
 *
 * Previously this overload issued its own raw cublasgemv call with host-side
 * &alpha/&beta and no pointer-mode guard; it now delegates to the primary
 * overload so the cuBLAS call site (and its host-pointer-mode RAII guard)
 * exists in exactly one place.
 *
 * @param lda leading dimension of the column-major matrix A (>= n_rows_a)
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const int lda,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          const math_t alpha,
          const math_t beta,
          cudaStream_t stream)
{
  gemv(handle, trans_a, n_rows_a, n_cols_a, &alpha, A, lda, x, 1, &beta, y, 1, stream);
}
/**
 * @brief y = op(A) * x with an explicit leading dimension; alpha fixed to 1,
 *        beta fixed to 0.
 */
template <typename math_t>
void gemv(raft::resources const& handle,
          const math_t* A,
          const int n_rows_a,
          const int n_cols_a,
          const int lda,
          const math_t* x,
          math_t* y,
          const bool trans_a,
          cudaStream_t stream)
{
  gemv(handle, A, n_rows_a, n_cols_a, lda, x, y, trans_a, math_t(1), math_t(0), stream);
}
}; // namespace detail
}; // namespace linalg
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/svd.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cublas_wrappers.hpp"
#include "cusolver_wrappers.hpp"
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/common/nvtx.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/norm.cuh>
#include <raft/matrix/reverse.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Singular value decomposition via cuSOLVER's QR-based gesvd.
 *
 * @param in              column-major n_rows x n_cols input; NOTE: gesvd
 *                        overwrites its input buffer
 * @param sing_vals       output singular values, descending (size min(m, n))
 * @param left_sing_vecs  output U (m x min(m,n)); only written if gen_left_vec
 * @param right_sing_vecs output V^T (gesvd convention); only written if
 *                        gen_right_vec
 * @param trans_right     if true, transpose right_sing_vecs in place after the
 *                        solve so callers receive V rather than V^T
 * @param gen_left_vec    compute left singular vectors?
 * @param gen_right_vec   compute right singular vectors?
 */
template <typename T>
void svdQR(raft::resources const& handle,
           T* in,
           int n_rows,
           int n_cols,
           T* sing_vals,
           T* left_sing_vecs,
           T* right_sing_vecs,
           bool trans_right,
           bool gen_left_vec,
           bool gen_right_vec,
           cudaStream_t stream)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "raft::linalg::svdQR(%d, %d)", n_rows, n_cols);
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
  cublasHandle_t cublasH       = resource::get_cublas_handle(handle);
  const int m = n_rows;
  const int n = n_cols;
  // devInfo receives gesvd's convergence/status code (checked below).
  rmm::device_scalar<int> devInfo(stream);
  // No separate real workspace requested (d_rwork = nullptr is allowed).
  T* d_rwork = nullptr;
  int lwork = 0;
  RAFT_CUSOLVER_TRY(cusolverDngesvd_bufferSize<T>(cusolverH, n_rows, n_cols, &lwork));
  rmm::device_uvector<T> d_work(lwork, stream);
  // 'S': economy-size U; 'A': full V^T; 'N' disables computation of that factor.
  char jobu  = 'S';
  char jobvt = 'A';
  if (!gen_left_vec) { jobu = 'N'; }
  if (!gen_right_vec) { jobvt = 'N'; }
  RAFT_CUSOLVER_TRY(cusolverDngesvd(cusolverH,
                                    jobu,
                                    jobvt,
                                    m,
                                    n,
                                    in,
                                    m,
                                    sing_vals,
                                    left_sing_vecs,
                                    m,
                                    right_sing_vecs,
                                    n,
                                    d_work.data(),
                                    lwork,
                                    d_rwork,
                                    devInfo.data(),
                                    stream));
  // Transpose the right singular vector back
  if (trans_right && right_sing_vecs != nullptr)
    raft::linalg::transpose(right_sing_vecs, n_cols, stream);
  RAFT_CUDA_TRY(cudaGetLastError());
  // Pull devInfo back to the host and fail loudly on non-convergence.
  int dev_info;
  raft::update_host(&dev_info, devInfo.data(), 1, stream);
  resource::sync_stream(handle, stream);
  ASSERT(dev_info == 0,
         "svd.cuh: svd couldn't converge to a solution. "
         "This usually occurs when some of the features do not vary enough.");
}
/**
 * @brief SVD via eigendecomposition of the Gram matrix in^T * in.
 *
 * Computes right singular vectors V and singular values S from the symmetric
 * eigenproblem of the n_cols x n_cols Gram matrix; optionally reconstructs
 * U = in * V * diag(S)^-1. Cheaper than gesvd when n_cols << n_rows, but
 * squares the condition number of `in`.
 *
 * Changes vs. the previous revision: the cusolver/cublas handles were fetched
 * but never used (dead locals, removed), and the nvtx format arguments are now
 * cast to int to match the "%d" specifiers for any idx_t width.
 *
 * @param in           column-major n_rows x n_cols input (not modified)
 * @param S            output singular values, descending (size n_cols)
 * @param U            output left singular vectors (n_rows x n_cols); only
 *                     written when gen_left_vec is true
 * @param V            output right singular vectors (n_cols x n_cols)
 * @param gen_left_vec compute U?
 */
template <typename math_t, typename idx_t>
void svdEig(raft::resources const& handle,
            math_t* in,
            idx_t n_rows,
            idx_t n_cols,
            math_t* S,
            math_t* U,
            math_t* V,
            bool gen_left_vec,
            cudaStream_t stream)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "raft::linalg::svdEig(%d, %d)", static_cast<int>(n_rows), static_cast<int>(n_cols));
  // Gram matrix: in^T * in, n_cols x n_cols.
  auto len = n_cols * n_cols;
  rmm::device_uvector<math_t> in_cross_mult(len, stream);
  math_t alpha = math_t(1);
  math_t beta  = math_t(0);
  raft::linalg::gemm(handle,
                     in,
                     n_rows,
                     n_cols,
                     in,
                     in_cross_mult.data(),
                     n_cols,
                     n_cols,
                     CUBLAS_OP_T,
                     CUBLAS_OP_N,
                     alpha,
                     beta,
                     stream);
  // Eigenvectors of the Gram matrix are the right singular vectors of `in`.
  raft::linalg::eigDC(handle, in_cross_mult.data(), n_cols, n_cols, V, S, stream);
  // eigDC yields ascending eigenvalues; reverse both factors to descending
  // singular-value order.
  raft::matrix::col_reverse(handle,
                            make_device_matrix_view<math_t, idx_t, col_major>(V, n_cols, n_cols));
  raft::matrix::row_reverse(handle,
                            make_device_matrix_view<math_t, idx_t, col_major>(S, n_cols, idx_t(1)));
  // Singular values are the square roots of the (clamped non-negative) eigenvalues.
  raft::matrix::seqRoot(S, S, alpha, n_cols, stream, true);
  if (gen_left_vec) {
    // U = in * V, then divide each column by its singular value (skipping zeros).
    raft::linalg::gemm(handle,
                       in,
                       n_rows,
                       n_cols,
                       V,
                       U,
                       n_rows,
                       n_cols,
                       CUBLAS_OP_N,
                       CUBLAS_OP_N,
                       alpha,
                       beta,
                       stream);
    raft::matrix::matrixVectorBinaryDivSkipZero(U, S, n_rows, n_cols, false, true, stream);
  }
}
/**
 * @brief Singular value decomposition via cuSOLVER's Jacobi solver (gesvdj).
 *
 * Economy-size decomposition (econ = 1). Unlike the previous revision, the
 * solver's devInfo status is now checked after the call — consistent with
 * svdQR above — so a silent non-convergence can no longer go unnoticed.
 *
 * @param in               column-major n_rows x n_cols input (gesvdj may
 *                         overwrite it)
 * @param sing_vals        output singular values, descending
 * @param left_sing_vecs   output U; written when gen_left_vec (solver is run
 *                         in vector mode regardless)
 * @param right_sing_vecs  output V
 * @param tol              Jacobi convergence tolerance
 * @param max_sweeps       maximum number of Jacobi sweeps
 */
template <typename math_t>
void svdJacobi(raft::resources const& handle,
               math_t* in,
               int n_rows,
               int n_cols,
               math_t* sing_vals,
               math_t* left_sing_vecs,
               math_t* right_sing_vecs,
               bool gen_left_vec,
               bool gen_right_vec,
               math_t tol,
               int max_sweeps,
               cudaStream_t stream)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "raft::linalg::svdJacobi(%d, %d)", n_rows, n_cols);
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
  // Solver configuration object (tolerance + sweep cap).
  gesvdjInfo_t gesvdj_params = NULL;
  RAFT_CUSOLVER_TRY(cusolverDnCreateGesvdjInfo(&gesvdj_params));
  RAFT_CUSOLVER_TRY(cusolverDnXgesvdjSetTolerance(gesvdj_params, tol));
  RAFT_CUSOLVER_TRY(cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, max_sweeps));
  int m = n_rows;
  int n = n_cols;
  rmm::device_scalar<int> devInfo(stream);
  int lwork = 0;
  int econ  = 1;  // economy-size factors
  RAFT_CUSOLVER_TRY(cusolverDngesvdj_bufferSize(cusolverH,
                                                CUSOLVER_EIG_MODE_VECTOR,
                                                econ,
                                                m,
                                                n,
                                                in,
                                                m,
                                                sing_vals,
                                                left_sing_vecs,
                                                m,
                                                right_sing_vecs,
                                                n,
                                                &lwork,
                                                gesvdj_params));
  rmm::device_uvector<math_t> d_work(lwork, stream);
  RAFT_CUSOLVER_TRY(cusolverDngesvdj(cusolverH,
                                     CUSOLVER_EIG_MODE_VECTOR,
                                     econ,
                                     m,
                                     n,
                                     in,
                                     m,
                                     sing_vals,
                                     left_sing_vecs,
                                     m,
                                     right_sing_vecs,
                                     n,
                                     d_work.data(),
                                     lwork,
                                     devInfo.data(),
                                     gesvdj_params,
                                     stream));
  RAFT_CUSOLVER_TRY(cusolverDnDestroyGesvdjInfo(gesvdj_params));
  RAFT_CUDA_TRY(cudaGetLastError());
  // gesvdj reports 0 on success, >0 when it failed to converge within
  // max_sweeps, <0 for an invalid parameter. Previously this was ignored.
  int dev_info;
  raft::update_host(&dev_info, devInfo.data(), 1, stream);
  resource::sync_stream(handle, stream);
  ASSERT(dev_info == 0,
         "svd.cuh: Jacobi svd couldn't converge to a solution (devInfo != 0). "
         "Try increasing max_sweeps or loosening tol.");
}
/**
 * @brief Reconstructs out = U * S * V^T from truncated SVD factors.
 *
 * @param U      left singular vectors, n_rows x k (column-major)
 * @param S      k x k diagonal matrix of singular values (dense, column-major)
 * @param V      right singular vectors, n_cols x k (column-major)
 * @param out    output n_rows x n_cols reconstruction
 * @param k      truncation rank
 */
template <typename math_t>
void svdReconstruction(raft::resources const& handle,
                       math_t* U,
                       math_t* S,
                       math_t* V,
                       math_t* out,
                       int n_rows,
                       int n_cols,
                       int k,
                       cudaStream_t stream)
{
  const math_t one  = 1.0;
  const math_t zero = 0.0;
  // Step 1: sv_t = S * V^T, a k x n_cols intermediate.
  rmm::device_uvector<math_t> sv_t(k * n_cols, stream);
  raft::linalg::gemm(
    handle, S, k, k, V, sv_t.data(), k, n_cols, CUBLAS_OP_N, CUBLAS_OP_T, one, zero, stream);
  // Step 2: out = U * sv_t, the full n_rows x n_cols product.
  raft::linalg::gemm(handle,
                     U,
                     n_rows,
                     k,
                     sv_t.data(),
                     out,
                     n_rows,
                     n_cols,
                     CUBLAS_OP_N,
                     CUBLAS_OP_N,
                     one,
                     zero,
                     stream);
}
/**
 * @brief Checks an SVD factorization by relative Frobenius/L2 reconstruction error.
 *
 * Rebuilds P = U * diag(S_vec) * V^T, forms A - P with cuBLAS geam, and accepts
 * the factorization when ||A - P|| / ||A|| < tol.
 *
 * Changes vs. the previous revision: the norms of U, S, V and P were computed
 * (four full matrix-norm launches) but never used — dead computation, removed.
 *
 * @return true when the relative reconstruction error is below `tol`
 */
template <typename math_t>
bool evaluateSVDByL2Norm(raft::resources const& handle,
                         math_t* A_d,
                         math_t* U,
                         math_t* S_vec,
                         math_t* V,
                         int n_rows,
                         int n_cols,
                         int k,
                         math_t tol,
                         cudaStream_t stream)
{
  cublasHandle_t cublasH = resource::get_cublas_handle(handle);
  int m = n_rows, n = n_cols;
  // Reconstruct P = U * diag(S_vec) * V^T from the truncated factors.
  rmm::device_uvector<math_t> P_d(m * n, stream);
  rmm::device_uvector<math_t> S_mat(k * k, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(P_d.data(), 0, sizeof(math_t) * m * n, stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(S_mat.data(), 0, sizeof(math_t) * k * k, stream));
  raft::matrix::set_diagonal(handle,
                             make_device_vector_view<const math_t>(S_vec, k),
                             make_device_matrix_view<math_t>(S_mat.data(), k, k));
  svdReconstruction(handle, U, S_mat.data(), V, P_d.data(), m, n, k, stream);
  // Reference magnitude of the input.
  math_t normA = raft::matrix::l2_norm(handle, make_device_matrix_view<const math_t>(A_d, m, n));
  // A_minus_P = 1.0 * A - 1.0 * P via geam.
  const math_t alpha = 1.0, beta = -1.0;
  rmm::device_uvector<math_t> A_minus_P(m * n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(A_minus_P.data(), 0, sizeof(math_t) * m * n, stream));
  RAFT_CUBLAS_TRY(cublasgeam(cublasH,
                             CUBLAS_OP_N,
                             CUBLAS_OP_N,
                             m,
                             n,
                             &alpha,
                             A_d,
                             m,
                             &beta,
                             P_d.data(),
                             m,
                             A_minus_P.data(),
                             m,
                             stream));
  math_t norm_A_minus_P =
    raft::matrix::l2_norm(handle, make_device_matrix_view<const math_t>(A_minus_P.data(), m, n));
  // Relative error, accepted when below the caller's tolerance.
  math_t percent_error = 100.0 * norm_A_minus_P / normA;
  return (percent_error / 100.0 < tol);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/map_then_reduce.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/core/resources.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/vectorized.cuh>
namespace raft {
namespace linalg {
namespace detail {
// Tag type that dispatches `reduce` to the specialized block-sum path below.
struct sum_tag {};
// Block-wide sum of each thread's `acc` via cub::BlockReduce; thread 0 then
// folds the block's partial sum into the global accumulator with an atomic add.
// All TPB threads of the block must call this (BlockReduce is collective).
template <typename InType, typename OutType, int TPB>
__device__ void reduce(OutType* out, const InType acc, sum_tag)
{
  typedef cub::BlockReduce<InType, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  OutType tmp = BlockReduce(temp_storage).Sum(acc);
  if (threadIdx.x == 0) { raft::myAtomicAdd(out, tmp); }
}
// Generic-operator variant: block-wide reduction of `acc` with the caller's
// binary op via cub::BlockReduce, then thread 0 merges the block's partial
// result into the global accumulator with raft::myAtomicReduce using the same
// op. `op` must be associative/commutative for the result to be well defined.
template <typename InType, typename OutType, int TPB, typename ReduceLambda>
__device__ void reduce(OutType* out, const InType acc, ReduceLambda op)
{
  typedef cub::BlockReduce<InType, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  OutType tmp = BlockReduce(temp_storage).Reduce(acc, op);
  if (threadIdx.x == 0) { raft::myAtomicReduce(out, tmp, op); }
}
template <typename InType,
typename OutType,
typename IdxType,
typename MapOp,
typename ReduceLambda,
int TPB,
typename... Args>
RAFT_KERNEL mapThenReduceKernel(OutType* out,
IdxType len,
OutType neutral,
MapOp map,
ReduceLambda op,
const InType* in,
Args... args)
{
OutType acc = neutral;
auto idx = (threadIdx.x + (blockIdx.x * blockDim.x));
if (idx < len) { acc = map(in[idx], args[idx]...); }
__syncthreads();
reduce<InType, OutType, TPB>(out, acc, op);
}
// Host-side launcher: seeds *out with `neutral` (the kernel's blocks fold their
// partial results into it atomically), then launches one thread per element.
// The result in *out is valid only after the stream has been synchronized.
template <typename InType,
          typename OutType,
          typename IdxType,
          typename MapOp,
          typename ReduceLambda,
          int TPB,
          typename... Args>
void mapThenReduceImpl(OutType* out,
                       IdxType len,
                       OutType neutral,
                       MapOp map,
                       ReduceLambda op,
                       cudaStream_t stream,
                       const InType* in,
                       Args... args)
{
  // *out must hold the reduction identity before any block atomically merges
  // its partial result.
  raft::update_device(out, &neutral, 1, stream);
  const int nblks = raft::ceildiv(len, IdxType(TPB));
  mapThenReduceKernel<InType, OutType, IdxType, MapOp, ReduceLambda, TPB, Args...>
    <<<nblks, TPB, 0, stream>>>(out, len, neutral, map, op, in, args...);
  // Surface launch-configuration errors without clearing the sticky error state.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/contractions.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/device_loads_stores.cuh>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Shared-memory tiling machinery for pairwise "contraction" kernels
 *        (e.g. pairwise distances) over X (m x k) and Y (n x k).
 *
 * Implements a double-buffered global -> register -> shared -> register
 * pipeline: `ldgXY` stages a k-tile of X/Y into per-thread registers,
 * `stsXY` commits it to the write page of shared memory, and `ldsXY` reads
 * the current k-slice from the read page into `regx`/`regy` for the derived
 * kernel's math. Page indices are flipped with `switch_read_buffer` /
 * `switch_write_buffer`. All tile geometry comes from the Policy type P.
 */
template <typename DataT, typename IdxT, typename Policy, bool isRowMajor = true>
struct Contractions_NT {
 protected:
  typedef Policy P;
  /** number of rows in X */
  IdxT m;
  /** number of rows in Y */
  IdxT n;
  /** number of columns in X and Y */
  IdxT k;
  /** leading dimension in X */
  IdxT lda;
  /** leading dimension in Y */
  IdxT ldb;
  /** leading dimension in Output D */
  IdxT ldd;
  /** global memory pointer to X matrix */
  const DataT* x_base;
  /** global memory pointer to Y matrix */
  const DataT* y_base;
  /** current thread's smem row id */
  int srowid;
  /** current thread's smem column id */
  int scolid;
  /** current thread's accumulation row id */
  int accrowid;
  /** current thread's accumulation column id */
  int acccolid;
  /** base smem pointer for X data storage */
  DataT* sx;
  /** base smem pointer for Y data storage */
  DataT* sy;
  /** index pointing the correct smem page for writing after `ldgXY()` */
  int pageWr;
  /** index pointing the correct smem page for reading during `ldsXY()` */
  int pageRd;
  /** block of X data loaded from smem after `ldsXY()` */
  DataT regx[P::AccRowsPerTh][P::Veclen];
  /** block of Y data loaded from smem after `ldsXY()` */
  DataT regy[P::AccColsPerTh][P::Veclen];
  /** block of X data loaded from global mem after `ldgXY()` */
  DataT ldgDataX[P::LdgPerThX][P::Veclen];
  /** block of Y data loaded from global mem after `ldgXY()` */
  DataT ldgDataY[P::LdgPerThY][P::Veclen];
  static constexpr DataT Zero = (DataT)0;
 public:
  /**
   * @brief Ctor (packed layout: leading dimensions default to k)
   * @param[in] _x X matrix. [on device] [dim = _m x _k] [row-major]
   * @param[in] _y Y matrix. [on device] [dim = _n x _k] [row-major]
   * @param[in] _m number of rows of X
   * @param[in] _n number of rows of Y
   * @param[in] _k number of cols of X and Y
   * @param[in] _smem shared memory region used during computations
   */
  DI Contractions_NT(const DataT* _x, const DataT* _y, IdxT _m, IdxT _n, IdxT _k, char* _smem)
    : m(_m),
      n(_n),
      k(_k),
      lda(_k),
      ldb(_k),
      x_base(_x),
      y_base(_y),
      srowid(threadIdx.x / P::LdgThRow),
      scolid((threadIdx.x % P::LdgThRow) * P::Veclen),
      accrowid(threadIdx.x / P::AccThCols),
      acccolid(threadIdx.x % P::AccThCols),
      sx((DataT*)_smem),
      sy(&(sx[P::SmemPageX])),
      pageWr(0),
      pageRd(0)
  {
  }
  /**
   * @brief Ctor with explicit leading dimensions (strided layouts)
   * @param[in] _x X matrix. [on device] [dim = _m x _k] [row-major]
   * @param[in] _y Y matrix. [on device] [dim = _n x _k] [row-major]
   * @param[in] _m number of rows of X
   * @param[in] _n number of rows of Y
   * @param[in] _k number of cols of X and Y
   * @param[in] _lda leading dimension of X
   * @param[in] _ldb leading dimension of Y
   * @param[in] _ldd leading dimension of the output D
   * @param[in] _smem shared memory region used during computations
   */
  DI Contractions_NT(const DataT* _x,
                     const DataT* _y,
                     IdxT _m,
                     IdxT _n,
                     IdxT _k,
                     IdxT _lda,
                     IdxT _ldb,
                     IdxT _ldd,
                     char* _smem)
    : m(_m),
      n(_n),
      k(_k),
      lda(_lda),
      ldb(_ldb),
      ldd(_ldd),
      x_base(_x),
      y_base(_y),
      srowid(threadIdx.x / P::LdgThRow),
      scolid((threadIdx.x % P::LdgThRow) * P::Veclen),
      accrowid(threadIdx.x / P::AccThCols),
      acccolid(threadIdx.x % P::AccThCols),
      sx((DataT*)_smem),
      sy(&(sx[P::SmemPageX])),
      pageWr(0),
      pageRd(0)
  {
  }
 protected:
  /**
   * @brief Load current block of X/Y from global memory to registers
   * @param[in] kidx current start index of k to be loaded
   */
  DI void ldgXY(IdxT tile_idx_m, IdxT tile_idx_n, IdxT kidx)
  {
    ldgX(tile_idx_m, kidx);
    ldgY(tile_idx_n, kidx);
  }
  // Variant with an explicit row bound for Y (used when the n-tile is clipped).
  DI void ldgXY(IdxT tile_idx_m, IdxT tile_idx_n, IdxT kidx, IdxT tile_end_n)
  {
    ldgX(tile_idx_m, kidx);
    ldgY(tile_idx_n, kidx, tile_end_n);
  }
  /**
   * @brief Store current block of X/Y from registers to smem
   * @param[in] kidx current start index of k to be loaded
   */
  DI void stsXY()
  {
    stsX(sx + pageWr * P::SmemPage);
    stsY(sy + pageWr * P::SmemPage);
  }
  /**
   * @brief Load X and Y block from shared memory to registers
   * @param[in] kidx k value from the current k-block to be loaded from smem
   */
  DI void ldsXY(int kidx)
  {
    ldsX(kidx, sx + pageRd * P::SmemPage);
    ldsY(kidx, sy + pageRd * P::SmemPage);
  }
  // Flip the double-buffer page indices (read side / write side respectively).
  DI void switch_read_buffer() { this->pageRd ^= 1; }
  DI void switch_write_buffer() { this->pageWr ^= 1; }
 private:
  // Global -> register load of this thread's X fragment; out-of-range lanes
  // are zero-filled so downstream math needs no bounds checks.
  DI void ldgX(IdxT tile_idx_m, IdxT kidx)
  {
    IdxT xrowid = isRowMajor ? tile_idx_m + srowid : tile_idx_m;
    auto x      = isRowMajor ? x_base + xrowid * lda : x_base + xrowid + srowid * lda;
    if (isRowMajor) {
      auto numRows = m;
      auto koffset = kidx + scolid;
#pragma unroll
      for (int i = 0; i < P::LdgPerThX; ++i) {
        if (koffset < lda && (xrowid + i * P::LdgRowsX) < numRows) {
          ldg(ldgDataX[i], x + i * P::LdgRowsX * lda + koffset);
        } else {
#pragma unroll
          for (int j = 0; j < P::Veclen; ++j) {
            ldgDataX[i][j] = Zero;
          }
        }
      }
    } else {
      const auto numRows = k;
      auto koffset       = scolid;
#pragma unroll
      for (int i = 0; i < P::LdgPerThX; ++i) {
        if ((koffset + xrowid) < lda && (srowid + kidx + i * P::LdgRowsX) < numRows) {
          ldg(ldgDataX[i], x + (kidx + i * P::LdgRowsX) * lda + koffset);
        } else {
#pragma unroll
          for (int j = 0; j < P::Veclen; ++j) {
            ldgDataX[i][j] = Zero;
          }
        }
      }
    }
  }
  // Global -> register load of this thread's Y fragment (row bound defaults to n).
  DI void ldgY(IdxT tile_idx_n, IdxT kidx) { ldgY(tile_idx_n, kidx, n); }
  DI void ldgY(IdxT tile_idx_n, IdxT kidx, IdxT end_n)
  {
    IdxT yrowid = isRowMajor ? tile_idx_n + srowid : tile_idx_n;
    auto y      = isRowMajor ? y_base + yrowid * ldb : y_base + yrowid + srowid * ldb;
    if (isRowMajor) {
      auto numRows = end_n;
      auto koffset = kidx + scolid;
#pragma unroll
      for (int i = 0; i < P::LdgPerThY; ++i) {
        if (koffset < ldb && (yrowid + i * P::LdgRowsY) < numRows) {
          ldg(ldgDataY[i], y + i * P::LdgRowsY * ldb + koffset);
        } else {
#pragma unroll
          for (int j = 0; j < P::Veclen; ++j) {
            ldgDataY[i][j] = Zero;
          }
        }
      }
    } else {
      auto numRows = k;
      auto koffset = scolid;
#pragma unroll
      for (int i = 0; i < P::LdgPerThY; ++i) {
        if ((koffset + yrowid) < end_n && (srowid + kidx + i * P::LdgRowsY) < numRows) {
          ldg(ldgDataY[i], y + (kidx + i * P::LdgRowsY) * ldb + koffset);
        } else {
#pragma unroll
          for (int j = 0; j < P::Veclen; ++j) {
            ldgDataY[i][j] = Zero;
          }
        }
      }
    }
  }
  // Register -> shared store of the staged X fragment into the given smem page.
  DI void stsX(DataT* smem)
  {
    auto* saddr = smem + srowid * P::SmemStride + scolid;
#pragma unroll
    for (int i = 0; i < P::LdgPerThX; ++i) {
      sts(saddr + i * P::LdgRowsX * P::SmemStride, ldgDataX[i]);
    }
  }
  // Register -> shared store of the staged Y fragment into the given smem page.
  DI void stsY(DataT* smem)
  {
    auto* saddr = smem + srowid * P::SmemStride + scolid;
#pragma unroll
    for (int i = 0; i < P::LdgPerThY; ++i) {
      sts(saddr + i * P::LdgRowsY * P::SmemStride, ldgDataY[i]);
    }
  }
  // Shared -> register load of the k-th X slice; vectorized `lds` on the
  // row-major path, element-wise strided gather on the column-major path.
  DI void ldsX(int kidx, DataT* smem)
  {
    if (isRowMajor) {
      auto* saddr = smem + accrowid * P::SmemStride + kidx;
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
        lds(regx[i], saddr + i * P::AccThRows * P::SmemStride);
      }
    } else {
      auto* saddr = smem + accrowid + kidx * P::SmemStride;
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int v = 0; v < P::Veclen; ++v) {
          regx[i][v] = saddr[i * P::AccThRows + v * P::SmemStride];
        }
      }
    }
  }
  // Shared -> register load of the k-th Y slice (mirror of ldsX).
  DI void ldsY(int kidx, DataT* smem)
  {
    if (isRowMajor) {
      auto* saddr = smem + acccolid * P::SmemStride + kidx;
#pragma unroll
      for (int i = 0; i < P::AccColsPerTh; ++i) {
        lds(regy[i], saddr + i * P::AccThCols * P::SmemStride);
      }
    } else {
      auto* saddr = smem + acccolid + kidx * P::SmemStride;
#pragma unroll
      for (int i = 0; i < P::AccColsPerTh; ++i) {
#pragma unroll
        for (int v = 0; v < P::Veclen; ++v) {
          regy[i][v] = saddr[i * P::AccThCols + v * P::SmemStride];
        }
      }
    }
  }
};  // struct Contractions_NT
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/mean_squared_error.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/map_then_reduce.cuh>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Weighted mean squared error: *out = sum_i weight * (A[i] - B[i])^2 / len.
 *
 * The per-element map keeps the original evaluation order
 * (diff * diff * weight / len) so floating-point results match exactly; the
 * map-then-reduce primitive sums the mapped values into *out on `stream`.
 */
template <typename math_t, int TPB = 256>
void meanSquaredError(
  math_t* out, const math_t* A, const math_t* B, size_t len, math_t weight, cudaStream_t stream)
{
  auto weighted_sq_diff = [len, weight] __device__(const math_t lhs, const math_t rhs) {
    const math_t delta = lhs - rhs;
    return delta * delta * weight / len;
  };
  raft::linalg::mapThenSumReduce<math_t, decltype(weighted_sq_diff)>(
    out, len, weighted_sq_diff, stream, A, B);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/rsvd.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/qr.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/diagonal.cuh>
#include <raft/matrix/math.cuh>
#include <raft/matrix/reverse.cuh>
#include <raft/matrix/slice.cuh>
#include <raft/matrix/triangular.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <algorithm>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Truncated randomized SVD via cuSOLVER's gesvdr (rank-k + oversampling).
 *
 * Fix vs. the previous revision: the nvtx range format used "%d" for three
 * std::size_t arguments, which is undefined behavior in printf-style
 * formatting on LP64; the specifiers are now "%zu".
 *
 * @param in      column-major n_rows x n_cols input; gesvdr may modify the
 *                buffer (const is cast away below)
 * @param k       requested rank (must be < min(n_rows, n_cols))
 * @param p       oversampling amount (k + p must be < min(n_rows, n_cols))
 * @param niters  number of subspace/power iterations
 * @param S       output singular values (size k)
 * @param U, V    output singular vectors; only written when gen_U / gen_V
 */
template <typename math_t>
void randomized_svd(const raft::resources& handle,
                    const math_t* in,
                    std::size_t n_rows,
                    std::size_t n_cols,
                    std::size_t k,
                    std::size_t p,
                    std::size_t niters,
                    math_t* S,
                    math_t* U,
                    math_t* V,
                    bool gen_U,
                    bool gen_V)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "raft::linalg::randomized_svd(%zu, %zu, %zu)", n_rows, n_cols, k);
  RAFT_EXPECTS(k < std::min(n_rows, n_cols), "k must be < min(n_rows, n_cols)");
  RAFT_EXPECTS((k + p) < std::min(n_rows, n_cols), "k + p must be < min(n_rows, n_cols)");
  RAFT_EXPECTS(!gen_U || (U != nullptr), "computation of U vector requested but found nullptr");
  RAFT_EXPECTS(!gen_V || (V != nullptr), "computation of V vector requested but found nullptr");
#if CUDART_VERSION < 11050
  // gesvdr only supports skipping U/V computation from CUDA 11.5 onward.
  RAFT_EXPECTS(gen_U && gen_V, "not computing U or V is not supported in CUDA version < 11.5");
#endif
  cudaStream_t stream          = resource::get_cuda_stream(handle);
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
  // 'S': compute economy-size factor; 'N': skip it.
  char jobu = gen_U ? 'S' : 'N';
  char jobv = gen_V ? 'S' : 'N';
  auto lda  = n_rows;
  auto ldu  = n_rows;
  auto ldv  = n_cols;
  // gesvdr takes a non-const input pointer and may overwrite the buffer.
  auto* in_ptr          = const_cast<math_t*>(in);
  size_t workspaceDevice = 0;
  size_t workspaceHost   = 0;
  RAFT_CUSOLVER_TRY(cusolverDnxgesvdr_bufferSize(cusolverH,
                                                 jobu,
                                                 jobv,
                                                 n_rows,
                                                 n_cols,
                                                 k,
                                                 p,
                                                 niters,
                                                 in_ptr,
                                                 lda,
                                                 S,
                                                 U,
                                                 ldu,
                                                 V,
                                                 ldv,
                                                 &workspaceDevice,
                                                 &workspaceHost,
                                                 stream));
  auto d_workspace = raft::make_device_vector<char>(handle, workspaceDevice);
  auto h_workspace = raft::make_host_vector<char>(workspaceHost);
  auto devInfo     = raft::make_device_scalar<int>(handle, 0);
  RAFT_CUSOLVER_TRY(cusolverDnxgesvdr(cusolverH,
                                      jobu,
                                      jobv,
                                      n_rows,
                                      n_cols,
                                      k,
                                      p,
                                      niters,
                                      in_ptr,
                                      lda,
                                      S,
                                      U,
                                      ldu,
                                      V,
                                      ldv,
                                      d_workspace.data_handle(),
                                      workspaceDevice,
                                      h_workspace.data_handle(),
                                      workspaceHost,
                                      devInfo.data_handle(),
                                      stream));
  RAFT_CUDA_TRY(cudaGetLastError());
  // Validate gesvdr's status code on the host before returning.
  int dev_info;
  raft::update_host(&dev_info, devInfo.data_handle(), 1, stream);
  resource::sync_stream(handle);
  ASSERT(dev_info == 0, "rsvd.cuh: Invalid parameter encountered.");
}
/**
* @brief randomized singular value decomposition (RSVD) on the column major
* float type input matrix (Jacobi-based), by specifying no. of PCs and
* upsamples directly
* @param handle: raft handle
* @param M: input matrix
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param S_vec: singular values of input matrix
* @param U: left singular values of input matrix
* @param V: right singular values of input matrix
* @param k: no. of singular values to be computed
* @param p: no. of upsamples
* @param use_bbt: whether use eigen decomposition in computation or not
* @param gen_left_vec: left vector needs to be generated or not?
* @param gen_right_vec: right vector needs to be generated or not?
* @param use_jacobi: whether to jacobi solver for decomposition
* @param tol: tolerance for Jacobi-based solvers
* @param max_sweeps: maximum number of sweeps for Jacobi-based solvers
* @param stream cuda stream
*/
template <typename math_t>
void rsvdFixedRank(raft::resources const& handle,
math_t* M,
int n_rows,
int n_cols,
math_t* S_vec,
math_t* U,
math_t* V,
int k,
int p,
bool use_bbt,
bool gen_left_vec,
bool gen_right_vec,
bool use_jacobi,
math_t tol,
int max_sweeps,
cudaStream_t stream)
{
cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
cublasHandle_t cublasH = resource::get_cublas_handle(handle);
// All the notations are following Algorithm 4 & 5 in S. Voronin's paper:
// https://arxiv.org/abs/1502.05366
int m = n_rows, n = n_cols;
int l = k + p; // Total number of singular values to be computed before truncation
int q = 2; // Number of power sampling counts
int s = 1; // Frequency controller for QR decomposition during power sampling
// scheme. s = 1: 2 QR per iteration; s = 2: 1 QR per iteration; s
// > 2: less frequent QR
const math_t alpha = 1.0, beta = 0.0;
// Build temporary U, S, V matrices
rmm::device_uvector<math_t> S_vec_tmp(l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(S_vec_tmp.data(), 0, sizeof(math_t) * l, stream));
// build random matrix
rmm::device_uvector<math_t> RN(n * l, stream);
raft::random::RngState state{484};
raft::random::normal(handle, state, RN.data(), n * l, math_t(0.0), alpha);
// multiply to get matrix of random samples Y
rmm::device_uvector<math_t> Y(m * l, stream);
raft::linalg::gemm(
handle, M, m, n, RN.data(), Y.data(), m, l, CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, stream);
// now build up (M M^T)^q R
rmm::device_uvector<math_t> Z(n * l, stream);
rmm::device_uvector<math_t> Yorth(m * l, stream);
rmm::device_uvector<math_t> Zorth(n * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Z.data(), 0, sizeof(math_t) * n * l, stream));
RAFT_CUDA_TRY(cudaMemsetAsync(Yorth.data(), 0, sizeof(math_t) * m * l, stream));
RAFT_CUDA_TRY(cudaMemsetAsync(Zorth.data(), 0, sizeof(math_t) * n * l, stream));
// power sampling scheme
for (int j = 1; j < q; j++) {
if ((2 * j - 2) % s == 0) {
raft::linalg::qrGetQ(handle, Y.data(), Yorth.data(), m, l, stream);
raft::linalg::gemm(handle,
M,
m,
n,
Yorth.data(),
Z.data(),
n,
l,
CUBLAS_OP_T,
CUBLAS_OP_N,
alpha,
beta,
stream);
} else {
raft::linalg::gemm(
handle, M, m, n, Y.data(), Z.data(), n, l, CUBLAS_OP_T, CUBLAS_OP_N, alpha, beta, stream);
}
if ((2 * j - 1) % s == 0) {
raft::linalg::qrGetQ(handle, Z.data(), Zorth.data(), n, l, stream);
raft::linalg::gemm(handle,
M,
m,
n,
Zorth.data(),
Y.data(),
m,
l,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
stream);
} else {
raft::linalg::gemm(
handle, M, m, n, Z.data(), Y.data(), m, l, CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, stream);
}
}
// orthogonalize on exit from loop to get Q
rmm::device_uvector<math_t> Q(m * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Q.data(), 0, sizeof(math_t) * m * l, stream));
raft::linalg::qrGetQ(handle, Y.data(), Q.data(), m, l, stream);
// either QR of B^T method, or eigendecompose BB^T method
if (!use_bbt) {
// form Bt = Mt*Q : nxm * mxl = nxl
rmm::device_uvector<math_t> Bt(n * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Bt.data(), 0, sizeof(math_t) * n * l, stream));
raft::linalg::gemm(
handle, M, m, n, Q.data(), Bt.data(), n, l, CUBLAS_OP_T, CUBLAS_OP_N, alpha, beta, stream);
// compute QR factorization of Bt
// M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */
rmm::device_uvector<math_t> Qhat(n * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Qhat.data(), 0, sizeof(math_t) * n * l, stream));
rmm::device_uvector<math_t> Rhat(l * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Rhat.data(), 0, sizeof(math_t) * l * l, stream));
raft::linalg::qrGetQR(handle, Bt.data(), Qhat.data(), Rhat.data(), n, l, stream);
// compute SVD of Rhat (lxl)
rmm::device_uvector<math_t> Uhat(l * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Uhat.data(), 0, sizeof(math_t) * l * l, stream));
rmm::device_uvector<math_t> Vhat(l * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Vhat.data(), 0, sizeof(math_t) * l * l, stream));
if (use_jacobi)
raft::linalg::svdJacobi(handle,
Rhat.data(),
l,
l,
S_vec_tmp.data(),
Uhat.data(),
Vhat.data(),
true,
true,
tol,
max_sweeps,
stream);
else
raft::linalg::svdQR(handle,
Rhat.data(),
l,
l,
S_vec_tmp.data(),
Uhat.data(),
Vhat.data(),
true,
true,
true,
stream);
// First k elements of S_vec
raft::matrix::slice(
handle,
make_device_matrix_view<const math_t, int, col_major>(S_vec_tmp.data(), 1, l),
make_device_matrix_view<math_t, int, col_major>(S_vec, 1, k),
raft::matrix::slice_coordinates(0, 0, 1, k));
// Merge step 14 & 15 by calculating U = Q*Vhat[:,1:k] mxl * lxk = mxk
if (gen_left_vec) {
raft::linalg::gemm(handle,
Q.data(),
m,
l,
Vhat.data(),
U,
m,
k /*used to be l and needs slicing*/,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
stream);
}
// Merge step 14 & 15 by calculating V = Qhat*Uhat[:,1:k] nxl * lxk = nxk
if (gen_right_vec) {
raft::linalg::gemm(handle,
Qhat.data(),
n,
l,
Uhat.data(),
V,
n,
k /*used to be l and needs slicing*/,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
stream);
}
} else {
// build the matrix B B^T = Q^T M M^T Q column by column
// Bt = M^T Q ; nxm * mxk = nxk
rmm::device_uvector<math_t> B(n * l, stream);
raft::linalg::gemm(
handle, Q.data(), m, l, M, B.data(), l, n, CUBLAS_OP_T, CUBLAS_OP_N, alpha, beta, stream);
rmm::device_uvector<math_t> BBt(l * l, stream);
raft::linalg::gemm(handle,
B.data(),
l,
n,
B.data(),
BBt.data(),
l,
l,
CUBLAS_OP_N,
CUBLAS_OP_T,
alpha,
beta,
stream);
// compute eigendecomposition of BBt
rmm::device_uvector<math_t> Uhat(l * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Uhat.data(), 0, sizeof(math_t) * l * l, stream));
rmm::device_uvector<math_t> Uhat_dup(l * l, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Uhat_dup.data(), 0, sizeof(math_t) * l * l, stream));
raft::matrix::upper_triangular(
handle,
make_device_matrix_view<const math_t, int, col_major>(BBt.data(), l, l),
make_device_matrix_view<math_t, int, col_major>(Uhat_dup.data(), l, l));
if (use_jacobi)
raft::linalg::eigJacobi(
handle, Uhat_dup.data(), l, l, Uhat.data(), S_vec_tmp.data(), stream, tol, max_sweeps);
else
raft::linalg::eigDC(handle, Uhat_dup.data(), l, l, Uhat.data(), S_vec_tmp.data(), stream);
raft::matrix::seqRoot(S_vec_tmp.data(), l, stream);
auto S_vec_view = make_device_matrix_view<math_t, int, col_major>(S_vec, 1, k);
raft::matrix::slice(
handle,
raft::make_device_matrix_view<const math_t, int, col_major>(S_vec_tmp.data(), 1, l),
S_vec_view,
raft::matrix::slice_coordinates(0, p, 1, l)); // Last k elements of S_vec
raft::matrix::col_reverse(handle, S_vec_view);
// Merge step 14 & 15 by calculating U = Q*Uhat[:,(p+1):l] mxl * lxk = mxk
if (gen_left_vec) {
raft::linalg::gemm(handle,
Q.data(),
m,
l,
Uhat.data() + p * l,
U,
m,
k,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
stream);
raft::matrix::col_reverse(handle, make_device_matrix_view<math_t, int, col_major>(U, m, k));
}
// Merge step 14 & 15 by calculating V = B^T Uhat[:,(p+1):l] *
// Sigma^{-1}[(p+1):l, (p+1):l] nxl * lxk * kxk = nxk
if (gen_right_vec) {
rmm::device_uvector<math_t> Sinv(k * k, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(Sinv.data(), 0, sizeof(math_t) * k * k, stream));
rmm::device_uvector<math_t> UhatSinv(l * k, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(UhatSinv.data(), 0, sizeof(math_t) * l * k, stream));
raft::matrix::reciprocal(S_vec_tmp.data(), l, stream);
raft::matrix::set_diagonal(handle,
make_device_vector_view<const math_t>(S_vec_tmp.data() + p, k),
make_device_matrix_view<math_t>(Sinv.data(), k, k));
raft::linalg::gemm(handle,
Uhat.data() + p * l,
l,
k,
Sinv.data(),
UhatSinv.data(),
l,
k,
CUBLAS_OP_N,
CUBLAS_OP_N,
alpha,
beta,
stream);
raft::linalg::gemm(handle,
B.data(),
l,
n,
UhatSinv.data(),
V,
n,
k,
CUBLAS_OP_T,
CUBLAS_OP_N,
alpha,
beta,
stream);
raft::matrix::col_reverse(handle, make_device_matrix_view<math_t, int, col_major>(V, n, k));
}
}
}
/**
* @brief randomized singular value decomposition (RSVD) on the column major
* float type input matrix (Jacobi-based), by specifying the PC and upsampling
* ratio
* @param handle: raft handle
* @param M: input matrix
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param S_vec: singular values of input matrix
* @param U: left singular values of input matrix
* @param V: right singular values of input matrix
* @param PC_perc: percentage of singular values to be computed
* @param UpS_perc: upsampling percentage
* @param use_bbt: whether use eigen decomposition in computation or not
* @param gen_left_vec: left vector needs to be generated or not?
* @param gen_right_vec: right vector needs to be generated or not?
* @param use_jacobi: whether to jacobi solver for decomposition
* @param tol: tolerance for Jacobi-based solvers
* @param max_sweeps: maximum number of sweeps for Jacobi-based solvers
* @param stream cuda stream
*/
template <typename math_t>
void rsvdPerc(raft::resources const& handle,
              math_t* M,
              int n_rows,
              int n_cols,
              math_t* S_vec,
              math_t* U,
              math_t* V,
              math_t PC_perc,
              math_t UpS_perc,
              bool use_bbt,
              bool gen_left_vec,
              bool gen_right_vec,
              bool use_jacobi,
              math_t tol,
              int max_sweeps,
              cudaStream_t stream)
{
  // Translate the requested percentages of min(n_rows, n_cols) into absolute
  // counts, clamped to at least 1, then delegate to the fixed-rank routine.
  const int min_dim = std::min(n_rows, n_cols);
  const int k       = std::max(static_cast<int>(min_dim * PC_perc), 1);   // singular values to compute
  const int p       = std::max(static_cast<int>(min_dim * UpS_perc), 1);  // upsampling amount
  rsvdFixedRank(handle,
                M,
                n_rows,
                n_cols,
                S_vec,
                U,
                V,
                k,
                p,
                use_bbt,
                gen_left_vec,
                gen_right_vec,
                use_jacobi,
                tol,
                max_sweeps,
                stream);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/coalesced_reduction.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// Always include inline definitions of coalesced reduction, because we do not
// force explicit instantion.
#include "coalesced_reduction-inl.cuh"
// Do include the extern template instantiations when possible.
#ifdef RAFT_COMPILED
#include "coalesced_reduction-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/norm.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/norm_types.hpp>
#include <raft/linalg/reduce.cuh>
namespace raft {
namespace linalg {
namespace detail {
template <typename Type, typename IdxType, typename Lambda>
void rowNormCaller(Type* dots,
                   const Type* data,
                   IdxType D,
                   IdxType N,
                   NormType type,
                   bool rowMajor,
                   cudaStream_t stream,
                   Lambda fin_op)
{
  // Row-wise norms of the [N x D] input: one output per row (alongRows = true).
  // The element-wise / accumulation operator pair selects the norm flavor;
  // fin_op is forwarded to reduce() to post-process each reduced value.
  if (type == L1Norm) {
    // L1: sum of absolute values.
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, true, stream, false, raft::abs_op(), raft::add_op(), fin_op);
  } else if (type == L2Norm) {
    // L2: sum of squares (no square root is applied here).
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, true, stream, false, raft::sq_op(), raft::add_op(), fin_op);
  } else if (type == LinfNorm) {
    // L-infinity: maximum absolute value.
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, true, stream, false, raft::abs_op(), raft::max_op(), fin_op);
  } else {
    THROW("Unsupported norm type: %d", type);
  }
}
template <typename Type, typename IdxType, typename Lambda>
void colNormCaller(Type* dots,
                   const Type* data,
                   IdxType D,
                   IdxType N,
                   NormType type,
                   bool rowMajor,
                   cudaStream_t stream,
                   Lambda fin_op)
{
  // Column-wise norms of the [N x D] input: one output per column
  // (alongRows = false). The operator pair selects the norm flavor; fin_op is
  // forwarded to reduce() to post-process each reduced value.
  if (type == L1Norm) {
    // L1: sum of absolute values.
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, false, stream, false, raft::abs_op(), raft::add_op(), fin_op);
  } else if (type == L2Norm) {
    // L2: sum of squares (no square root is applied here).
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, false, stream, false, raft::sq_op(), raft::add_op(), fin_op);
  } else if (type == LinfNorm) {
    // L-infinity: maximum absolute value.
    raft::linalg::reduce<Type, Type, IdxType>(
      dots, data, D, N, (Type)0, rowMajor, false, stream, false, raft::abs_op(), raft::max_op(), fin_op);
  } else {
    THROW("Unsupported norm type: %d", type);
  }
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/cholesky_r1_update.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cublas_wrappers.hpp"
#include "cusolver_wrappers.hpp"
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/binary_op.cuh>
namespace raft {
namespace linalg {
namespace detail {
/**
 * @brief Rank-1 update of a Cholesky factorization: given the factor L of an
 * (n-1)x(n-1) matrix A and a new row/column appended to A, compute the factor
 * of the extended n x n matrix in place.
 *
 * Two-phase workspace protocol: when called with workspace == nullptr, only
 * the required workspace size is written to n_bytes and the function returns.
 *
 * @param handle raft resources (provides the cuBLAS handle)
 * @param L device pointer holding the factor; the new row/column of A must
 *          already be stored in the n-th column (upper) or n-th row (lower)
 * @param n size of the updated factor
 * @param ld leading dimension of L
 * @param workspace device scratch buffer (or nullptr for the size query)
 * @param n_bytes [out] required workspace size in bytes (size-query mode)
 * @param uplo whether L is stored as upper or lower triangular
 * @param stream CUDA stream; all device work is stream-ordered
 * @param eps if >= 0, floor applied to the new diagonal entry to guard
 *            against a non-positive-definite update (eps < 0 disables it)
 */
template <typename math_t>
void choleskyRank1Update(raft::resources const& handle,
                         math_t* L,
                         int n,
                         int ld,
                         void* workspace,
                         int* n_bytes,
                         cublasFillMode_t uplo,
                         cudaStream_t stream,
                         math_t eps = -1)
{
  // The matrix A' is defined as:
  // A' = [[A_11, A_12]
  //       [A_21, A_22]]
  // where:
  // - A_11 = A, matrix of size (n-1)x(n-1)
  // - A_21[j] = A_12.T[j] = A_new[j] j=0..n-2, vector with (n-1) elements
  // - A_22 = A_new[n-1] scalar.
  //
  // Instead of calculating the Cholesky decomposition of A' from scratch,
  // we just update L with the new row. The new Cholesky decomposition will be
  // calculated as:
  // L' = [[L_11, 0]
  //       [L_12, L_22]]
  // where L_11 is the Cholesky decomposition of A (size [n-1 x n-1]), and
  // L_12 and L_22 are the new quantities that we need to calculate.
  // We need a workspace in device memory to store a scalar. Additionally, in
  // CUBLAS_FILL_MODE_LOWER we need space for n-1 floats.
  const int align = 256;
  int offset =
    (uplo == CUBLAS_FILL_MODE_LOWER) ? raft::alignTo<int>(sizeof(math_t) * (n - 1), align) : 0;
  if (workspace == nullptr) {
    // Size-query mode: report required bytes and return without touching L.
    *n_bytes = offset + 1 * sizeof(math_t);
    return;
  }
  // s lives past the (optional) A_new staging area in the workspace.
  math_t* s = reinterpret_cast<math_t*>(((char*)workspace) + offset);
  math_t* L_22 = L + (n - 1) * ld + n - 1;
  math_t* A_new = nullptr;
  math_t* A_row = nullptr;
  if (uplo == CUBLAS_FILL_MODE_UPPER) {
    // A_new is stored as the n-1 th column of L
    A_new = L + (n - 1) * ld;
  } else {
    // If the input is lower triangular, then the new elements of A are stored
    // as the n-th row of L. Since the matrix is column major, this is non
    // contiguous. We copy elements from A_row to a contiguous workspace A_new.
    A_row = L + n - 1;
    A_new = reinterpret_cast<math_t*>(workspace);
    RAFT_CUBLAS_TRY(
      cublasCopy(resource::get_cublas_handle(handle), n - 1, A_row, ld, A_new, 1, stream));
  }
  cublasOperation_t op = (uplo == CUBLAS_FILL_MODE_UPPER) ? CUBLAS_OP_T : CUBLAS_OP_N;
  if (n > 1) {
    // Calculate L_12 = x by solving equation L_11 x = A_12
    math_t alpha = 1;
    RAFT_CUBLAS_TRY(cublastrsm(resource::get_cublas_handle(handle),
                               CUBLAS_SIDE_LEFT,
                               uplo,
                               op,
                               CUBLAS_DIAG_NON_UNIT,
                               n - 1,
                               1,
                               &alpha,
                               L,
                               ld,
                               A_new,
                               n - 1,
                               stream));
    // A_new now stores L_12, we calculate s = L_12 * L_12
    RAFT_CUBLAS_TRY(
      cublasdot(resource::get_cublas_handle(handle), n - 1, A_new, 1, A_new, 1, s, stream));
    if (uplo == CUBLAS_FILL_MODE_LOWER) {
      // Copy back the L_12 elements as the n-th row of L
      RAFT_CUBLAS_TRY(
        cublasCopy(resource::get_cublas_handle(handle), n - 1, A_new, 1, A_row, ld, stream));
    }
  } else { // n == 1 case
    RAFT_CUDA_TRY(cudaMemsetAsync(s, 0, sizeof(math_t), stream));
  }
  // L_22 = sqrt(A_22 - L_12 * L_12)
  math_t s_host;
  math_t L_22_host;
  raft::update_host(&s_host, s, 1, stream);
  raft::update_host(&L_22_host, L_22, 1, stream); // L_22 stores A_22
  // Host-side sqrt requires the device->host copies to have finished.
  resource::sync_stream(handle, stream);
  L_22_host = std::sqrt(L_22_host - s_host);
  // Check for numeric error with sqrt. If the matrix is not positive definite or
  // the system is very ill conditioned then the A_22 - L_12 * L_12 can be
  // negative, which would result L_22 = NaN. A small positive eps parameter
  // can be used to prevent this.
  if (eps >= 0 && (std::isnan(L_22_host) || L_22_host < eps)) { L_22_host = eps; }
  ASSERT(!std::isnan(L_22_host), "Error during Cholesky rank one update");
  raft::update_device(L_22, &L_22_host, 1, stream);
}
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/normalize.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
namespace detail {
/**
 * Launch policy for the "thin" normalization kernel: each row is handled by
 * one logical warp of `warpSize` threads and each block processes `rpb` rows,
 * giving `warpSize * rpb` threads per block.
 */
template <int warpSize, int rpb>
struct NormalizeThinPolicy {
  static constexpr int LogicalWarpSize = warpSize;
  static constexpr int RowsPerBlock = rpb;
  static constexpr int ThreadsPerBlock = LogicalWarpSize * RowsPerBlock;
};
/**
 * Kernel normalizing each of the N rows (of length D) of a row-contiguous
 * matrix by a reduced value of that row. One logical warp per row: threadIdx.x
 * indexes the lane within the logical warp, threadIdx.y the row within the
 * block. Rows whose reduced norm is <= eps are left untouched in `out`.
 */
template <typename Policy,
          typename Type,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL __launch_bounds__(Policy::ThreadsPerBlock)
  coalesced_normalize_thin_kernel(Type* out,
                                  const Type* in,
                                  IdxType D,
                                  IdxType N,
                                  Type init,
                                  MainLambda main_op,
                                  ReduceLambda reduce_op,
                                  FinalLambda fin_op,
                                  Type eps)
{
  // Row index handled by this logical warp; guard the grid tail.
  IdxType i = threadIdx.y + (Policy::RowsPerBlock * static_cast<IdxType>(blockIdx.x));
  if (i >= N) return;
  // Each lane accumulates a strided partial over the row.
  Type acc = init;
  for (IdxType j = threadIdx.x; j < D; j += Policy::LogicalWarpSize) {
    Type val = in[j + D * i];
    acc = reduce_op(acc, main_op(val, j));
  }
  // Combine the lane partials within the logical warp, then finalize
  // (e.g. sqrt for an L2 norm) via fin_op.
  acc = raft::logicalWarpReduce<Policy::LogicalWarpSize>(acc, reduce_op);
  acc = fin_op(acc);
  // Degenerate norm: skip the division to avoid blowing up / dividing by zero.
  if (acc <= eps) return;
  for (IdxType j = threadIdx.x; j < D; j += Policy::LogicalWarpSize) {
    out[j + D * i] = in[j + D * i] / acc;
  }
}
template <typename Policy,
          typename Type,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
inline void coalesced_normalize_thin(Type* out,
                                     const Type* in,
                                     IdxType D,
                                     IdxType N,
                                     Type init,
                                     cudaStream_t stream,
                                     MainLambda main_op,
                                     ReduceLambda reduce_op,
                                     FinalLambda fin_op,
                                     Type eps)
{
  // Launch config per Policy: x-dim is the logical warp (lane within a row),
  // y-dim selects the row within the block; one block covers RowsPerBlock rows.
  const dim3 threads(Policy::LogicalWarpSize, Policy::RowsPerBlock, 1);
  const dim3 blocks(ceildiv(N, static_cast<IdxType>(Policy::RowsPerBlock)), 1, 1);
  coalesced_normalize_thin_kernel<Policy>
    <<<blocks, threads, 0, stream>>>(out, in, D, N, init, main_op, reduce_op, fin_op, eps);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
 * Kernel normalizing each row of a row-contiguous [N x D] matrix, one block
 * of TPB threads per row. The row's reduction is computed with a CUB block
 * reduce and broadcast through shared memory; rows whose finalized value is
 * <= eps are left untouched in `out`.
 */
template <int TPB,
          typename Type,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL __launch_bounds__(TPB) coalesced_normalize_medium_kernel(Type* out,
                                                                     const Type* in,
                                                                     IdxType D,
                                                                     IdxType N,
                                                                     Type init,
                                                                     MainLambda main_op,
                                                                     ReduceLambda reduce_op,
                                                                     FinalLambda fin_op,
                                                                     Type eps)
{
  typedef cub::BlockReduce<Type, TPB, cub::BLOCK_REDUCE_RAKING> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // Shared slot used to broadcast the finalized reduction to all threads.
  __shared__ Type bcast_acc;
  // Strided per-thread partial over this block's row (row = blockIdx.x).
  Type thread_data = init;
  IdxType rowStart = blockIdx.x * D;
  for (IdxType i = threadIdx.x; i < D; i += TPB) {
    IdxType idx = rowStart + i;
    thread_data = reduce_op(thread_data, main_op(in[idx], i));
  }
  // Block-wide reduction; only thread 0 receives the valid result, so it
  // finalizes and publishes it via shared memory.
  Type acc = BlockReduce(temp_storage).Reduce(thread_data, reduce_op);
  if (threadIdx.x == 0) { bcast_acc = fin_op(acc); }
  __syncthreads();
  // Degenerate norm: skip the division (uniform branch — all threads agree).
  if (bcast_acc <= eps) return;
  for (IdxType i = threadIdx.x; i < D; i += TPB) {
    IdxType idx = rowStart + i;
    out[idx] = in[idx] / bcast_acc;
  }
}
template <int TPB,
          typename Type,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
inline void coalesced_normalize_medium(Type* out,
                                       const Type* in,
                                       IdxType D,
                                       IdxType N,
                                       Type init,
                                       cudaStream_t stream,
                                       MainLambda main_op,
                                       ReduceLambda reduce_op,
                                       FinalLambda fin_op,
                                       Type eps)
{
  // One TPB-thread block per row; N blocks total.
  const dim3 blocks(N, 1, 1);
  const dim3 threads(TPB, 1, 1);
  coalesced_normalize_medium_kernel<TPB>
    <<<blocks, threads, 0, stream>>>(out, in, D, N, init, main_op, reduce_op, fin_op, eps);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename Type,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
void coalesced_normalize(Type* out,
                         const Type* in,
                         IdxType D,
                         IdxType N,
                         Type init,
                         cudaStream_t stream,
                         MainLambda main_op,
                         ReduceLambda reduce_op,
                         FinalLambda fin_op,
                         Type eps)
{
  // Heuristic dispatch on the row length D: short rows go to the warp-per-row
  // ("thin") kernel, longer rows to the block-per-row ("medium") kernel.
  const IdxType numSMs = raft::getMultiProcessorCount();
  const bool use_thin = D <= IdxType(256) || (D <= IdxType(512) && N >= 4 * numSMs);
  if (!use_thin) {
    coalesced_normalize_medium<256>(out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
    return;
  }
  // Pick a logical warp size just wide enough for D; smaller warps let one
  // block cover more rows.
  if (D <= IdxType(2)) {
    coalesced_normalize_thin<NormalizeThinPolicy<2, 64>>(
      out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
  } else if (D <= IdxType(4)) {
    coalesced_normalize_thin<NormalizeThinPolicy<4, 32>>(
      out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
  } else if (D <= IdxType(8)) {
    coalesced_normalize_thin<NormalizeThinPolicy<8, 16>>(
      out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
  } else if (D <= IdxType(16)) {
    coalesced_normalize_thin<NormalizeThinPolicy<16, 8>>(
      out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
  } else {
    coalesced_normalize_thin<NormalizeThinPolicy<32, 4>>(
      out, in, D, N, init, stream, main_op, reduce_op, fin_op, eps);
  }
}
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/qr.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cublas_wrappers.hpp"
#include "cusolver_wrappers.hpp"
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/triangular.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
namespace raft {
namespace linalg {
namespace detail {
/**
* @brief Calculate the QR decomposition and get matrix Q in place of the input.
*
* Subject to the algorithm constraint `n_rows >= n_cols`.
*
* @param handle
* @param[inout] Q device pointer to input matrix and the output matrix Q,
* both column-major and of size [n_rows, n_cols].
* @param n_rows
* @param n_cols
* @param stream
*/
template <typename math_t>
void qrGetQ_inplace(
  raft::resources const& handle, math_t* Q, int n_rows, int n_cols, cudaStream_t stream)
{
  RAFT_EXPECTS(n_rows >= n_cols, "QR decomposition expects n_rows >= n_cols.");
  cusolverDnHandle_t cusolver = resource::get_cusolver_dn_handle(handle);
  // Householder scalar factors produced by geqrf and consumed by orgqr.
  rmm::device_uvector<math_t> tau(n_cols, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(tau.data(), 0, sizeof(math_t) * n_cols, stream));
  rmm::device_scalar<int> dev_info(stream);
  int ws_size;
  // Step 1: in-place QR factorization (geqrf) — Q afterwards holds the
  // Householder reflectors plus R in packed form.
  RAFT_CUSOLVER_TRY(cusolverDngeqrf_bufferSize(cusolver, n_rows, n_cols, Q, n_rows, &ws_size));
  rmm::device_uvector<math_t> workspace(ws_size, stream);
  RAFT_CUSOLVER_TRY(cusolverDngeqrf(cusolver,
                                    n_rows,
                                    n_cols,
                                    Q,
                                    n_rows,
                                    tau.data(),
                                    workspace.data(),
                                    ws_size,
                                    dev_info.data(),
                                    stream));
  // Step 2: expand the reflectors into the explicit orthonormal factor Q
  // (orgqr), reusing/resizing the same workspace buffer.
  RAFT_CUSOLVER_TRY(
    cusolverDnorgqr_bufferSize(cusolver, n_rows, n_cols, n_cols, Q, n_rows, tau.data(), &ws_size));
  workspace.resize(ws_size, stream);
  RAFT_CUSOLVER_TRY(cusolverDnorgqr(cusolver,
                                    n_rows,
                                    n_cols,
                                    n_cols,
                                    Q,
                                    n_rows,
                                    tau.data(),
                                    workspace.data(),
                                    ws_size,
                                    dev_info.data(),
                                    stream));
}
/**
 * @brief Compute the orthonormal factor Q of the QR decomposition of M.
 *
 * Copies M into Q and then factorizes in place, so M itself is not modified.
 * Subject to the same `n_rows >= n_cols` constraint as qrGetQ_inplace.
 *
 * @param handle raft resources
 * @param[in] M column-major input matrix of size [n_rows, n_cols]
 * @param[out] Q column-major output matrix of size [n_rows, n_cols]
 * @param n_rows number of rows
 * @param n_cols number of columns
 * @param stream CUDA stream
 */
template <typename math_t>
void qrGetQ(raft::resources const& handle,
            const math_t* M,
            math_t* Q,
            int n_rows,
            int n_cols,
            cudaStream_t stream)
{
  // Compute the element count in size_t: the int product n_rows * n_cols
  // would overflow for matrices with more than 2^31 - 1 elements.
  const size_t n_elements = static_cast<size_t>(n_rows) * static_cast<size_t>(n_cols);
  raft::copy(Q, M, n_elements, stream);
  qrGetQ_inplace(handle, Q, n_rows, n_cols, stream);
}
/**
 * @brief Compute both factors of the QR decomposition of M.
 *
 * M is copied into a scratch buffer first, so the input is not modified.
 * Q is written as an [m, n] column-major matrix and R as the
 * [min(m,n), min(m,n)] upper-triangular factor.
 */
template <typename math_t>
void qrGetQR(raft::resources const& handle,
             math_t* M,
             math_t* Q,
             math_t* R,
             int n_rows,
             int n_cols,
             cudaStream_t stream)
{
  cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
  int m = n_rows, n = n_cols;
  // R_full holds a working copy of M; after geqrf it contains the packed
  // Householder reflectors with R in its upper triangle.
  rmm::device_uvector<math_t> R_full(m * n, stream);
  rmm::device_uvector<math_t> tau(std::min(m, n), stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(tau.data(), 0, sizeof(math_t) * std::min(m, n), stream));
  int R_full_nrows = m, R_full_ncols = n;
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(R_full.data(), M, sizeof(math_t) * m * n, cudaMemcpyDeviceToDevice, stream));
  int Lwork;
  rmm::device_scalar<int> devInfo(stream);
  // Step 1: in-place QR factorization of the copy.
  RAFT_CUSOLVER_TRY(cusolverDngeqrf_bufferSize(
    cusolverH, R_full_nrows, R_full_ncols, R_full.data(), R_full_nrows, &Lwork));
  rmm::device_uvector<math_t> workspace(Lwork, stream);
  RAFT_CUSOLVER_TRY(cusolverDngeqrf(cusolverH,
                                    R_full_nrows,
                                    R_full_ncols,
                                    R_full.data(),
                                    R_full_nrows,
                                    tau.data(),
                                    workspace.data(),
                                    Lwork,
                                    devInfo.data(),
                                    stream));
  // Step 2: extract R = the upper triangle of the factorized copy.
  raft::matrix::upper_triangular<math_t, int>(
    handle,
    make_device_matrix_view<const math_t, int, col_major>(R_full.data(), m, n),
    make_device_matrix_view<math_t, int, col_major>(R, std::min(m, n), std::min(m, n)));
  // Step 3: expand the reflectors into the explicit Q factor (orgqr),
  // starting from another copy of the packed factorization.
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(Q, R_full.data(), sizeof(math_t) * m * n, cudaMemcpyDeviceToDevice, stream));
  int Q_nrows = m, Q_ncols = n;
  RAFT_CUSOLVER_TRY(cusolverDnorgqr_bufferSize(
    cusolverH, Q_nrows, Q_ncols, std::min(Q_ncols, Q_nrows), Q, Q_nrows, tau.data(), &Lwork));
  workspace.resize(Lwork, stream);
  RAFT_CUSOLVER_TRY(cusolverDnorgqr(cusolverH,
                                    Q_nrows,
                                    Q_ncols,
                                    std::min(Q_ncols, Q_nrows),
                                    Q,
                                    Q_nrows,
                                    tau.data(),
                                    workspace.data(),
                                    Lwork,
                                    devInfo.data(),
                                    stream));
}
}; // namespace detail
}; // namespace linalg
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/reduce_rows_by_key.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
#include <limits>
#define MAX_BLOCKS 65535u
namespace raft {
namespace linalg {
namespace detail {
//
// Small helper function to convert from int->char and char->int
// Transform ncols*nrows read of int in 2*nrows reads of int + ncols*rows reads of chars
//
template <typename IteratorT1, typename IteratorT2>
RAFT_KERNEL convert_array_kernel(IteratorT1 dst, IteratorT2 src, int n)
{
  // Grid-stride element-wise copy; the assignment performs the value-type
  // conversion (e.g. int <-> char).
  const int stride = gridDim.x * blockDim.x;
  for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += stride) {
    dst[i] = src[i];
  }
}
//
// Small helper function to convert from int->char and char->int
// Transform ncols*nrows read of int in 2*nrows reads of int + ncols*rows reads of chars
//
template <typename IteratorT1, typename IteratorT2>
void convert_array(IteratorT1 dst, IteratorT2 src, int n, cudaStream_t st)
{
  // One thread per element, capped at MAX_BLOCKS blocks; the kernel's
  // grid-stride loop picks up any remainder.
  constexpr int threads_per_block = 256;
  const unsigned n_blocks =
    std::min(static_cast<unsigned>(raft::ceildiv(n, threads_per_block)), MAX_BLOCKS);
  convert_array_kernel<<<n_blocks, threads_per_block, 0, st>>>(dst, src, n);
}
/** Plain aggregate holding four scalar components of type T (one accumulator
 *  slot per key in the small-k reduction path). */
template <typename T>
struct quad {
  T x;
  T y;
  T z;
  T w;
};
//
// Functor for reduce by key, small k
//
//
// Functor for reduce by key, small k
//
template <typename T>
struct quadSum {
  /** Component-wise sum of two quads; used as the reduction operator. */
  __host__ __device__ __forceinline__ quad<T> operator()(const quad<T>& lhs,
                                                         const quad<T>& rhs) const
  {
    quad<T> out;
    out.x = lhs.x + rhs.x;
    out.y = lhs.y + rhs.y;
    out.z = lhs.z + rhs.z;
    out.w = lhs.w + rhs.w;
    return out;
  }
};
//
// Reduce by keys
// We need to sum each dimension by labels
// The labels are not adjacent
//
//
// Reduce by keys - for keys <= 4
//
#define SUM_ROWS_SMALL_K_DIMX 256
#define SUM_ROWS_BY_KEY_SMALL_K_MAX_K 4
/**
 * Kernel for the small-key path (nkeys <= 4): every thread keeps one partial
 * sum per key in a register quad, the block reduces the quads with CUB, and
 * the first nkeys lanes atomically add the block's result into d_sums.
 * Grid layout: blockIdx.x strides over rows, blockIdx.y over columns.
 */
template <typename DataIteratorT, typename WeightT, typename SumsT, typename IdxT>
__launch_bounds__(SUM_ROWS_SMALL_K_DIMX, 4)
  RAFT_KERNEL sum_rows_by_key_small_nkeys_kernel(const DataIteratorT d_A,
                                                 IdxT lda,
                                                 const char* d_keys,
                                                 const WeightT* d_weights,
                                                 IdxT nrows,
                                                 IdxT ncols,
                                                 IdxT nkeys,
                                                 SumsT* d_sums)
{
  typedef typename std::iterator_traits<DataIteratorT>::value_type DataType;
  typedef cub::BlockReduce<quad<SumsT>, SUM_ROWS_SMALL_K_DIMX> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (IdxT idim = static_cast<IdxT>(blockIdx.y); idim < ncols; idim += gridDim.y) {
    if (idim != static_cast<IdxT>(blockIdx.y)) __syncthreads(); // we're reusing temp_storage
    // threadIdx.x stores partial sum for current dim and key=threadIdx.x in this reg
    quad<SumsT> thread_sums;
    thread_sums.x = 0.0;
    thread_sums.y = 0.0;
    thread_sums.z = 0.0;
    thread_sums.w = 0.0;
    // May use vectorized load - not necessary for doubles
    for (IdxT block_offset_irow = blockIdx.x * blockDim.x;
         block_offset_irow < nrows; // we will syncthreads() inside the loop, no CTA divergence
         block_offset_irow += blockDim.x * gridDim.x) {
      IdxT irow = block_offset_irow + threadIdx.x;
      DataType val = (irow < nrows) ? d_A[irow * lda + idim] : 0.0;
      if (d_weights && irow < nrows) { val = val * d_weights[irow]; }
      // we are not reusing the keys - after profiling
      // d_keys is mainly loaded from L2, and this kernel is DRAM BW bounded
      // (experimentation gave a 10% speed up - not worth the many code lines added)
      // Out-of-range rows get a sentinel key that matches none of the buckets.
      IdxT row_key = (irow < nrows) ? d_keys[irow] : std::numeric_limits<IdxT>::max();
      thread_sums.x += (row_key == 0) ? static_cast<SumsT>(val) : 0.0;
      thread_sums.y += (row_key == 1) ? static_cast<SumsT>(val) : 0.0;
      thread_sums.z += (row_key == 2) ? static_cast<SumsT>(val) : 0.0;
      thread_sums.w += (row_key == 3) ? static_cast<SumsT>(val) : 0.0;
    }
    // End of column
    // Saving local sums back to global mem
    // Strided access
    // Reducing by key
    thread_sums = BlockReduce(temp_storage).Reduce(thread_sums, quadSum<SumsT>());
    if (threadIdx.x < 32) {
      // We only need 4
      // Broadcast the reduced quad from lane 0 to the first warp, then the
      // first nkeys lanes each commit one component with a single atomic.
      thread_sums = cub::ShuffleIndex<32>(thread_sums, 0, 0xffffffff);
      if (static_cast<IdxT>(threadIdx.x) < nkeys) {
        if (threadIdx.x == 0) raft::myAtomicAdd(&d_sums[threadIdx.x * ncols + idim], thread_sums.x);
        if (threadIdx.x == 1) raft::myAtomicAdd(&d_sums[threadIdx.x * ncols + idim], thread_sums.y);
        if (threadIdx.x == 2) raft::myAtomicAdd(&d_sums[threadIdx.x * ncols + idim], thread_sums.z);
        if (threadIdx.x == 3) raft::myAtomicAdd(&d_sums[threadIdx.x * ncols + idim], thread_sums.w);
      }
    }
  }
}
template <typename DataIteratorT, typename WeightT, typename SumsT, typename IdxT>
void sum_rows_by_key_small_nkeys(const DataIteratorT d_A,
                                 IdxT lda,
                                 const char* d_keys,
                                 const WeightT* d_weights,
                                 IdxT nrows,
                                 IdxT ncols,
                                 IdxT nkeys,
                                 SumsT* d_sums,
                                 cudaStream_t st)
{
  // 1D blocks over rows (x), grid's y dimension strides over columns.
  // The block's y must stay 1 — the kernel indexes lanes with threadIdx.x only.
  const dim3 block(SUM_ROWS_SMALL_K_DIMX, 1, 1);
  dim3 grid;
  grid.x = std::min(static_cast<unsigned>(raft::ceildiv(nrows, (IdxT)SUM_ROWS_SMALL_K_DIMX)), 32u);
  grid.y = std::min(static_cast<unsigned>(ncols), MAX_BLOCKS);
  sum_rows_by_key_small_nkeys_kernel<<<grid, block, 0, st>>>(
    d_A, lda, d_keys, d_weights, nrows, ncols, nkeys, d_sums);
}
//
// Reduce by keys - large number of keys
// Computing a "weighted histogram" with local histograms in smem
// Keeping it simple - not optimized
//
#define SUM_ROWS_BY_KEY_LARGE_K_MAX_K 1024
/**
 * Kernel for the large-key, column-major path: each block keeps a shared-memory
 * histogram of per-key partial sums for one column at a time, then flushes
 * non-zero entries to d_sums with atomics. Keys are offset by key_offset and
 * must fall in [key_offset, key_offset + SUM_ROWS_BY_KEY_LARGE_K_MAX_K).
 */
template <typename DataIteratorT,
          typename KeysIteratorT,
          typename WeightT,
          typename SumsT,
          typename IdxT>
RAFT_KERNEL sum_rows_by_key_large_nkeys_kernel_colmajor(const DataIteratorT d_A,
                                                        IdxT lda,
                                                        KeysIteratorT d_keys,
                                                        const WeightT* d_weights,
                                                        IdxT nrows,
                                                        IdxT ncols,
                                                        int key_offset,
                                                        IdxT nkeys,
                                                        SumsT* d_sums)
{
  typedef typename std::iterator_traits<KeysIteratorT>::value_type KeyType;
  typedef typename std::iterator_traits<DataIteratorT>::value_type DataType;
  // Per-block histogram: one shared accumulator per (local) key.
  __shared__ SumsT local_sums[SUM_ROWS_BY_KEY_LARGE_K_MAX_K];
  for (IdxT local_key = threadIdx.x; local_key < nkeys; local_key += blockDim.x)
    local_sums[local_key] = 0.0;
  for (IdxT idim = blockIdx.y; idim < ncols; idim += gridDim.y) {
    __syncthreads(); // local_sums
    // At this point local_sums is full of zeros
    for (IdxT irow = blockIdx.x * blockDim.x + threadIdx.x; irow < nrows;
         irow += blockDim.x * gridDim.x) {
      // Branch div in this loop - not an issue with current code
      DataType val = d_A[idim * lda + irow];
      if (d_weights) val = val * d_weights[irow];
      IdxT local_key = d_keys[irow] - key_offset;
      // We could load next val here
      raft::myAtomicAdd(&local_sums[local_key], static_cast<SumsT>(val));
    }
    __syncthreads(); // local_sums
    // Flush the non-zero buckets to global memory and reset them for the
    // next column iteration (zero buckets are already zero).
    for (IdxT local_key = threadIdx.x; local_key < nkeys; local_key += blockDim.x) {
      SumsT local_sum = local_sums[local_key];
      if (local_sum != 0.0) {
        KeyType global_key = key_offset + local_key;
        raft::myAtomicAdd(&d_sums[global_key * ncols + idim], local_sum);
        local_sums[local_key] = 0.0;
      }
    }
  }
}
/**
 * @brief Launch the column-major large-key reduction (unweighted).
 *
 * Bug fix: the kernel signature takes a weights pointer between d_keys and
 * nrows, but the previous launch statement omitted it, so this template could
 * not compile when instantiated. This launcher has no weights parameter, so
 * we pass a typed nullptr (WeightT deduced as SumsT); the kernel skips the
 * weighting when the pointer is null.
 */
template <typename DataIteratorT, typename KeysIteratorT, typename SumsT, typename IdxT>
void sum_rows_by_key_large_nkeys_colmajor(const DataIteratorT d_A,
                                          IdxT lda,
                                          KeysIteratorT d_keys,
                                          IdxT nrows,
                                          IdxT ncols,
                                          int key_offset,
                                          IdxT nkeys,
                                          SumsT* d_sums,
                                          cudaStream_t st)
{
  dim3 grid, block;
  block.x = SUM_ROWS_SMALL_K_DIMX;
  block.y = 1; // Necessary
  grid.x = raft::ceildiv(nrows, (IdxT)block.x);
  grid.x = std::min(grid.x, 32u);
  grid.y = ncols;
  grid.y = std::min(grid.y, MAX_BLOCKS);
  sum_rows_by_key_large_nkeys_kernel_colmajor<<<grid, block, 0, st>>>(
    d_A, lda, d_keys, static_cast<const SumsT*>(nullptr), nrows, ncols, key_offset, nkeys, d_sums);
}
// Row-major large-nkeys reduction: one thread per matrix element. Thread gid
// maps to element (i = gid / ncols, j = gid % ncols); its (optionally
// weighted) value is atomically added into output row d_keys[i]. d_sums is
// indexed as [key * ncols + j], i.e. row-major (nkeys x ncols).
template <typename DataIteratorT,
          typename KeysIteratorT,
          typename WeightT,
          typename SumsT,
          typename IdxT>
RAFT_KERNEL sum_rows_by_key_large_nkeys_kernel_rowmajor(const DataIteratorT d_A,
                                                        IdxT lda,
                                                        const WeightT* d_weights,
                                                        KeysIteratorT d_keys,
                                                        IdxT nrows,
                                                        IdxT ncols,
                                                        SumsT* d_sums)
{
  // blockIdx.x is widened to IdxT before the multiply so the flat index does
  // not overflow int when IdxT is a 64-bit type.
  IdxT gid = threadIdx.x + (blockDim.x * static_cast<IdxT>(blockIdx.x));
  IdxT j = gid % ncols;
  IdxT i = gid / ncols;
  if (i >= nrows) return;
  IdxT l = static_cast<IdxT>(d_keys[i]);
  SumsT val = d_A[j + lda * i];
  if (d_weights != nullptr) val *= d_weights[i];
  raft::myAtomicAdd(&d_sums[j + ncols * l], val);
}
// Launches the one-thread-per-element row-major reduction kernel.
// NOTE(review): assumes nrows * ncols fits in IdxT — the kernel also flattens
// its index in IdxT, so callers with narrow IdxT must guarantee this.
template <typename DataIteratorT,
          typename KeysIteratorT,
          typename WeightT,
          typename SumsT,
          typename IdxT>
void sum_rows_by_key_large_nkeys_rowmajor(const DataIteratorT d_A,
                                          IdxT lda,
                                          const KeysIteratorT d_keys,
                                          const WeightT* d_weights,
                                          IdxT nrows,
                                          IdxT ncols,
                                          SumsT* d_sums,
                                          cudaStream_t st)
{
  uint32_t block_dim = 128;
  auto grid_dim      = static_cast<uint32_t>(ceildiv<IdxT>(nrows * ncols, (IdxT)block_dim));
  sum_rows_by_key_large_nkeys_kernel_rowmajor<<<grid_dim, block_dim, 0, st>>>(
    d_A, lda, d_weights, d_keys, nrows, ncols, d_sums);
  // Kernel launches do not return errors; check for invalid configurations
  // (consistent with the other launchers in this module).
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
 * @brief Computes the weighted reduction of matrix rows for each given key
 *
 * @tparam DataIteratorT Random-access iterator type, for reading input matrix
 *                       (may be a simple pointer type)
 * @tparam KeysIteratorT Random-access iterator type, for reading input keys
 *                       (may be a simple pointer type)
 * @tparam WeightT       Type of the weights applied to each observation
 * @tparam SumsT         Type of the output sums
 * @tparam IdxT          Index type
 *
 * @param[in]  d_A         Input data array (lda x nrows)
 * @param[in]  lda         Real row size for input data, d_A
 * @param[in]  d_keys      Keys for each row (1 x nrows)
 * @param[in]  d_weights   Weights for each observation in d_A (1 x nrows);
 *                         pass nullptr for an unweighted reduction
 * @param[out] d_keys_char Scratch memory for conversion of keys to char
 * @param[in]  nrows       Number of rows in d_A and d_keys
 * @param[in]  ncols       Number of data columns in d_A
 * @param[in]  nkeys       Number of unique keys in d_keys
 * @param[out] d_sums      Row sums by key (ncols x d_keys)
 * @param[in]  stream      CUDA stream
 * @param[in]  reset_sums  Whether to reset the output sums to zero before reducing
 */
template <typename DataIteratorT,
          typename KeysIteratorT,
          typename WeightT,
          typename SumsT,
          typename IdxT>
void reduce_rows_by_key(const DataIteratorT d_A,
                        IdxT lda,
                        KeysIteratorT d_keys,
                        const WeightT* d_weights,
                        char* d_keys_char,
                        IdxT nrows,
                        IdxT ncols,
                        IdxT nkeys,
                        SumsT* d_sums,
                        cudaStream_t stream,
                        bool reset_sums)
{
  // The kernels below accumulate with atomics, so the output buffer must
  // start at zero. Widen to size_t before multiplying so the byte count
  // cannot overflow when IdxT is a narrow type, and check the API result
  // (consistent with RAFT error-handling conventions).
  if (reset_sums) {
    RAFT_CUDA_TRY(cudaMemsetAsync(
      d_sums, 0, static_cast<size_t>(ncols) * static_cast<size_t>(nkeys) * sizeof(SumsT), stream));
  }
  if (d_keys_char != nullptr && nkeys <= SUM_ROWS_BY_KEY_SMALL_K_MAX_K) {
    // sum_rows_by_key_small_k is BW bounded. d_keys is loaded ncols time - avoiding wasting BW
    // with doubles we have ~20% speed up - with floats we can hope something around 2x
    // Converting d_keys to char
    convert_array(d_keys_char, d_keys, nrows, stream);
    sum_rows_by_key_small_nkeys(
      d_A, lda, d_keys_char, d_weights, nrows, ncols, nkeys, d_sums, stream);
  } else {
    sum_rows_by_key_large_nkeys_rowmajor(d_A, lda, d_keys, d_weights, nrows, ncols, d_sums, stream);
  }
}
/**
* @brief Computes the reduction of matrix rows for each given key
* @tparam DataIteratorT Random-access iterator type, for reading input matrix (may be a simple
* pointer type)
* @tparam KeysIteratorT Random-access iterator type, for reading input keys (may be a simple
* pointer type)
* @tparam SumsT Type of the output sums
* @tparam IdxT Index type
* @param[in] d_A Input data array (lda x nrows)
* @param[in] lda Real row size for input data, d_A
* @param[in] d_keys Keys for each row (1 x nrows)
* @param d_keys_char Scratch memory for conversion of keys to char
* @param[in] nrows Number of rows in d_A and d_keys
* @param[in] ncols Number of data columns in d_A
* @param[in] nkeys Number of unique keys in d_keys
* @param[out] d_sums Row sums by key (ncols x d_keys)
* @param[in] stream CUDA stream
*/
template <typename DataIteratorT, typename KeysIteratorT, typename SumsT, typename IdxT>
void reduce_rows_by_key(const DataIteratorT d_A,
                        IdxT lda,
                        KeysIteratorT d_keys,
                        char* d_keys_char,
                        IdxT nrows,
                        IdxT ncols,
                        IdxT nkeys,
                        SumsT* d_sums,
                        cudaStream_t stream,
                        bool reset_sums)
{
  // Unweighted variant: forward to the weighted implementation with a null
  // weights pointer, typed so that WeightT can still be deduced.
  using DataType = typename std::iterator_traits<DataIteratorT>::value_type;
  const DataType* no_weights = nullptr;
  reduce_rows_by_key(
    d_A, lda, d_keys, no_weights, d_keys_char, nrows, ncols, nkeys, d_sums, stream, reset_sums);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/divide.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/host_mdspan.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
namespace raft {
namespace linalg {
namespace detail {
// Element-wise division by a scalar: out[i] = in[i] / scalar, delegated to
// the generic unary-op primitive on the given stream.
template <typename InT, typename OutT = InT, typename IdxType = int>
void divideScalar(OutT* out, const InT* in, InT scalar, IdxType len, cudaStream_t stream)
{
  const auto div_op = raft::div_const_op<InT>(scalar);
  raft::linalg::unaryOp(out, in, len, div_op, stream);
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/transpose.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cublas_wrappers.hpp"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
namespace raft {
namespace linalg {
namespace detail {
// Out-of-place transpose of a column-major n_rows x n_cols matrix via
// cublasgeam: out = 1 * in^T + 0 * out. Because beta == 0, the B operand
// (passed as `out` below) is never read, per the cublasgeam contract.
// NOTE(review): `in` and `out` are presumably distinct buffers — cublasgeam
// only supports in-place operation under specific aliasing rules; confirm at
// call sites.
template <typename math_t>
void transpose(raft::resources const& handle,
               math_t* in,
               math_t* out,
               int n_rows,
               int n_cols,
               cudaStream_t stream)
{
  cublasHandle_t cublas_h = resource::get_cublas_handle(handle);
  RAFT_CUBLAS_TRY(cublasSetStream(cublas_h, stream));
  int out_n_rows = n_cols;
  int out_n_cols = n_rows;
  const math_t alpha = 1.0;
  const math_t beta = 0.0;
  RAFT_CUBLAS_TRY(cublasgeam(cublas_h,
                             CUBLAS_OP_T,
                             CUBLAS_OP_N,
                             out_n_rows,
                             out_n_cols,
                             &alpha,
                             in,
                             n_rows,
                             &beta,
                             out,
                             out_n_rows,
                             out,
                             out_n_rows,
                             stream));
}
// In-place transpose of a square n x n matrix: every strictly-lower-triangle
// element is swapped with its mirror across the diagonal by a device lambda,
// one thread per element (only half of them perform a swap).
template <typename math_t>
void transpose(math_t* inout, int n, cudaStream_t stream)
{
  const int dim = n;
  const int total = n * n;
  math_t* data = inout;
  auto first = thrust::make_counting_iterator<int>(0);
  thrust::for_each(rmm::exec_policy(stream), first, first + total, [=] __device__(int idx) {
    const int r = idx % dim;
    const int c = idx / dim;
    // Swap (r, c) with (c, r) exactly once per off-diagonal pair.
    if (r < c) {
      math_t tmp = data[r * dim + c];
      data[r * dim + c] = data[c * dim + r];
      data[c * dim + r] = tmp;
    }
  });
}
// Transpose between two row-major mdspans using cublasgeam. cuBLAS is
// column-major, so a row-major matrix is reinterpreted as its column-major
// transpose; with op(A) = A^T the result lands directly in `out`'s layout.
// The B operand is a null pointer, which cublasgeam permits when beta == 0
// (B is never dereferenced in that case).
template <typename T, typename IndexType, typename LayoutPolicy, typename AccessorPolicy>
void transpose_row_major_impl(
  raft::resources const& handle,
  raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> in,
  raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> out)
{
  auto out_n_rows = in.extent(1);
  auto out_n_cols = in.extent(0);
  T constexpr kOne = 1;
  T constexpr kZero = 0;
  // Row-major layout: stride(0) is the leading dimension.
  CUBLAS_TRY(cublasgeam(resource::get_cublas_handle(handle),
                        CUBLAS_OP_T,
                        CUBLAS_OP_N,
                        out_n_cols,
                        out_n_rows,
                        &kOne,
                        in.data_handle(),
                        in.stride(0),
                        &kZero,
                        static_cast<T*>(nullptr),
                        out.stride(0),
                        out.data_handle(),
                        out.stride(0),
                        resource::get_cuda_stream(handle)));
}
// Transpose between two column-major mdspans using cublasgeam
// (out = 1 * in^T + 0 * out). The B operand is a null pointer, which
// cublasgeam permits when beta == 0 (B is never dereferenced in that case).
template <typename T, typename IndexType, typename LayoutPolicy, typename AccessorPolicy>
void transpose_col_major_impl(
  raft::resources const& handle,
  raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> in,
  raft::mdspan<T, raft::matrix_extent<IndexType>, LayoutPolicy, AccessorPolicy> out)
{
  auto out_n_rows = in.extent(1);
  auto out_n_cols = in.extent(0);
  T constexpr kOne = 1;
  T constexpr kZero = 0;
  // Column-major layout: stride(1) is the leading dimension.
  CUBLAS_TRY(cublasgeam(resource::get_cublas_handle(handle),
                        CUBLAS_OP_T,
                        CUBLAS_OP_N,
                        out_n_rows,
                        out_n_cols,
                        &kOne,
                        in.data_handle(),
                        in.stride(1),
                        &kZero,
                        static_cast<T*>(nullptr),
                        out.stride(1),
                        out.data_handle(),
                        out.stride(1),
                        resource::get_cuda_stream(handle)));
}
}; // end namespace detail
}; // end namespace linalg
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/add.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace linalg {
namespace detail {
// Element-wise scalar addition: out[i] = in[i] + scalar, delegated to the
// generic unary-op primitive on the given stream.
template <typename InT, typename OutT = InT, typename IdxType = int>
void addScalar(OutT* out, const InT* in, InT scalar, IdxType len, cudaStream_t stream)
{
  const auto plus_scalar = raft::add_const_op<InT>(scalar);
  raft::linalg::unaryOp(out, in, len, plus_scalar, stream);
}
// Element-wise vector addition: out[i] = in1[i] + in2[i], delegated to the
// generic binary-op primitive on the given stream.
template <typename InT, typename OutT = InT, typename IdxType = int>
void add(OutT* out, const InT* in1, const InT* in2, IdxType len, cudaStream_t stream)
{
  raft::linalg::binaryOp(out, in1, in2, len, raft::add_op{}, stream);
}
// Adds a device-resident scalar to every element:
// outDev[i] = inDev[i] + *singleScalarDev. One thread per element, with a
// tail guard for grids that overshoot len.
template <class InT, typename IdxType, typename OutT = InT>
RAFT_KERNEL add_dev_scalar_kernel(OutT* outDev,
                                  const InT* inDev,
                                  const InT* singleScalarDev,
                                  IdxType len)
{
  // Widen blockIdx.x before multiplying to avoid overflow for large grids.
  const IdxType tid = static_cast<IdxType>(blockIdx.x) * static_cast<IdxType>(blockDim.x) +
                      threadIdx.x;
  if (tid >= len) return;
  outDev[tid] = inDev[tid] + *singleScalarDev;
}
// Launches add_dev_scalar_kernel over `len` elements on `stream`; the scalar
// stays on the device (no host round-trip).
template <typename InT, typename OutT = InT, typename IdxType = int>
void addDevScalar(
  OutT* outDev, const InT* inDev, const InT* singleScalarDev, IdxType len, cudaStream_t stream)
{
  // TODO: block dimension has not been tuned
  constexpr int kThreadsPerBlock = 256;
  const dim3 block(kThreadsPerBlock);
  const dim3 grid(raft::ceildiv(len, (IdxType)kThreadsPerBlock));
  add_dev_scalar_kernel<<<grid, block, 0, stream>>>(outDev, inDev, singleScalarDev, len);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace linalg
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/coalesced_reduction-inl.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/core/nvtx.hpp>
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
namespace detail {
// Launch policy for the "thin" reduction kernel: each row is reduced by one
// logical (sub-)warp of `warpSize` lanes, with `rpb` rows handled per block.
template <int warpSize, int rpb>
struct ReductionThinPolicy {
  static constexpr int LogicalWarpSize = warpSize;
  static constexpr int RowsPerBlock = rpb;
  static constexpr int ThreadsPerBlock = LogicalWarpSize * RowsPerBlock;
};
// Thin variant: threadIdx.x indexes lanes within a logical warp and
// threadIdx.y selects one of RowsPerBlock rows per block. Each logical warp
// strides over its row's D elements, then combines per-lane partials with a
// logical-warp reduction. When `inplace`, the fresh result is merged with the
// existing dots[i] via reduce_op before final_op is applied.
template <typename Policy,
          typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL __launch_bounds__(Policy::ThreadsPerBlock)
  coalescedReductionThinKernel(OutType* dots,
                               const InType* data,
                               IdxType D,
                               IdxType N,
                               OutType init,
                               MainLambda main_op,
                               ReduceLambda reduce_op,
                               FinalLambda final_op,
                               bool inplace = false)
{
  // Row handled by this logical warp; guard the tail when N is not a
  // multiple of RowsPerBlock.
  IdxType i = threadIdx.y + (Policy::RowsPerBlock * static_cast<IdxType>(blockIdx.x));
  if (i >= N) return;
  OutType acc = init;
  for (IdxType j = threadIdx.x; j < D; j += Policy::LogicalWarpSize) {
    acc = reduce_op(acc, main_op(data[j + (D * i)], j));
  }
  // After the warp reduce, lane 0 holds the combined value for row i.
  acc = raft::logicalWarpReduce<Policy::LogicalWarpSize>(acc, reduce_op);
  if (threadIdx.x == 0) {
    if (inplace) {
      dots[i] = final_op(reduce_op(dots[i], acc));
    } else {
      dots[i] = final_op(acc);
    }
  }
}
// Host launcher for the thin kernel: one logical warp per row,
// Policy::RowsPerBlock rows per block. Best when D is small and/or N is
// large enough to fill the device.
template <typename Policy,
          typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionThin(OutType* dots,
                            const InType* data,
                            IdxType D,
                            IdxType N,
                            OutType init,
                            cudaStream_t stream,
                            bool inplace = false,
                            MainLambda main_op = raft::identity_op(),
                            ReduceLambda reduce_op = raft::add_op(),
                            FinalLambda final_op = raft::identity_op())
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "coalescedReductionThin<%d,%d>", Policy::LogicalWarpSize, Policy::RowsPerBlock);
  dim3 threads(Policy::LogicalWarpSize, Policy::RowsPerBlock, 1);
  dim3 blocks(ceildiv<IdxType>(N, Policy::RowsPerBlock), 1, 1);
  coalescedReductionThinKernel<Policy>
    <<<blocks, threads, 0, stream>>>(dots, data, D, N, init, main_op, reduce_op, final_op, inplace);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Picks a thin-kernel policy from the row length D: narrower logical warps
// for shorter rows so lanes are not left idle, up to a full 32-lane warp.
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionThinDispatcher(OutType* dots,
                                      const InType* data,
                                      IdxType D,
                                      IdxType N,
                                      OutType init,
                                      cudaStream_t stream,
                                      bool inplace = false,
                                      MainLambda main_op = raft::identity_op(),
                                      ReduceLambda reduce_op = raft::add_op(),
                                      FinalLambda final_op = raft::identity_op())
{
  if (D <= IdxType(2)) {
    coalescedReductionThin<ReductionThinPolicy<2, 64>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else if (D <= IdxType(4)) {
    coalescedReductionThin<ReductionThinPolicy<4, 32>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else if (D <= IdxType(8)) {
    coalescedReductionThin<ReductionThinPolicy<8, 16>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else if (D <= IdxType(16)) {
    coalescedReductionThin<ReductionThinPolicy<16, 8>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else {
    coalescedReductionThin<ReductionThinPolicy<32, 4>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  }
}
// Medium variant: one block of TPB threads per row (blockIdx.x == row index),
// combined with cub::BlockReduce. Used when N is too small to saturate the
// GPU with the thin kernel. When `inplace`, the fresh result is merged with
// the existing output via reduce_op before final_op is applied.
template <int TPB,
          typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda,
          typename FinalLambda>
RAFT_KERNEL __launch_bounds__(TPB) coalescedReductionMediumKernel(OutType* dots,
                                                                  const InType* data,
                                                                  IdxType D,
                                                                  IdxType N,
                                                                  OutType init,
                                                                  MainLambda main_op,
                                                                  ReduceLambda reduce_op,
                                                                  FinalLambda final_op,
                                                                  bool inplace = false)
{
  typedef cub::BlockReduce<OutType, TPB, cub::BLOCK_REDUCE_RAKING> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  OutType thread_data = init;
  IdxType rowStart = blockIdx.x * D;
  for (IdxType i = threadIdx.x; i < D; i += TPB) {
    IdxType idx = rowStart + i;
    thread_data = reduce_op(thread_data, main_op(data[idx], i));
  }
  // Per the cub::BlockReduce contract, only thread 0 receives the full
  // aggregate.
  OutType acc = BlockReduce(temp_storage).Reduce(thread_data, reduce_op);
  if (threadIdx.x == 0) {
    if (inplace) {
      dots[blockIdx.x] = final_op(reduce_op(dots[blockIdx.x], acc));
    } else {
      dots[blockIdx.x] = final_op(acc);
    }
  }
}
// Host launcher for the medium kernel: grid of N blocks, one block per row.
template <int TPB,
          typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionMedium(OutType* dots,
                              const InType* data,
                              IdxType D,
                              IdxType N,
                              OutType init,
                              cudaStream_t stream,
                              bool inplace = false,
                              MainLambda main_op = raft::identity_op(),
                              ReduceLambda reduce_op = raft::add_op(),
                              FinalLambda final_op = raft::identity_op())
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("coalescedReductionMedium<%d>", TPB);
  coalescedReductionMediumKernel<TPB>
    <<<N, TPB, 0, stream>>>(dots, data, D, N, init, main_op, reduce_op, final_op, inplace);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// Dispatcher for the medium path; currently always uses 256 threads/block
// (see the note below about when smaller blocks might matter).
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionMediumDispatcher(OutType* dots,
                                        const InType* data,
                                        IdxType D,
                                        IdxType N,
                                        OutType init,
                                        cudaStream_t stream,
                                        bool inplace = false,
                                        MainLambda main_op = raft::identity_op(),
                                        ReduceLambda reduce_op = raft::add_op(),
                                        FinalLambda final_op = raft::identity_op())
{
  // Note: for now, this kernel is only used when D > 256. If this changes in the future, use
  // smaller block sizes when relevant.
  coalescedReductionMedium<256>(
    dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
}
// Launch policy for the "thick" two-step reduction: each row is processed by
// `bpr` blocks of `tpb` threads; BlockStride is the element stride between
// successive iterations of one block's strided loop over the row.
template <int tpb, int bpr>
struct ReductionThickPolicy {
  static constexpr int ThreadsPerBlock = tpb;
  static constexpr int BlocksPerRow = bpr;
  static constexpr int BlockStride = tpb * bpr;
};
// First step of the thick reduction. Grid is (N rows) x (BlocksPerRow): each
// block reduces its strided slice of a row and writes one partial per block
// into `buffer` (laid out N x BlocksPerRow, row-major). main_op is applied
// here; final_op is deferred to the follow-up thin kernel.
template <typename Policy,
          typename InType,
          typename OutType,
          typename IdxType,
          typename MainLambda,
          typename ReduceLambda>
RAFT_KERNEL __launch_bounds__(Policy::ThreadsPerBlock)
  coalescedReductionThickKernel(OutType* buffer,
                                const InType* data,
                                IdxType D,
                                IdxType N,
                                OutType init,
                                MainLambda main_op,
                                ReduceLambda reduce_op)
{
  typedef cub::BlockReduce<OutType, Policy::ThreadsPerBlock, cub::BLOCK_REDUCE_RAKING> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  OutType thread_data = init;
  IdxType rowStart = blockIdx.x * D;
  for (IdxType i = blockIdx.y * Policy::ThreadsPerBlock + threadIdx.x; i < D;
       i += Policy::BlockStride) {
    IdxType idx = rowStart + i;
    thread_data = reduce_op(thread_data, main_op(data[idx], i));
  }
  // Per the cub::BlockReduce contract, only thread 0 receives the full
  // aggregate for this block's slice.
  OutType acc = BlockReduce(temp_storage).Reduce(thread_data, reduce_op);
  if (threadIdx.x == 0) { buffer[Policy::BlocksPerRow * blockIdx.x + blockIdx.y] = acc; }
}
// Thick variant for very large D and small N: see the two-step description
// in the body. Allocates an N x BlocksPerRow scratch buffer on `stream`.
template <typename ThickPolicy,
          typename ThinPolicy,
          typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionThick(OutType* dots,
                             const InType* data,
                             IdxType D,
                             IdxType N,
                             OutType init,
                             cudaStream_t stream,
                             bool inplace = false,
                             MainLambda main_op = raft::identity_op(),
                             ReduceLambda reduce_op = raft::add_op(),
                             FinalLambda final_op = raft::identity_op())
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "coalescedReductionThick<%d,%d>", ThickPolicy::ThreadsPerBlock, ThickPolicy::BlocksPerRow);
  dim3 threads(ThickPolicy::ThreadsPerBlock, 1, 1);
  dim3 blocks(N, ThickPolicy::BlocksPerRow, 1);
  rmm::device_uvector<OutType> buffer(N * ThickPolicy::BlocksPerRow, stream);
  /* We apply a two-step reduction:
   *  1. coalescedReductionThickKernel reduces the [N x D] input data to [N x BlocksPerRow]. It
   *     applies the main_op but not the final op.
   *  2. coalescedReductionThinKernel reduces [N x BlocksPerRow] to [N x 1]. It doesn't apply any
   *     main_op but applies final_op. If in-place, the existing and new values are reduced.
   */
  coalescedReductionThickKernel<ThickPolicy>
    <<<blocks, threads, 0, stream>>>(buffer.data(), data, D, N, init, main_op, reduce_op);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  coalescedReductionThin<ThinPolicy>(dots,
                                     buffer.data(),
                                     static_cast<IdxType>(ThickPolicy::BlocksPerRow),
                                     N,
                                     init,
                                     stream,
                                     inplace,
                                     raft::identity_op(),
                                     reduce_op,
                                     final_op);
}
// Picks a thick-kernel policy from D: more blocks per row for longer rows.
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReductionThickDispatcher(OutType* dots,
                                       const InType* data,
                                       IdxType D,
                                       IdxType N,
                                       OutType init,
                                       cudaStream_t stream,
                                       bool inplace = false,
                                       MainLambda main_op = raft::identity_op(),
                                       ReduceLambda reduce_op = raft::add_op(),
                                       FinalLambda final_op = raft::identity_op())
{
  // Note: multiple elements per thread to take advantage of the sequential reduction and loop
  // unrolling
  if (D < IdxType(32768)) {
    coalescedReductionThick<ReductionThickPolicy<256, 32>, ReductionThinPolicy<32, 4>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else {
    coalescedReductionThick<ReductionThickPolicy<256, 64>, ReductionThinPolicy<32, 4>>(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  }
}
// Primitive to perform reductions along the coalesced dimension of the matrix, i.e. reduce along
// rows for row major or reduce along columns for column major layout. Can do an inplace reduction
// adding to original values of dots if requested.
template <typename InType,
          typename OutType = InType,
          typename IdxType = int,
          typename MainLambda = raft::identity_op,
          typename ReduceLambda = raft::add_op,
          typename FinalLambda = raft::identity_op>
void coalescedReduction(OutType* dots,
                        const InType* data,
                        IdxType D,
                        IdxType N,
                        OutType init,
                        cudaStream_t stream,
                        bool inplace = false,
                        MainLambda main_op = raft::identity_op(),
                        ReduceLambda reduce_op = raft::add_op(),
                        FinalLambda final_op = raft::identity_op())
{
  /* The primitive selects one of three implementations based on heuristics:
   * - Thin: very efficient when D is small and/or N is large
   * - Thick: used when N is very small and D very large
   * - Medium: used when N is too small to fill the GPU with the thin kernel
   */
  const IdxType numSMs = raft::getMultiProcessorCount();
  // NOTE(review): the thresholds below appear empirically tuned — confirm
  // against benchmarks before adjusting.
  if (D <= IdxType(256) || N >= IdxType(4) * numSMs) {
    coalescedReductionThinDispatcher(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else if (N < numSMs && D >= IdxType(16384)) {
    coalescedReductionThickDispatcher(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  } else {
    coalescedReductionMediumDispatcher(
      dots, data, D, N, init, stream, inplace, main_op, reduce_op, final_op);
  }
}
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/gemm.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include "cublas_wrappers.hpp"
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace linalg {
namespace detail {
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: C = alpha .* opA(A) * opB(B) + beta .* C
*
* @tparam math_t the element type
* @tparam DevicePointerMode whether pointers alpha, beta point to device memory
* @param [in] handle raft handle
* @param [in] trans_a cublas transpose op for A
* @param [in] trans_b cublas transpose op for B
* @param [in] m number of rows of C
* @param [in] n number of columns of C
* @param [in] k number of rows of opB(B) / number of columns of opA(A)
* @param [in] alpha host or device scalar
* @param [in] A such a matrix that the shape of column-major opA(A) is [m, k]
* @param [in] lda leading dimension of A
* @param [in] B such a matrix that the shape of column-major opA(B) is [k, n]
* @param [in] ldb leading dimension of B
* @param [in] beta host or device scalar
* @param [inout] C column-major matrix of size [m, n]
* @param [in] ldc leading dimension of C
* @param [in] stream
*/
template <typename math_t, bool DevicePointerMode = false>
void gemm(raft::resources const& handle,
          const bool trans_a,
          const bool trans_b,
          const int m,
          const int n,
          const int k,
          const math_t* alpha,
          const math_t* A,
          const int lda,
          const math_t* B,
          const int ldb,
          const math_t* beta,
          math_t* C,
          const int ldc,
          cudaStream_t stream)
{
  auto cublas_h = raft::resource::get_cublas_handle(handle);
  // Scoped guard selecting host vs. device pointer mode for alpha/beta
  // (defined in cublas_wrappers.hpp); presumably restores the previous mode
  // on destruction — confirm in the wrapper header.
  cublas_device_pointer_mode<DevicePointerMode> pmode(cublas_h);
  RAFT_CUBLAS_TRY(cublasgemm(cublas_h,
                             trans_a ? CUBLAS_OP_T : CUBLAS_OP_N,
                             trans_b ? CUBLAS_OP_T : CUBLAS_OP_N,
                             m,
                             n,
                             k,
                             alpha,
                             A,
                             lda,
                             B,
                             ldb,
                             beta,
                             C,
                             ldc,
                             stream));
}
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam math_t the type of input/output matrices
* @param handle raft handle
* @param a input matrix
* @param n_rows_a number of rows of A
* @param n_cols_a number of columns of A
* @param b input matrix
* @param c output matrix
* @param n_rows_c number of rows of C
* @param n_cols_c number of columns of C
* @param trans_a cublas transpose op for A
* @param trans_b cublas transpose op for B
* @param alpha scalar
* @param beta scalar
* @param stream cuda stream
*/
template <typename math_t>
void gemm(raft::resources const& handle,
          const math_t* a,
          int n_rows_a,
          int n_cols_a,
          const math_t* b,
          math_t* c,
          int n_rows_c,
          int n_cols_c,
          cublasOperation_t trans_a,
          cublasOperation_t trans_b,
          math_t alpha,
          math_t beta,
          cudaStream_t stream)
{
  auto cublas_h = raft::resource::get_cublas_handle(handle);
  // GEMM shape: C (m x n) = alpha * opA(A) (m x k) * opB(B) (k x n) + beta * C.
  int m = n_rows_c;
  int n = n_cols_c;
  // Inner dimension: when A is transposed, opA(A)'s columns are A's rows.
  int k = trans_a == CUBLAS_OP_T ? n_rows_a : n_cols_a;
  // Leading dimensions of the stored (untransposed) column-major operands.
  int lda = trans_a == CUBLAS_OP_T ? k : m;
  int ldb = trans_b == CUBLAS_OP_T ? n : k;
  int ldc = m;
  RAFT_CUBLAS_TRY(
    cublasgemm(cublas_h, trans_a, trans_b, m, n, k, &alpha, a, lda, b, ldb, &beta, c, ldc, stream));
}
// Convenience overload with default scalars: computes the plain product
// C = opA(A) * opB(B) (alpha = 1, beta = 0) by forwarding to the full
// alpha/beta overload above.
template <typename math_t>
void gemm(raft::resources const& handle,
          const math_t* a,
          int n_rows_a,
          int n_cols_a,
          const math_t* b,
          math_t* c,
          int n_rows_c,
          int n_cols_c,
          cublasOperation_t trans_a,
          cublasOperation_t trans_b,
          cudaStream_t stream)
{
  const math_t one  = math_t(1);
  const math_t zero = math_t(0);
  gemm(
    handle, a, n_rows_a, n_cols_a, b, c, n_rows_c, n_cols_c, trans_a, trans_b, one, zero, stream);
}
// Layout-flexible GEMM: computes z = alpha * x * y + beta * z where x (M x K),
// y (K x N) and z (M x N) may each independently be row- or column-major.
// alpha/beta are host or device pointers depending on DevicePointerMode.
template <typename T, bool DevicePointerMode = false>
void gemm(raft::resources const& handle,
          T* z,
          T* x,
          T* y,
          int _M,
          int _N,
          int _K,
          bool isZColMajor,
          bool isXColMajor,
          bool isYColMajor,
          cudaStream_t stream,
          T* alpha,
          T* beta)
{
  auto cublas_h = raft::resource::get_cublas_handle(handle);
  cublas_device_pointer_mode<DevicePointerMode> pmode(cublas_h);
  cublasOperation_t trans_a, trans_b;
  T *a, *b, *c;
  int lda, ldb, ldc;
  int M, N, K;
  // This function performs c = a * b. Based on the required output layout,
  // either a = x, b = y or a = y, b = x. In either case c = z.
  if (isZColMajor == true) {
    // Result c is required in column major layout. Thus we perform,
    // z = x * y
    // Using BLAS call c = a * b. Therefore a = x, b = y and c = z
    a = x;
    // If x is in row major layout, cublas needs to transpose x first,
    // therefore trans_a needs to be CUBLAS_OP_T. If x is in column major
    // layout, trans_a needs to be CUBLAS_OP_N.
    trans_a = isXColMajor == true ? CUBLAS_OP_N : CUBLAS_OP_T;
    // Set leading dimension appropriately
    lda = isXColMajor == true ? _M : _K;
    b = y;
    // If y is in row major layout, cublas needs to transpose y first,
    // therefore trans_b needs to be CUBLAS_OP_T. If y is in column major
    // layout, trans_b needs to be CUBLAS_OP_N.
    trans_b = isYColMajor == true ? CUBLAS_OP_N : CUBLAS_OP_T;
    ldb = isYColMajor == true ? _K : _N;
    c = z;
    ldc = _M;
    M = _M;
    N = _N;
    K = _K;
  } else {
    // Result c is required in row major layout. Thus we pick
    // a = y, b = x and c = a * b = y * x
    // cublas produces output matrix only in column major layout. To get output
    // matrix on row major layout, we need to produce transpose of output
    // in column major layout. Therefore we perform,
    // tr(z) = tr(y) * tr(x)
    // we model this using cublas call for c = a * b
    // therefore a = tr(y), b = tr(x) and c = tr(z)
    a = y;
    // If y is in row major layout, it can be interpreted as tr(y) on column
    // major layout. Therefore we can pass trans_a as CUBLAS_OP_N. If y is in
    // column major layout, cublas needs to transpose y first, therefore
    // trans_a needs to be CUBLAS_OP_T
    trans_a = isYColMajor == true ? CUBLAS_OP_T : CUBLAS_OP_N;
    // Set leading dimension appropriately
    lda = isYColMajor == true ? _K : _N;
    b = x;
    // If x is in row major layout, it can be interpreted as tr(x) on column
    // major layout. Therefore we can pass trans_b as CUBLAS_OP_N. If x is in
    // column major layout, cublas needs to transpose x first, therefore
    // trans_b needs to be CUBLAS_OP_T
    trans_b = isXColMajor == true ? CUBLAS_OP_T : CUBLAS_OP_N;
    // Set leading dimension appropriately
    ldb = isXColMajor == true ? _M : _K;
    c = z;
    ldc = _N;
    M = _N;
    N = _M;
    K = _K;
  }
  // Actual cuBLAS call
  RAFT_CUBLAS_TRY(
    cublasgemm(cublas_h, trans_a, trans_b, M, N, K, alpha, a, lda, b, ldb, beta, c, ldc, stream));
}
} // namespace detail
} // namespace linalg
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/coalesced_reduction-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
// The explicit instantiation of raft::linalg::detail::coalescedReduction is not
// forced because there would be too many instances. Instead, we cover the most
// common instantiations with extern template instantiations below.
// Argument order mirrors the template parameters of
// raft::linalg::detail::coalescedReduction:
// (InType, OutType, IdxType, MainLambda, ReduceLambda, FinalLambda).
#define instantiate_raft_linalg_detail_coalescedReduction(                                    \
  InType, OutType, IdxType, MainLambda, ReduceLambda, FinalLambda)                            \
  extern template void raft::linalg::detail::coalescedReduction(OutType* dots,                \
                                                                const InType* data,           \
                                                                IdxType D,                    \
                                                                IdxType N,                    \
                                                                OutType init,                 \
                                                                cudaStream_t stream,          \
                                                                bool inplace,                 \
                                                                MainLambda main_op,           \
                                                                ReduceLambda reduce_op,       \
                                                                FinalLambda final_op)
// Common instantiations, grouped by input type (double first, then float) and
// keyed on the (main_op, reduce_op, final_op) combination.
instantiate_raft_linalg_detail_coalescedReduction(
  double, double, int, raft::identity_op, raft::min_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  double, double, int, raft::sq_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  double, double, int, raft::sq_op, raft::add_op, raft::sqrt_op);
instantiate_raft_linalg_detail_coalescedReduction(
  double, double, int, raft::abs_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  double, double, int, raft::abs_op, raft::max_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, size_t, raft::abs_op, raft::add_op, raft::sqrt_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, int, raft::abs_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, int, raft::identity_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, int, raft::identity_op, raft::min_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, int, raft::sq_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, int, raft::sq_op, raft::add_op, raft::sqrt_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, long, raft::sq_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, size_t, raft::identity_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, size_t, raft::sq_op, raft::add_op, raft::identity_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, size_t, raft::abs_op, raft::max_op, raft::sqrt_op);
instantiate_raft_linalg_detail_coalescedReduction(
  float, float, size_t, raft::sq_op, raft::add_op, raft::sqrt_op);
instantiate_raft_linalg_detail_coalescedReduction(
float, float, unsigned int, raft::sq_op, raft::add_op, raft::identity_op);
#undef instantiate_raft_linalg_detail_coalescedReduction
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/linalg | rapidsai_public_repos/raft/cpp/include/raft/linalg/detail/lstsq.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <common/nvtx.hpp>
#include <raft/common/nvtx.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/detail/cusolver_wrappers.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/qr.cuh>
#include <raft/linalg/svd.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/math.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace linalg {
namespace detail {
namespace {
/** Operate a CUDA event if we're in the concurrent mode; no-op otherwise.
 *
 * Owns at most one cudaEvent_t. When constructed with `concurrent == false`,
 * no event is created and `record` / `wait_by` become no-ops, so callers can
 * use the same code path for single-stream and multi-stream execution.
 */
struct DeviceEvent {
 private:
  cudaEvent_t e;

 public:
  /** @param concurrent create a real (timing-disabled) event only when true. */
  DeviceEvent(bool concurrent)
  {
    if (concurrent)
      RAFT_CUDA_TRY(cudaEventCreateWithFlags(&e, cudaEventDisableTiming));
    else
      e = nullptr;
  }
  ~DeviceEvent()
  {
    // NO_THROW variant: destructors must not propagate exceptions.
    if (e != nullptr) RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(e));
  }
  /** Record the event on `stream` (no-op in non-concurrent mode). */
  void record(cudaStream_t stream)
  {
    if (e != nullptr) RAFT_CUDA_TRY(cudaEventRecord(e, stream));
  }
  /** Make `stream` wait for the work recorded via `record` (no-op otherwise). */
  void wait_by(cudaStream_t stream)
  {
    if (e != nullptr) RAFT_CUDA_TRY(cudaStreamWaitEvent(stream, e, 0u));
  }
  // The event handle is uniquely owned: a copy would trigger a double
  // cudaEventDestroy in the destructor. The original code deleted only
  // copy-assignment, which leaves the implicitly-defaulted copy constructor
  // usable — delete both to rule out accidental copies.
  DeviceEvent(const DeviceEvent& other)            = delete;
  DeviceEvent& operator=(const DeviceEvent& other) = delete;
};
/**
 * @brief Tells if the viewed CUDA stream is implicitly synchronized with the given stream.
 *
 * This can happen e.g.
 *   if the two views point to the same stream
 *   or sometimes when one of them is the legacy default stream.
 */
bool are_implicitly_synchronized(rmm::cuda_stream_view a, rmm::cuda_stream_view b)
{
  // A stream is trivially "synchronized" with itself.
  if (a.value() == b.value()) { return true; }
  // A blocking (non-cudaStreamNonBlocking) stream synchronizes with the
  // legacy default stream, in either direction.
  auto is_blocking = [](rmm::cuda_stream_view s) {
    unsigned int flags = 0;
    RAFT_CUDA_TRY(cudaStreamGetFlags(s.value(), &flags));
    return (flags & cudaStreamNonBlocking) == 0;
  };
  if (a.is_default() && is_blocking(b)) { return true; }
  if (b.is_default() && is_blocking(a)) { return true; }
  return false;
}
/** Elementwise functor: returns `a / b`, except that divisors with magnitude
 * below `eps` are treated as zero and `a` is passed through unchanged.
 * Used to apply the (pseudo-)inverse of singular/eigen values safely.
 */
template <typename math_t>
struct DivideByNonZero {
  // Magnitude threshold below which the divisor counts as zero.
  constexpr static const math_t eps = math_t(1e-10);

  __device__ math_t operator()(const math_t a, const math_t b) const
  {
    if (raft::abs<math_t>(b) < eps) { return a; }
    return a / b;
  }
};
} // namespace
/** Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S Vt` using default cuSOLVER routine.
 * The solution applied is `w = V inv(S) U^T b`, where divisions by
 * near-zero singular values are skipped (see DivideByNonZero), making the
 * solve tolerant to (near-)rank-deficient inputs.
 *
 * NOTE(review): cuSOLVER's gesvd only supports m >= n; presumably callers
 * guarantee n_rows >= n_cols — confirm at the public API level.
 *
 * @param handle raft resources providing the cuSOLVER handle
 * @param A - input feature matrix (n_rows x n_cols; lda = n_rows implies
 *        column-major storage); it's marked [in/out] in the used cuSOLVER
 *        routines, so it's not guaranteed to stay unmodified.
 * @param n_rows number of rows of A (also the length of b)
 * @param n_cols number of columns of A (also the length of w)
 * @param b device vector of targets, length n_rows [in]
 * @param w device vector receiving the solution, length n_cols [out]
 * @param stream CUDA stream on which all work is ordered
 */
template <typename math_t>
void lstsqSvdQR(raft::resources const& handle,
math_t* A,
const int n_rows,
const int n_cols,
const math_t* b,
math_t* w,
cudaStream_t stream)
{
const int minmn = min(n_rows, n_cols);
cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
int cusolverWorkSetSize = 0;
// Query the scratch size gesvd needs for an (n_rows x n_cols) input.
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngesvd_bufferSize<math_t>(
cusolverH, n_rows, n_cols, &cusolverWorkSetSize));
// One pooled allocation, manually partitioned into sub-buffers below. The
// trailing math_t element doubles as the int devInfo flag: sizeof(int) <=
// sizeof(math_t) for float/double, so the reinterpret_cast fits in place.
rmm::device_uvector<math_t> workset(cusolverWorkSetSize // cuSolver
+ n_rows * minmn // U
+ n_cols * n_cols // V
+ minmn // S
+ minmn // U^T * b
+ 1 // devInfo
,
stream);
math_t* cusolverWorkSet = workset.data();
math_t* U = cusolverWorkSet + cusolverWorkSetSize;
math_t* Vt = U + n_rows * minmn;
math_t* S = Vt + n_cols * n_cols;
math_t* Ub = S + minmn;
int* devInfo = reinterpret_cast<int*>(Ub + minmn);
// Thin SVD: jobu = jobvt = 'S' computes only the leading min(m, n)
// singular vectors, filling U, S and Vt above.
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngesvd<math_t>(cusolverH,
'S',
'S',
n_rows,
n_cols,
A,
n_rows,
S,
U,
n_rows,
Vt,
n_cols,
cusolverWorkSet,
cusolverWorkSetSize,
nullptr,
devInfo,
stream));
// Ub <- U^T b (length minmn)
raft::linalg::gemv(handle, U, n_rows, minmn, b, Ub, true, stream);
// Ub <- Ub / S elementwise; entries with |S| below eps are passed through
raft::linalg::binaryOp(Ub, Ub, S, minmn, DivideByNonZero<math_t>(), stream);
// w <- V Ub == V inv(S) U^T b (Vt passed with transpose = true)
raft::linalg::gemv(handle, Vt, minmn, n_cols, n_cols, Ub, w, true, stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * Via SVD decomposition of `A = U S V^T` using Jacobi iterations (cuSOLVER).
 * The solution applied is `w = V inv(S) U^T b`, where divisions by near-zero
 * singular values are skipped (see DivideByNonZero). Unlike gesvd, gesvdj has
 * no m >= n restriction and can be faster/more accurate for small matrices.
 *
 * @param handle raft resources providing the cuSOLVER handle
 * @param A - input feature matrix (n_rows x n_cols; lda = n_rows implies
 *        column-major storage); it's marked [in/out] in the used cuSOLVER
 *        routines, so it's not guaranteed to stay unmodified.
 * @param n_rows number of rows of A (also the length of b)
 * @param n_cols number of columns of A (also the length of w)
 * @param b device vector of targets, length n_rows [in]
 * @param w device vector receiving the solution, length n_cols [out]
 * @param stream CUDA stream on which all work is ordered
 */
template <typename math_t>
void lstsqSvdJacobi(raft::resources const& handle,
math_t* A,
const int n_rows,
const int n_cols,
const math_t* b,
math_t* w,
cudaStream_t stream)
{
const int minmn = min(n_rows, n_cols);
// Default gesvdj parameters (tolerance / max sweeps left at cuSOLVER defaults).
gesvdjInfo_t gesvdj_params;
RAFT_CUSOLVER_TRY(cusolverDnCreateGesvdjInfo(&gesvdj_params));
int cusolverWorkSetSize = 0;
cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
// Query scratch size; econ = 1 requests the economy-size (thin) SVD.
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(
raft::linalg::detail::cusolverDngesvdj_bufferSize<math_t>(cusolverH,
CUSOLVER_EIG_MODE_VECTOR,
1,
n_rows,
n_cols,
A,
n_rows,
nullptr,
nullptr,
n_rows,
nullptr,
n_cols,
&cusolverWorkSetSize,
gesvdj_params));
// One pooled allocation, manually partitioned into sub-buffers below. The
// trailing math_t element doubles as the int devInfo flag: sizeof(int) <=
// sizeof(math_t) for float/double, so the reinterpret_cast fits in place.
rmm::device_uvector<math_t> workset(cusolverWorkSetSize // cuSolver
+ n_rows * minmn // U
+ n_cols * minmn // V
+ minmn // S
+ minmn // U^T * b
+ 1 // devInfo
,
stream);
math_t* cusolverWorkSet = workset.data();
math_t* U = cusolverWorkSet + cusolverWorkSetSize;
math_t* V = U + n_rows * minmn;
math_t* S = V + n_cols * minmn;
math_t* Ub = S + minmn;
int* devInfo = reinterpret_cast<int*>(Ub + minmn);
// Thin SVD by Jacobi iterations; note gesvdj returns V (not V^T as gesvd).
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngesvdj<math_t>(cusolverH,
CUSOLVER_EIG_MODE_VECTOR,
1,
n_rows,
n_cols,
A,
n_rows,
S,
U,
n_rows,
V,
n_cols,
cusolverWorkSet,
cusolverWorkSetSize,
devInfo,
gesvdj_params,
stream));
// Ub <- U^T b (length minmn)
raft::linalg::gemv(handle, U, n_rows, minmn, b, Ub, true, stream);
// Ub <- Ub / S elementwise; entries with |S| below eps are passed through
raft::linalg::binaryOp(Ub, Ub, S, minmn, DivideByNonZero<math_t>(), stream);
// w <- V Ub == V inv(S) U^T b (no transpose: gesvdj already produced V)
raft::linalg::gemv(handle, V, n_cols, minmn, Ub, w, false, stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * via eigenvalue decomposition of `A^T * A` (covariance matrix for dataset A).
 * (`w = (A^T A)^-1 A^T b`)
 *
 * Near-zero eigenvalues are skipped when inverting (see DivideByNonZero),
 * making the normal-equation solve tolerant to near-singular covariance.
 * When the handle's stream pool allows it, `A^T A` (gemm) and `A^T b` (gemv)
 * are computed on two concurrent streams, ordered by CUDA events.
 *
 * @param handle raft resources (cuBLAS/cuSOLVER handles, stream pool)
 * @param A input feature matrix (n_rows x n_cols); not modified here
 * @param n_rows number of rows of A (also the length of b)
 * @param n_cols number of columns of A (also the length of w)
 * @param b device vector of targets, length n_rows [in]
 * @param w device vector receiving the solution, length n_cols [out]
 * @param stream user stream; all work is visible to it on return
 */
template <typename math_t>
void lstsqEig(raft::resources const& handle,
const math_t* A,
const int n_rows,
const int n_cols,
const math_t* b,
math_t* w,
cudaStream_t stream)
{
rmm::cuda_stream_view mainStream = rmm::cuda_stream_view(stream);
rmm::cuda_stream_view multAbStream = resource::get_next_usable_stream(handle);
bool concurrent;
// Check if the two streams can run concurrently. This is needed because a legacy default stream
// would synchronize with other blocking streams. To avoid synchronization in such case, we try to
// use an additional stream from the pool.
if (!are_implicitly_synchronized(mainStream, multAbStream)) {
concurrent = true;
} else if (resource::get_stream_pool_size(handle) > 1) {
mainStream = resource::get_next_usable_stream(handle);
concurrent = true;
} else {
multAbStream = mainStream;
concurrent = false;
}
// Pooled scratch: Q, QS, covA (each n_cols x n_cols), then S and Ab (n_cols each).
rmm::device_uvector<math_t> workset(n_cols * n_cols * 3 + n_cols * 2, mainStream);
// the event is created only if the given raft handle is capable of running
// at least two CUDA streams without implicit synchronization.
DeviceEvent worksetDone(concurrent);
worksetDone.record(mainStream);
math_t* Q = workset.data();
math_t* QS = Q + n_cols * n_cols;
math_t* covA = QS + n_cols * n_cols;
math_t* S = covA + n_cols * n_cols;
math_t* Ab = S + n_cols;
// covA <- A* A
math_t alpha = math_t(1);
math_t beta = math_t(0);
raft::linalg::gemm(handle,
A,
n_rows,
n_cols,
A,
covA,
n_cols,
n_cols,
CUBLAS_OP_T,
CUBLAS_OP_N,
alpha,
beta,
mainStream);
// Ab <- A* b
// The side stream must not touch Ab before the workset allocation (recorded
// on mainStream) is complete.
worksetDone.wait_by(multAbStream);
raft::linalg::gemv(handle, A, n_rows, n_cols, b, Ab, true, multAbStream);
DeviceEvent multAbDone(concurrent);
multAbDone.record(multAbStream);
// Q S Q* <- covA  (symmetric eigendecomposition; covA is symmetric PSD)
raft::common::nvtx::push_range("raft::linalg::eigDC");
raft::linalg::eigDC(handle, covA, n_cols, n_cols, Q, S, mainStream);
raft::common::nvtx::pop_range();
// QS <- Q invS  (scale columns of Q by 1/eigenvalue, skipping near-zeros)
raft::linalg::matrixVectorOp(
QS, Q, S, n_cols, n_cols, false, true, DivideByNonZero<math_t>(), mainStream);
// covA <- QS Q* == Q invS Q* == inv(A* A)
raft::linalg::gemm(handle,
QS,
n_cols,
n_cols,
Q,
covA,
n_cols,
n_cols,
CUBLAS_OP_N,
CUBLAS_OP_T,
alpha,
beta,
mainStream);
// mainStream needs Ab from the side stream before the final gemv.
multAbDone.wait_by(mainStream);
// w <- covA Ab == Q invS Q* A b == inv(A* A) A b
raft::linalg::gemv(handle, covA, n_cols, n_cols, Ab, w, false, mainStream);
// This event is created only if we use two worker streams, and `stream` is not the legacy stream,
// and `mainStream` is not a non-blocking stream. In fact, with the current logic these conditions
// are impossible together, but it still makes sense to put this construct here to emphasize that
// `stream` must wait till the work here is done (for future refactorings).
DeviceEvent mainDone(!are_implicitly_synchronized(mainStream, stream));
mainDone.record(mainStream);
mainDone.wait_by(stream);
}
/** Solves the linear ordinary least squares problem `Aw = b`
 * via QR decomposition of `A = QR`.
 * (triangular system of equations `Rw = Q^T b`)
 *
 * Pipeline: geqrf (QR factorization in-place in A), ormqr (apply Q^T to b),
 * trsm (back-substitute the upper-triangular R), then copy the first n_cols
 * entries of b into w.
 * NOTE(review): the trsm path assumes R is nonsingular (A has full column
 * rank, n_rows >= n_cols) — presumably guaranteed by callers; confirm.
 *
 * @param handle raft resources (cuBLAS and cuSOLVER handles)
 * @param A[in/out] - input feature matrix (n_rows x n_cols, lda = n_rows).
 *   Warning: the content of this matrix is modified by the cuSOLVER routines.
 * @param n_rows number of rows of A (also the length of b)
 * @param n_cols number of columns of A (also the length of w)
 * @param b[in/out] - input target vector, length n_rows.
 *   Warning: the content of this vector is modified by the cuSOLVER routines.
 * @param w [out] device vector receiving the solution, length n_cols
 * @param stream CUDA stream; synchronized twice to validate devInfo
 */
template <typename math_t>
void lstsqQR(raft::resources const& handle,
math_t* A,
const int n_rows,
const int n_cols,
math_t* b,
math_t* w,
cudaStream_t stream)
{
cublasHandle_t cublasH = resource::get_cublas_handle(handle);
cusolverDnHandle_t cusolverH = resource::get_cusolver_dn_handle(handle);
int m = n_rows;
int n = n_cols;
int info = 0;
// Householder scalars produced by geqrf.
rmm::device_uvector<math_t> d_tau(n, stream);
rmm::device_scalar<int> d_info(stream);
// ormqr config: apply Q^T from the left to the single column b.
const cublasSideMode_t side = CUBLAS_SIDE_LEFT;
const cublasOperation_t trans = CUBLAS_OP_T;
int lwork_geqrf = 0;
int lwork_ormqr = 0;
int lwork = 0;
const int lda = m;
const int ldb = m;
// Query scratch sizes for both cuSOLVER steps; one buffer of the max size
// is shared between them.
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(
raft::linalg::detail::cusolverDngeqrf_bufferSize(cusolverH, m, n, A, lda, &lwork_geqrf));
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnormqr_bufferSize(cusolverH,
side,
trans,
m,
1,
n,
A,
lda,
d_tau.data(),
b, // C,
lda, // ldc,
&lwork_ormqr));
lwork = (lwork_geqrf > lwork_ormqr) ? lwork_geqrf : lwork_ormqr;
rmm::device_uvector<math_t> d_work(lwork, stream);
// QR factorization: A <- packed Householder vectors + R (upper triangle).
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngeqrf(
cusolverH, m, n, A, lda, d_tau.data(), d_work.data(), lwork, d_info.data(), stream));
// Blocking devInfo check: a nonzero value flags an invalid argument.
RAFT_CUDA_TRY(cudaMemcpyAsync(&info, d_info.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
ASSERT(0 == info, "lstsq.h: QR wasn't successful");
// b <- Q^T b (apply the implicit Q stored in A/d_tau).
// #TODO: Call from public API when ready
RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnormqr(cusolverH,
side,
trans,
m,
1,
n,
A,
lda,
d_tau.data(),
b,
ldb,
d_work.data(),
lwork,
d_info.data(),
stream));
RAFT_CUDA_TRY(cudaMemcpyAsync(&info, d_info.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
ASSERT(0 == info, "lstsq.h: QR wasn't successful");
const math_t one = 1;
// Solve R w = (Q^T b) in-place in b; R is the upper triangle of A.
// #TODO: Call from public API when ready
RAFT_CUBLAS_TRY(raft::linalg::detail::cublastrsm(cublasH,
side,
CUBLAS_FILL_MODE_UPPER,
CUBLAS_OP_N,
CUBLAS_DIAG_NON_UNIT,
n,
1,
&one,
A,
lda,
b,
ldb,
stream));
// The first n entries of b now hold the solution; copy them out to w.
RAFT_CUDA_TRY(cudaMemcpyAsync(w, b, sizeof(math_t) * n, cudaMemcpyDeviceToDevice, stream));
}
}; // namespace detail
}; // namespace linalg
}; // namespace raft
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.