repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/mdspan.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport int8_t, int64_t, uint8_t, uint32_t
from libcpp.string cimport string
from pylibraft.common.cpp.mdspan cimport (
device_matrix_view,
host_matrix_view,
row_major,
)
from pylibraft.common.handle cimport device_resources
from pylibraft.common.optional cimport make_optional, optional
# Cython doesn't like `const float` inside template parameters
# hack around this with using typedefs
ctypedef const float const_float
ctypedef const int8_t const_int8_t
ctypedef const uint8_t const_uint8_t
# Typed accessor declarations (implemented in mdspan.pyx): each converts a
# Python object exposing the (CUDA) array interface into a raft matrix view
# over the underlying buffer. `check_shape` toggles dimension validation.
# Device-memory views, mutable element types.
cdef device_matrix_view[float, int64_t, row_major] get_dmv_float(
array, check_shape) except *
cdef device_matrix_view[uint8_t, int64_t, row_major] get_dmv_uint8(
array, check_shape) except *
cdef device_matrix_view[int8_t, int64_t, row_major] get_dmv_int8(
array, check_shape) except *
cdef device_matrix_view[int64_t, int64_t, row_major] get_dmv_int64(
array, check_shape) except *
# Wraps an existing device view in a std::optional (see common/optional.pxd).
cdef optional[device_matrix_view[int64_t, int64_t, row_major]] make_optional_view_int64( # noqa: E501
device_matrix_view[int64_t, int64_t, row_major]& dmv) except *
cdef device_matrix_view[uint32_t, int64_t, row_major] get_dmv_uint32(
array, check_shape) except *
# Device views over const elements; const_* typedefs are declared above
# because Cython rejects `const float` directly in template parameters.
cdef device_matrix_view[const_float, int64_t, row_major] get_const_dmv_float(
array, check_shape) except *
cdef device_matrix_view[const_uint8_t, int64_t, row_major] get_const_dmv_uint8(
array, check_shape) except *
cdef device_matrix_view[const_int8_t, int64_t, row_major] get_const_dmv_int8(
array, check_shape) except *
# Host-memory counterparts of the device accessors above.
cdef host_matrix_view[float, int64_t, row_major] get_hmv_float(
array, check_shape) except *
cdef host_matrix_view[uint8_t, int64_t, row_major] get_hmv_uint8(
array, check_shape) except *
cdef host_matrix_view[int8_t, int64_t, row_major] get_hmv_int8(
array, check_shape) except *
cdef host_matrix_view[int64_t, int64_t, row_major] get_hmv_int64(
array, check_shape) except *
cdef host_matrix_view[uint32_t, int64_t, row_major] get_hmv_uint32(
array, check_shape) except *
# Host views over const elements.
cdef host_matrix_view[const_float, int64_t, row_major] get_const_hmv_float(
array, check_shape) except *
cdef host_matrix_view[const_uint8_t, int64_t, row_major] get_const_hmv_uint8(
array, check_shape) except *
cdef host_matrix_view[const_int8_t, int64_t, row_major] get_const_hmv_int8(
array, check_shape) except *
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/common | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/cpp/optional.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We're still using cython v0.29.x - which doesn't have std::optional
# support. Include the minimal definition here as suggested by
# https://github.com/cython/cython/issues/3293#issuecomment-1223058101
# Minimal std::optional binding: only the default constructor (empty state)
# and the templated assignment operator are exposed — that is all the
# pylibraft bindings need to build and fill an optional argument.
cdef extern from "<optional>" namespace "std" nogil:
cdef cppclass optional[T]:
optional()
optional& operator=[U](U&)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/common | rapidsai_public_repos/raft/python/pylibraft/pylibraft/common/cpp/mdspan.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport int8_t, int64_t, uint8_t, uint64_t
from libcpp.string cimport string
from pylibraft.common.handle cimport device_resources
# Layout tag types from the reference mdspan implementation bundled with
# raft; only their identity matters to Cython, hence the empty bodies.
cdef extern from "raft/thirdparty/mdspan/include/experimental/__p0009_bits/layout_stride.hpp" namespace "std::experimental": # noqa: E501
cdef cppclass layout_right:
pass
cdef cppclass layout_left:
pass
# raft's aliases: row_major == layout_right, col_major == layout_left.
cdef extern from "raft/core/mdspan_types.hpp" \
namespace "raft":
ctypedef layout_right row_major
ctypedef layout_left col_major
# 2-D extents type; opaque to Cython.
cdef cppclass matrix_extent[IndexType]:
pass
# Bindings for raft's non-owning device-memory view types and their factory
# functions (raft/core/device_mdspan.hpp).
cdef extern from "raft/core/device_mdspan.hpp" namespace "raft" nogil:
    cdef cppclass device_vector_view[ElementType, IndexType]:
        pass
    cdef cppclass device_scalar_view[ElementType, IndexType]:
        pass
    cdef cppclass device_matrix_view[ElementType, IndexType, LayoutType]:
        pass

    # Wrap a raw device pointer as a 2-D view (no copy, no ownership).
    cdef device_matrix_view[ElementType, IndexType, LayoutPolicy] \
        make_device_matrix_view[ElementType, IndexType, LayoutPolicy](
            ElementType* ptr, IndexType n_rows, IndexType n_cols) except +

    # Wrap a raw device pointer as a 1-D view of length n.
    cdef device_vector_view[ElementType, IndexType] \
        make_device_vector_view[ElementType, IndexType](
            ElementType* ptr, IndexType n) except +

    # Fixed: this factory returns a device_scalar_view, so it must be
    # declared as raft::make_device_scalar_view. It was previously declared
    # as a second make_device_vector_view overload, which does not exist in
    # the C++ API and mismatched its own return type.
    cdef device_scalar_view[ElementType, IndexType] \
        make_device_scalar_view[ElementType, IndexType](
            ElementType* ptr) except +
# Host-memory counterparts of the device views, plus the iostream helpers
# needed by serialize_mdspan below.
cdef extern from "raft/core/host_mdspan.hpp" \
namespace "raft" nogil:
cdef cppclass host_matrix_view[ElementType, IndexType, LayoutPolicy]:
pass
cdef cppclass host_vector_view[ElementType, IndexType]:
pass
cdef cppclass host_scalar_view[ElementType, IndexType]:
pass
cdef cppclass host_mdspan[ElementType, Extents, LayoutPolicy]:
pass
# Factories wrapping raw host pointers as 2-D / 1-D / scalar views.
cdef host_matrix_view[ElementType, IndexType, LayoutPolicy] \
make_host_matrix_view[ElementType, IndexType, LayoutPolicy](
ElementType* ptr, IndexType n_rows, IndexType n_cols) except +
cdef host_vector_view[ElementType, IndexType] \
make_host_vector_view[ElementType, IndexType](
ElementType* ptr, IndexType n) except +
cdef host_scalar_view[ElementType, IndexType] \
make_host_scalar_view[ElementType, IndexType](
ElementType *ptr) except +
# std::ostringstream / std::ostream, used as the sink for serialization.
cdef extern from "<sstream>" namespace "std" nogil:
cdef cppclass ostringstream:
ostringstream() except +
string str() except +
cdef extern from "<ostream>" namespace "std" nogil:
cdef cppclass ostream:
pass
cdef extern from "raft/core/mdspan.hpp" namespace "raft" nogil:
# NOTE(review): template parameter is spelled `IndentType` — likely a typo
# for `IndexType`. Harmless to Cython (only arity matters), but confirm.
cdef cppclass dextents[IndentType, Rank]:
pass
# Serializes a host mdspan to the given output stream.
cdef extern from "raft/core/serialize.hpp" namespace "raft" nogil:
cdef void serialize_mdspan[ElementType, Extents, LayoutPolicy](
const device_resources& handle, ostream& os,
const host_mdspan[ElementType, Extents, LayoutPolicy]& obj)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Set the list of Cython files to build
# kmeans.pyx is the only Cython source in this package; it links against the
# precompiled raft library target.
set(cython_sources kmeans.pyx)
set(linked_libraries raft::compiled)
# Build all of the Cython targets
# MODULE_PREFIX cluster_ namespaces the generated extension-module targets.
rapids_cython_create_modules(
CXX
SOURCE_FILES "${cython_sources}"
LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS raft MODULE_PREFIX cluster_
)
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/__init__.pxd | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Re-export the public k-means API from the compiled extension module so
# users can `from pylibraft.cluster import fit`, etc.
from .kmeans import (
KMeansParams,
cluster_cost,
compute_new_centroids,
fit,
init_plus_plus,
)
# Names considered public for `from pylibraft.cluster import *`.
__all__ = [
"KMeansParams",
"cluster_cost",
"compute_new_centroids",
"fit",
"init_plus_plus",
]
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/kmeans.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport nullptr
from collections import namedtuple
from enum import IntEnum
from pylibraft.common import Handle, cai_wrapper, device_ndarray
from pylibraft.common.handle import auto_sync_handle
from pylibraft.common.handle cimport device_resources
from pylibraft.random.cpp.rng_state cimport RngState
from pylibraft.common.input_validation import *
from pylibraft.distance import DISTANCE_TYPES
from pylibraft.cluster.cpp cimport kmeans as cpp_kmeans, kmeans_types
from pylibraft.cluster.cpp.kmeans cimport (
cluster_cost as cpp_cluster_cost,
init_plus_plus as cpp_init_plus_plus,
update_centroids,
)
from pylibraft.common.cpp.mdspan cimport *
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
from pylibraft.common import auto_convert_output
@auto_sync_handle
@auto_convert_output
def compute_new_centroids(X,
                          centroids,
                          labels,
                          new_centroids,
                          sample_weights=None,
                          weight_per_cluster=None,
                          handle=None):
    """
    Compute new centroids given an input matrix and existing centroids

    Parameters
    ----------

    X : Input CUDA array interface compliant matrix shape (m, k)
    centroids : Input CUDA array interface compliant matrix shape
                    (n_clusters, k)
    labels : Input CUDA array interface compliant matrix shape
               (m, 1)
    new_centroids : Writable CUDA array interface compliant matrix shape
                    (n_clusters, k)
    sample_weights : Optional input CUDA array interface compliant matrix shape
                     (n_clusters, 1) default: None
    weight_per_cluster : Optional writable CUDA array interface compliant
                         matrix shape (n_clusters, 1) default: None
    {handle_docstring}

    Examples
    --------

    >>> import cupy as cp
    >>> from pylibraft.common import Handle
    >>> from pylibraft.cluster.kmeans import compute_new_centroids
    >>> # A single RAFT handle can optionally be reused across
    >>> # pylibraft functions.
    >>> handle = Handle()
    >>> n_samples = 5000
    >>> n_features = 50
    >>> n_clusters = 3
    >>> X = cp.random.random_sample((n_samples, n_features),
    ...                             dtype=cp.float32)
    >>> centroids = cp.random.random_sample((n_clusters, n_features),
    ...                                     dtype=cp.float32)
    ...
    >>> labels = cp.random.randint(0, high=n_clusters, size=n_samples,
    ...                            dtype=cp.int32)
    >>> new_centroids = cp.empty((n_clusters, n_features),
    ...                          dtype=cp.float32)
    >>> compute_new_centroids(
    ...     X, centroids, labels, new_centroids, handle=handle
    ... )
    >>> # pylibraft functions are often asynchronous so the
    >>> # handle needs to be explicitly synchronized
    >>> handle.sync()
    """
    # Read shapes, dtypes and raw device pointers straight from the
    # __cuda_array_interface__ dicts — no data is copied.
    x_cai = X.__cuda_array_interface__
    centroids_cai = centroids.__cuda_array_interface__
    new_centroids_cai = new_centroids.__cuda_array_interface__
    labels_cai = labels.__cuda_array_interface__

    m = x_cai["shape"][0]
    x_k = x_cai["shape"][1]
    n_clusters = centroids_cai["shape"][0]

    centroids_k = centroids_cai["shape"][1]
    # Fixed: this previously read centroids_cai["shape"][1] (copy-paste
    # slip), making it a duplicate of centroids_k instead of describing the
    # output matrix.
    new_centroids_k = new_centroids_cai["shape"][1]

    x_dt = np.dtype(x_cai["typestr"])
    centroids_dt = np.dtype(centroids_cai["typestr"])
    new_centroids_dt = np.dtype(new_centroids_cai["typestr"])
    labels_dt = np.dtype(labels_cai["typestr"])

    if not do_cols_match(X, centroids):
        raise ValueError("X and centroids must have same number of columns.")

    if not do_rows_match(X, labels):
        raise ValueError("X and labels must have same number of rows")

    x_ptr = <uintptr_t>x_cai["data"][0]
    centroids_ptr = <uintptr_t>centroids_cai["data"][0]
    new_centroids_ptr = <uintptr_t>new_centroids_cai["data"][0]
    labels_ptr = <uintptr_t>labels_cai["data"][0]

    # The weight arrays are optional; a null pointer is passed through to
    # the C++ runtime when the caller omits them.
    if sample_weights is not None:
        sample_weights_cai = sample_weights.__cuda_array_interface__
        sample_weights_ptr = <uintptr_t>sample_weights_cai["data"][0]
        sample_weights_dt = np.dtype(sample_weights_cai["typestr"])
    else:
        sample_weights_ptr = <uintptr_t>nullptr

    if weight_per_cluster is not None:
        weight_per_cluster_cai = weight_per_cluster.__cuda_array_interface__
        weight_per_cluster_ptr = <uintptr_t>weight_per_cluster_cai["data"][0]
        weight_per_cluster_dt = np.dtype(weight_per_cluster_cai["typestr"])
    else:
        weight_per_cluster_ptr = <uintptr_t>nullptr

    handle = handle if handle is not None else Handle()
    cdef device_resources *h = <device_resources*><size_t>handle.getHandle()

    # The C++ runtime expects row-major (C-contiguous) buffers.
    x_c_contiguous = is_c_contiguous(x_cai)
    centroids_c_contiguous = is_c_contiguous(centroids_cai)
    new_centroids_c_contiguous = is_c_contiguous(new_centroids_cai)

    if not x_c_contiguous or not centroids_c_contiguous \
            or not new_centroids_c_contiguous:
        raise ValueError("Inputs must all be c contiguous")

    if not do_dtypes_match(X, centroids, new_centroids):
        raise ValueError("Inputs must all have the same dtypes "
                         "(float32 or float64)")

    # Dispatch to the overload matching the input precision.
    if x_dt == np.float32:
        update_centroids(deref(h),
                         <float*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <float*> sample_weights_ptr,
                         <float*> centroids_ptr,
                         <int*> labels_ptr,
                         <float*> new_centroids_ptr,
                         <float*> weight_per_cluster_ptr)
    elif x_dt == np.float64:
        update_centroids(deref(h),
                         <double*> x_ptr,
                         <int> m,
                         <int> x_k,
                         <int> n_clusters,
                         <double*> sample_weights_ptr,
                         <double*> centroids_ptr,
                         <int*> labels_ptr,
                         <double*> new_centroids_ptr,
                         <double*> weight_per_cluster_ptr)
    else:
        raise ValueError("dtype %s not supported" % x_dt)
@auto_sync_handle
@auto_convert_output
def init_plus_plus(X, n_clusters=None, seed=None, handle=None, centroids=None):
"""
Compute initial centroids using the "kmeans++" algorithm.
Parameters
----------
X : Input CUDA array interface compliant matrix shape (m, k)
n_clusters : Number of clusters to select
seed : Controls the random sampling of centroids
centroids : Optional writable CUDA array interface compliant matrix shape
(n_clusters, k). Use instead of passing `n_clusters`.
{handle_docstring}
Examples
--------
>>> import cupy as cp
>>> from pylibraft.cluster.kmeans import init_plus_plus
>>> n_samples = 5000
>>> n_features = 50
>>> n_clusters = 3
>>> X = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> centroids = init_plus_plus(X, n_clusters)
"""
# NOTE(review): this only raises when BOTH arguments are given AND they
# disagree; passing both with a matching shape is silently accepted even
# though the message says they are exclusive — confirm intended behavior.
if (n_clusters is not None and
centroids is not None and n_clusters != centroids.shape[0]):
msg = ("Parameters 'n_clusters' and 'centroids' "
"are exclusive. Only pass one at a time.")
raise RuntimeError(msg)
# NOTE(review): no `handle is None` fallback here (unlike
# compute_new_centroids) — presumably @auto_sync_handle always injects a
# Handle; confirm.
cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
X_cai = cai_wrapper(X)
X_cai.validate_shape_dtype(expected_dims=2)
dtype = X_cai.dtype
# Allocate the output matrix unless the caller supplied one; its row count
# defines n_clusters when `centroids` is given.
if centroids is not None:
n_clusters = centroids.shape[0]
else:
centroids_shape = (n_clusters, X_cai.shape[1])
centroids = device_ndarray.empty(centroids_shape, dtype=dtype)
centroids_cai = cai_wrapper(centroids)
# Can't set attributes of KMeansParameters after creating it, so taking
# a detour via a dict to collect the possible constructor arguments
params_ = dict(n_clusters=n_clusters)
if seed is not None:
params_["seed"] = seed
params = KMeansParams(**params_)
# Dispatch on input precision; views are built over the existing buffers.
if dtype == np.float64:
cpp_init_plus_plus(
deref(h), params.c_obj,
make_device_matrix_view[double, int, row_major](
<double *><uintptr_t>X_cai.data,
<int>X_cai.shape[0], <int>X_cai.shape[1]),
make_device_matrix_view[double, int, row_major](
<double *><uintptr_t>centroids_cai.data,
<int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
)
elif dtype == np.float32:
cpp_init_plus_plus(
deref(h), params.c_obj,
make_device_matrix_view[float, int, row_major](
<float *><uintptr_t>X_cai.data,
<int>X_cai.shape[0], <int>X_cai.shape[1]),
make_device_matrix_view[float, int, row_major](
<float *><uintptr_t>centroids_cai.data,
<int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
)
else:
raise ValueError(f"Unhandled dtype ({dtype}) for X.")
return centroids
@auto_sync_handle
@auto_convert_output
def cluster_cost(X, centroids, handle=None):
"""
Compute cluster cost given an input matrix and existing centroids
Parameters
----------
X : Input CUDA array interface compliant matrix shape (m, k)
centroids : Input CUDA array interface compliant matrix shape
(n_clusters, k)
{handle_docstring}
Returns
-------
The cluster cost (inertia) as a float32 or float64 scalar, matching
the dtype of X.
Examples
--------
>>> import cupy as cp
>>> from pylibraft.cluster.kmeans import cluster_cost
>>> n_samples = 5000
>>> n_features = 50
>>> n_clusters = 3
>>> X = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> centroids = cp.random.random_sample((n_clusters, n_features),
... dtype=cp.float32)
>>> inertia = cluster_cost(X, centroids)
"""
# Read shapes/dtypes/pointers from the CUDA array interface (no copies).
x_cai = X.__cuda_array_interface__
centroids_cai = centroids.__cuda_array_interface__
m = x_cai["shape"][0]
x_k = x_cai["shape"][1]
n_clusters = centroids_cai["shape"][0]
centroids_k = centroids_cai["shape"][1]
x_dt = np.dtype(x_cai["typestr"])
centroids_dt = np.dtype(centroids_cai["typestr"])
if not do_cols_match(X, centroids):
raise ValueError("X and centroids must have same number of columns.")
x_ptr = <uintptr_t>x_cai["data"][0]
centroids_ptr = <uintptr_t>centroids_cai["data"][0]
handle = handle if handle is not None else Handle()
cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
# The C++ runtime expects row-major (C-contiguous) buffers.
x_c_contiguous = is_c_contiguous(x_cai)
centroids_c_contiguous = is_c_contiguous(centroids_cai)
if not x_c_contiguous or not centroids_c_contiguous:
raise ValueError("Inputs must all be c contiguous")
if not do_dtypes_match(X, centroids):
raise ValueError("Inputs must all have the same dtypes "
"(float32 or float64)")
# Output scalars written through by the C++ overloads below.
cdef float f_cost = 0
cdef double d_cost = 0
if x_dt == np.float32:
cpp_cluster_cost(deref(h),
<float*> x_ptr,
<int> m,
<int> x_k,
<int> n_clusters,
<float*> centroids_ptr,
<float*> &f_cost)
return f_cost
elif x_dt == np.float64:
cpp_cluster_cost(deref(h),
<double*> x_ptr,
<int> m,
<int> x_k,
<int> n_clusters,
<double*> centroids_ptr,
<double*> &d_cost)
return d_cost
else:
raise ValueError("dtype %s not supported" % x_dt)
# Python-side mirror of the C++ InitMethod enum; values are cast from the
# kmeans_types declarations so the two stay numerically in sync.
class InitMethod(IntEnum):
""" Method for initializing kmeans """
KMeansPlusPlus = <int> kmeans_types.InitMethod.KMeansPlusPlus
Random = <int> kmeans_types.InitMethod.Random
Array = <int> kmeans_types.InitMethod.Array
cdef class KMeansParams:
""" Specifies hyper-parameters for the kmeans algorithm.
Parameters
----------
n_clusters : int, optional
The number of clusters to form as well as the number of centroids
to generate
max_iter : int, optional
Maximum number of iterations of the k-means algorithm for a single run
tol : float, optional
Relative tolerance with regards to inertia to declare convergence
verbosity : int, optional
Logging verbosity level
seed: int, optional
Seed to the random number generator.
metric : str, optional
Metric names to use for distance computation, see
:func:`pylibraft.distance.pairwise_distance` for valid values.
init : InitMethod, optional
Centroid initialization method
n_init : int, optional
Number of times the k-means algorithm will be run with different seeds.
oversampling_factor : float, optional
Oversampling factor for use in the k-means algorithm
"""
# Owned C++ parameter struct; default-constructed with raft's defaults and
# selectively overridden below.
cdef kmeans_types.KMeansParams c_obj
# NOTE(review): `Optional` is not imported in this module — these
# annotations rely on Cython not evaluating them at runtime; confirm.
def __init__(self,
n_clusters: Optional[int] = None,
max_iter: Optional[int] = None,
tol: Optional[float] = None,
verbosity: Optional[int] = None,
seed: Optional[int] = None,
metric: Optional[str] = None,
init: Optional[InitMethod] = None,
n_init: Optional[int] = None,
oversampling_factor: Optional[float] = None,
batch_samples: Optional[int] = None,
batch_centroids: Optional[int] = None,
inertia_check: Optional[bool] = None):
# Only overwrite fields the caller supplied; everything else keeps the
# C++ struct's default value.
if n_clusters is not None:
self.c_obj.n_clusters = n_clusters
if max_iter is not None:
self.c_obj.max_iter = max_iter
if tol is not None:
self.c_obj.tol = tol
if verbosity is not None:
self.c_obj.verbosity = verbosity
if seed is not None:
self.c_obj.rng_state.seed = seed
if metric is not None:
# Translate the metric name to raft's DistanceType enum value.
distance = DISTANCE_TYPES.get(metric)
if distance is None:
valid_metrics = list(DISTANCE_TYPES.keys())
raise ValueError(f"Unknown metric '{metric}'. Valid values "
f"are: {valid_metrics}")
self.c_obj.metric = distance
if init is not None:
self.c_obj.init = init
if n_init is not None:
self.c_obj.n_init = n_init
if oversampling_factor is not None:
self.c_obj.oversampling_factor = oversampling_factor
if batch_samples is not None:
self.c_obj.batch_samples = batch_samples
if batch_centroids is not None:
self.c_obj.batch_centroids = batch_centroids
if inertia_check is not None:
self.c_obj.inertia_check = inertia_check
# Read-back properties over the underlying C++ struct.
# NOTE(review): `metric` and `n_init` have no corresponding property —
# possibly intentional, but worth confirming.
@property
def n_clusters(self):
return self.c_obj.n_clusters
@property
def max_iter(self):
return self.c_obj.max_iter
@property
def tol(self):
return self.c_obj.tol
@property
def verbosity(self):
return self.c_obj.verbosity
@property
def seed(self):
return self.c_obj.rng_state.seed
@property
def init(self):
return InitMethod(self.c_obj.init)
@property
def oversampling_factor(self):
return self.c_obj.oversampling_factor
@property
def batch_samples(self):
return self.c_obj.batch_samples
@property
def batch_centroids(self):
return self.c_obj.batch_centroids
@property
def inertia_check(self):
return self.c_obj.inertia_check
# Result triple returned by fit(): (centroids, inertia, n_iter).
FitOutput = namedtuple("FitOutput", "centroids inertia n_iter")
@auto_sync_handle
@auto_convert_output
def fit(
KMeansParams params, X, centroids=None, sample_weights=None, handle=None
):
"""
Find clusters with the k-means algorithm
Parameters
----------
params : KMeansParams
Parameters to use to fit KMeans model
X : Input CUDA array interface compliant matrix shape (m, k)
centroids : Optional writable CUDA array interface compliant matrix
shape (n_clusters, k)
sample_weights : Optional input CUDA array interface compliant matrix shape
(n_clusters, 1) default: None
{handle_docstring}
Returns
-------
centroids : raft.device_ndarray
The computed centroids for each cluster
inertia : float
Sum of squared distances of samples to their closest cluster center
n_iter : int
The number of iterations used to fit the model
Examples
--------
>>> import cupy as cp
>>> from pylibraft.cluster.kmeans import fit, KMeansParams
>>> n_samples = 5000
>>> n_features = 50
>>> n_clusters = 3
>>> X = cp.random.random_sample((n_samples, n_features),
... dtype=cp.float32)
>>> params = KMeansParams(n_clusters=n_clusters)
>>> centroids, inertia, n_iter = fit(params, X)
"""
cdef device_resources *h = <device_resources*><size_t>handle.getHandle()
# Output scalars the C++ fit writes through host_scalar_views.
cdef float f_inertia = 0.0
cdef double d_inertia = 0.0
cdef int n_iter = 0
# Empty std::optionals; filled below only when sample_weights is given.
cdef optional[device_vector_view[const double, int]] d_sample_weights
cdef optional[device_vector_view[const float, int]] f_sample_weights
X_cai = cai_wrapper(X)
dtype = X_cai.dtype
# Allocate the output centroids matrix unless the caller supplied one.
if centroids is None:
centroids_shape = (params.n_clusters, X_cai.shape[1])
centroids = device_ndarray.empty(centroids_shape, dtype=dtype)
centroids_cai = cai_wrapper(centroids)
# validate that inputs are all c-contiguous, have a consistent dtype,
# and the expected shape
X_cai.validate_shape_dtype(2)
centroids_cai.validate_shape_dtype(2, dtype)
if sample_weights is not None:
sample_weights_cai = cai_wrapper(sample_weights)
sample_weights_cai.validate_shape_dtype(1, dtype)
# Dispatch on input precision; views are built over the existing buffers.
if dtype == np.float64:
if sample_weights is not None:
d_sample_weights = make_device_vector_view(
<const double *><uintptr_t>sample_weights_cai.data,
<int>sample_weights_cai.shape[0])
cpp_kmeans.fit(
deref(h),
params.c_obj,
make_device_matrix_view[double, int, row_major](
<double *><uintptr_t>X_cai.data,
<int>X_cai.shape[0], <int>X_cai.shape[1]),
d_sample_weights,
make_device_matrix_view[double, int, row_major](
<double *><uintptr_t>centroids_cai.data,
<int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
make_host_scalar_view[double, int](&d_inertia),
make_host_scalar_view[int, int](&n_iter))
return FitOutput(centroids, d_inertia, n_iter)
elif dtype == np.float32:
if sample_weights is not None:
f_sample_weights = make_device_vector_view(
<const float *><uintptr_t>sample_weights_cai.data,
<int>sample_weights_cai.shape[0])
cpp_kmeans.fit(
deref(h),
params.c_obj,
make_device_matrix_view[float, int, row_major](
<float *><uintptr_t>X_cai.data,
<int>X_cai.shape[0], <int>X_cai.shape[1]),
f_sample_weights,
make_device_matrix_view[float, int, row_major](
<float *><uintptr_t>centroids_cai.data,
<int>centroids_cai.shape[0], <int>centroids_cai.shape[1]),
make_host_scalar_view[float, int](&f_inertia),
make_host_scalar_view[int, int](&n_iter))
return FitOutput(centroids, f_inertia, n_iter)
else:
raise ValueError(f"unhandled dtype {dtype}")
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/cpp/kmeans_types.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcpp cimport bool
from pylibraft.distance.distance_type cimport DistanceType
from pylibraft.random.cpp.rng_state cimport RngState
# Bindings for raft's k-means hyper-parameter struct and its init enum.
cdef extern from "raft/cluster/kmeans_types.hpp" \
namespace "raft::cluster::kmeans":
# NOTE(review): the enum's C++ name omits the `kmeans::` namespace while
# its members include it — presumably both resolve via raft's alias of
# KMeansParams in raft::cluster; confirm against kmeans_types.hpp.
ctypedef enum InitMethod 'raft::cluster::KMeansParams::InitMethod':
KMeansPlusPlus 'raft::cluster::kmeans::KMeansParams::InitMethod::KMeansPlusPlus' # noqa
Random 'raft::cluster::kmeans::KMeansParams::InitMethod::Random'
Array 'raft::cluster::kmeans::KMeansParams::InitMethod::Array'
# Mirrors the C++ struct's public fields; defaults come from the C++
# default constructor.
cdef cppclass KMeansParams:
KMeansParams() except +
int n_clusters
InitMethod init
int max_iter
double tol
int verbosity
RngState rng_state
DistanceType metric
int n_init
double oversampling_factor
int batch_samples
int batch_centroids
bool inertia_check
| 0 |
rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster | rapidsai_public_repos/raft/python/pylibraft/pylibraft/cluster/cpp/kmeans.pxd | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport bool, nullptr
from pylibraft.cluster.cpp.kmeans_types cimport KMeansParams
from pylibraft.common.cpp.mdspan cimport *
from pylibraft.common.cpp.optional cimport optional
from pylibraft.common.handle cimport device_resources
# Bindings for the precompiled raft runtime k-means entry points. Each
# function has float and double overloads dispatched by argument type.
cdef extern from "raft_runtime/cluster/kmeans.hpp" \
namespace "raft::runtime::cluster::kmeans" nogil:
# Recompute centroids from X, labels and (optional, nullable) weights.
cdef void update_centroids(
const device_resources& handle,
const double *X,
int n_samples,
int n_features,
int n_clusters,
const double *sample_weights,
const double *centroids,
const int* labels,
double *new_centroids,
double *weight_per_cluster) except +
cdef void update_centroids(
const device_resources& handle,
const float *X,
int n_samples,
int n_features,
int n_clusters,
const float *sample_weights,
const float *centroids,
const int* labels,
float *new_centroids,
float *weight_per_cluster) except +
# Total inertia of X relative to centroids, written through `cost`.
cdef void cluster_cost(
const device_resources& handle,
const float* X,
int n_samples,
int n_features,
int n_clusters,
const float * centroids,
float * cost) except +
cdef void cluster_cost(
const device_resources& handle,
const double* X,
int n_samples,
int n_features,
int n_clusters,
const double * centroids,
double * cost) except +
# kmeans++ centroid initialization; writes into `centroids`.
cdef void init_plus_plus(
const device_resources & handle,
const KMeansParams& params,
device_matrix_view[float, int, row_major] X,
device_matrix_view[float, int, row_major] centroids) except +
cdef void init_plus_plus(
const device_resources & handle,
const KMeansParams& params,
device_matrix_view[double, int, row_major] X,
device_matrix_view[double, int, row_major] centroids) except +
# Full k-means fit: clusters X into params.n_clusters centroids, writing the
# final inertia and iteration count through the host scalar views.
# Fixed: the 5th parameter is the output centroids matrix; it was previously
# mis-named `inertia`, duplicating the host_scalar_view parameter name below
# (invalid — two parameters cannot share a name) and misrepresenting its
# role at the call site in kmeans.pyx.
cdef void fit(
    const device_resources & handle,
    const KMeansParams& params,
    device_matrix_view[float, int, row_major] X,
    optional[device_vector_view[float, int]] sample_weight,
    device_matrix_view[float, int, row_major] centroids,
    host_scalar_view[float, int] inertia,
    host_scalar_view[int, int] n_iter) except +
cdef void fit(
    const device_resources & handle,
    const KMeansParams& params,
    device_matrix_view[double, int, row_major] X,
    optional[device_vector_view[double, int]] sample_weight,
    device_matrix_view[double, int, row_major] centroids,
    host_scalar_view[double, int] inertia,
    host_scalar_view[int, int] n_iter) except +
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- cython>=3.0.0
- gcc_linux-64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- nvcc_linux-64=11.8
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-64==2.17
name: bench_ann_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- cython>=3.0.0
- gcc_linux-aarch64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- nvcc_linux-aarch64=11.8
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-aarch64==2.17
name: bench_ann_cuda-118_arch-aarch64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-version=12.0
- cxx-compiler
- cython>=3.0.0
- gcc_linux-aarch64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-aarch64==2.17
name: bench_ann_cuda-120_arch-aarch64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/all_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==24.2.*
- doxygen>=1.8.20
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- joblib>=0.11
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- nccl>=2.9.9
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- rapids-dask-dependency==24.2.*
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-64==2.17
- ucx-proc=*=gpu
- ucx-py==0.36.*
- ucx>=1.13.0
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/all_cuda-120_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==24.2.*
- doxygen>=1.8.20
- gcc_linux-aarch64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- joblib>=0.11
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- nccl>=2.9.9
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- rapids-dask-dependency==24.2.*
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-aarch64==2.17
- ucx-proc=*=gpu
- ucx-py==0.36.*
- ucx>=1.13.0
name: all_cuda-120_arch-aarch64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/all_cuda-118_arch-aarch64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==24.2.*
- doxygen>=1.8.20
- gcc_linux-aarch64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- joblib>=0.11
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- nccl>=2.9.9
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- nvcc_linux-aarch64=11.8
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- rapids-dask-dependency==24.2.*
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-aarch64==2.17
- ucx-proc=*=gpu
- ucx-py==0.36.*
- ucx>=1.13.0
name: all_cuda-118_arch-aarch64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/all_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvtx=11.8
- cuda-profiler-api=11.8.86
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==24.2.*
- doxygen>=1.8.20
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- ipython
- joblib>=0.11
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- nccl>=2.9.9
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- nvcc_linux-64=11.8
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-cov
- rapids-dask-dependency==24.2.*
- recommonmark
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-learn
- scipy
- sphinx-copybutton
- sphinx-markdown-tables
- sysroot_linux-64==2.17
- ucx-proc=*=gpu
- ucx-py==0.36.*
- ucx>=1.13.0
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/raft/conda | rapidsai_public_repos/raft/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- benchmark>=1.8.2
- c-compiler
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-version=12.0
- cxx-compiler
- cython>=3.0.0
- gcc_linux-64=11.*
- glog>=0.6.0
- h5py>=3.8.0
- hnswlib=0.7.0
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- matplotlib
- nccl>=2.9.9
- ninja
- nlohmann_json>=3.11.2
- openblas
- pandas
- pyyaml
- rmm==24.2.*
- scikit-build>=0.13.1
- sysroot_linux-64==2.17
name: bench_ann_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench-cpu/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
glog_version:
- ">=0.6.0"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
spdlog_version:
- ">=1.11.0,<1.12"
fmt_version:
- ">=9.1.0,<10"
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench-cpu/build.sh | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION.
./build.sh bench-ann --cpu-only --no-nvtx --build-metrics=bench_ann_cpu --incl-cache-stats
cmake --install cpp/build --component ann_bench
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench-cpu/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: raft-ann-bench-cpu
version: {{ version }}
script: build.sh
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libraft-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libraft-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
number: {{ GIT_DESCRIBE_NUMBER }}
string: py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- glog {{ glog_version }}
- matplotlib
- nlohmann_json {{ nlohmann_json_version }}
- spdlog {{ spdlog_version }}
- fmt {{ fmt_version }}
- python
- pyyaml
- pandas
run:
- glog {{ glog_version }}
- h5py {{ h5py_version }}
- matplotlib
- python
- pyyaml
- pandas
- benchmark
about:
home: https://rapids.ai/
license: Apache-2.0
summary: RAFT ANN CPU benchmarks
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-dask/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
ucx_version:
- ">=1.14.1,<1.16.0"
ucx_py_version:
- "0.36.*"
cmake_version:
- ">=3.26.4"
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-dask/build.sh | # Copyright (c) 2022, NVIDIA CORPORATION.
#!/usr/bin/env bash
# This assumes the script is executed from the root of the repo directory
./build.sh raft-dask --no-nvtx
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-dask/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c numba -c rapidsai -c pytorch
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: raft-dask
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
{% if cuda_major == "11" %}
- cuda-python >=11.7.1,<12.0a0
- cudatoolkit
{% else %}
- cuda-python >=12.0,<13.0a0
{% endif %}
- cuda-version ={{ cuda_version }}
- cython >=3.0.0
- nccl >=2.9.9
- pylibraft {{ version }}
- python x.x
- rmm ={{ minor_version }}
- scikit-build >=0.13.1
- setuptools
- ucx {{ ucx_version }}
- ucx-proc=*=gpu
- ucx-py {{ ucx_py_version }}
run:
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- dask-cuda ={{ minor_version }}
- rapids-dask-dependency ={{ minor_version }}
- joblib >=0.11
- nccl >=2.9.9
- pylibraft {{ version }}
- python x.x
- rmm ={{ minor_version }}
- ucx {{ ucx_version }}
- ucx-proc=*=gpu
- ucx-py {{ ucx_py_version }}
test:
  requires:
- cuda-version ={{ cuda_version }}
imports:
- raft_dask
about:
home: https://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: raft-dask library
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
nccl_version:
- ">=2.9.9"
gtest_version:
- ">=1.13.0"
glog_version:
- ">=0.6.0"
faiss_version:
- ">=1.7.1"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcublas_host_version:
- "=11.11.3.6"
cuda11_libcublas_run_version:
- ">=11.5.2.43,<12.0.0"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<10.3.1"
cuda11_libcusolver_host_version:
- "=11.4.1.48"
cuda11_libcusolver_run_version:
- ">=11.2.0.43,<11.4.2"
cuda11_libcusparse_host_version:
- "=11.7.5.86"
cuda11_libcusparse_run_version:
- ">=11.6.0.43,<12.0.0"
# `cuda-profiler-api` only has `11.8.0` and `12.0.0` packages for all
# architectures. The "*_host_*" version specifiers correspond to `11.8` packages and the
# "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_cuda_profiler_api_host_version:
- "=11.8.86"
cuda11_cuda_profiler_api_run_version:
- ">=11.4.240,<12"
spdlog_version:
- ">=1.11.0,<1.12"
fmt_version:
- ">=9.1.0,<10"
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/build_libraft_template.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Just building template so we verify it uses libraft.so and fail if it doesn't build
./build.sh template
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/build_libraft_tests.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh tests bench-prims --allgpuarch --no-nvtx --build-metrics=tests_bench_prims --incl-cache-stats
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libraft-split
source:
path: ../../..
outputs:
- name: libraft-headers-only
version: {{ version }}
script: build_libraft_headers.sh
build:
script_env: &script_env
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libraft-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libraft-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
- librmm
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
{% if cuda_major != "11" %}
- cuda-cudart-dev
{% endif %}
- cuda-version ={{ cuda_version }}
- librmm ={{ minor_version }}
- spdlog {{ spdlog_version }}
- fmt {{ fmt_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- librmm ={{ minor_version }}
- spdlog {{ spdlog_version }}
- fmt {{ fmt_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft-headers-only library
- name: libraft-headers
version: {{ version }}
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
- librmm
requirements:
host:
- cuda-version ={{ cuda_version }}
run:
- {{ pin_subpackage('libraft-headers-only', exact=True) }}
- librmm ={{ minor_version }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_run_version }}
- libcublas-dev {{ cuda11_libcublas_run_version }}
- libcurand {{ cuda11_libcurand_run_version }}
- libcurand-dev {{ cuda11_libcurand_run_version }}
- libcusolver {{ cuda11_libcusolver_run_version }}
- libcusolver-dev {{ cuda11_libcusolver_run_version }}
- libcusparse {{ cuda11_libcusparse_run_version }}
- libcusparse-dev {{ cuda11_libcusparse_run_version }}
{% else %}
- cuda-cudart-dev
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft-headers library
- name: libraft
version: {{ version }}
script: build_libraft.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_host_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
run:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft library
- name: libraft-static
version: {{ version }}
script: build_libraft_static.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_host_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
run:
- {{ pin_subpackage('libraft-headers', exact=True) }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft static library
- name: libraft-tests
version: {{ version }}
script: build_libraft_tests.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-cudart-dev
- cuda-profiler-api
- libcublas-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_subpackage('libraft', exact=True) }}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft tests
- name: libraft-template
version: {{ version }}
script: build_libraft_template.sh
build:
script_env: *script_env
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libraft', exact=True) }}
- {{ pin_subpackage('libraft-headers', exact=True) }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
{% endif %}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_subpackage('libraft', exact=True) }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libraft template
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/build_libraft.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh libraft --allgpuarch --compile-lib --build-metrics=compile_lib --incl-cache-stats --no-nvtx
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/build_libraft_headers.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh libraft --allgpuarch --no-nvtx
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/libraft/build_libraft_static.sh | #!/usr/bin/env bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
./build.sh libraft --allgpuarch --compile-static-lib --build-metrics=compile_lib_static --incl-cache-stats --no-nvtx -n
cmake --install cpp/build --component compiled-static
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
nccl_version:
- ">=2.9.9"
gtest_version:
- ">=1.13.0"
glog_version:
- ">=0.6.0"
h5py_version:
- ">=3.8.0"
nlohmann_json_version:
- ">=3.11.2"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcublas_host_version:
- "=11.11.3.6"
cuda11_libcublas_run_version:
- ">=11.5.2.43,<12.0.0"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<10.3.1"
cuda11_libcusolver_host_version:
- "=11.4.1.48"
cuda11_libcusolver_run_version:
- ">=11.2.0.43,<11.4.2"
cuda11_libcusparse_host_version:
- "=11.7.5.86"
cuda11_libcusparse_run_version:
- ">=11.6.0.43,<12.0.0"
# `cuda-profiler-api` only has `11.8.0` and `12.0.0` packages for all
# architectures. The "*_host_*" version specifiers correspond to `11.8` packages and the
# "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_cuda_profiler_api_host_version:
- "=11.8.86"
cuda11_cuda_profiler_api_run_version:
- ">=11.4.240,<12"
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench/build.sh | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION.
./build.sh bench-ann --allgpuarch --no-nvtx --build-metrics=bench_ann --incl-cache-stats
cmake --install cpp/build --component ann_bench
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/raft-ann-bench/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c nvidia -c rapidsai
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: raft-ann-bench
version: {{ version }}
script: build.sh
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libraft-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libraft-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- python
- libraft {{ version }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }}
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
{% else %}
- cuda-profiler-api
- libcublas-dev
{% endif %}
- glog {{ glog_version }}
- nlohmann_json {{ nlohmann_json_version }}
- h5py {{ h5py_version }}
- benchmark
- matplotlib
- python
- pandas
- pyyaml
# rmm is needed to determine if package is gpu-enabled
- rmm ={{ minor_version }}
run:
- python
- libraft {{ version }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- glog {{ glog_version }}
- h5py {{ h5py_version }}
- benchmark
- glog {{ glog_version }}
- matplotlib
- python
- pandas
- pyyaml
# rmm is needed to determine if package is gpu-enabled
- rmm ={{ minor_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: RAFT ANN GPU and CPU benchmarks
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/pylibraft/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/pylibraft/build.sh | # Copyright (c) 2022, NVIDIA CORPORATION.
#!/usr/bin/env bash
# This assumes the script is executed from the root of the repo directory
./build.sh pylibraft --no-nvtx
| 0 |
rapidsai_public_repos/raft/conda/recipes | rapidsai_public_repos/raft/conda/recipes/pylibraft/meta.yaml | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c numba -c rapidsai -c pytorch
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: pylibraft
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
{% if cuda_major == "11" %}
- cuda-python >=11.7.1,<12.0a0
- cudatoolkit
{% else %}
- cuda-python >=12.0,<13.0a0
{% endif %}
- cuda-version ={{ cuda_version }}
- cython >=3.0.0
- libraft {{ version }}
- libraft-headers {{ version }}
- python x.x
- rmm ={{ minor_version }}
- scikit-build >=0.13.1
- setuptools
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- libraft {{ version }}
- libraft-headers {{ version }}
- numpy >=1.21
- python x.x
- rmm ={{ minor_version }}
test:
requires:
- cuda-version ={{ cuda_version }}
imports:
- pylibraft
about:
home: https://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: pylibraft library
| 0 |
rapidsai_public_repos/raft | rapidsai_public_repos/raft/cpp/.clangd | # https://clangd.llvm.org/config
# Apply a config conditionally to all C files
If:
PathMatch: .*\.(c|h)$
---
# Apply a config conditionally to all C++ files
If:
PathMatch: .*\.(c|h)pp
---
# Apply a config conditionally to all CUDA files
If:
PathMatch: .*\.cuh?
CompileFlags:
Add:
- "-x"
- "cuda"
# No error on unknown CUDA versions
- "-Wno-unknown-cuda-version"
# Allow variadic CUDA functions
- "-Xclang=-fcuda-allow-variadic-functions"
Diagnostics:
Suppress:
- "variadic_device_fn"
- "attributes_not_allowed"
---
# Tweak the clangd parse settings for all files
CompileFlags:
Add:
# report all errors
- "-ferror-limit=0"
- "-fmacro-backtrace-limit=0"
- "-ftemplate-backtrace-limit=0"
# Skip the CUDA version check
- "--no-cuda-version-check"
Remove:
# remove gcc's -fcoroutines
- -fcoroutines
# remove nvc++ flags unknown to clang
- "-gpu=*"
- "-stdpar*"
# remove nvcc flags unknown to clang
- "-arch*"
- "-gencode*"
- "--generate-code*"
- "-ccbin*"
- "-t=*"
- "--threads*"
- "-Xptxas*"
- "-Xcudafe*"
- "-Xfatbin*"
- "-Xcompiler*"
- "--diag-suppress*"
- "--diag_suppress*"
- "--compiler-options*"
- "--expt-extended-lambda"
- "--expt-relaxed-constexpr"
- "-forward-unknown-to-host-compiler"
- "-Werror=cross-execution-space-call"
| 0 |
rapidsai_public_repos/raft | rapidsai_public_repos/raft/cpp/CMakeLists.txt | # =============================================================================
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
set(RAPIDS_VERSION "24.02")
set(RAFT_VERSION "24.02.00")
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-export)
include(rapids-find)
option(BUILD_CPU_ONLY "Build CPU only components. Applies to RAFT ANN benchmarks currently" OFF)
# workaround for rapids_cuda_init_architectures not working for arch detection with
# enable_language(CUDA)
set(lang_list "CXX")
if(NOT BUILD_CPU_ONLY)
include(rapids-cuda)
rapids_cuda_init_architectures(RAFT)
list(APPEND lang_list "CUDA")
endif()
project(
RAFT
VERSION ${RAFT_VERSION}
LANGUAGES ${lang_list}
)
# Write the version header
rapids_cmake_write_version_file(include/raft/version_config.hpp)
# ##################################################################################################
# * build type ---------------------------------------------------------------
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# this is needed for clang-tidy runs
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# ##################################################################################################
# * User Options ------------------------------------------------------------
option(BUILD_SHARED_LIBS "Build raft shared libraries" ON)
option(BUILD_TESTS "Build raft unit-tests" ON)
option(BUILD_PRIMS_BENCH "Build raft C++ benchmark tests" OFF)
option(BUILD_ANN_BENCH "Build raft ann benchmarks" OFF)
option(CUDA_ENABLE_KERNELINFO "Enable kernel resource usage info" OFF)
option(CUDA_ENABLE_LINEINFO
"Enable the -lineinfo option for nvcc (useful for cuda-memcheck / profiler)" OFF
)
option(CUDA_STATIC_RUNTIME "Statically link the CUDA toolkit runtime and libraries" OFF)
option(CUDA_LOG_COMPILE_TIME "Write a log of compilation times to nvcc_compile_log.csv" OFF)
option(DETECT_CONDA_ENV "Enable detection of conda environment for dependencies" ON)
option(DISABLE_DEPRECATION_WARNINGS "Disable deprecation warnings" ON)
option(DISABLE_OPENMP "Disable OpenMP" OFF)
option(RAFT_NVTX "Enable nvtx markers" OFF)
set(RAFT_COMPILE_LIBRARY_DEFAULT OFF)
if((BUILD_TESTS
OR BUILD_PRIMS_BENCH
OR BUILD_ANN_BENCH
)
AND NOT BUILD_CPU_ONLY
)
set(RAFT_COMPILE_LIBRARY_DEFAULT ON)
endif()
option(RAFT_COMPILE_LIBRARY "Enable building raft shared library instantiations"
${RAFT_COMPILE_LIBRARY_DEFAULT}
)
if(BUILD_CPU_ONLY)
set(BUILD_SHARED_LIBS OFF)
set(BUILD_TESTS OFF)
endif()
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
# `Threads::Threads` is the same value across all builds so that cache hits occur
set(THREADS_PREFER_PTHREAD_FLAG ON)
include(CMakeDependentOption)
# cmake_dependent_option( RAFT_USE_FAISS_STATIC "Build and statically link the FAISS library for
# nearest neighbors search on GPU" ON RAFT_COMPILE_LIBRARY OFF )
message(VERBOSE "RAFT: Building optional components: ${raft_FIND_COMPONENTS}")
message(VERBOSE "RAFT: Build RAFT unit-tests: ${BUILD_TESTS}")
message(VERBOSE "RAFT: Building raft C++ benchmarks: ${BUILD_PRIMS_BENCH}")
message(VERBOSE "RAFT: Building ANN benchmarks: ${BUILD_ANN_BENCH}")
message(VERBOSE "RAFT: Build CPU only components: ${BUILD_CPU_ONLY}")
message(VERBOSE "RAFT: Enable detection of conda environment for dependencies: ${DETECT_CONDA_ENV}")
message(VERBOSE "RAFT: Disable deprecation warnings: ${DISABLE_DEPRECATION_WARNINGS}")
message(VERBOSE "RAFT: Disable OpenMP: ${DISABLE_OPENMP}")
message(VERBOSE "RAFT: Enable kernel resource usage info: ${CUDA_ENABLE_KERNELINFO}")
message(VERBOSE "RAFT: Enable lineinfo in nvcc: ${CUDA_ENABLE_LINEINFO}")
message(VERBOSE "RAFT: Enable nvtx markers: ${RAFT_NVTX}")
message(VERBOSE
"RAFT: Statically link the CUDA toolkit runtime and libraries: ${CUDA_STATIC_RUNTIME}"
)
# Set RMM logging level
set(RMM_LOGGING_LEVEL
"INFO"
CACHE STRING "Choose the logging level."
)
set_property(
CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF"
)
message(VERBOSE "RAFT: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")
# ##################################################################################################
# * Conda environment detection ----------------------------------------------
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(
STATUS "RAFT: No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}"
)
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
# ##################################################################################################
# * compiler options ----------------------------------------------------------
set(_ctk_static_suffix "")
if(CUDA_STATIC_RUNTIME)
set(_ctk_static_suffix "_static")
endif()
if(NOT BUILD_CPU_ONLY)
# CUDA runtime
rapids_cuda_init_runtime(USE_STATIC ${CUDA_STATIC_RUNTIME})
# * find CUDAToolkit package
# * determine GPU architectures
# * enable the CMake CUDA language
# * set other CUDA compilation flags
rapids_find_package(
CUDAToolkit REQUIRED
BUILD_EXPORT_SET raft-exports
INSTALL_EXPORT_SET raft-exports
)
else()
add_compile_definitions(BUILD_CPU_ONLY)
endif()
if(NOT DISABLE_OPENMP)
rapids_find_package(
OpenMP REQUIRED
BUILD_EXPORT_SET raft-exports
INSTALL_EXPORT_SET raft-exports
)
if(OPENMP_FOUND)
message(VERBOSE "RAFT: OpenMP found in ${OpenMP_CXX_INCLUDE_DIRS}")
endif()
endif()
include(cmake/modules/ConfigureCUDA.cmake)
# ##################################################################################################
# * Requirements -------------------------------------------------------------
# add third party dependencies using CPM
rapids_cpm_init()
if(NOT BUILD_CPU_ONLY)
# thrust before rmm/cuco so we get the right version of thrust/cub
include(cmake/thirdparty/get_thrust.cmake)
include(cmake/thirdparty/get_rmm.cmake)
include(cmake/thirdparty/get_cutlass.cmake)
include(${rapids-cmake-dir}/cpm/cuco.cmake)
rapids_cpm_cuco(BUILD_EXPORT_SET raft-exports INSTALL_EXPORT_SET raft-exports)
endif()
if(BUILD_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
endif()
if(BUILD_PRIMS_BENCH OR BUILD_ANN_BENCH)
include(${rapids-cmake-dir}/cpm/gbench.cmake)
rapids_cpm_gbench()
endif()
# ##################################################################################################
# * raft ---------------------------------------------------------------------
add_library(raft INTERFACE)
add_library(raft::raft ALIAS raft)
target_include_directories(
raft INTERFACE "$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/include>" "$<INSTALL_INTERFACE:include>"
)
if(NOT BUILD_CPU_ONLY)
# Keep RAFT as lightweight as possible. Only CUDA libs and rmm should be used in global target.
target_link_libraries(raft INTERFACE rmm::rmm cuco::cuco nvidia::cutlass::cutlass raft::Thrust)
endif()
target_compile_features(raft INTERFACE cxx_std_17 $<BUILD_INTERFACE:cuda_std_17>)
target_compile_options(
raft INTERFACE $<$<COMPILE_LANG_AND_ID:CUDA,NVIDIA>:--expt-extended-lambda
--expt-relaxed-constexpr>
)
set(RAFT_CUSOLVER_DEPENDENCY CUDA::cusolver${_ctk_static_suffix})
set(RAFT_CUBLAS_DEPENDENCY CUDA::cublas${_ctk_static_suffix})
set(RAFT_CURAND_DEPENDENCY CUDA::curand${_ctk_static_suffix})
set(RAFT_CUSPARSE_DEPENDENCY CUDA::cusparse${_ctk_static_suffix})
set(RAFT_CTK_MATH_DEPENDENCIES ${RAFT_CUBLAS_DEPENDENCY} ${RAFT_CUSOLVER_DEPENDENCY}
${RAFT_CUSPARSE_DEPENDENCY} ${RAFT_CURAND_DEPENDENCY}
)
# Endian detection
include(TestBigEndian)
test_big_endian(BIG_ENDIAN)
if(BIG_ENDIAN)
target_compile_definitions(raft INTERFACE RAFT_SYSTEM_LITTLE_ENDIAN=0)
else()
target_compile_definitions(raft INTERFACE RAFT_SYSTEM_LITTLE_ENDIAN=1)
endif()
if(RAFT_COMPILE_LIBRARY)
file(
WRITE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld"
[=[
SECTIONS
{
.nvFatBinSegment : { *(.nvFatBinSegment) }
.nv_fatbin : { *(.nv_fatbin) }
}
]=]
)
endif()
# ##################################################################################################
# * NVTX support in raft -----------------------------------------------------
if(RAFT_NVTX)
# This enables NVTX within the project with no option to disable it downstream.
target_link_libraries(raft INTERFACE CUDA::nvToolsExt)
target_compile_definitions(raft INTERFACE NVTX_ENABLED)
else()
# Allow enable NVTX downstream if not set here. This creates a new option at build/install time,
# which is set by default to OFF, but can be enabled in the dependent project.
get_property(
nvtx_option_help_string
CACHE RAFT_NVTX
PROPERTY HELPSTRING
)
string(
CONCAT
nvtx_export_string
"option(RAFT_NVTX \""
${nvtx_option_help_string}
"\" OFF)"
[=[
target_link_libraries(raft::raft INTERFACE $<$<BOOL:${RAFT_NVTX}>:CUDA::nvToolsExt>)
target_compile_definitions(raft::raft INTERFACE $<$<BOOL:${RAFT_NVTX}>:NVTX_ENABLED>)
]=]
)
endif()
# ##################################################################################################
# * raft_compiled ------------------------------------------------------------
add_library(raft_compiled INTERFACE)
if(TARGET raft_compiled AND (NOT TARGET raft::compiled))
add_library(raft::compiled ALIAS raft_compiled)
endif()
set_target_properties(raft_compiled PROPERTIES EXPORT_NAME compiled)
if(RAFT_COMPILE_LIBRARY)
add_library(
raft_objs OBJECT
src/core/logger.cpp
src/distance/detail/pairwise_matrix/dispatch_canberra_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_canberra_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_correlation_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_correlation_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_cosine_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_cosine_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_hamming_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_hamming_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_hellinger_expanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_hellinger_expanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_jensen_shannon_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_jensen_shannon_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_kl_divergence_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_kl_divergence_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l1_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l1_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_expanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_expanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l2_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_l_inf_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_l_inf_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_lp_unexpanded_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_lp_unexpanded_float_float_float_int.cu
src/distance/detail/pairwise_matrix/dispatch_rbf.cu
src/distance/detail/pairwise_matrix/dispatch_russel_rao_double_double_double_int.cu
src/distance/detail/pairwise_matrix/dispatch_russel_rao_float_float_float_int.cu
src/distance/distance.cu
src/distance/fused_l2_nn.cu
src/linalg/detail/coalesced_reduction.cu
src/matrix/detail/select_k_double_int64_t.cu
src/matrix/detail/select_k_double_uint32_t.cu
src/matrix/detail/select_k_float_int64_t.cu
src/matrix/detail/select_k_float_uint32_t.cu
src/matrix/detail/select_k_float_int32.cu
src/matrix/detail/select_k_half_int64_t.cu
src/matrix/detail/select_k_half_uint32_t.cu
src/neighbors/ball_cover.cu
src/neighbors/brute_force_fused_l2_knn_float_int64_t.cu
src/neighbors/brute_force_knn_int64_t_float_int64_t.cu
src/neighbors/brute_force_knn_int64_t_float_uint32_t.cu
src/neighbors/brute_force_knn_int_float_int.cu
src/neighbors/brute_force_knn_uint32_t_float_uint32_t.cu
src/neighbors/brute_force_knn_index_float.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_float_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_int8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_multi_cta_uint8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_float_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_int8_uint32_dim1024_t32.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim128_t8.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim256_t16.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim512_t32.cu
src/neighbors/detail/cagra/search_single_cta_uint8_uint32_dim1024_t32.cu
src/neighbors/detail/ivf_flat_interleaved_scan_float_float_int64_t.cu
src/neighbors/detail/ivf_flat_interleaved_scan_int8_t_int32_t_int64_t.cu
src/neighbors/detail/ivf_flat_interleaved_scan_uint8_t_uint32_t_int64_t.cu
src/neighbors/detail/ivf_flat_search.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_float.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_fp8_false.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_fp8_true.cu
src/neighbors/detail/ivf_pq_compute_similarity_float_half.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_fp8_false.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_fp8_true.cu
src/neighbors/detail/ivf_pq_compute_similarity_half_half.cu
src/neighbors/detail/refine_host_float_float.cpp
src/neighbors/detail/refine_host_int8_t_float.cpp
src/neighbors/detail/refine_host_uint8_t_float.cpp
src/neighbors/detail/selection_faiss_int32_t_float.cu
src/neighbors/detail/selection_faiss_int_double.cu
src/neighbors/detail/selection_faiss_long_float.cu
src/neighbors/detail/selection_faiss_size_t_double.cu
src/neighbors/detail/selection_faiss_size_t_float.cu
src/neighbors/detail/selection_faiss_uint32_t_float.cu
src/neighbors/detail/selection_faiss_int64_t_double.cu
src/neighbors/detail/selection_faiss_int64_t_half.cu
src/neighbors/detail/selection_faiss_uint32_t_double.cu
src/neighbors/detail/selection_faiss_uint32_t_half.cu
src/neighbors/ivf_flat_build_float_int64_t.cu
src/neighbors/ivf_flat_build_int8_t_int64_t.cu
src/neighbors/ivf_flat_build_uint8_t_int64_t.cu
src/neighbors/ivf_flat_extend_float_int64_t.cu
src/neighbors/ivf_flat_extend_int8_t_int64_t.cu
src/neighbors/ivf_flat_extend_uint8_t_int64_t.cu
src/neighbors/ivf_flat_search_float_int64_t.cu
src/neighbors/ivf_flat_search_int8_t_int64_t.cu
src/neighbors/ivf_flat_search_uint8_t_int64_t.cu
src/neighbors/ivfpq_build_float_int64_t.cu
src/neighbors/ivfpq_build_int8_t_int64_t.cu
src/neighbors/ivfpq_build_uint8_t_int64_t.cu
src/neighbors/ivfpq_extend_float_int64_t.cu
src/neighbors/ivfpq_extend_int8_t_int64_t.cu
src/neighbors/ivfpq_extend_uint8_t_int64_t.cu
src/neighbors/ivfpq_search_float_int64_t.cu
src/neighbors/ivfpq_search_int8_t_int64_t.cu
src/neighbors/ivfpq_search_uint8_t_int64_t.cu
src/neighbors/refine_float_float.cu
src/neighbors/refine_int8_t_float.cu
src/neighbors/refine_uint8_t_float.cu
src/raft_runtime/cluster/cluster_cost.cuh
src/raft_runtime/cluster/cluster_cost_double.cu
src/raft_runtime/cluster/cluster_cost_float.cu
src/raft_runtime/cluster/kmeans_fit_double.cu
src/raft_runtime/cluster/kmeans_fit_float.cu
src/raft_runtime/cluster/kmeans_init_plus_plus_double.cu
src/raft_runtime/cluster/kmeans_init_plus_plus_float.cu
src/raft_runtime/cluster/update_centroids.cuh
src/raft_runtime/cluster/update_centroids_double.cu
src/raft_runtime/cluster/update_centroids_float.cu
src/raft_runtime/distance/fused_l2_min_arg.cu
src/raft_runtime/distance/pairwise_distance.cu
src/raft_runtime/matrix/select_k_float_int64_t.cu
src/raft_runtime/neighbors/brute_force_knn_int64_t_float.cu
src/raft_runtime/neighbors/cagra_build.cu
src/raft_runtime/neighbors/cagra_search.cu
src/raft_runtime/neighbors/cagra_serialize.cu
src/raft_runtime/neighbors/ivf_flat_build.cu
src/raft_runtime/neighbors/ivf_flat_search.cu
src/raft_runtime/neighbors/ivf_flat_serialize.cu
src/raft_runtime/neighbors/ivfpq_build.cu
src/raft_runtime/neighbors/ivfpq_deserialize.cu
src/raft_runtime/neighbors/ivfpq_search_float_int64_t.cu
src/raft_runtime/neighbors/ivfpq_search_int8_t_int64_t.cu
src/raft_runtime/neighbors/ivfpq_search_uint8_t_int64_t.cu
src/raft_runtime/neighbors/ivfpq_serialize.cu
src/raft_runtime/neighbors/refine_d_int64_t_float.cu
src/raft_runtime/neighbors/refine_d_int64_t_int8_t.cu
src/raft_runtime/neighbors/refine_d_int64_t_uint8_t.cu
src/raft_runtime/neighbors/refine_h_int64_t_float.cu
src/raft_runtime/neighbors/refine_h_int64_t_int8_t.cu
src/raft_runtime/neighbors/refine_h_int64_t_uint8_t.cu
src/raft_runtime/random/rmat_rectangular_generator_int64_double.cu
src/raft_runtime/random/rmat_rectangular_generator_int64_float.cu
src/raft_runtime/random/rmat_rectangular_generator_int_double.cu
src/raft_runtime/random/rmat_rectangular_generator_int_float.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_2d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_one_3d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_2d_haversine.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_dist.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_euclidean.cu
src/spatial/knn/detail/ball_cover/registers_pass_two_3d_haversine.cu
src/spatial/knn/detail/fused_l2_knn_int32_t_float.cu
src/spatial/knn/detail/fused_l2_knn_int64_t_float.cu
src/spatial/knn/detail/fused_l2_knn_uint32_t_float.cu
src/util/memory_pool.cpp
)
set_target_properties(
raft_objs
PROPERTIES CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
)
target_compile_definitions(raft_objs PRIVATE "RAFT_EXPLICIT_INSTANTIATE_ONLY")
target_compile_options(
raft_objs PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${RAFT_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${RAFT_CUDA_FLAGS}>"
)
add_library(raft_lib SHARED $<TARGET_OBJECTS:raft_objs>)
add_library(raft_lib_static STATIC $<TARGET_OBJECTS:raft_objs>)
set_target_properties(
raft_lib raft_lib_static
PROPERTIES OUTPUT_NAME raft
BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
foreach(target raft_lib raft_lib_static raft_objs)
target_link_libraries(
${target}
PUBLIC raft::raft
${RAFT_CTK_MATH_DEPENDENCIES} # TODO: Once `raft::resources` is used everywhere, this
# will just be cublas
$<TARGET_NAME_IF_EXISTS:OpenMP::OpenMP_CXX>
)
# So consumers know when using libraft.so/libraft.a
target_compile_definitions(${target} PUBLIC "RAFT_COMPILED")
# ensure CUDA symbols aren't relocated to the middle of the debug build binaries
target_link_options(${target} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld")
endforeach()
endif()
if(TARGET raft_lib AND (NOT TARGET raft::raft_lib))
add_library(raft::raft_lib ALIAS raft_lib)
endif()
target_link_libraries(raft_compiled INTERFACE raft::raft $<TARGET_NAME_IF_EXISTS:raft::raft_lib>)
# ##################################################################################################
# * raft_compiled_static----------------------------------------------------------------------------
add_library(raft_compiled_static INTERFACE)
if(TARGET raft_compiled_static AND (NOT TARGET raft::compiled_static))
add_library(raft::compiled_static ALIAS raft_compiled_static)
endif()
set_target_properties(raft_compiled_static PROPERTIES EXPORT_NAME compiled_static)
if(TARGET raft_lib_static AND (NOT TARGET raft::raft_lib_static))
add_library(raft::raft_lib_static ALIAS raft_lib_static)
endif()
target_link_libraries(
raft_compiled_static INTERFACE raft::raft $<TARGET_NAME_IF_EXISTS:raft::raft_lib_static>
)
# ##################################################################################################
# * raft_distributed -------------------------------------------------------------------------------
add_library(raft_distributed INTERFACE)
if(TARGET raft_distributed AND (NOT TARGET raft::distributed))
add_library(raft::distributed ALIAS raft_distributed)
endif()
set_target_properties(raft_distributed PROPERTIES EXPORT_NAME distributed)
rapids_find_generate_module(
NCCL
HEADER_NAMES nccl.h
LIBRARY_NAMES nccl
BUILD_EXPORT_SET raft-distributed-exports
INSTALL_EXPORT_SET raft-distributed-exports
)
rapids_export_package(BUILD ucx raft-distributed-exports)
rapids_export_package(INSTALL ucx raft-distributed-exports)
rapids_export_package(BUILD NCCL raft-distributed-exports)
rapids_export_package(INSTALL NCCL raft-distributed-exports)
target_link_libraries(raft_distributed INTERFACE ucx::ucp NCCL::NCCL)
# ##################################################################################################
# * install targets-----------------------------------------------------------
rapids_cmake_install_lib_dir(lib_dir)
include(GNUInstallDirs)
include(CPack)
install(
TARGETS raft
DESTINATION ${lib_dir}
COMPONENT raft
EXPORT raft-exports
)
install(
TARGETS raft_compiled raft_compiled_static
DESTINATION ${lib_dir}
COMPONENT raft
EXPORT raft-compiled-exports
)
if(TARGET raft_lib)
install(
TARGETS raft_lib
DESTINATION ${lib_dir}
COMPONENT compiled
EXPORT raft-compiled-lib-exports
)
install(
TARGETS raft_lib_static
DESTINATION ${lib_dir}
COMPONENT compiled-static
EXPORT raft-compiled-static-lib-exports
)
install(
DIRECTORY include/raft_runtime
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
COMPONENT compiled
)
endif()
install(
TARGETS raft_distributed
DESTINATION ${lib_dir}
COMPONENT distributed
EXPORT raft-distributed-exports
)
install(
DIRECTORY include/raft
COMPONENT raft
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)
# Temporary install of raft.hpp while the file is removed
install(
FILES include/raft.hpp
COMPONENT raft
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/raft
)
install(
FILES ${CMAKE_CURRENT_BINARY_DIR}/include/raft/version_config.hpp
COMPONENT raft
DESTINATION include/raft
)
# ##################################################################################################
# * install export -----------------------------------------------------------
set(doc_string
[=[
Provide targets for the RAFT: Reusable Accelerated Functions and Tools
RAFT contains fundamental widely-used algorithms and primitives
for data science and machine learning.
Optional Components:
- compiled
- compiled_static
- distributed
Imported Targets:
- raft::raft
- raft::compiled brought in by the `compiled` optional component
- raft::compiled_static brought in by the `compiled_static` optional component
- raft::distributed brought in by the `distributed` optional component
]=]
)
set(code_string ${nvtx_export_string})
string(
APPEND
code_string
[=[
if(NOT TARGET raft::Thrust)
thrust_create_target(raft::Thrust FROM_OPTIONS)
endif()
]=]
)
string(
APPEND
code_string
[=[
if(compiled IN_LIST raft_FIND_COMPONENTS)
enable_language(CUDA)
endif()
]=]
)
set(raft_components compiled distributed)
set(raft_export_sets raft-compiled-exports raft-distributed-exports)
if(TARGET raft_lib)
list(APPEND raft_components compiled compiled-static)
list(APPEND raft_export_sets raft-compiled-lib-exports raft-compiled-static-lib-exports)
endif()
string(
APPEND
code_string
[=[
option(RAFT_ENABLE_CUSOLVER_DEPENDENCY "Enable cusolver dependency" ON)
option(RAFT_ENABLE_CUBLAS_DEPENDENCY "Enable cublas dependency" ON)
option(RAFT_ENABLE_CURAND_DEPENDENCY "Enable curand dependency" ON)
option(RAFT_ENABLE_CUSPARSE_DEPENDENCY "Enable cusparse dependency" ON)
mark_as_advanced(RAFT_ENABLE_CUSOLVER_DEPENDENCY)
mark_as_advanced(RAFT_ENABLE_CUBLAS_DEPENDENCY)
mark_as_advanced(RAFT_ENABLE_CURAND_DEPENDENCY)
mark_as_advanced(RAFT_ENABLE_CUSPARSE_DEPENDENCY)
target_link_libraries(raft::raft INTERFACE
$<$<BOOL:${RAFT_ENABLE_CUSOLVER_DEPENDENCY}>:${RAFT_CUSOLVER_DEPENDENCY}>
$<$<BOOL:${RAFT_ENABLE_CUBLAS_DEPENDENCY}>:${RAFT_CUBLAS_DEPENDENCY}>
$<$<BOOL:${RAFT_ENABLE_CUSPARSE_DEPENDENCY}>:${RAFT_CUSPARSE_DEPENDENCY}>
$<$<BOOL:${RAFT_ENABLE_CURAND_DEPENDENCY}>:${RAFT_CURAND_DEPENDENCY}>
)
]=]
)
# Use `rapids_export` for 22.04 as it will have COMPONENT support
rapids_export(
INSTALL raft
EXPORT_SET raft-exports
COMPONENTS ${raft_components}
COMPONENTS_EXPORT_SET ${raft_export_sets}
GLOBAL_TARGETS raft compiled distributed
NAMESPACE raft::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK code_string
)
# ##################################################################################################
# * build export -------------------------------------------------------------
rapids_export(
BUILD raft
EXPORT_SET raft-exports
COMPONENTS ${raft_components}
COMPONENTS_EXPORT_SET ${raft_export_sets}
GLOBAL_TARGETS raft compiled distributed
DOCUMENTATION doc_string
NAMESPACE raft::
FINAL_CODE_BLOCK code_string
)
# ##################################################################################################
# * shared test/bench headers ------------------------------------------------
if(BUILD_TESTS OR BUILD_PRIMS_BENCH)
include(internal/CMakeLists.txt)
endif()
# ##################################################################################################
# * build test executable ----------------------------------------------------
if(BUILD_TESTS)
include(test/CMakeLists.txt)
endif()
# ##################################################################################################
# * build benchmark executable -----------------------------------------------
if(BUILD_PRIMS_BENCH)
include(bench/prims/CMakeLists.txt)
endif()
# ##################################################################################################
# * build ann benchmark executable -----------------------------------------------
if(BUILD_ANN_BENCH)
include(bench/ann/CMakeLists.txt)
endif()
| 0 |
rapidsai_public_repos/raft | rapidsai_public_repos/raft/cpp/.clang-tidy | ---
# Refer to the following link for the explanation of each params:
# https://releases.llvm.org/8.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/list.html
Checks: 'clang-diagnostic-*,clang-analyzer-*,modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
User: snanditale
CheckOptions:
- key: google-build-namespaces.HeaderFileExtensions
value: ',h,hh,hpp,hxx'
- key: google-global-names-in-headers.HeaderFileExtensions
value: ',h,hh,hpp,hxx'
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.BranchThreshold
value: '4294967295'
- key: google-readability-function-size.LineThreshold
value: '4294967295'
- key: google-readability-function-size.NestingThreshold
value: '4294967295'
- key: google-readability-function-size.ParameterThreshold
value: '4294967295'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-function-size.VariableThreshold
value: '4294967295'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: google-runtime-int.SignedTypePrefix
value: int
- key: google-runtime-int.TypeSuffix
value: ''
- key: google-runtime-int.UnsignedTypePrefix
value: uint
- key: google-runtime-references.WhiteListTypes
value: ''
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-pass-by-value.ValuesOnly
value: '0'
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-replace-random-shuffle.IncludeStyle
value: llvm
- key: modernize-use-auto.MinTypeNameLength
value: '5'
- key: modernize-use-auto.RemoveStars
value: '0'
- key: modernize-use-default-member-init.IgnoreMacros
value: '1'
- key: modernize-use-default-member-init.UseAssignment
value: '0'
- key: modernize-use-emplace.ContainersWithPushBack
value: '::std::vector;::std::list;::std::deque'
- key: modernize-use-emplace.SmartPointers
value: '::std::shared_ptr;::std::unique_ptr;::std::auto_ptr;::std::weak_ptr'
- key: modernize-use-emplace.TupleMakeFunctions
value: '::std::make_pair;::std::make_tuple'
- key: modernize-use-emplace.TupleTypes
value: '::std::pair;::std::tuple'
- key: modernize-use-equals-default.IgnoreMacros
value: '1'
- key: modernize-use-noexcept.ReplacementString
value: ''
- key: modernize-use-noexcept.UseNoexceptFalse
value: '1'
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
- key: modernize-use-transparent-functors.SafeMode
value: '0'
- key: modernize-use-using.IgnoreMacros
value: '1'
- key: readability-identifier-naming.AbstractClassCase
value: lower_case
- key: readability-identifier-naming.AbstractClassPrefix
value: ''
- key: readability-identifier-naming.AbstractClassSuffix
value: ''
- key: readability-identifier-naming.ClassCase
value: lower_case
- key: readability-identifier-naming.ClassPrefix
value: ''
- key: readability-identifier-naming.ClassSuffix
value: ''
- key: readability-identifier-naming.ClassConstantCase
value: CamelCase
- key: readability-identifier-naming.ClassConstantPrefix
value: 'k'
- key: readability-identifier-naming.ClassConstantSuffix
value: ''
- key: readability-identifier-naming.ClassMemberCase
value: lower_case
- key: readability-identifier-naming.ClassMemberPrefix
value: ''
- key: readability-identifier-naming.ClassMemberSuffix
value: '_'
- key: readability-identifier-naming.ClassMethodCase
value: lower_case
- key: readability-identifier-naming.ClassMethodPrefix
value: ''
- key: readability-identifier-naming.ClassMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprFunctionCase
value: lower_case
- key: readability-identifier-naming.ConstexprFunctionPrefix
value: ''
- key: readability-identifier-naming.ConstexprFunctionSuffix
value: ''
- key: readability-identifier-naming.ConstexprMethodCase
value: lower_case
- key: readability-identifier-naming.ConstexprMethodPrefix
value: ''
- key: readability-identifier-naming.ConstexprMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: CamelCase
- key: readability-identifier-naming.ConstexprVariablePrefix
value: 'k'
- key: readability-identifier-naming.ConstexprVariableSuffix
value: ''
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.EnumPrefix
value: ''
- key: readability-identifier-naming.EnumSuffix
value: ''
- key: readability-identifier-naming.EnumConstantCase
value: CamelCase
- key: readability-identifier-naming.EnumConstantPrefix
value: 'k'
- key: readability-identifier-naming.EnumConstantSuffix
value: ''
- key: readability-identifier-naming.FunctionCase
value: lower_case
- key: readability-identifier-naming.FunctionPrefix
value: ''
- key: readability-identifier-naming.FunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalConstantCase
value: CamelCase
- key: readability-identifier-naming.GlobalConstantPrefix
value: 'k'
- key: readability-identifier-naming.GlobalConstantSuffix
value: ''
- key: readability-identifier-naming.IgnoreFailedSplit
value: '0'
- key: readability-identifier-naming.LocalVariableCase
value: 'lower_case'
- key: readability-identifier-naming.LocalVariablePrefix
value: ''
- key: readability-identifier-naming.LocalVariableSuffix
value: ''
  # NOTE(review): 'ConstExprVariable*' (capital E) is not a valid clang-tidy option
  # name — the correct spelling, 'ConstexprVariable*', is already configured above
  # with the same values, so these three entries are dead configuration.
  - key:             readability-identifier-naming.ConstExprVariableCase
    value:           'CamelCase'
  - key:             readability-identifier-naming.ConstExprVariablePrefix
    value:           'k'
  - key:             readability-identifier-naming.ConstExprVariableSuffix
    value:           ''
- key: readability-identifier-naming.MemberCase
value: lower_case
- key: readability-identifier-naming.MemberPrefix
value: ''
- key: readability-identifier-naming.MemberSuffix
value: ''
- key: readability-identifier-naming.NamespaceCase
value: lower_case
- key: readability-identifier-naming.NamespacePrefix
value: ''
- key: readability-identifier-naming.NamespaceSuffix
value: ''
- key: readability-identifier-naming.PrivateMemberCase
value: lower_case
- key: readability-identifier-naming.PrivateMemberPrefix
value: ''
- key: readability-identifier-naming.PrivateMemberSuffix
value: '_'
- key: readability-identifier-naming.ProtectedMemberCase
value: lower_case
- key: readability-identifier-naming.ProtectedMemberPrefix
value: ''
- key: readability-identifier-naming.ProtectedMemberSuffix
value: '_'
- key: readability-identifier-naming.StaticConstantCase
value: CamelCase
- key: readability-identifier-naming.StaticConstantPrefix
value: 'k'
- key: readability-identifier-naming.StaticConstantSuffix
value: ''
- key: readability-identifier-naming.StructCase
value: lower_case
- key: readability-identifier-naming.StructPrefix
value: ''
- key: readability-identifier-naming.StructSuffix
value: ''
- key: readability-identifier-naming.TypeAliasCase
value: lower_case
- key: readability-identifier-naming.TypeAliasPrefix
value: ''
- key: readability-identifier-naming.TypeAliasSuffix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterCase
value: CamelCase
- key: readability-identifier-naming.TypeTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypedefCase
value: lower_case
- key: readability-identifier-naming.TypedefPrefix
value: ''
- key: readability-identifier-naming.TypedefSuffix
value: ''
- key: readability-identifier-naming.VariableCase
value: lower_case
- key: readability-identifier-naming.VariablePrefix
value: ''
- key: readability-identifier-naming.VariableSuffix
value: ''
...
| 0 |
rapidsai_public_repos/raft | rapidsai_public_repos/raft/cpp/.clang-format | ---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# NOTE(review): the comment below contradicts the setting — it warns that enabling
# comment reflow mangles doxygen comments, yet ReflowComments is set to true here.
# Confirm which is intended.
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments:  true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/raft/cpp | rapidsai_public_repos/raft/cpp/include/raft.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
*/
#include "raft/core/device_mdarray.hpp"
#include "raft/core/device_mdspan.hpp"
#include "raft/core/device_span.hpp"
#include "raft/core/handle.hpp"
#include <string>
namespace raft {
/**
 * @brief Sanity-check helper confirming the RAFT headers were included.
 *
 * @return message indicating RAFT has been included successfully
 */
inline std::string test_raft() { return "RAFT Setup successfully"; }
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/random/rmat_rectangular_generator.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once  // was missing: every sibling runtime header guards against multiple inclusion

#include <cstdint>

#include <raft/core/resources.hpp>
#include <raft/random/rng_state.hpp>

namespace raft::runtime::random {

/**
 * @defgroup rmat_runtime RMAT Runtime API
 * @{
 */

// Declares one rmat_rectangular_gen overload per (index type, probability type)
// pair. Only the signatures are exposed here; the definitions live in the
// runtime library, so these declarations must match them exactly.
#define FUNC_DECL(IdxT, ProbT)                             \
  void rmat_rectangular_gen(raft::resources const& handle, \
                            IdxT* out,                     \
                            IdxT* out_src,                 \
                            IdxT* out_dst,                 \
                            const ProbT* theta,            \
                            IdxT r_scale,                  \
                            IdxT c_scale,                  \
                            IdxT n_edges,                  \
                            raft::random::RngState& r)

FUNC_DECL(int, float);
FUNC_DECL(int64_t, float);
FUNC_DECL(int, double);
FUNC_DECL(int64_t, double);

#undef FUNC_DECL

/** @} */  // end group rmat_runtime

}  // namespace raft::runtime::random
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/distance/pairwise_distance.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once  // was missing: every sibling runtime header guards against multiple inclusion

// raft::resources is used below but was not included directly (the header
// relied on a transitive include) — include what we use.
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>

namespace raft::runtime::distance {

/**
 * @defgroup pairwise_distance_runtime Pairwise Distances Runtime API
 * @{
 */

/**
 * @brief Compute the pairwise distance matrix between two sets of vectors.
 *
 * Overloads are provided for float and double inputs.
 *
 * @param[in]  handle     raft handle
 * @param[in]  x          first matrix, `m x k` (on device)
 * @param[in]  y          second matrix, `n x k` (on device)
 * @param[out] dists      output distances, `m x n` (on device)
 * @param[in]  m          number of rows in `x`
 * @param[in]  n          number of rows in `y`
 * @param[in]  k          number of columns in `x` and `y`
 * @param[in]  metric     distance metric to compute
 * @param[in]  isRowMajor whether the input matrices are row major
 * @param[in]  metric_arg extra metric-dependent argument
 */
void pairwise_distance(raft::resources const& handle,
                       float* x,
                       float* y,
                       float* dists,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg);

void pairwise_distance(raft::resources const& handle,
                       double* x,
                       double* y,
                       double* dists,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg);

/** @} */  // end group pairwise_distance_runtime

}  // namespace raft::runtime::distance
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/distance/fused_l2_nn.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once  // was missing: every sibling runtime header guards against multiple inclusion

#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>

namespace raft::runtime::distance {

/**
 * @defgroup fused_l2_nn_min_arg_runtime Fused L2 1NN Runtime API
 * @{
 */

/**
 * @brief Wrapper around fusedL2NN with minimum reduction operators.
 *
 * fusedL2NN cannot be compiled in the distance library due to the lambda
 * operators, so this wrapper covers the most common case (minimum).
 * Overloads are provided for float and double inputs.
 *
 * @param[in] handle raft handle
 * @param[out] min will contain the reduced output (Length = `m`)
 * (on device)
 * @param[in] x first matrix. Row major. Dim = `m x k`.
 * (on device).
 * @param[in] y second matrix. Row major. Dim = `n x k`.
 * (on device).
 * @param[in] m gemm m
 * @param[in] n gemm n
 * @param[in] k gemm k
 * @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
 */
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const float* x,
                         const float* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt);

void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const double* x,
                         const double* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt);

/** @} */  // end group fused_l2_nn_min_arg_runtime

}  // end namespace raft::runtime::distance
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/neighbors/ivf_pq.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/ivf_pq_types.hpp>

#include <optional>  // std::optional in the extend() declarations (was relied on transitively)
#include <string>    // std::string in serialize()/deserialize() (was relied on transitively)

namespace raft::runtime::neighbors::ivf_pq {

// We define overloads for build and extend with void return type. This is used in the Cython
// wrappers, where exception handling is not compatible with return type that has nontrivial
// constructor.

#define RAFT_DECL_BUILD_EXTEND(T, IdxT)                                        \
  [[nodiscard]] raft::neighbors::ivf_pq::index<IdxT> build(                    \
    raft::resources const& handle,                                             \
    const raft::neighbors::ivf_pq::index_params& params,                       \
    raft::device_matrix_view<const T, IdxT, row_major> dataset);               \
                                                                               \
  void build(raft::resources const& handle,                                    \
             const raft::neighbors::ivf_pq::index_params& params,              \
             raft::device_matrix_view<const T, IdxT, row_major> dataset,       \
             raft::neighbors::ivf_pq::index<IdxT>* idx);                       \
                                                                               \
  [[nodiscard]] raft::neighbors::ivf_pq::index<IdxT> extend(                   \
    raft::resources const& handle,                                             \
    raft::device_matrix_view<const T, IdxT, row_major> new_vectors,            \
    std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices,     \
    const raft::neighbors::ivf_pq::index<IdxT>& idx);                          \
                                                                               \
  void extend(raft::resources const& handle,                                   \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors,  \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              raft::neighbors::ivf_pq::index<IdxT>* idx);

RAFT_DECL_BUILD_EXTEND(float, int64_t);
RAFT_DECL_BUILD_EXTEND(int8_t, int64_t);
RAFT_DECL_BUILD_EXTEND(uint8_t, int64_t);

#undef RAFT_DECL_BUILD_EXTEND

#define RAFT_DECL_SEARCH(T, IdxT)                                         \
  void search(raft::resources const& handle,                              \
              const raft::neighbors::ivf_pq::search_params& params,       \
              const raft::neighbors::ivf_pq::index<IdxT>& idx,            \
              raft::device_matrix_view<const T, IdxT, row_major> queries, \
              raft::device_matrix_view<IdxT, IdxT, row_major> neighbors,  \
              raft::device_matrix_view<float, IdxT, row_major> distances);

RAFT_DECL_SEARCH(float, int64_t);
RAFT_DECL_SEARCH(int8_t, int64_t);
RAFT_DECL_SEARCH(uint8_t, int64_t);

#undef RAFT_DECL_SEARCH

/**
 * Save the index to file.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the filename for saving the index
 * @param[in] index IVF-PQ index
 *
 */
void serialize(raft::resources const& handle,
               const std::string& filename,
               const raft::neighbors::ivf_pq::index<int64_t>& index);

/**
 * Load index from file.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the name of the file that stores the index
 * @param[out] index IVF-PQ index to populate (was documented as [in], but it is written to)
 *
 */
void deserialize(raft::resources const& handle,
                 const std::string& filename,
                 raft::neighbors::ivf_pq::index<int64_t>* index);

}  // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/neighbors/ivf_flat.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/neighbors/ivf_flat_types.hpp>

#include <optional>  // std::optional in the extend() declarations (was relied on transitively)
#include <string>

namespace raft::runtime::neighbors::ivf_flat {

// We define overloads for build and extend with void return type. This is used in the Cython
// wrappers, where exception handling is not compatible with return type that has nontrivial
// constructor.

#define RAFT_INST_BUILD_EXTEND(T, IdxT)                                            \
  auto build(raft::resources const& handle,                                        \
             const raft::neighbors::ivf_flat::index_params& params,                \
             raft::device_matrix_view<const T, IdxT, row_major> dataset)           \
    ->raft::neighbors::ivf_flat::index<T, IdxT>;                                   \
                                                                                   \
  auto extend(raft::resources const& handle,                                       \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors,      \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              const raft::neighbors::ivf_flat::index<T, IdxT>& orig_index)         \
    ->raft::neighbors::ivf_flat::index<T, IdxT>;                                   \
                                                                                   \
  void build(raft::resources const& handle,                                        \
             const raft::neighbors::ivf_flat::index_params& params,                \
             raft::device_matrix_view<const T, IdxT, row_major> dataset,           \
             raft::neighbors::ivf_flat::index<T, IdxT>& idx);                      \
                                                                                   \
  void extend(raft::resources const& handle,                                       \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors,      \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              raft::neighbors::ivf_flat::index<T, IdxT>* idx);                     \
                                                                                   \
  void serialize_file(raft::resources const& handle,                               \
                      const std::string& filename,                                 \
                      const raft::neighbors::ivf_flat::index<T, IdxT>& index);     \
                                                                                   \
  void deserialize_file(raft::resources const& handle,                             \
                        const std::string& filename,                               \
                        raft::neighbors::ivf_flat::index<T, IdxT>* index);         \
  void serialize(raft::resources const& handle,                                    \
                 std::string& str,                                                 \
                 const raft::neighbors::ivf_flat::index<T, IdxT>& index);          \
  void deserialize(raft::resources const& handle,                                  \
                   const std::string& str,                                         \
                   raft::neighbors::ivf_flat::index<T, IdxT>*);

RAFT_INST_BUILD_EXTEND(float, int64_t)
RAFT_INST_BUILD_EXTEND(int8_t, int64_t)
RAFT_INST_BUILD_EXTEND(uint8_t, int64_t)

#undef RAFT_INST_BUILD_EXTEND

#define RAFT_INST_SEARCH(T, IdxT)                                 \
  void search(raft::resources const&,                             \
              raft::neighbors::ivf_flat::search_params const&,    \
              raft::neighbors::ivf_flat::index<T, IdxT> const&,   \
              raft::device_matrix_view<const T, IdxT, row_major>, \
              raft::device_matrix_view<IdxT, IdxT, row_major>,    \
              raft::device_matrix_view<float, IdxT, row_major>);

RAFT_INST_SEARCH(float, int64_t);
RAFT_INST_SEARCH(int8_t, int64_t);
RAFT_INST_SEARCH(uint8_t, int64_t);

#undef RAFT_INST_SEARCH

}  // namespace raft::runtime::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/neighbors/refine.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>  // int64_t used throughout (was relied on transitively)

#include <raft/core/device_mdspan.hpp>
// host_matrix_view overloads are declared below, so this include must be active
// (it was previously commented out and relied on a transitive include).
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>

namespace raft::runtime::neighbors {

// Declares refine() overloads (device- and host-memory variants) for one
// (index type, data type) pair; definitions live in the runtime library.
#define RAFT_INST_REFINE(IDX_T, DATA_T)                                                     \
  void refine(raft::resources const& handle,                                                \
              raft::device_matrix_view<const DATA_T, int64_t, row_major> dataset,           \
              raft::device_matrix_view<const DATA_T, int64_t, row_major> queries,           \
              raft::device_matrix_view<const IDX_T, int64_t, row_major> neighbor_candidates, \
              raft::device_matrix_view<IDX_T, int64_t, row_major> indices,                  \
              raft::device_matrix_view<float, int64_t, row_major> distances,                \
              distance::DistanceType metric);                                               \
                                                                                            \
  void refine(raft::resources const& handle,                                                \
              raft::host_matrix_view<const DATA_T, int64_t, row_major> dataset,             \
              raft::host_matrix_view<const DATA_T, int64_t, row_major> queries,             \
              raft::host_matrix_view<const IDX_T, int64_t, row_major> neighbor_candidates,  \
              raft::host_matrix_view<IDX_T, int64_t, row_major> indices,                    \
              raft::host_matrix_view<float, int64_t, row_major> distances,                  \
              distance::DistanceType metric);

RAFT_INST_REFINE(int64_t, float);
RAFT_INST_REFINE(int64_t, uint8_t);
RAFT_INST_REFINE(int64_t, int8_t);

#undef RAFT_INST_REFINE

}  // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/neighbors/brute_force.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>   // int64_t in the instantiation below (was relied on transitively)
#include <optional>  // std::optional / std::make_optional / std::nullopt default arguments

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
// distance::DistanceType is used in a default argument but was not included directly.
#include <raft/distance/distance_types.hpp>

namespace raft::runtime::neighbors::brute_force {

// Declares a brute-force knn() overload for one combination of index/data/layout
// types; the definition lives in the runtime library.
#define RAFT_INST_BFKNN(IDX_T, DATA_T, MATRIX_IDX_T, INDEX_LAYOUT, SEARCH_LAYOUT)  \
  void knn(raft::resources const& handle,                                          \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT> index,  \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, SEARCH_LAYOUT> search, \
           raft::device_matrix_view<IDX_T, MATRIX_IDX_T, row_major> indices,       \
           raft::device_matrix_view<DATA_T, MATRIX_IDX_T, row_major> distances,    \
           distance::DistanceType metric   = distance::DistanceType::L2Unexpanded, \
           std::optional<float> metric_arg = std::make_optional<float>(2.0f),      \
           std::optional<IDX_T> global_id_offset = std::nullopt);

RAFT_INST_BFKNN(int64_t, float, int64_t, raft::row_major, raft::row_major);

#undef RAFT_INST_BFKNN

}  // namespace raft::runtime::neighbors::brute_force
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/neighbors/cagra.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>  // int8_t, uint8_t, uint32_t, int64_t (was relied on transitively)
#include <string>

#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/neighbors/cagra_types.hpp>
#include <raft/neighbors/ivf_pq_types.hpp>

namespace raft::runtime::neighbors::cagra {

// Using device and host_matrix_view avoids needing to typedef mutltiple mdspans based on accessors
#define RAFT_INST_CAGRA_FUNCS(T, IdxT)                                      \
  auto build(raft::resources const& handle,                                 \
             const raft::neighbors::cagra::index_params& params,            \
             raft::device_matrix_view<const T, int64_t, row_major> dataset) \
    ->raft::neighbors::cagra::index<T, IdxT>;                               \
                                                                            \
  auto build(raft::resources const& handle,                                 \
             const raft::neighbors::cagra::index_params& params,            \
             raft::host_matrix_view<const T, int64_t, row_major> dataset)   \
    ->raft::neighbors::cagra::index<T, IdxT>;                               \
                                                                            \
  void build_device(raft::resources const& handle,                          \
                    const raft::neighbors::cagra::index_params& params,     \
                    raft::device_matrix_view<const T, int64_t, row_major> dataset, \
                    raft::neighbors::cagra::index<T, IdxT>& idx);           \
                                                                            \
  void build_host(raft::resources const& handle,                            \
                  const raft::neighbors::cagra::index_params& params,       \
                  raft::host_matrix_view<const T, int64_t, row_major> dataset, \
                  raft::neighbors::cagra::index<T, IdxT>& idx);             \
                                                                            \
  void search(raft::resources const& handle,                                \
              raft::neighbors::cagra::search_params const& params,          \
              const raft::neighbors::cagra::index<T, IdxT>& index,          \
              raft::device_matrix_view<const T, int64_t, row_major> queries, \
              raft::device_matrix_view<IdxT, int64_t, row_major> neighbors, \
              raft::device_matrix_view<float, int64_t, row_major> distances); \
  void serialize_file(raft::resources const& handle,                        \
                      const std::string& filename,                          \
                      const raft::neighbors::cagra::index<T, IdxT>& index,  \
                      bool include_dataset = true);                         \
                                                                            \
  void deserialize_file(raft::resources const& handle,                      \
                        const std::string& filename,                        \
                        raft::neighbors::cagra::index<T, IdxT>* index);     \
  void serialize(raft::resources const& handle,                             \
                 std::string& str,                                          \
                 const raft::neighbors::cagra::index<T, IdxT>& index,       \
                 bool include_dataset = true);                              \
                                                                            \
  void deserialize(raft::resources const& handle,                           \
                   const std::string& str,                                  \
                   raft::neighbors::cagra::index<T, IdxT>* index);

RAFT_INST_CAGRA_FUNCS(float, uint32_t);
RAFT_INST_CAGRA_FUNCS(int8_t, uint32_t);
RAFT_INST_CAGRA_FUNCS(uint8_t, uint32_t);

#undef RAFT_INST_CAGRA_FUNCS

// Declares graph-optimization entry points for device- and host-resident knn graphs.
#define RAFT_INST_CAGRA_OPTIMIZE(IdxT)                                        \
  void optimize_device(raft::resources const& res,                            \
                       raft::device_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                       raft::host_matrix_view<IdxT, int64_t, row_major> new_graph); \
                                                                              \
  void optimize_host(raft::resources const& res,                              \
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                     raft::host_matrix_view<IdxT, int64_t, row_major> new_graph);

RAFT_INST_CAGRA_OPTIMIZE(uint32_t);

#undef RAFT_INST_CAGRA_OPTIMIZE

}  // namespace raft::runtime::neighbors::cagra
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/matrix/select_k.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <optional>
namespace raft::runtime::matrix {
/**
 * @brief Precompiled runtime wrapper around raft::matrix::select_k.
 *
 * For each row of `in_val`, selects k values (presumably the k smallest when
 * `select_min` is true and the k largest otherwise, with k given by the
 * extent of the output views -- confirm against the raft::matrix::select_k
 * documentation) and writes them to `out_val`, with their source indices in
 * `out_idx`.
 *
 * @param handle raft resource handle (stream, allocators)
 * @param in_val row-major device matrix of input values
 * @param in_idx optional device matrix of indices associated with `in_val`
 * @param out_val row-major device matrix receiving the selected values
 * @param out_idx row-major device matrix receiving the selected indices
 * @param select_min true to select minimum values, false to select maximums
 */
void select_k(const resources& handle,
              raft::device_matrix_view<const float, int64_t, row_major> in_val,
              std::optional<raft::device_matrix_view<const int64_t, int64_t, row_major>> in_idx,
              raft::device_matrix_view<float, int64_t, row_major> out_val,
              raft::device_matrix_view<int64_t, int64_t, row_major> out_idx,
              bool select_min);
} // namespace raft::runtime::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft_runtime | rapidsai_public_repos/raft/cpp/include/raft_runtime/cluster/kmeans.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

#include <raft/cluster/kmeans_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>

#include <optional>

namespace raft::runtime::cluster::kmeans {

/**
 * @defgroup kmeans_runtime Kmeans Runtime API
 * @{
 */

/**
 * @brief Recompute k-means centroids from labeled samples (one update step).
 *
 * @param handle raft resource handle
 * @param X input samples, presumably row-major [n_samples x n_features] --
 *   confirm against the raft::cluster::kmeans implementation
 * @param n_samples number of samples (rows of X)
 * @param n_features number of features (columns of X)
 * @param n_clusters number of centroids
 * @param sample_weights per-sample weights, size [n_samples]
 * @param centroids current centroids, size [n_clusters x n_features]
 * @param labels cluster assigned to each sample, size [n_samples]
 * @param new_centroids (out) updated centroids, size [n_clusters x n_features]
 * @param weight_per_cluster (out) total sample weight gathered per cluster
 */
void update_centroids(raft::resources const& handle,
                      const float* X,
                      int n_samples,
                      int n_features,
                      int n_clusters,
                      const float* sample_weights,
                      const float* centroids,
                      const int* labels,
                      float* new_centroids,
                      float* weight_per_cluster);

/** @brief Double-precision overload of update_centroids. */
void update_centroids(raft::resources const& handle,
                      const double* X,
                      int n_samples,
                      int n_features,
                      int n_clusters,
                      const double* sample_weights,
                      const double* centroids,
                      const int* labels,
                      double* new_centroids,
                      double* weight_per_cluster);

/**
 * @brief Fit k-means on X; writes the fitted centroids and reports the final
 * inertia (objective value) and the number of iterations executed.
 */
void fit(raft::resources const& handle,
         const raft::cluster::kmeans::KMeansParams& params,
         raft::device_matrix_view<const float, int, row_major> X,
         std::optional<raft::device_vector_view<const float, int>> sample_weight,
         raft::device_matrix_view<float, int, row_major> centroids,
         raft::host_scalar_view<float, int> inertia,
         raft::host_scalar_view<int, int> n_iter);

/** @brief Double-precision overload of fit. */
void fit(raft::resources const& handle,
         const raft::cluster::kmeans::KMeansParams& params,
         raft::device_matrix_view<const double, int, row_major> X,
         std::optional<raft::device_vector_view<const double, int>> sample_weight,
         raft::device_matrix_view<double, int, row_major> centroids,
         raft::host_scalar_view<double, int> inertia,
         raft::host_scalar_view<int, int> n_iter);

/** @brief Initialize centroids from X (kmeans++-style seeding). */
void init_plus_plus(raft::resources const& handle,
                    const raft::cluster::kmeans::KMeansParams& params,
                    raft::device_matrix_view<const float, int, row_major> X,
                    raft::device_matrix_view<float, int, row_major> centroids);

/** @brief Double-precision overload of init_plus_plus. */
void init_plus_plus(raft::resources const& handle,
                    const raft::cluster::kmeans::KMeansParams& params,
                    raft::device_matrix_view<const double, int, row_major> X,
                    raft::device_matrix_view<double, int, row_major> centroids);

/**
 * @brief Compute the clustering cost of X for the given centroids (presumably
 * the sum of distances of samples to their closest centroid -- confirm against
 * the raft::cluster::kmeans documentation).
 */
void cluster_cost(raft::resources const& handle,
                  const float* X,
                  int n_samples,
                  int n_features,
                  int n_clusters,
                  const float* centroids,
                  float* cost);

/** @brief Double-precision overload of cluster_cost. */
void cluster_cost(raft::resources const& handle,
                  const double* X,
                  int n_samples,
                  int n_features,
                  int n_clusters,
                  const double* centroids,
                  double* cost);

/** @} */  // end group kmeans_runtime

}  // namespace raft::runtime::cluster::kmeans
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/seive.hpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <vector>
// Taken from:
// https://github.com/teju85/programming/blob/master/euler/include/seive.h
namespace raft {
namespace common {
/**
 * @brief Implementation of 'Seive of Eratosthenes'
 *
 * Storage layout: only odd numbers are tracked. Bit `i` of the packed bit
 * array represents the odd number `2*i + 1` (the number 2 is special-cased
 * in isPrime). A set bit means "prime", a cleared bit means "composite".
 */
class Seive {
 public:
  /**
   * @param _num number of integers for which seive is needed
   */
  Seive(unsigned _num)
  {
    N = _num;
    generateSeive();
  }

  /**
   * @brief Check whether a number is prime or not
   * @param num number to be checked; values greater than the construction
   * bound are outside the seive and reported as not prime
   * @return true if the 'num' is prime, else false
   */
  bool isPrime(unsigned num) const
  {
    unsigned mask, pos;
    if (num <= 1) { return false; }
    if (num == 2) { return true; }
    if (!(num & 1)) { return false; }
    // Guard against reading past the end of the bit array: the seive only
    // covers [0, N]. Previously an out-of-range query was undefined behavior.
    if (num > N) { return false; }
    getMaskPos(num, mask, pos);
    return (seive[pos] & mask);
  }

 private:
  // Classic sieve: for each number still marked prime up to sqrt(N), clear
  // the bits of its odd multiples (even numbers are never stored).
  void generateSeive()
  {
    auto sqN = fastIntSqrt(N);
    auto size = raft::ceildiv<unsigned>(N, sizeof(unsigned) * 8);
    seive.resize(size);
    // assume all to be primes initially
    for (auto& itr : seive) {
      itr = 0xffffffffu;
    }
    unsigned cid = 0;
    unsigned cnum = getNum(cid);
    while (cnum <= sqN) {
      // advance to the next number still marked prime (or run past sqrt(N))
      do {
        ++cid;
        cnum = getNum(cid);
        if (isPrime(cnum)) { break; }
      } while (cnum <= sqN);
      auto cnum2 = cnum << 1;
      // 'unmark' all the 'odd' multiples of the current prime
      for (unsigned i = 3, num = i * cnum; num <= N; i += 2, num += cnum2) {
        unmark(num);
      }
    }
  }

  // Bit-index mapping for the odd-only layout: getId(2k+1) == k.
  unsigned getId(unsigned num) const { return (num >> 1); }

  // Inverse mapping: id 0 -> 2 (special case), id k -> 2k+1.
  unsigned getNum(unsigned id) const
  {
    if (id == 0) { return 2; }
    return ((id << 1) + 1);
  }

  // Split `num` into the word index `pos` and single-bit `mask` within the
  // packed 32-bit-word array.
  void getMaskPos(unsigned num, unsigned& mask, unsigned& pos) const
  {
    pos = getId(num);
    mask = 1 << (pos & 0x1f);
    pos >>= 5;
  }

  // Clear the bit for `num`, marking it composite.
  void unmark(unsigned num)
  {
    unsigned mask, pos;
    getMaskPos(num, mask, pos);
    seive[pos] &= ~mask;
  }

  // Integer square root without floating point (bit-by-bit refinement).
  // REF: http://www.azillionmonkeys.com/qed/ulerysqroot.pdf
  unsigned fastIntSqrt(unsigned val)
  {
    unsigned g = 0;
    auto bshft = 15u, b = 1u << bshft;
    do {
      unsigned temp = ((g << 1) + b) << bshft--;
      if (val >= temp) {
        g += b;
        val -= temp;
      }
    } while (b >>= 1);
    return g;
  }

  /** find all primes till this number */
  unsigned N;
  /** the seive */
  std::vector<unsigned> seive;
};
}; // namespace common
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/fast_int_div.cuh | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <stdint.h>
namespace raft::util {
/**
 * @brief Perform fast integer division and modulo using a known divisor
 * From Hacker's Delight, Second Edition, Chapter 10
 *
 * @note This currently only supports 32b signed integers
 * @todo Extend support for signed divisors
 */
struct FastIntDiv {
  /**
   * @defgroup HostMethods Ctor's that are accessible only from host
   * @{
   * @brief Host-only ctor's
   * @param _d the divisor
   */
  FastIntDiv(int _d) : d(_d) { computeScalars(); }
  FastIntDiv& operator=(int _d)
  {
    d = _d;
    computeScalars();
    return *this;
  }
  /** @} */
  /**
   * @defgroup DeviceMethods Ctor's which even the device-side can access
   * @{
   * @brief host and device ctor's
   * @param other source object to be copied from
   */
  HDI FastIntDiv(const FastIntDiv& other) : d(other.d), m(other.m), p(other.p) {}
  HDI FastIntDiv& operator=(const FastIntDiv& other)
  {
    d = other.d;
    m = other.m;
    p = other.p;
    return *this;
  }
  /** @} */
  /** divisor */
  int d;
  /** the term 'm' as found in the reference chapter */
  unsigned m;
  /** the term 'p' as found in the reference chapter */
  int p;

 private:
  // Precomputes the "magic number" m and shift p so that, for the positive
  // divisor d, n / d can be evaluated as (m * n) >> p (with a +1 correction
  // for negative n, applied in operator/ below). See Hacker's Delight,
  // 2nd ed., chapter 10.
  void computeScalars()
  {
    if (d == 1) {
      // d == 1 is short-circuited in operator/; m and p are placeholders.
      m = 0;
      p = 1;
      return;
    } else if (d < 0) {
      ASSERT(false, "FastIntDiv: division by negative numbers not supported!");
    } else if (d == 0) {
      ASSERT(false, "FastIntDiv: got division by zero!");
    }
    // nc = one less than the largest multiple of d that is <= 2^31.
    int64_t nc = ((1LL << 31) / d) * d - 1;
    p = 31;
    int64_t twoP, rhs;
    // Find the smallest p > 31 with 2^p > nc * (d - 2^p mod d); the matching
    // m then makes the shift-multiply reproduce exact integer division.
    do {
      ++p;
      twoP = 1LL << p;
      rhs = nc * (d - twoP % d);
    } while (twoP <= rhs);
    m = (twoP + d - twoP % d) / d;
  }
}; // struct FastIntDiv
/**
 * @brief Division overload, so that FastIntDiv can be transparently switched
 * to even on device
 * @param n numerator
 * @param divisor the denominator
 * @return the quotient
 */
HDI int operator/(int n, const FastIntDiv& divisor)
{
  // d == 1 is not covered by the precomputed magic number; shortcut it.
  if (divisor.d == 1) { return n; }
  // Widened multiply by the magic number, then shift by the precomputed p.
  const int64_t widened = int64_t(divisor.m) * int64_t(n);
  int quotient = int(widened >> divisor.p);
  // Negative numerators require a +1 correction (see the reference chapter).
  if (n < 0) { ++quotient; }
  return quotient;
}
/**
 * @brief Modulo overload, so that FastIntDiv can be transparently switched
 * to even on device
 * @param n numerator
 * @param divisor the denominator
 * @return the remainder
 */
HDI int operator%(int n, const FastIntDiv& divisor)
{
  // n mod d == n - (n / d) * d, reusing the fast division overload above.
  return n - (n / divisor) * divisor.d;
}
}; // namespace raft::util
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/arch.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_rt_essentials.hpp> // RAFT_CUDA_TRY
namespace raft::util::arch {
/* raft::util::arch provides the following facilities:
*
* - raft::util::arch::SM_XX : hardcoded compile-time constants for various compute
* architectures. The values raft::util::arch::SM_min and raft::util::arch::SM_future
* represent architectures that are always smaller and larger (respectively)
* than any architecture that can be encountered in practice.
*
* - raft::util::arch::SM_compute_arch : a compile-time value for the *current*
* compute architecture that a kernel is compiled with. It can only be used
* inside kernels with a template argument.
*
* - raft::util::arch::kernel_virtual_arch : a function that computes at *run-time*
* which version of a kernel will launch (i.e., it will return the virtual compute
* architecture of the version of the kernel that it was compiled for which
* will be launched by the driver).
*
* - raft::util::arch::SM_range : a compile-time value to represent an open interval
* of compute architectures. This can be used to check if the current
* compile-time architecture is in a specified compatibility range.
*/
// detail::SM_generic is a template to create a generic compile-time SM
// architecture constant.
namespace detail {
// Encodes a compute architecture as 100*major + 10*minor (e.g. 750 for
// SM 7.5), matching the encoding of the __CUDA_ARCH__ macro.
template <int n>
struct SM_generic {
 public:
  __host__ __device__ constexpr int value() const { return n; }
};
} // namespace detail
// A list of architectures that RAPIDS explicitly builds for (SM60, ..., SM90)
// and SM_MIN and SM_FUTURE, that allow specifying an open interval of
// compatible compute architectures.
using SM_min = detail::SM_generic<350>;
using SM_60 = detail::SM_generic<600>;
using SM_70 = detail::SM_generic<700>;
using SM_75 = detail::SM_generic<750>;
using SM_80 = detail::SM_generic<800>;
using SM_86 = detail::SM_generic<860>;
using SM_90 = detail::SM_generic<900>;
using SM_future = detail::SM_generic<99999>;
// This is a type that uses the __CUDA_ARCH__ macro to obtain the compile-time
// compute architecture. It can only be used where __CUDA_ARCH__ is defined,
// i.e., inside a __global__ function template.
struct SM_compute_arch {
  template <int dummy = 0>
  __device__ constexpr int value() const
  {
#ifdef __CUDA_ARCH__
    // e.g. 750 when compiling for compute capability 7.5.
    return __CUDA_ARCH__;
#else
    // This function should not be called in host code (because __CUDA_ARCH__ is
    // not defined). This function is constexpr and thus can be called in host
    // code (due to the --expt-relaxed-constexpr compile flag). We would like to
    // provide an intelligible error message when this function is called in
    // host code, which we do below.
    //
    // To make sure the static_assert only fires in host code, we use a dummy
    // template parameter as described in P2593:
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p2593r0.html
    static_assert(dummy != 0,
                  "SM_compute_arch.value() is only callable from a __global__ function template. "
                  "A way to create a function template is by adding 'template <int dummy = 0>'.");
    return -1;
#endif
  }
};
// A runtime value for the actual compute architecture of a kernel.
//
// A single kernel can be compiled for several "virtual" compute architectures.
// When a program runs, the driver picks the version of the kernel that most
// closely matches the current hardware. This struct reflects the virtual
// compute architecture of the version of the kernel that the driver picks when
// the kernel runs.
struct SM_runtime {
  // kernel_virtual_arch is the only way to construct an SM_runtime
  // (the constructor is private).
  friend SM_runtime kernel_virtual_arch(void*);
 private:
  const int _version;
  SM_runtime(int version) : _version(version) {}
 public:
  // Virtual architecture of the selected kernel image, scaled like the
  // SM_generic constants (e.g. 750).
  __host__ __device__ int value() const { return _version; }
};
// Computes which virtual compute architecture the given kernel was compiled for,
// driver picks the version of the kernel that closely matches the current hardware.
//
// Semantics are described above in the documentation of SM_runtime.
//
// This function requires a pointer to the kernel that will run. Other methods
// to determine the architecture (that do not require a pointer) can be error
// prone. See:
// https://github.com/NVIDIA/cub/issues/545
inline SM_runtime kernel_virtual_arch(void* kernel)
{
  cudaFuncAttributes attributes;
  RAFT_CUDA_TRY(cudaFuncGetAttributes(&attributes, kernel));
  // ptxVersion is reported as 10*major + minor (e.g. 75); scale by 10 so the
  // value is comparable with SM_generic / __CUDA_ARCH__ encodings (750).
  return SM_runtime(10 * attributes.ptxVersion);
}
// SM_range represents a range of SM architectures. It can be used to
// conditionally compile a kernel.
template <typename SM_MIN, typename SM_MAX>
struct SM_range {
 private:
  const SM_MIN _min;
  const SM_MAX _max;
 public:
  __host__ __device__ constexpr SM_range(SM_MIN min, SM_MAX max) : _min(min), _max(max) {}
  __host__ __device__ constexpr SM_range() : _min(SM_MIN()), _max(SM_MAX()) {}
  // True iff `current` lies in the half-open interval [min, max):
  // the lower bound is inclusive, the upper bound exclusive.
  template <typename SM_t>
  __host__ __device__ constexpr bool contains(SM_t current) const
  {
    return _min.value() <= current.value() && current.value() < _max.value();
  }
};
} // namespace raft::util::arch
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/memory_pool-ext.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef> // size_t
#include <memory> // std::unique_ptr
#include <rmm/mr/device/device_memory_resource.hpp> // rmm::mr::device_memory_resource
namespace raft {
/**
 * @brief Obtain a pool device memory resource (declaration only; the
 * definition lives in the precompiled library / matching -inl header).
 *
 * @param [inout] mr reference to a memory-resource pointer; presumably set to
 *   point at the created pool when initially null -- confirm against the
 *   implementation in memory_pool-inl.hpp
 * @param [in] initial_size initial pool size in bytes
 * @return owning pointer that keeps the created pool resource alive; the
 *   caller must hold it for as long as `mr` is used
 */
std::unique_ptr<rmm::mr::device_memory_resource> get_pool_memory_resource(
  rmm::mr::device_memory_resource*& mr, size_t initial_size);
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/integer_utils.hpp | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Eyal Rozenberg <eyalroz@blazingdb.com>
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* Utility code involving integer arithmetic
*
*/
#include <limits>
#include <raft/core/detail/macros.hpp>
#include <stdexcept>
#include <type_traits>
namespace raft {
//! Utility functions
/**
 * Finds the smallest integer not less than `number_to_round` that is a
 * multiple of `modulus`. This function assumes that `number_to_round` is
 * non-negative and `modulus` is positive.
 *
 * @throws std::invalid_argument when the rounded-up value would not be
 * representable in `S`
 */
template <typename S>
constexpr inline S round_up_safe(S number_to_round, S modulus)
{
  auto remainder = number_to_round % modulus;
  if (remainder == 0) { return number_to_round; }
  // Detect overflow *before* performing the addition: the previous post-hoc
  // check (`rounded_up < number_to_round`) relied on wraparound, which is
  // undefined behavior for signed S.
  auto increment = modulus - remainder;
  if (number_to_round > std::numeric_limits<S>::max() - increment) {
    throw std::invalid_argument("Attempt to round up beyond the type's maximum value");
  }
  return number_to_round + increment;
}
/**
 * Finds the largest integer not greater than `number_to_round` that is a
 * multiple of `modulus`. Assumes `number_to_round` is non-negative and
 * `modulus` is positive.
 */
template <typename S>
inline S round_down_safe(S number_to_round, S modulus)
{
  // Dropping the remainder lands on the nearest lower multiple; this cannot
  // overflow, hence "safe".
  return number_to_round - number_to_round % modulus;
}
/**
 * Divides the left-hand-side by the right-hand-side, rounding up
 * to an integral multiple of the right-hand-side, e.g. (9,5) -> 2 , (10,5) -> 2, (11,5) -> 3.
 *
 * @param dividend the number to divide
 * @param divisor the number by which to divide
 * @return The least integer multiple of divisor which is greater than or equal to
 * the non-integral division dividend/divisor.
 *
 * @note sensitive to overflow, i.e. if dividend > std::numeric_limits<S>::max() - divisor,
 * the result will be incorrect
 */
template <typename S, typename T>
constexpr inline S div_rounding_up_unsafe(const S& dividend, const T& divisor) noexcept
{
  // Bias the dividend so that truncating division rounds upward.
  const auto biased = dividend + divisor - 1;
  return biased / divisor;
}

namespace detail {
// Unsigned flavor: sidesteps the overflow in div_rounding_up_unsafe by first
// subtracting the divisor from the dividend.
template <typename I>
constexpr inline I div_rounding_up_safe(std::integral_constant<bool, false>,
                                        I dividend,
                                        I divisor) noexcept
{
  // TODO: This could probably be implemented faster
  if (dividend > divisor) { return 1 + div_rounding_up_unsafe(dividend - divisor, divisor); }
  return dividend > 0;
}

// Signed flavor: truncating quotient plus a correction whenever the division
// is inexact; no overflow is possible on this path.
template <typename I>
constexpr inline I div_rounding_up_safe(std::integral_constant<bool, true>,
                                        I dividend,
                                        I divisor) noexcept
{
  const auto truncated = dividend / divisor;
  const bool inexact = (dividend % divisor) != 0;
  return truncated + inexact;
}
} // namespace detail
/**
 * Divides the left-hand-side by the right-hand-side, rounding up
 * to an integral multiple of the right-hand-side, e.g. (9,5) -> 2 , (10,5) -> 2, (11,5) -> 3.
 *
 * @param dividend the number to divide
 * @param divisor the number by which to divide
 * @return The least integer multiple of divisor which is greater than or equal to
 * the non-integral division dividend/divisor.
 *
 * @note will not overflow, and may _or may not_ be slower than the intuitive
 * approach of using (dividend + divisor - 1) / divisor
 */
template <typename I>
constexpr inline auto div_rounding_up_safe(I dividend, I divisor) noexcept
  -> std::enable_if_t<std::is_integral<I>::value, I>
{
  // Tag-dispatch on signedness: the signed and unsigned detail overloads use
  // different overflow-avoidance strategies.
  return detail::div_rounding_up_safe(
    std::integral_constant<bool, std::is_signed<I>::value>{}, dividend, divisor);
}
/** True iff `val` is a positive power of two, i.e. has exactly one bit set. */
template <typename I>
constexpr inline auto is_a_power_of_two(I val) noexcept
  -> std::enable_if_t<std::is_integral<I>::value, bool>
{
  // Clearing the lowest set bit of a power of two yields zero.
  if (val == 0) { return false; }
  return (val & (val - 1)) == 0;
}
/**
 * Given an integer `x`, return such `y` that `x <= y` and `is_a_power_of_two(y)`.
 * If such `y` does not exist in `T`, return zero.
 */
template <typename T>
constexpr inline auto bound_by_power_of_two(T x) noexcept
  -> std::enable_if_t<std::is_integral<T>::value, T>
{
  if (is_a_power_of_two(x)) { return x; }
  // kMaxSafe is the largest *power of two* representable in T; clamping the
  // doubling loop to it prevents `bound <<= 1` from overflowing.
  constexpr T kMaxUnsafe = std::numeric_limits<T>::max();
  constexpr T kMaxSafe = is_a_power_of_two(kMaxUnsafe) ? kMaxUnsafe : (kMaxUnsafe >> 1);
  const T limited = std::min(x, kMaxSafe);
  T bound = T{1};
  // Double until we reach the first power of two >= min(x, kMaxSafe).
  while (bound < limited) {
    bound <<= 1;
  }
  // If x was clamped (x > kMaxSafe), no representable power of two bounds it.
  return bound < x ? T{0} : bound;
}
/**
 * @brief Return the absolute value of a number.
 *
 * For signed types this defers to `std::abs()` (equivalent to
 * `(value < 0) ? -value : value`); unsigned types are returned unchanged.
 *
 * The overload set exists because calling `std::abs()` directly with an
 * unsigned argument (e.g. `uint64_t`) is ambiguous and fails to compile,
 * and not all such cases can be if-ed out using std::is_signed<T>::value
 * while satisfying the compiler.
 *
 * @param value numeric value, either integral or floating point
 * @return absolute value for signed types; `value` itself for unsigned types
 */
template <typename T>
constexpr inline auto absolute_value(T value) -> std::enable_if_t<std::is_signed<T>::value, T>
{
  return std::abs(value);
}
// Unsigned overload: the value is already non-negative.
template <typename T>
constexpr inline auto absolute_value(T value) -> std::enable_if_t<!std::is_signed<T>::value, T>
{
  return value;
}
/**
 * @defgroup Check whether the numeric conversion is narrowing
 *
 * @tparam From source type
 * @tparam To destination type
 * @{
 */
// Primary template: pessimistically assume the conversion narrows.
template <typename From, typename To, typename = void>
struct is_narrowing : std::true_type {};
// Chosen when `To{From}` (list-initialization) is well-formed; list-init is
// ill-formed for narrowing conversions, so its validity proves "not narrowing".
template <typename From, typename To>
struct is_narrowing<From, To, std::void_t<decltype(To{std::declval<From>()})>> : std::false_type {};
/** @} */
/** Check whether the numeric conversion is narrowing */
template <typename From, typename To>
inline constexpr bool is_narrowing_v = is_narrowing<From, To>::value; // NOLINT
/** Wide multiplication of two unsigned 64-bit integers: computes the exact
 *  128-bit product a*b, returning the high 64 bits in res_hi and the low 64
 *  bits in res_lo. */
_RAFT_HOST_DEVICE inline void wmul_64bit(uint64_t& res_hi, uint64_t& res_lo, uint64_t a, uint64_t b)
{
#ifdef __CUDA_ARCH__
  // Device path: PTX exposes the high and low halves of the product directly.
  asm("mul.hi.u64 %0, %1, %2;" : "=l"(res_hi) : "l"(a), "l"(b));
  asm("mul.lo.u64 %0, %1, %2;" : "=l"(res_lo) : "l"(a), "l"(b));
#else
  // Host path: schoolbook multiplication on 32-bit halves,
  // a*b = (t3 << 64) + ((t1 + t2) << 32) + t0.
  uint32_t a_hi, a_lo, b_hi, b_lo;
  a_hi = uint32_t(a >> 32);
  a_lo = uint32_t(a & uint64_t(0x00000000FFFFFFFF));
  b_hi = uint32_t(b >> 32);
  b_lo = uint32_t(b & uint64_t(0x00000000FFFFFFFF));
  uint64_t t0 = uint64_t(a_lo) * uint64_t(b_lo);
  uint64_t t1 = uint64_t(a_hi) * uint64_t(b_lo);
  uint64_t t2 = uint64_t(a_lo) * uint64_t(b_hi);
  uint64_t t3 = uint64_t(a_hi) * uint64_t(b_hi);
  uint64_t carry = 0, trial = 0;
  res_lo = t0;
  // Add the shifted cross terms into the low word, counting carries out of
  // bit 63 (detected by unsigned wraparound: sum < addend).
  trial = res_lo + (t1 << 32);
  if (trial < res_lo) carry++;
  res_lo = trial;
  trial = res_lo + (t2 << 32);
  if (trial < res_lo) carry++;
  res_lo = trial;
  // No need to worry about carry in this addition
  res_hi = (t1 >> 32) + (t2 >> 32) + t3 + carry;
#endif
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cache_util.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace cache {
/**
 * @brief Collect vectors of data from the cache into a contiguous memory buffer.
 *
 * We assume contiguous memory layout for the output buffer, i.e. we get
 * column vectors into a column major out buffer, or row vectors into a row
 * major output buffer.
 *
 * On exit, the output array is filled the following way:
 * out[i + n_vec*k] = cache[i + n_vec * cache_idx[k]]), where i=0..n_vec-1, and
 * k = 0..n-1 where cache_idx[k] >= 0
 *
 * We ignore vectors where cache_idx[k] < 0.
 *
 * @param [in] cache stores the cached data, size [n_vec x n_cached_vectors]
 * @param [in] n_vec number of elements in a cached vector
 * @param [in] cache_idx cache indices, size [n]
 * @param [in] n the number of elements that need to be collected
 * @param [out] out vectors collected from the cache, size [n_vec * n]
 */
template <typename math_t, typename idx_t, typename int_t>
RAFT_KERNEL get_vecs(const math_t* cache, int_t n_vec, const idx_t* cache_idx, int_t n, math_t* out)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int row = tid % n_vec; // row idx
  if (tid < n_vec * n) {
    size_t out_col = tid / n_vec; // col idx
    size_t cache_col = cache_idx[out_col];
    // The is_signed test keeps the `>= 0` filter for signed index types while
    // avoiding a tautological comparison when idx_t is unsigned.
    if (!std::is_signed<idx_t>::value || cache_idx[out_col] >= 0) {
      if (row + out_col * n_vec < (size_t)n_vec * n) { out[tid] = cache[row + cache_col * n_vec]; }
    }
  }
}
/**
 * @brief Store vectors of data into the cache.
 *
 * Elements within a vector should be contiguous in memory (i.e. column vectors
 * for column major data storage, or row vectors of row major data).
 *
 * If tile_idx==nullptr then the operation is the opposite of get_vecs,
 * i.e. we store
 * cache[i + cache_idx[k]*n_vec] = tile[i + k*n_vec], for i=0..n_vec-1, k=0..n-1
 *
 * If tile_idx != nullptr, then we permute the vectors from tile according
 * to tile_idx. This allows to store vectors from a buffer where the individual
 * vectors are not stored contiguously (but the elements of each vector shall
 * be contiguous):
 * cache[i + cache_idx[k]*n_vec] = tile[i + tile_idx[k]*n_vec],
 * for i=0..n_vec-1, k=0..n-1
 *
 * @param [in] tile stores the data to be cached, size [n_vec x n_tile]
 * @param [in] n_tile number of vectors in the input tile
 * @param [in] n_vec number of elements in a cached vector
 * @param [in] tile_idx indices of vectors that we want to store
 * @param [in] n number of vectors that we want to store (n <= n_tile)
 * @param [in] cache_idx cache indices, size [n], negative values are ignored
 * @param [inout] cache updated cache
 * @param [in] n_cache_vecs number of vectors that fit in the cache
 */
template <typename math_t>
RAFT_KERNEL store_vecs(const math_t* tile,
                       int n_tile,
                       int n_vec,
                       const int* tile_idx,
                       int n,
                       const int* cache_idx,
                       math_t* cache,
                       int n_cache_vecs)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int row = tid % n_vec; // row idx
  if (tid < n_vec * n) {
    int tile_col = tid / n_vec; // col idx
    // With a null tile_idx the tile is read in order; otherwise tile_idx
    // supplies the (possibly permuted) source column for this slot.
    int data_col = tile_idx ? tile_idx[tile_col] : tile_col;
    int cache_col = cache_idx[tile_col];
    // We ignore negative values. The rest of the checks should be fulfilled
    // if the cache is used properly
    if (cache_col >= 0 && cache_col < n_cache_vecs && data_col < n_tile) {
      cache[row + (size_t)cache_col * n_vec] = tile[row + (size_t)data_col * n_vec];
    }
  }
}
/**
 * @brief Map a key to a cache set.
 *
 * @param key key to be hashed
 * @param n_cache_sets number of cache sets
 * @return index of the cache set [0..n_cache_set)
 */
// NOTE(review): with a negative key the % result is negative -- callers
// presumably pass non-negative keys; confirm before relying on this.
int DI hash(int key, int n_cache_sets) { return key % n_cache_sets; }
/**
 * @brief Binary search to find the first element in the array which is greater
 * equal than a given value.
 * @param [in] array sorted array of n numbers
 * @param [in] n length of the array
 * @param [in] val the value to search for
 * @return the index of the first element in the array for which
 * array[idx] >= value. If there is no such value, then return n.
 */
int DI arg_first_ge(const int* array, int n, int val)
{
  int start = 0;
  int end = n - 1;
  // If the first element already satisfies array[0] >= val, index 0 is the
  // answer. (The previous check tested only array[0] == val, which broke the
  // documented contract for val < array[0]: the loop below assumes
  // array[start] < val and returned 1 instead of 0 in that case.)
  if (array[0] >= val) return 0;
  if (array[end] < val) return n;
  while (start + 1 < end) {
    int q = (start + end + 1) / 2;
    // invariants:
    // start < end
    // start < q <= end
    // array[start] < val && array[end] >= val
    // end - start shrinks on every iteration; on exit (end == start + 1),
    // array[end] is the first element >= val.
    if (array[q] >= val) {
      end = q;
    } else {
      start = q;
    }
  }
  return end;
}
/**
 * @brief Find the k-th occurrence of value in a sorted array.
 *
 * Assume that array is [0, 1, 1, 1, 2, 2, 4, 4, 4, 4, 6, 7]
 * then find_nth_occurrence(cset, 12, 4, 2) == 7, because cset_array[7] stores
 * the second element with value = 4.
 * If there are less than k values in the array, then return -1
 *
 * @param [in] array sorted array of numbers, size [n]
 * @param [in] n number of elements in the array
 * @param [in] val the value we are searching for
 * @param [in] k
 * @return the idx of the k-th occurrence of val in array, or -1 if
 * the value is not found.
 */
int DI find_nth_occurrence(const int* array, int n, int val, int k)
{
  // Locate the first slot holding a value >= val, then jump forward k slots
  // and verify we are still inside a run of val's.
  const int first = arg_first_ge(array, n, val);
  const int candidate = first + k;
  return (candidate < n && array[candidate] == val) ? candidate : -1;
}
/**
 * @brief Rank the entries in a cache set according to the time stamp, return
 * the indices that would sort the time stamp in ascending order.
 *
 * Assume we have a single cache set with time stamps as:
 * key (threadIdx.x): 0 1 2 3
 * val (time stamp): 8 6 7 5
 *
 * The corresponding sorted key-value pairs:
 * key: 3 1 2 0
 * val: 5 6 7 8
 * rank: 0th 1st 2nd 3rd
 *
 * On return, the rank is assigned for each thread:
 * threadIdx.x: 0 1 2 3
 * rank: 3 1 2 0
 *
 * For multiple cache sets, launch one block per cache set.
 *
 * @tparam nthreads number of threads per block (nthreads <= associativity)
 * @tparam associativity number of items in a cache set
 *
 * @param [in] cache_time time stamp of caching the data,
 size [associativity * n_cache_sets]
 * @param [in] n_cache_sets number of cache sets
 * @param [out] rank within the cache set size [nthreads * items_per_thread]
 * Each block should give a different pointer for rank.
 */
template <int nthreads, int associativity>
DI void rank_set_entries(const int* cache_time, int n_cache_sets, int* rank)
{
  const int items_per_thread = raft::ceildiv(associativity, nthreads);
  typedef cub::BlockRadixSort<int, nthreads, items_per_thread, int> BlockRadixSort;
  __shared__ typename BlockRadixSort::TempStorage temp_storage;
  int key[items_per_thread];
  int val[items_per_thread];
  int block_offset = blockIdx.x * associativity;
  for (int j = 0; j < items_per_thread; j++) {
    int k = threadIdx.x + j * nthreads;
    // Padding lanes (k >= associativity) get the sentinel 32768, presumably
    // larger than any real time stamp, so they sort to the end -- confirm
    // against the callers' time-stamp range.
    int t = (k < associativity) ? cache_time[block_offset + k] : 32768;
    key[j] = t;
    val[j] = k;
  }
  // Sort the (time, slot) pairs by time; after the sort, the item's position
  // across the block encodes the rank of each slot.
  BlockRadixSort(temp_storage).Sort(key, val);
  for (int j = 0; j < items_per_thread; j++) {
    if (val[j] < associativity) { rank[val[j]] = threadIdx.x * items_per_thread + j; }
  }
  __syncthreads();
}
/**
 * @brief Assign cache location to a set of keys using LRU replacement policy.
 *
 * The keys and the corresponding cache_set arrays shall be sorted according
 * to cache_set in ascending order. One block should be launched for every cache
 * set.
 *
 * Each cache set is sorted according to time_stamp, and values from keys
 * are filled in starting at the oldest time stamp. Entries that were accessed
 * at the current time are not reassigned.
 *
 * @tparam nthreads number of threads per block
 * @tparam associativity number of keys in a cache set
 *
 * @param [in] keys that we want to cache size [n]
 * @param [in] n number of keys
 * @param [in] cache_set assigned to keys, size [n]
 * @param [inout] cached_keys keys of already cached vectors,
 * size [n_cache_sets*associativity], on exit it will be updated with the
 * cached elements from keys.
 * @param [in] n_cache_sets number of cache sets
 * @param [inout] cache_time will be updated to "time" for those elements that
 * could be assigned to a cache location, size [n_cache_sets*associativity]
 * @param [in] time time stamp
 * @param [out] cache_idx the cache idx assigned to the input, or -1 if it could
 * not be cached, size [n]
 */
template <int nthreads, int associativity>
RAFT_KERNEL assign_cache_idx(const int* keys,
                             int n,
                             const int* cache_set,
                             int* cached_keys,
                             int n_cache_sets,
                             int* cache_time,
                             int time,
                             int* cache_idx)
{
  int block_offset = blockIdx.x * associativity;
  const int items_per_thread = raft::ceildiv(associativity, nthreads);
  // the size of rank limits how large associativity can be used in practice
  __shared__ int rank[items_per_thread * nthreads];
  rank_set_entries<nthreads, associativity>(cache_time, n_cache_sets, rank);
  // Each thread will fill items_per_thread items in the cache.
  // It uses a place, only if it was not updated at the current time step
  // (cache_time != time).
  // We rank the places according to the time stamp, least recently used
  // elements come to the front.
  // We fill the least recently used elements with the working set.
  // there might be elements which cannot be assigned to cache loc.
  // these elements are assigned -1.
  for (int j = 0; j < items_per_thread; j++) {
    int i = threadIdx.x + j * nthreads;
    int t_idx = block_offset + i;
    bool mask = (i < associativity);
    // whether this slot is available for writing
    mask = mask && (cache_time[t_idx] != time);
    // rank[i] tells which element to store by this thread
    // we look up where is the corresponding key stored in the input array
    if (mask) {
      // Take the rank[i]-th key among those hashed to this cache set; the
      // occurrences of blockIdx.x in the sorted cache_set array are contiguous.
      int k = find_nth_occurrence(cache_set, n, blockIdx.x, rank[i]);
      if (k > -1) {
        int key_val = keys[k];
        cached_keys[t_idx] = key_val;
        cache_idx[k] = t_idx;
        cache_time[t_idx] = time;
      }
    }
  }
}
/**
 * @brief Get the cache indices for keys stored in the cache.
 *
 * For every key, we look up the corresponding cache position.
 * If keys[k] is stored in the cache, then is_cached[k] is set to true, and
 * cache_idx[k] stores the corresponding cache idx.
 *
 * If keys[k] is not stored in the cache, then we assign a cache set to it.
 * This cache set is stored in cache_idx[k], and is_cached[k] is set to false.
 * In this case AssignCacheIdx should be called, to get an assigned position
 * within the cache set.
 *
 * Cache_time is assigned to the time input argument for all elements in idx.
 *
 * @param [in] keys array of keys that we want to look up in the cache, size [n]
 * @param [in] n number of keys to look up
 * @param [inout] cached_keys keys stored in the cache, size [n_cache_sets * associativity]
 * @param [in] n_cache_sets number of cache sets
 * @param [in] associativity number of keys in cache set
 * @param [inout] cache_time time stamp when the indices were cached, size [n_cache_sets *
 *   associativity]
 * @param [out] cache_idx cache indices of the working set elements, size [n]
 * @param [out] is_cached whether the element is cached size[n]
 * @param [in] time iteration counter (used for time stamping)
 */
template <typename = void>
RAFT_KERNEL get_cache_idx(int* keys,
                          int n,
                          int* cached_keys,
                          int n_cache_sets,
                          int associativity,
                          int* cache_time,
                          int* cache_idx,
                          bool* is_cached,
                          int time)
{
  // One thread per input key.
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < n) {
    int widx = keys[tid];
    // Hash the key into a cache set; cidx is the first slot of that set.
    int sidx   = hash(widx, n_cache_sets);
    int cidx   = sidx * associativity;
    int i      = 0;
    bool found = false;
    // Linear scan of the set for a valid slot holding this key
    // (cache_time == 0 marks a slot that has never been used).
    while (i < associativity && !found) {
      found = (cache_time[cidx + i] > 0 && cached_keys[cidx + i] == widx);
      i++;
    }
    is_cached[tid] = found;
    if (found) {
      cidx             = cidx + i - 1;  // i was incremented once past the hit
      cache_time[cidx] = time;          // update time stamp
      cache_idx[tid]   = cidx;          // exact cache idx
    } else {
      cache_idx[tid] = sidx;  // assign cache set
    }
  }
}
}; // namespace cache
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/memory_pool.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "memory_pool-ext.hpp"
#if !defined(RAFT_COMPILED)
#include "memory_pool-inl.hpp"
#endif // RAFT_COMPILED
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/itertools.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/detail/itertools.hpp>
/**
* Helpers inspired by the Python itertools library
*
*/
namespace raft::util::itertools {

/**
 * @brief Cartesian product of the given initializer lists.
 *
 * This helper can be used to easily define input parameters in tests/benchmarks.
 * Note that it's not optimized for use with large lists / many lists in performance-critical code!
 *
 * @tparam S Type of the output structures.
 * @tparam Args Types of the elements of the initilizer lists, matching the types of the first
 *              fields of the structure (if the structure has more fields, some might be initialized
 *              with their default value).
 * @param lists One or more initializer lists.
 * @return std::vector<S> A vector of structures containing the cartesian product.
 */
template <typename S, typename... Args>
std::vector<S> product(std::initializer_list<Args>... lists)
{
  // Materialize each initializer list as a vector and pass an index sequence so
  // the detail implementation can address every list by its position when
  // filling in the fields of S.
  return detail::product<S>(std::index_sequence_for<Args...>(), (std::vector<Args>(lists))...);
}

}  // namespace raft::util::itertools
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cache.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/core/interruptible.hpp>
#include <raft/core/logger.hpp>
#include <raft/util/cache_util.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <cstddef>
namespace raft::cache {
/**
* @brief Associative cache with least recently used replacement policy.
*
* SW managed cache in device memory, for ML algos where we can trade memory
* access for computation. The two main functions of this class are the
* management of cache indices, and methods to retrieve/store data using the
* cache indices.
*
* The index management can be considered as a hash map<int, int>, where the int
* keys are the original vector indices that we want to store, and the values are
* the cache location of these vectors. The keys are hashed into a bucket
* whose size equals the associativity. These are the cache sets. If a cache
* set is full, then new indices are stored by replacing the oldest entries.
*
* Using this index mapping we implement methods to store and retrieve data from
* the cache buffer, where a unit of data that we are storing is math_t[n_vec].
* For example in SVM we store full columns of the kernel matrix at each cache
* entry.
*
* Note: we should have a look if the index management could be simplified using
* concurrent_unordered_map.cuh from cudf. See Issue #914.
*
* Example usage:
* @code{.cpp}
*
* // An expensive calculation that we want to accelerate with caching:
* // we have n keys, and for each key we generate a vector with m elements.
* // The keys and the output values are stored in GPU memory.
* void calc(int *key, int n, int m, float *out, cudaStream_t stream) {
* for (k=0; k<n; k++) {
* // use key[k] to generate out[i + m*k], where i=0..m-1
* }
* }
*
* // We assume that our ML algo repeatedly calls calc, and the set of keys have
* // an overlap. We will use the cache to avoid repeated calculations.
*
* // Assume we have raft::resources& h, and cudaStream_t stream
* Cache<float> cache(h.get_device_allocator(), stream, m);
*
* // A buffer that we will reuse to store the cache indices.
* rmm::device_uvector<int> cache_idx(h.get_device_allocator(), stream, n);
*
* void cached_calc(int *key, int n, int m, float *out, stream) {
* int n_cached = 0;
*
* cache.GetCacheIdxPartitioned(key, n, cache_idx.data(), &n_cached,
* cudaStream_t stream);
*
* // Note: GetCacheIdxPartitioned has reordered the keys so that
* // key[0..n_cached-1] are the keys already in the cache.
* // We collect the corresponding values
* cache.GetVecs(cache_idx.data(), n_cached, out, stream);
*
* // Calculate the elements not in the cache
* int non_cached = n - n_cached;
* if (non_cached > 0) {
* int *key_new = key + n_cached;
* int *cache_idx_new = cache_idx.data() + n_cached;
* float *out_new = out + n_cached * m;
* // AssignCacheIdx can permute the keys, therefore it has to come before
* // we call calc.
* // Note: a call to AssignCacheIdx should always be preceded with
* // GetCacheIdxPartitioned, because that initializes the cache_idx_new array
* // with the cache set (hash bucket) that correspond to the keys.
* // The cache idx will be assigned from that cache set.
* cache.AssignCacheIdx(key_new, non_cached, cache_idx_new, stream);
*
* calc(key_new, non_cached, m, out_new, stream);
*
* // Store the calculated vectors into the cache.
* cache.StoreVecs(out_new, non_cached, non_cached, cache_idx_new, stream);
* }
* }
* @endcode
*/
template <typename math_t, int associativity = 32>
class Cache {
 public:
  /**
   * @brief Construct a Cache object
   *
   * @tparam math_t type of elements to be cached
   * @tparam associativity number of vectors in a cache set
   *
   * @param stream cuda stream
   * @param n_vec number of elements in a single vector that is stored in a
   *   cache entry
   * @param cache_size in MiB
   */
  Cache(cudaStream_t stream, int n_vec, float cache_size = 200)
    : n_vec(n_vec),
      cache_size(cache_size),
      cache(0, stream),
      cached_keys(0, stream),
      cache_time(0, stream),
      is_cached(0, stream),
      ws_tmp(0, stream),
      idx_tmp(0, stream),
      d_num_selected_out(stream),
      d_temp_storage(0, stream)
  {
    ASSERT(n_vec > 0, "Parameter n_vec: shall be larger than zero");
    ASSERT(associativity > 0, "Associativity shall be larger than zero");
    ASSERT(cache_size >= 0, "Cache size should not be negative");

    // Calculate how many vectors would fit the cache
    int n_cache_vecs = (cache_size * 1024 * 1024) / (sizeof(math_t) * n_vec);

    // The available memory shall be enough for at least one cache set
    if (n_cache_vecs >= associativity) {
      n_cache_sets = n_cache_vecs / associativity;
      n_cache_vecs = n_cache_sets * associativity;
      cache.resize(n_cache_vecs * n_vec, stream);
      cached_keys.resize(n_cache_vecs, stream);
      cache_time.resize(n_cache_vecs, stream);
      // Zero-fill keys and time stamps: cache_time == 0 marks a never-used slot.
      RAFT_CUDA_TRY(
        cudaMemsetAsync(cached_keys.data(), 0, cached_keys.size() * sizeof(int), stream));
      RAFT_CUDA_TRY(cudaMemsetAsync(cache_time.data(), 0, cache_time.size() * sizeof(int), stream));
    } else {
      if (cache_size > 0) {
        RAFT_LOG_WARN(
          "Warning: not enough memory to cache a single set of "
          "rows, not using cache");
      }
      n_cache_sets = 0;
      // Bug fix: the constructor parameter `cache_size` shadows the member of
      // the same name, so a plain `cache_size = 0` only zeroed the (dead)
      // parameter and GetSizeInMiB() kept reporting the requested size even
      // though nothing is cached. Zero both so the member and the debug log
      // below agree.
      cache_size       = 0;
      this->cache_size = 0;
    }
    RAFT_LOG_DEBUG(
      "Creating cache with size=%f MiB, to store %d vectors, in "
      "%d sets with associativity=%d",
      cache_size,
      n_cache_vecs,
      n_cache_sets,
      associativity);
  }

  // The class owns device buffers tied to a stream; copying is not meaningful.
  Cache(const Cache& other)            = delete;
  Cache& operator=(const Cache& other) = delete;

  /** @brief Collect cached data into contiguous memory space.
   *
   * On exit, the tile array is filled the following way:
   * out[i + n_vec*k] = cache[i + n_vec * idx[k]]), where i=0..n_vec-1,
   * k = 0..n-1
   *
   * Idx values less than 0 are ignored.
   *
   * @param [in] idx cache indices, size [n]
   * @param [in] n the number of vectors that need to be collected
   * @param [out] out vectors collected from cache, size [n_vec*n]
   * @param [in] stream cuda stream
   */
  void GetVecs(const int* idx, int n, math_t* out, cudaStream_t stream)
  {
    if (n > 0) {
      get_vecs<<<raft::ceildiv(n * n_vec, TPB), TPB, 0, stream>>>(cache.data(), n_vec, idx, n, out);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }

  /** @brief Store vectors of data into the cache.
   *
   * Roughly the opposite of GetVecs, but the input vectors can be scattered
   * in memory. The cache is updated using the following formula:
   *
   * cache[i + cache_idx[k]*n_vec] = tile[i + tile_idx[k]*n_vec],
   * for i=0..n_vec-1, k=0..n-1
   *
   * If tile_idx==nullptr, then we assume tile_idx[k] = k.
   *
   * Elements within a vector should be contiguous in memory (i.e. column vectors
   * for column major data storage, or row vectors of row major data).
   *
   * @param [in] tile stores the data to be cached, size [n_vec x n_tile]
   * @param [in] n_tile number of vectors in tile (at least n)
   * @param [in] n number of vectors that need to be stored in the cache (a subset
   *   of all the vectors in the tile)
   * @param [in] cache_idx cache indices for storing the vectors (negative values
   *   are ignored), size [n]
   * @param [in] stream cuda stream
   * @param [in] tile_idx indices of vectors that need to be stored
   */
  void StoreVecs(const math_t* tile,
                 int n_tile,
                 int n,
                 int* cache_idx,
                 cudaStream_t stream,
                 const int* tile_idx = nullptr)
  {
    if (n > 0) {
      store_vecs<<<raft::ceildiv(n * n_vec, TPB), TPB, 0, stream>>>(
        tile, n_tile, n_vec, tile_idx, n, cache_idx, cache.data(), cache.size() / n_vec);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }

  /** @brief Map a set of keys to cache indices.
   *
   * For each k in 0..n-1, if keys[k] is found in the cache, then cache_idx[k]
   * will tell the corresponding cache idx, and is_cached[k] is set to true.
   *
   * If keys[k] is not found in the cache, then is_cached[k] is set to false.
   * In this case we assign the cache set for keys[k], and cache_idx[k] will
   * store the cache set.
   *
   * @note in order to retrieve the cached vector j=cache_idx[k] from the cache,
   * we have to access cache[i + j*n_vec], where i=0..n_vec-1.
   *
   * @note: do not use simultaneous GetCacheIdx and AssignCacheIdx
   *
   * @param [in] keys device array of keys, size [n]
   * @param [in] n number of keys
   * @param [out] cache_idx device array of cache indices corresponding to the
   *   input keys, size [n]
   * @param [out] is_cached whether the element is already available in the
   *   cache, size [n]
   * @param [in] stream
   */
  void GetCacheIdx(int* keys, int n, int* cache_idx, bool* is_cached, cudaStream_t stream)
  {
    n_iter++;  // we increase the iteration counter, that is used to time stamp
               // accessing entries from the cache
    get_cache_idx<<<raft::ceildiv(n, TPB), TPB, 0, stream>>>(keys,
                                                             n,
                                                             cached_keys.data(),
                                                             n_cache_sets,
                                                             associativity,
                                                             cache_time.data(),
                                                             cache_idx,
                                                             is_cached,
                                                             n_iter);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

  /** @brief Map a set of keys to cache indices.
   *
   * Same as GetCacheIdx, but partitions the keys, and cache_idx arrays in a way
   * that keys[0..n_cached-1] and cache_idx[0..n_cached-1] store the indices of
   * vectors that are found in the cache, while keys[n_cached..n-1] are the
   * indices of vectors that are not found in the cache. For the vectors not
   * found in the cache, cache_idx[n_cached..n-1] stores the cache set, and this
   * can be used to call AssignCacheIdx.
   *
   * @param [inout] keys device array of keys, size [n]
   * @param [in] n number of indices
   * @param [out] cache_idx device array of cache indices corresponding to
   *   the input keys, size [n]
   * @param [out] n_cached host pointer: number of elements that are cached
   * @param [in] stream cuda stream
   */
  void GetCacheIdxPartitioned(int* keys, int n, int* cache_idx, int* n_cached, cudaStream_t stream)
  {
    ResizeTmpBuffers(n, stream);

    GetCacheIdx(keys, n, ws_tmp.data(), is_cached.data(), stream);

    // Group cache indices as [already cached, non_cached]
    cub::DevicePartition::Flagged(d_temp_storage.data(),
                                  d_temp_storage_size,
                                  ws_tmp.data(),
                                  is_cached.data(),
                                  cache_idx,
                                  d_num_selected_out.data(),
                                  n,
                                  stream);
    raft::update_host(n_cached, d_num_selected_out.data(), 1, stream);

    // Similarly re-group the input indices
    raft::copy(ws_tmp.data(), keys, n, stream);
    cub::DevicePartition::Flagged(d_temp_storage.data(),
                                  d_temp_storage_size,
                                  ws_tmp.data(),
                                  is_cached.data(),
                                  keys,
                                  d_num_selected_out.data(),
                                  n,
                                  stream);
    // Synchronize so that *n_cached (copied to host above) is valid on return.
    raft::interruptible::synchronize(stream);
  }

  /**
   * @brief Assign cache location to a set of keys.
   *
   * Note: call GetCacheIdx first, to get the cache_set assigned to the keys.
   * Keys that cannot be cached are assigned to -1.
   *
   * @param [inout] keys device array of keys, size [n]
   * @param [in] n number of elements that we want to cache
   * @param [inout] cidx on entry: cache_set, on exit: assigned cache_idx or -1,
   *   size[n]
   * @param [in] stream cuda stream
   */
  void AssignCacheIdx(int* keys, int n, int* cidx, cudaStream_t stream)
  {
    if (n <= 0) return;
    // Sort keys by cache set so assign_cache_idx can process one set per block.
    // NOTE(review): d_temp_storage was sized for cub::DevicePartition::Flagged in
    // ResizeTmpBuffers; this assumes SortPairs' temp-storage requirement does not
    // exceed it -- confirm against the cub documentation for the deployed version.
    cub::DeviceRadixSort::SortPairs(d_temp_storage.data(),
                                    d_temp_storage_size,
                                    cidx,
                                    ws_tmp.data(),
                                    keys,
                                    idx_tmp.data(),
                                    n,
                                    0,
                                    sizeof(int) * 8,
                                    stream);
    raft::copy(keys, idx_tmp.data(), n, stream);

    // set it to -1 (memset writes 0xFF into every byte of each int)
    RAFT_CUDA_TRY(cudaMemsetAsync(cidx, 255, n * sizeof(int), stream));
    const int nthreads = associativity <= 32 ? associativity : 32;

    assign_cache_idx<nthreads, associativity><<<n_cache_sets, nthreads, 0, stream>>>(
      keys, n, ws_tmp.data(), cached_keys.data(), n_cache_sets, cache_time.data(), n_iter, cidx);

    RAFT_CUDA_TRY(cudaPeekAtLastError());
    if (debug_mode) RAFT_CUDA_TRY(cudaDeviceSynchronize());
  }

  /** Return approximate cache size in MiB (0 if the cache is disabled). */
  float GetSizeInMiB() const { return cache_size; }

  /**
   * Returns the number of vectors that can be cached.
   */
  int GetSize() const { return cached_keys.size(); }

 protected:
  int n_vec;            //!< Number of elements in a cached vector
  float cache_size;     //!< in MiB
  int n_cache_sets;     //!< number of cache sets
  const int TPB = 256;  //!< threads per block for kernel launch
  int n_iter    = 0;    //!< Counter for time stamping cache operation
  bool debug_mode = false;

  rmm::device_uvector<math_t> cache;     //!< The value of cached vectors
  rmm::device_uvector<int> cached_keys;  //!< Keys stored at each cache loc
  rmm::device_uvector<int> cache_time;   //!< Time stamp for LRU cache

  // Helper arrays for GetCacheIdx
  rmm::device_uvector<bool> is_cached;
  rmm::device_uvector<int> ws_tmp;
  rmm::device_uvector<int> idx_tmp;

  // Helper arrays for cub
  rmm::device_scalar<int> d_num_selected_out;
  rmm::device_uvector<char> d_temp_storage;
  size_t d_temp_storage_size = 0;

  /** Grow the scratch buffers (and cub temp storage) to hold at least n items. */
  void ResizeTmpBuffers(int n, cudaStream_t stream)
  {
    if (ws_tmp.size() < static_cast<std::size_t>(n)) {
      ws_tmp.resize(n, stream);
      is_cached.resize(n, stream);
      idx_tmp.resize(n, stream);
      // First call with nullptr only queries the required temp storage size.
      cub::DevicePartition::Flagged(NULL,
                                    d_temp_storage_size,
                                    cached_keys.data(),
                                    is_cached.data(),
                                    cached_keys.data(),
                                    d_num_selected_out.data(),
                                    n,
                                    stream);
      d_temp_storage.resize(d_temp_storage_size, stream);
    }
  }
};
}; // namespace raft::cache
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cuda_utils.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <math_constants.h>
#include <stdint.h>
#include <type_traits>
#if defined(_RAFT_HAS_CUDA)
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#endif
#include <raft/core/cudart_utils.hpp>
#include <raft/core/math.hpp>
#include <raft/core/operators.hpp>
// For backward compatibility, we include the follow headers. They contain
// functionality that were previously contained in cuda_utils.cuh
#include <raft/util/cuda_dev_essentials.cuh>
#include <raft/util/reduction.cuh>
namespace raft {
/** Device function to have atomic add support for older archs */
template <typename Type>
DI void myAtomicAdd(Type* address, Type val)
{
  atomicAdd(address, val);
}

#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600)
// Pre-sm_60 devices lack a native double-precision atomicAdd; emulate it with
// an atomicCAS retry loop on the 64-bit bit pattern of the value.
// Ref:
// http://on-demand.gputechconf.com/gtc/2013/presentations/S3101-Atomic-Memory-Operations.pdf
template <>
DI void myAtomicAdd(double* address, double val)
{
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old             = *address_as_ull, assumed;
  do {
    assumed = old;
    old =
      atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
}
#endif
/**
 * Atomic read-modify-write with an arbitrary binary reduction `op`, built on
 * an atomicCAS retry loop: each overload reinterprets the value's bits as an
 * integer type atomicCAS supports, applies `op`, and retries until no other
 * thread raced in between.
 */
template <typename T, typename ReduceLambda>
DI void myAtomicReduce(T* address, T val, ReduceLambda op);

// double: CAS on the 64-bit bit pattern.
template <typename ReduceLambda>
DI void myAtomicReduce(double* address, double val, ReduceLambda op)
{
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old             = *address_as_ull, assumed;
  do {
    assumed = old;
    old     = atomicCAS(
      address_as_ull, assumed, __double_as_longlong(op(val, __longlong_as_double(assumed))));
  } while (assumed != old);
}

// float: CAS on the 32-bit bit pattern.
template <typename ReduceLambda>
DI void myAtomicReduce(float* address, float val, ReduceLambda op)
{
  unsigned int* address_as_uint = (unsigned int*)address;
  unsigned int old              = *address_as_uint, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_uint, assumed, __float_as_uint(op(val, __uint_as_float(assumed))));
  } while (assumed != old);
}

// Needed for atomicCas on ushort
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
// __half: CAS on the 16-bit bit pattern (requires sm_70+ ushort atomicCAS).
template <typename ReduceLambda>
DI void myAtomicReduce(__half* address, __half val, ReduceLambda op)
{
  unsigned short int* address_as_uint = (unsigned short int*)address;
  unsigned short int old              = *address_as_uint, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_uint, assumed, __half_as_ushort(op(val, __ushort_as_half(assumed))));
  } while (assumed != old);
}
#endif

// Needed for nv_bfloat16 support
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
// nv_bfloat16: CAS on the 16-bit bit pattern (bfloat16 intrinsics need sm_80+).
template <typename ReduceLambda>
DI void myAtomicReduce(nv_bfloat16* address, nv_bfloat16 val, ReduceLambda op)
{
  unsigned short int* address_as_uint = (unsigned short int*)address;
  unsigned short int old              = *address_as_uint, assumed;
  do {
    assumed = old;
    old     = atomicCAS(
      address_as_uint, assumed, __bfloat16_as_ushort(op(val, __ushort_as_bfloat16(assumed))));
  } while (assumed != old);
}
#endif

// Integer overloads: atomicCAS operates on the value directly, no bit cast.
template <typename ReduceLambda>
DI void myAtomicReduce(int* address, int val, ReduceLambda op)
{
  int old = *address, assumed;
  do {
    assumed = old;
    old     = atomicCAS(address, assumed, op(val, assumed));
  } while (assumed != old);
}

template <typename ReduceLambda>
DI void myAtomicReduce(long long* address, long long val, ReduceLambda op)
{
  long long old = *address, assumed;
  do {
    assumed = old;
    old     = atomicCAS(address, assumed, op(val, assumed));
  } while (assumed != old);
}

template <typename ReduceLambda>
DI void myAtomicReduce(unsigned long long* address, unsigned long long val, ReduceLambda op)
{
  unsigned long long old = *address, assumed;
  do {
    assumed = old;
    old     = atomicCAS(address, assumed, op(val, assumed));
  } while (assumed != old);
}
/**
 * @brief Provide atomic min operation.
 * @tparam T: data type for input data (float or double).
 * @param[in] address: address to read old value from, and to atomically update w/ min(old value,
 * val)
 * @param[in] val: new value to compare with old
 */
template <typename T>
DI T myAtomicMin(T* address, T val);

/**
 * @brief Provide atomic max operation.
 * @tparam T: data type for input data (float or double).
 * @param[in] address: address to read old value from, and to atomically update w/ max(old value,
 * val)
 * @param[in] val: new value to compare with old
 */
template <typename T>
DI T myAtomicMax(T* address, T val);

// Note: in each definition below the update itself is atomic, but the returned
// value comes from a separate, non-atomic read of *address afterwards.
DI float myAtomicMin(float* address, float val)
{
  myAtomicReduce<float(float, float)>(address, val, fminf);
  return *address;
}

DI float myAtomicMax(float* address, float val)
{
  myAtomicReduce<float(float, float)>(address, val, fmaxf);
  return *address;
}

DI double myAtomicMin(double* address, double val)
{
  myAtomicReduce<double(double, double)>(address, val, fmin);
  return *address;
}

DI double myAtomicMax(double* address, double val)
{
  myAtomicReduce<double(double, double)>(address, val, fmax);
  return *address;
}
/**
 * @defgroup Max maximum of two numbers
 * @{
 */
template <typename T>
HDI T myMax(T x, T y);
template <>
[[deprecated("use raft::max from raft/core/math.hpp instead")]] HDI float myMax<float>(float x,
                                                                                       float y)
{
  return fmaxf(x, y);
}
template <>
[[deprecated("use raft::max from raft/core/math.hpp instead")]] HDI double myMax<double>(double x,
                                                                                         double y)
{
  return fmax(x, y);
}
/** @} */

/**
 * @defgroup Min minimum of two numbers
 * @{
 */
template <typename T>
HDI T myMin(T x, T y);
template <>
[[deprecated("use raft::min from raft/core/math.hpp instead")]] HDI float myMin<float>(float x,
                                                                                       float y)
{
  return fminf(x, y);
}
template <>
[[deprecated("use raft::min from raft/core/math.hpp instead")]] HDI double myMin<double>(double x,
                                                                                         double y)
{
  return fmin(x, y);
}
/** @} */

/**
 * @brief Provide atomic min operation.
 * @tparam T: data type for input data (float or double).
 * @param[in] address: address to read old value from, and to atomically update w/ min(old value,
 * val)
 * @param[in] val: new value to compare with old
 */
template <typename T>
DI T myAtomicMin(T* address, T val)
{
  // Generic fallback in terms of myAtomicReduce with the library min functor.
  myAtomicReduce(address, val, raft::min_op{});
  return *address;
}

/**
 * @brief Provide atomic max operation.
 * @tparam T: data type for input data (float or double).
 * @param[in] address: address to read old value from, and to atomically update w/ max(old value,
 * val)
 * @param[in] val: new value to compare with old
 */
template <typename T>
DI T myAtomicMax(T* address, T val)
{
  // Generic fallback in terms of myAtomicReduce with the library max functor.
  myAtomicReduce(address, val, raft::max_op{});
  return *address;
}
/**
 * @defgroup Exp Exponential function
 * @{
 */
// Deprecated forwarders kept for backward compatibility; they delegate to the
// standard single/double precision math functions.
template <typename T>
HDI T myExp(T x);
template <>
[[deprecated("use raft::exp from raft/core/math.hpp instead")]] HDI float myExp(float x)
{
  return expf(x);
}
template <>
[[deprecated("use raft::exp from raft/core/math.hpp instead")]] HDI double myExp(double x)
{
  return ::exp(x);
}
/** @} */

/**
 * @defgroup Cuda infinity values
 * @{
 */
template <typename T>
inline __device__ T myInf();
template <>
inline __device__ float myInf<float>()
{
  return CUDART_INF_F;
}
template <>
inline __device__ double myInf<double>()
{
  return CUDART_INF;
}
/** @} */

/**
 * @defgroup Log Natural logarithm
 * @{
 */
template <typename T>
HDI T myLog(T x);
template <>
[[deprecated("use raft::log from raft/core/math.hpp instead")]] HDI float myLog(float x)
{
  return logf(x);
}
template <>
[[deprecated("use raft::log from raft/core/math.hpp instead")]] HDI double myLog(double x)
{
  return ::log(x);
}
/** @} */

/**
 * @defgroup Sqrt Square root
 * @{
 */
template <typename T>
HDI T mySqrt(T x);
template <>
[[deprecated("use raft::sqrt from raft/core/math.hpp instead")]] HDI float mySqrt(float x)
{
  return sqrtf(x);
}
template <>
[[deprecated("use raft::sqrt from raft/core/math.hpp instead")]] HDI double mySqrt(double x)
{
  return ::sqrt(x);
}
/** @} */

/**
 * @defgroup SineCosine Sine and cosine calculation
 * @{
 */
template <typename T>
DI void mySinCos(T x, T& s, T& c);
template <>
[[deprecated("use raft::sincos from raft/core/math.hpp instead")]] DI void mySinCos(float x,
                                                                                    float& s,
                                                                                    float& c)
{
  sincosf(x, &s, &c);
}
template <>
[[deprecated("use raft::sincos from raft/core/math.hpp instead")]] DI void mySinCos(double x,
                                                                                    double& s,
                                                                                    double& c)
{
  ::sincos(x, &s, &c);
}
/** @} */

/**
 * @defgroup Sine Sine calculation
 * @{
 */
template <typename T>
DI T mySin(T x);
template <>
[[deprecated("use raft::sin from raft/core/math.hpp instead")]] DI float mySin(float x)
{
  return sinf(x);
}
template <>
[[deprecated("use raft::sin from raft/core/math.hpp instead")]] DI double mySin(double x)
{
  return ::sin(x);
}
/** @} */
/**
 * @defgroup Abs Absolute value
 * @{
 */
// Generic fallback: works for any type with operator< and unary minus.
template <typename T>
DI T myAbs(T x)
{
  return x < 0 ? -x : x;
}
template <>
[[deprecated("use raft::abs from raft/core/math.hpp instead")]] DI float myAbs(float x)
{
  return fabsf(x);
}
template <>
[[deprecated("use raft::abs from raft/core/math.hpp instead")]] DI double myAbs(double x)
{
  return fabs(x);
}
/** @} */

/**
 * @defgroup Pow Power function
 * @{
 */
template <typename T>
HDI T myPow(T x, T power);
template <>
[[deprecated("use raft::pow from raft/core/math.hpp instead")]] HDI float myPow(float x,
                                                                                float power)
{
  return powf(x, power);
}
template <>
[[deprecated("use raft::pow from raft/core/math.hpp instead")]] HDI double myPow(double x,
                                                                                 double power)
{
  return ::pow(x, power);
}
/** @} */

/**
 * @defgroup myTanh tanh function
 * @{
 */
template <typename T>
HDI T myTanh(T x);
template <>
[[deprecated("use raft::tanh from raft/core/math.hpp instead")]] HDI float myTanh(float x)
{
  return tanhf(x);
}
template <>
[[deprecated("use raft::tanh from raft/core/math.hpp instead")]] HDI double myTanh(double x)
{
  return ::tanh(x);
}
/** @} */

/**
 * @defgroup myATanh arctanh function
 * @{
 */
template <typename T>
HDI T myATanh(T x);
template <>
[[deprecated("use raft::atanh from raft/core/math.hpp instead")]] HDI float myATanh(float x)
{
  return atanhf(x);
}
template <>
[[deprecated("use raft::atanh from raft/core/math.hpp instead")]] HDI double myATanh(double x)
{
  return ::atanh(x);
}
/** @} */
/**
 * @defgroup LambdaOps Legacy lambda operations, to be deprecated
 * Functor equivalents of the operators in raft/core/operators.hpp, kept only
 * for backward compatibility. The unused IdxType parameter matches the legacy
 * (value, index) call signature.
 * @{
 */
// Identity: returns its input unchanged.
template <typename Type, typename IdxType = int>
struct Nop {
  [[deprecated("Nop is deprecated. Use identity_op instead.")]] HDI Type
  operator()(Type in, IdxType i = 0) const
  {
    return in;
  }
};

// Square root of the input.
template <typename Type, typename IdxType = int>
struct SqrtOp {
  [[deprecated("SqrtOp is deprecated. Use sqrt_op instead.")]] HDI Type
  operator()(Type in, IdxType i = 0) const
  {
    return raft::sqrt(in);
  }
};

// L0 "norm" term: 1 for non-zero input, 0 otherwise.
template <typename Type, typename IdxType = int>
struct L0Op {
  [[deprecated("L0Op is deprecated. Use nz_op instead.")]] HDI Type operator()(Type in,
                                                                               IdxType i = 0) const
  {
    return in != Type(0) ? Type(1) : Type(0);
  }
};

// L1 norm term: absolute value.
template <typename Type, typename IdxType = int>
struct L1Op {
  [[deprecated("L1Op is deprecated. Use abs_op instead.")]] HDI Type operator()(Type in,
                                                                                IdxType i = 0) const
  {
    return raft::abs(in);
  }
};

// L2 norm term: square of the input.
template <typename Type, typename IdxType = int>
struct L2Op {
  [[deprecated("L2Op is deprecated. Use sq_op instead.")]] HDI Type operator()(Type in,
                                                                               IdxType i = 0) const
  {
    return in * in;
  }
};

// Binary addition reduction.
template <typename InT, typename OutT = InT>
struct Sum {
  [[deprecated("Sum is deprecated. Use add_op instead.")]] HDI OutT operator()(InT a, InT b) const
  {
    return a + b;
  }
};

// Binary maximum reduction.
template <typename Type>
struct Max {
  [[deprecated("Max is deprecated. Use max_op instead.")]] HDI Type operator()(Type a, Type b) const
  {
    if (b > a) { return b; }
    return a;
  }
};
/** @} */
/**
* @defgroup Sign Obtain sign value
* @brief Obtain sign of x
* @param x input
* @return +1 if x >= 0 and -1 otherwise
* @{
*/
template <typename T>
DI T signPrim(T x)
{
return x < 0 ? -1 : +1;
}
template <>
DI float signPrim(float x)
{
return signbit(x) == true ? -1.0f : +1.0f;
}
template <>
DI double signPrim(double x)
{
return signbit(x) == true ? -1.0 : +1.0;
}
/** @} */
/**
* @defgroup Max maximum of two numbers
* @brief Obtain maximum of two values
* @param x one item
* @param y second item
* @return maximum of two items
* @{
*/
template <typename T>
DI T maxPrim(T x, T y)
{
  // Generic path: plain comparison; equal inputs yield y.
  if (y < x) { return x; }
  return y;
}
template <>
DI float maxPrim(float x, float y)
{
  // Single precision: delegate to fmaxf.
  return fmaxf(x, y);
}
template <>
DI double maxPrim(double x, double y)
{
  // Double precision: delegate to fmax.
  return fmax(x, y);
}
/** @} */
/**
* @brief Four-way byte dot product-accumulate.
* @tparam T Four-byte integer: int or unsigned int
* @tparam S Either same as T or a 4-byte vector of the same signedness.
*
* @param a
* @param b
* @param c
* @return dot(a, b) + c
*/
template <typename T, typename S = T>
DI auto dp4a(S a, S b, T c) -> T;
// Signed variant on explicit byte vectors.
template <>
DI auto dp4a(char4 a, char4 b, int c) -> int
{
#if __CUDA_ARCH__ >= 610
  // Single-instruction path: the DP4A instruction exists from CC 6.1 onward.
  return __dp4a(a, b, c);
#else
  // Software fallback: widen each byte lane to int and multiply-accumulate.
  c += static_cast<int>(a.x) * static_cast<int>(b.x);
  c += static_cast<int>(a.y) * static_cast<int>(b.y);
  c += static_cast<int>(a.z) * static_cast<int>(b.z);
  c += static_cast<int>(a.w) * static_cast<int>(b.w);
  return c;
#endif
}
// Unsigned variant on explicit byte vectors.
template <>
DI auto dp4a(uchar4 a, uchar4 b, unsigned int c) -> unsigned int
{
#if __CUDA_ARCH__ >= 610
  return __dp4a(a, b, c);
#else
  c += static_cast<unsigned int>(a.x) * static_cast<unsigned int>(b.x);
  c += static_cast<unsigned int>(a.y) * static_cast<unsigned int>(b.y);
  c += static_cast<unsigned int>(a.z) * static_cast<unsigned int>(b.z);
  c += static_cast<unsigned int>(a.w) * static_cast<unsigned int>(b.w);
  return c;
#endif
}
// Signed variant with the four bytes packed into an int.
template <>
DI auto dp4a(int a, int b, int c) -> int
{
#if __CUDA_ARCH__ >= 610
  return __dp4a(a, b, c);
#else
  // Reinterpret the packed 32-bit values as byte vectors and reuse the
  // vector fallback above.
  return dp4a(*reinterpret_cast<char4*>(&a), *reinterpret_cast<char4*>(&b), c);
#endif
}
// Unsigned variant with the four bytes packed into an unsigned int.
template <>
DI auto dp4a(unsigned int a, unsigned int b, unsigned int c) -> unsigned int
{
#if __CUDA_ARCH__ >= 610
  return __dp4a(a, b, c);
#else
  return dp4a(*reinterpret_cast<uchar4*>(&a), *reinterpret_cast<uchar4*>(&b), c);
#endif
}
/**
* @brief Simple utility function to determine whether user_stream or one of the
* internal streams should be used.
* @param user_stream main user stream
* @param int_streams array of internal streams
* @param n_int_streams number of internal streams
* @param idx the index for which to query the stream
*/
inline cudaStream_t select_stream(cudaStream_t user_stream,
                                  cudaStream_t* int_streams,
                                  int n_int_streams,
                                  int idx)
{
  // Round-robin over the internal streams when any exist; otherwise fall
  // back to the user-provided stream.
  if (n_int_streams > 0) { return int_streams[idx % n_int_streams]; }
  return user_stream;
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/pow2_utils.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
namespace raft {
/**
* @brief Fast arithmetics and alignment checks for power-of-two values known at compile time.
*
* @tparam Value_ a compile-time value representable as a power-of-two.
*/
template <auto Value_>
struct Pow2 {
  typedef decltype(Value_) Type;
  static constexpr Type Value = Value_;
  static constexpr Type Log2  = log2(Value);
  static constexpr Type Mask  = Value - 1;
  static_assert(std::is_integral<Type>::value, "Value must be integral.");
  static_assert(Value && !(Value & Mask), "Value must be power of two.");

#define Pow2_FUNC_QUALIFIER static constexpr __host__ __device__ __forceinline__
#define Pow2_WHEN_INTEGRAL(I) std::enable_if_t<Pow2_IS_REPRESENTABLE_AS(I), I>
#define Pow2_IS_REPRESENTABLE_AS(I) (std::is_integral<I>::value && Type(I(Value)) == Value)

  /**
   * Integer division by Value truncated toward zero
   * (same as `x / Value` in C++).
   *
   * Invariant: `x = Value * quot(x) + rem(x)`
   */
  template <typename I>
  Pow2_FUNC_QUALIFIER Pow2_WHEN_INTEGRAL(I) quot(I x) noexcept
  {
    // Arithmetic shift rounds toward -inf for negative x; the correction
    // term (+1 when x < 0 and x is not a multiple of Value) restores
    // truncation toward zero.
    if constexpr (std::is_signed<I>::value) return (x >> I(Log2)) + (x < 0 && (x & I(Mask)));
    if constexpr (std::is_unsigned<I>::value) return x >> I(Log2);
  }

  /**
   * Remainder of integer division by Value truncated toward zero
   * (same as `x % Value` in C++).
   *
   * Invariant: `x = Value * quot(x) + rem(x)`.
   */
  template <typename I>
  Pow2_FUNC_QUALIFIER Pow2_WHEN_INTEGRAL(I) rem(I x) noexcept
  {
    // Mask the magnitude, then restore the sign, matching C++ `%` semantics.
    if constexpr (std::is_signed<I>::value) return x < 0 ? -((-x) & I(Mask)) : (x & I(Mask));
    if constexpr (std::is_unsigned<I>::value) return x & I(Mask);
  }

  /**
   * Integer division by Value truncated toward negative infinity
   * (same as `x // Value` in Python).
   *
   * Invariant: `x = Value * div(x) + mod(x)`.
   *
   * Note, `div` and `mod` for negative values are slightly faster
   * than `quot` and `rem`, but behave slightly different
   * compared to normal C++ operators `/` and `%`.
   */
  template <typename I>
  Pow2_FUNC_QUALIFIER Pow2_WHEN_INTEGRAL(I) div(I x) noexcept
  {
    return x >> I(Log2);
  }

  /**
   * Rounds up the value to the next power of two, i.e. returns
   * `2^(floor(log2(val)) + 1)`.
   * Requires `val > 0`; note that an exact power of two maps to twice its
   * value under this definition.
   */
  template <typename I>
  Pow2_FUNC_QUALIFIER Pow2_WHEN_INTEGRAL(I) round_up_pow2(I val) noexcept
  {
    // Fix: shift I(1) rather than the int literal 1. With a 64-bit I and a
    // large val, `1 << (log2(val) + 1)` shifts a plain int by >= 31 bits,
    // which overflows / is undefined behavior; performing the shift in I is
    // safe and leaves all in-range results unchanged.
    return I(1) << (log2(val) + 1);
  }

  /**
   * x modulo Value operation (remainder of the `div(x)`)
   * (same as `x % Value` in Python).
   *
   * Invariant: `mod(x) >= 0`
   * Invariant: `x = Value * div(x) + mod(x)`.
   *
   * Note, `div` and `mod` for negative values are slightly faster
   * than `quot` and `rem`, but behave slightly different
   * compared to normal C++ operators `/` and `%`.
   */
  template <typename I>
  Pow2_FUNC_QUALIFIER Pow2_WHEN_INTEGRAL(I) mod(I x) noexcept
  {
    return x & I(Mask);
  }

#define Pow2_CHECK_TYPE(T)                                               \
  static_assert(std::is_pointer<T>::value || std::is_integral<T>::value, \
                "Only pointer or integral types make sense here")

  /**
   * Tell whether the pointer or integral is Value-aligned.
   * NB: for pointers, the alignment is checked in bytes, not in elements.
   */
  template <typename PtrT>
  Pow2_FUNC_QUALIFIER bool isAligned(PtrT p) noexcept
  {
    Pow2_CHECK_TYPE(PtrT);
    if constexpr (Pow2_IS_REPRESENTABLE_AS(PtrT)) return mod(p) == 0;
    if constexpr (!Pow2_IS_REPRESENTABLE_AS(PtrT)) return mod(reinterpret_cast<Type>(p)) == 0;
  }

  /** Tell whether two pointers have the same address modulo Value. */
  template <typename PtrT, typename PtrS>
  Pow2_FUNC_QUALIFIER bool areSameAlignOffsets(PtrT a, PtrS b) noexcept
  {
    Pow2_CHECK_TYPE(PtrT);
    Pow2_CHECK_TYPE(PtrS);
    Type x, y;
    if constexpr (Pow2_IS_REPRESENTABLE_AS(PtrT))
      x = Type(mod(a));
    else
      x = mod(reinterpret_cast<Type>(a));
    if constexpr (Pow2_IS_REPRESENTABLE_AS(PtrS))
      y = Type(mod(b));
    else
      y = mod(reinterpret_cast<Type>(b));
    return x == y;
  }

  /** Get this or next Value-aligned address (in bytes) or integral. */
  template <typename PtrT>
  Pow2_FUNC_QUALIFIER PtrT roundUp(PtrT p) noexcept
  {
    Pow2_CHECK_TYPE(PtrT);
    if constexpr (Pow2_IS_REPRESENTABLE_AS(PtrT)) return (p + PtrT(Mask)) & PtrT(~Mask);
    if constexpr (!Pow2_IS_REPRESENTABLE_AS(PtrT)) {
      auto x = reinterpret_cast<Type>(p);
      return reinterpret_cast<PtrT>((x + Mask) & (~Mask));
    }
  }

  /** Get this or previous Value-aligned address (in bytes) or integral. */
  template <typename PtrT>
  Pow2_FUNC_QUALIFIER PtrT roundDown(PtrT p) noexcept
  {
    Pow2_CHECK_TYPE(PtrT);
    if constexpr (Pow2_IS_REPRESENTABLE_AS(PtrT)) return p & PtrT(~Mask);
    if constexpr (!Pow2_IS_REPRESENTABLE_AS(PtrT)) {
      auto x = reinterpret_cast<Type>(p);
      return reinterpret_cast<PtrT>(x & (~Mask));
    }
  }

#undef Pow2_CHECK_TYPE
#undef Pow2_IS_REPRESENTABLE_AS
#undef Pow2_FUNC_QUALIFIER
#undef Pow2_WHEN_INTEGRAL
};
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/input_validation.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
// Fallback: an arbitrary layout is not known to be contiguous.
template <class ElementType, class Extents, class Layout, class Accessor>
constexpr bool is_row_or_column_major(mdspan<ElementType, Extents, Layout, Accessor> m)
{
  return false;
}
// layout_left (column-major) mappings are always contiguous.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_or_column_major(mdspan<ElementType, Extents, layout_left, Accessor> m)
{
  return true;
}
// layout_right (row-major) mappings are always contiguous.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_or_column_major(mdspan<ElementType, Extents, layout_right, Accessor> m)
{
  return true;
}
// layout_stride: contiguous only when the mapping is exhaustive.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_or_column_major(mdspan<ElementType, Extents, layout_stride, Accessor> m)
{
  return m.is_exhaustive();
}
// Fallback: unknown layouts are not assumed row-major.
template <class ElementType, class Extents, class Layout, class Accessor>
constexpr bool is_row_major(mdspan<ElementType, Extents, Layout, Accessor> /* m */)
{
  return false;
}
// layout_left is column-major, hence never row-major here.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_major(mdspan<ElementType, Extents, layout_left, Accessor> /* m */)
{
  return false;
}
// layout_right is row-major by definition.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_major(mdspan<ElementType, Extents, layout_right, Accessor> /* m */)
{
  return true;
}
// layout_stride counts as row-major when the mapping is exhaustive and
// dimension 1 (the innermost dimension in row-major order) has unit stride.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_row_major(mdspan<ElementType, Extents, layout_stride, Accessor> m)
{
  return m.is_exhaustive() && m.stride(1) == typename Extents::index_type(1);
}
// Fallback: unknown layouts are not assumed column-major.
template <class ElementType, class Extents, class Layout, class Accessor>
constexpr bool is_col_major(mdspan<ElementType, Extents, Layout, Accessor> /* m */)
{
  return false;
}
// layout_left is column-major by definition.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_col_major(mdspan<ElementType, Extents, layout_left, Accessor> /* m */)
{
  return true;
}
// layout_right is row-major, hence never column-major here.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_col_major(mdspan<ElementType, Extents, layout_right, Accessor> /* m */)
{
  return false;
}
// layout_stride counts as column-major when the mapping is exhaustive and
// dimension 0 (the innermost dimension in column-major order) has unit stride.
template <class ElementType, class Extents, class Accessor>
constexpr bool is_col_major(mdspan<ElementType, Extents, layout_stride, Accessor> m)
{
  return m.is_exhaustive() && m.stride(0) == typename Extents::index_type(1);
}
// A matrix view is an mdspan of exactly rank 2.
template <class ElementType, class IndexType, size_t... Exts, class Layout, class Accessor>
constexpr bool is_matrix_view(
  mdspan<ElementType, extents<IndexType, Exts...>, Layout, Accessor> /* m */)
{
  return sizeof...(Exts) == 2;
}
// Fallback when the extents pack cannot be deduced: report false.
template <class ElementType, class Extents>
constexpr bool is_matrix_view(mdspan<ElementType, Extents> m)
{
  return false;
}
// A vector view is an mdspan of exactly rank 1.
template <class ElementType, class IndexType, size_t... Exts, class Layout, class Accessor>
constexpr bool is_vector_view(
  mdspan<ElementType, extents<IndexType, Exts...>, Layout, Accessor> /* m */)
{
  return sizeof...(Exts) == 1;
}
// Fallback when the extents pack cannot be deduced: report false.
template <class ElementType, class Extents>
constexpr bool is_vector_view(mdspan<ElementType, Extents> m)
{
  return false;
}
// A scalar view is an mdspan of rank 0.
template <class ElementType, class IndexType, size_t... Exts, class Layout, class Accessor>
constexpr bool is_scalar_view(
  mdspan<ElementType, extents<IndexType, Exts...>, Layout, Accessor> /* m */)
{
  return sizeof...(Exts) == 0;
}
// Fallback when the extents pack cannot be deduced: report false.
template <class ElementType, class Extents>
constexpr bool is_scalar_view(mdspan<ElementType, Extents> m)
{
  return false;
}
}; // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/device_atomics.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @brief overloads for CUDA atomic operations
* @file device_atomics.cuh
*
* Provides the overloads for arithmetic data types, where CUDA atomic operations are, `atomicAdd`,
* `atomicMin`, `atomicMax`, and `atomicCAS`.
* `atomicAnd`, `atomicOr`, `atomicXor` are also supported for integer data types.
* Also provides `raft::genericAtomicOperation` which performs atomic operation with the given
* binary operator.
*/
#include <cooperative_groups.h>
#include <type_traits>
namespace raft {
namespace device_atomics {
namespace detail {
// -------------------------------------------------------------------------------------------------
// Binary operators
/* @brief binary `sum` operator */
struct DeviceSum {
  // Enabled only for arithmetic types.
  template <typename T, typename std::enable_if_t<std::is_arithmetic<T>::value>* = nullptr>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return lhs + rhs;
  }
};
/* @brief binary `min` operator */
struct DeviceMin {
  template <typename T>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return lhs < rhs ? lhs : rhs;
  }
};
/* @brief binary `max` operator */
struct DeviceMax {
  template <typename T>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return lhs > rhs ? lhs : rhs;
  }
};
/* @brief binary `product` operator */
struct DeviceProduct {
  // Enabled only for arithmetic types.
  template <typename T, typename std::enable_if_t<std::is_arithmetic<T>::value>* = nullptr>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return lhs * rhs;
  }
};
/* @brief binary `and` operator */
struct DeviceAnd {
  // Bitwise AND; integral types only.
  template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return (lhs & rhs);
  }
};
/* @brief binary `or` operator */
struct DeviceOr {
  // Bitwise OR; integral types only.
  template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return (lhs | rhs);
  }
};
/* @brief binary `xor` operator */
struct DeviceXor {
  // Bitwise XOR; integral types only.
  template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
  __device__ T operator()(const T& lhs, const T& rhs)
  {
    return (lhs ^ rhs);
  }
};
// FIXME: remove this if C++17 is supported.
// `static_assert` requires a string literal at C++14.
#define errmsg_cast "size mismatch."
template <typename T_output, typename T_input>
__forceinline__ __device__ T_output type_reinterpret(T_input value)
{
  static_assert(sizeof(T_output) == sizeof(T_input), "type_reinterpret for different size");
  // Bitwise reinterpretation of `value` as T_output (sizes checked above).
  // NOTE(review): the pointer cast technically violates strict aliasing;
  // memcpy/std::bit_cast would be the well-defined equivalent — confirm
  // before changing, as this idiom is pervasive in this file.
  return *(reinterpret_cast<T_output*>(&value));
}
// -------------------------------------------------------------------------------------------------
// the implementation of `genericAtomicOperation`
// Primary template; specialized below on the operand size N (1/2/4/8 bytes).
template <typename T, typename Op, size_t N = sizeof(T)>
struct genericAtomicOperationImpl;
// single byte atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 1> {
  // Emulates a 1-byte atomic read-modify-write with a CAS loop on the
  // enclosing aligned 4-byte word (atomicCAS operates on 32-bit words).
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
  {
    using T_int = unsigned int;
    // Word-aligned address containing `addr`, and the byte's bit offset in it.
    T_int* address_uint32 = reinterpret_cast<T_int*>(addr - (reinterpret_cast<size_t>(addr) & 3));
    T_int shift = ((reinterpret_cast<size_t>(addr) & 3) * 8);
    T_int old = *address_uint32;
    T_int assumed;
    do {
      assumed = old;
      // Extract the current byte, apply the operator, and splice the result
      // back into the containing 32-bit word.
      T target_value = T((old >> shift) & 0xff);
      uint8_t updating_value = type_reinterpret<uint8_t, T>(op(target_value, update_value));
      T_int new_value = (old & ~(0x000000ff << shift)) | (T_int(updating_value) << shift);
      old = atomicCAS(address_uint32, assumed, new_value);
    } while (assumed != old);
    // Value observed immediately before the successful update.
    return T((old >> shift) & 0xff);
  }
};
// 2 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 2> {
  // Emulates a 2-byte atomic read-modify-write with a CAS loop on the
  // enclosing aligned 4-byte word (atomicCAS operates on 32-bit words).
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
  {
    using T_int = unsigned int;
    // Does `addr` sit in the lower (aligned) or upper half of its 4-byte word?
    bool is_32_align = (reinterpret_cast<size_t>(addr) & 2) ? false : true;
    T_int* address_uint32 =
      reinterpret_cast<T_int*>(reinterpret_cast<size_t>(addr) - (is_32_align ? 0 : 2));
    T_int old = *address_uint32;
    T_int assumed;
    do {
      assumed = old;
      // Extract the current 16-bit lane, apply the operator, and splice the
      // result back into the containing 32-bit word.
      T target_value = (is_32_align) ? T(old & 0xffff) : T(old >> 16);
      uint16_t updating_value = type_reinterpret<uint16_t, T>(op(target_value, update_value));
      T_int new_value = (is_32_align) ? (old & 0xffff0000) | updating_value
                                      : (old & 0xffff) | (T_int(updating_value) << 16);
      old = atomicCAS(address_uint32, assumed, new_value);
    } while (assumed != old);
    // Value observed immediately before the successful update.
    // (Fix: removed a stray, unreachable `;` statement that followed this
    // return in the original.)
    return (is_32_align) ? T(old & 0xffff) : T(old >> 16);
  }
};
// 4 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 4> {
  // CAS loop directly on the native 32-bit word.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
  {
    using T_int = unsigned int;
    T old_value = *addr;
    T assumed{old_value};
    // For float minimum, a NaN update value is a no-op (mirrors the fp32
    // DeviceMax specialization below, which also ignores NaN updates).
    if constexpr (std::is_same<T, float>{} && (std::is_same<Op, DeviceMin>{})) {
      if (isnan(update_value)) { return old_value; }
    }
    do {
      assumed = old_value;
      const T new_value = op(old_value, update_value);
      T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
                            type_reinterpret<T_int, T>(assumed),
                            type_reinterpret<T_int, T>(new_value));
      old_value = type_reinterpret<T, T_int>(ret);
    } while (assumed != old_value);
    // Value observed immediately before the successful update.
    return old_value;
  }
};
// 4 bytes fp32 atomic Max operation
template <>
struct genericAtomicOperationImpl<float, DeviceMax, 4> {
  using T = float;
  // Lock-free fp32 max: exploits that IEEE-754 floats with the same sign
  // order like integers — use signed atomicMax for non-negative updates and
  // unsigned atomicMin otherwise. NaN updates are ignored.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMax op)
  {
    if (isnan(update_value)) { return *addr; }
    T old = (update_value >= 0)
              ? __int_as_float(atomicMax((int*)addr, __float_as_int(update_value)))
              : __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(update_value)));
    return old;
  }
};
// 8 bytes atomic operation
template <typename T, typename Op>
struct genericAtomicOperationImpl<T, Op, 8> {
  // CAS loop directly on the native 64-bit word.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, Op op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T old_value = *addr;
    T assumed{old_value};
    do {
      assumed = old_value;
      const T new_value = op(old_value, update_value);
      T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
                            type_reinterpret<T_int, T>(assumed),
                            type_reinterpret<T_int, T>(new_value));
      old_value = type_reinterpret<T, T_int>(ret);
    } while (assumed != old_value);
    // Value observed immediately before the successful update.
    return old_value;
  }
};
// -------------------------------------------------------------------------------------------------
// specialized functions for operators
// `atomicAdd` supports int, unsigned int, unsigned long long int, float, double (long long int is
// not supported.) `atomicMin`, `atomicMax` support int, unsigned int, unsigned long long int
// `atomicAnd`, `atomicOr`, `atomicXor` support int, unsigned int, unsigned long long int
// CUDA natively supports `unsigned long long int` for `atomicAdd`,
// but doesn't supports `long int`.
// However, since the signed integer is represented as Two's complement,
// the fundamental arithmetic operations of addition are identical to
// those for unsigned binary numbers.
// Then, this computes as `unsigned long long int` with `atomicAdd`
// @sa https://en.wikipedia.org/wiki/Two%27s_complement
template <>
struct genericAtomicOperationImpl<long int, DeviceSum, 8> {
  using T = long int;
  // Delegates to the native 64-bit atomicAdd on the unsigned representation;
  // two's-complement addition is bit-identical for signed and unsigned.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicAdd(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <>
struct genericAtomicOperationImpl<unsigned long int, DeviceSum, 8> {
  using T = unsigned long int;
  // `unsigned long int` is reinterpreted as the natively supported
  // `unsigned long long int` (sizes asserted equal).
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicAdd(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
// CUDA natively supports `unsigned long long int` for `atomicAdd`,
// but doesn't supports `long long int`.
// However, since the signed integer is represented as Two's complement,
// the fundamental arithmetic operations of addition are identical to
// those for unsigned binary numbers.
// Then, this computes as `unsigned long long int` with `atomicAdd`
// @sa https://en.wikipedia.org/wiki/Two%27s_complement
template <>
struct genericAtomicOperationImpl<long long int, DeviceSum, 8> {
  using T = long long int;
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceSum op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicAdd(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <>
struct genericAtomicOperationImpl<unsigned long int, DeviceMin, 8> {
  using T = unsigned long int;
  // Delegates to the native 64-bit atomicMin; `unsigned long int` is
  // reinterpreted as `unsigned long long int` (sizes asserted equal).
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMin op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    // Consistency fix: keep the raw atomicMin result in T_int like every
    // sibling 8-byte specialization; the previous `T ret = ...` forced an
    // extra implicit conversion before type_reinterpret.
    T_int ret = atomicMin(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <>
struct genericAtomicOperationImpl<unsigned long int, DeviceMax, 8> {
  using T = unsigned long int;
  // Delegates to the native 64-bit atomicMax; `unsigned long int` is
  // reinterpreted as `unsigned long long int` (sizes asserted equal).
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceMax op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    // Consistency fix: keep the raw atomicMax result in T_int like every
    // sibling 8-byte specialization; the previous `T ret = ...` forced an
    // extra implicit conversion before type_reinterpret.
    T_int ret = atomicMax(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <typename T>
struct genericAtomicOperationImpl<T, DeviceAnd, 8> {
  // 64-bit bitwise AND via the native atomicAnd on unsigned long long.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceAnd op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicAnd(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <typename T>
struct genericAtomicOperationImpl<T, DeviceOr, 8> {
  // 64-bit bitwise OR via the native atomicOr on unsigned long long.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceOr op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicOr(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
template <typename T>
struct genericAtomicOperationImpl<T, DeviceXor, 8> {
  // 64-bit bitwise XOR via the native atomicXor on unsigned long long.
  __forceinline__ __device__ T operator()(T* addr, T const& update_value, DeviceXor op)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicXor(reinterpret_cast<T_int*>(addr), type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
// -------------------------------------------------------------------------------------------------
// the implementation of `typesAtomicCASImpl`
// Primary template; specialized below on the operand size N (1/2/4/8 bytes).
template <typename T, size_t N = sizeof(T)>
struct typesAtomicCASImpl;
// 1-byte CAS, emulated on the enclosing aligned 4-byte word.
template <typename T>
struct typesAtomicCASImpl<T, 1> {
  __forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
  {
    using T_int = unsigned int;
    T_int shift = ((reinterpret_cast<size_t>(addr) & 3) * 8);
    T_int* address_uint32 = reinterpret_cast<T_int*>(addr - (reinterpret_cast<size_t>(addr) & 3));
    // the 'target_value' in `old` can be different from `compare`
    // because other thread may update the value
    // before fetching a value from `address_uint32` in this function
    T_int old = *address_uint32;
    T_int assumed;
    T target_value;
    uint8_t u_val = type_reinterpret<uint8_t, T>(update_value);
    do {
      assumed = old;
      target_value = T((old >> shift) & 0xff);
      // have to compare `target_value` and `compare` before calling atomicCAS
      // the `target_value` in `old` can be different with `compare`
      if (target_value != compare) break;
      T_int new_value = (old & ~(0x000000ff << shift)) | (T_int(u_val) << shift);
      old = atomicCAS(address_uint32, assumed, new_value);
    } while (assumed != old);
    // Standard CAS semantics: the last observed value of the target byte.
    return target_value;
  }
};
// 2-byte CAS, emulated on the enclosing aligned 4-byte word.
template <typename T>
struct typesAtomicCASImpl<T, 2> {
  __forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
  {
    using T_int = unsigned int;
    // Does `addr` sit in the lower (aligned) or upper half of its word?
    bool is_32_align = (reinterpret_cast<size_t>(addr) & 2) ? false : true;
    T_int* address_uint32 =
      reinterpret_cast<T_int*>(reinterpret_cast<size_t>(addr) - (is_32_align ? 0 : 2));
    T_int old = *address_uint32;
    T_int assumed;
    T target_value;
    uint16_t u_val = type_reinterpret<uint16_t, T>(update_value);
    do {
      assumed = old;
      target_value = (is_32_align) ? T(old & 0xffff) : T(old >> 16);
      if (target_value != compare) break;
      T_int new_value =
        (is_32_align) ? (old & 0xffff0000) | u_val : (old & 0xffff) | (T_int(u_val) << 16);
      old = atomicCAS(address_uint32, assumed, new_value);
    } while (assumed != old);
    return target_value;
  }
};
// 4-byte CAS maps directly onto the native 32-bit atomicCAS.
template <typename T>
struct typesAtomicCASImpl<T, 4> {
  __forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
  {
    using T_int = unsigned int;
    T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
                          type_reinterpret<T_int, T>(compare),
                          type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
// 8 bytes atomic operation
// 8-byte CAS maps directly onto the native 64-bit atomicCAS.
template <typename T>
struct typesAtomicCASImpl<T, 8> {
  __forceinline__ __device__ T operator()(T* addr, T const& compare, T const& update_value)
  {
    using T_int = unsigned long long int;
    static_assert(sizeof(T) == sizeof(T_int), errmsg_cast);
    T_int ret = atomicCAS(reinterpret_cast<T_int*>(addr),
                          type_reinterpret<T_int, T>(compare),
                          type_reinterpret<T_int, T>(update_value));
    return type_reinterpret<T, T_int>(ret);
  }
};
} // namespace detail
} // namespace device_atomics
/** -------------------------------------------------------------------------*
* @brief compute atomic binary operation
* reads the `old` located at the `address` in global or shared memory,
* computes 'BinaryOp'('old', 'update_value'),
* and stores the result back to memory at the same address.
* These three operations are performed in one atomic transaction.
*
* The supported cudf types for `genericAtomicOperation` are:
* int8_t, int16_t, int32_t, int64_t, float, double
*
* @param[in] address The address of old value in global or shared memory
* @param[in] update_value The value to be computed
* @param[in] op The binary operator used for compute
*
* @returns The old value at `address`
* -------------------------------------------------------------------------**/
template <typename T, typename BinaryOp>
typename std::enable_if_t<std::is_arithmetic<T>::value, T> __forceinline__ __device__
genericAtomicOperation(T* address, T const& update_value, BinaryOp op)
{
  // Dispatch to the size-specialized implementation; returns the old value.
  using impl_t = raft::device_atomics::detail::genericAtomicOperationImpl<T, BinaryOp>;
  return T(impl_t{}(address, update_value, op));
}
// specialization for bool types
template <typename BinaryOp>
__forceinline__ __device__ bool genericAtomicOperation(bool* address,
                                                       bool const& update_value,
                                                       BinaryOp op)
{
  // bool is handled directly via the 1-byte implementation rather than an
  // underlying integer type, so the stored value remains a well-formed bool.
  using impl_t = raft::device_atomics::detail::genericAtomicOperationImpl<bool, BinaryOp>;
  return bool(impl_t{}(address, update_value, op));
}
} // namespace raft
/**
* @brief Overloads for `atomicAdd`
*
* reads the `old` located at the `address` in global or shared memory, computes (old + val), and
* stores the result back to memory at the same address. These three operations are performed in one
* atomic transaction.
*
* The supported types for `atomicAdd` are: integers are floating point numbers.
* CUDA natively supports `int`, `unsigned int`, `unsigned long long int`, `float`, `double.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be added
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicAdd(T* address, T val)
{
return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceSum{});
}
/**
* @brief Overloads for `atomicMin`
*
* reads the `old` located at the `address` in global or shared memory, computes the minimum of old
* and val, and stores the result back to memory at the same address. These three operations are
* performed in one atomic transaction.
*
* The supported types for `atomicMin` are: integers are floating point numbers.
* CUDA natively supports `int`, `unsigned int`, `unsigned long long int`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicMin(T* address, T val)
{
return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceMin{});
}
/**
* @brief Overloads for `atomicMax`
*
* reads the `old` located at the `address` in global or shared memory, computes the maximum of old
* and val, and stores the result back to memory at the same address. These three operations are
* performed in one atomic transaction.
*
* The supported types for `atomicMax` are: integers are floating point numbers.
* CUDA natively supports `int`, `unsigned int`, `unsigned long long int`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicMax(T* address, T val)
{
return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceMax{});
}
/**
* @brief Overloads for `atomicCAS`
*
* reads the `old` located at the `address` in global or shared memory, computes
* (`old` == `compare` ? `val` : `old`), and stores the result back to memory at the same address.
* These three operations are performed in one atomic transaction.
*
* The supported types for `atomicCAS` are: integers are floating point numbers.
* CUDA natively supports `int`, `unsigned int`, `unsigned long long int`, `unsigned short int`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] compare The value to be compared
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T>
__forceinline__ __device__ T atomicCAS(T* address, T compare, T val)
{
return raft::device_atomics::detail::typesAtomicCASImpl<T>()(address, compare, val);
}
/**
* @brief Overloads for `atomicAnd`
*
* reads the `old` located at the `address` in global or shared memory, computes (old & val), and
* stores the result back to memory at the same address. These three operations are performed in
* one atomic transaction.
*
* The supported types for `atomicAnd` are: integers.
* CUDA natively supports `int`, `unsigned int`, `unsigned long long int`.
*
* @param[in] address The address of old value in global or shared memory
* @param[in] val The value to be computed
*
* @returns The old value at `address`
*/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value, T>* = nullptr>
__forceinline__ __device__ T atomicAnd(T* address, T val)
{
return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceAnd{});
}
/**
 * @brief Overloads for `atomicOr`
 *
 * reads the `old` located at the `address` in global or shared memory, computes (old | val), and
 * stores the result back to memory at the same address. These three operations are performed in
 * one atomic transaction.
 *
 * The supported types for `atomicOr` are: integers.
 * CUDA natively supports `int`, `unsigned int`, `unsigned long long int`.
 *
 * @note Restricted to integral `T` via SFINAE; bitwise OR is not defined for floating point.
 *
 * @param[in] address The address of old value in global or shared memory
 * @param[in] val The value to be computed
 *
 * @returns The old value at `address`
 */
template <typename T, typename std::enable_if_t<std::is_integral<T>::value, T>* = nullptr>
__forceinline__ __device__ T atomicOr(T* address, T val)
{
  return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceOr{});
}
/**
 * @brief Overloads for `atomicXor`
 *
 * reads the `old` located at the `address` in global or shared memory, computes (old ^ val), and
 * stores the result back to memory at the same address. These three operations are performed in
 * one atomic transaction.
 *
 * The supported types for `atomicXor` are: integers.
 * CUDA natively supports `int`, `unsigned int`, `unsigned long long int`.
 *
 * @note Restricted to integral `T` via SFINAE; bitwise XOR is not defined for floating point.
 *
 * @param[in] address The address of old value in global or shared memory
 * @param[in] val The value to be computed
 *
 * @returns The old value at `address`
 */
template <typename T, typename std::enable_if_t<std::is_integral<T>::value, T>* = nullptr>
__forceinline__ __device__ T atomicXor(T* address, T val)
{
  return raft::genericAtomicOperation(address, val, raft::device_atomics::detail::DeviceXor{});
}
/**
 * @brief: Warp aggregated atomic increment
 *
 * increments an atomic counter using all active threads in a warp. The return
 * value is the original value of the counter plus the rank of the calling
 * thread.
 *
 * The use of atomicIncWarp is a performance optimization. It can reduce the
 * amount of atomic memory traffic by a factor of 32.
 *
 * Adapted from:
 * https://developer.nvidia.com/blog/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/
 *
 * @tparam T An integral type
 * @param[in,out] ctr The address of old value
 *
 * @return The old value of the counter plus the rank of the calling thread.
 */
template <typename T = unsigned int,
          typename std::enable_if_t<std::is_integral<T>::value, T>* = nullptr>
__device__ T atomicIncWarp(T* ctr)
{
  namespace cg = cooperative_groups;
  // Group together the threads of this warp that are currently active.
  auto g = cg::coalesced_threads();
  T warp_res;
  // Only the group leader (rank 0) touches memory, adding the whole group's
  // count in a single atomic instead of one atomic per thread.
  if (g.thread_rank() == 0) { warp_res = atomicAdd(ctr, static_cast<T>(g.size())); }
  // Broadcast the leader's pre-increment value to the group; each thread then
  // offsets it by its own rank to obtain a unique slot.
  return g.shfl(warp_res, 0) + g.thread_rank();
}
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/device_loads_stores.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // uintX_t
#include <raft/core/device_span.hpp>
#include <raft/util/cuda_dev_essentials.cuh> // DI
namespace raft {
/**
* @defgroup SmemStores Shared memory store operations
* @{
* @brief Stores to shared memory (both vectorized and non-vectorized forms)
* requires the given shmem pointer to be aligned by the vector
length, like for float4 lds/sts shmem pointer should be aligned
by 16 bytes else it might silently fail or can also give
runtime error.
* @param[out] addr shared memory address (should be aligned to vector size)
* @param[in] x data to be stored at this address
*/
// 8-bit unsigned stores: each byte is first widened into a 32-bit temporary because
// inline-PTX "r" operands are 32-bit registers.
DI void sts(uint8_t* addr, const uint8_t& x)
{
  uint32_t x_int;
  x_int = x;
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<uint8_t*>(addr));
  asm volatile("st.shared.u8 [%0], {%1};" : : "l"(s1), "r"(x_int));
}
DI void sts(uint8_t* addr, const uint8_t (&x)[1])
{
  uint32_t x_int[1];
  x_int[0] = x[0];
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<uint8_t*>(addr));
  asm volatile("st.shared.u8 [%0], {%1};" : : "l"(s1), "r"(x_int[0]));
}
DI void sts(uint8_t* addr, const uint8_t (&x)[2])
{
  uint32_t x_int[2];
  x_int[0] = x[0];
  x_int[1] = x[1];
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<uint8_t*>(addr));
  asm volatile("st.shared.v2.u8 [%0], {%1, %2};" : : "l"(s2), "r"(x_int[0]), "r"(x_int[1]));
}
DI void sts(uint8_t* addr, const uint8_t (&x)[4])
{
  uint32_t x_int[4];
  x_int[0] = x[0];
  x_int[1] = x[1];
  x_int[2] = x[2];
  x_int[3] = x[3];
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<uint8_t*>(addr));
  asm volatile("st.shared.v4.u8 [%0], {%1, %2, %3, %4};"
               :
               : "l"(s4), "r"(x_int[0]), "r"(x_int[1]), "r"(x_int[2]), "r"(x_int[3]));
}
// 8-bit signed stores: same widening scheme as the unsigned variants above.
DI void sts(int8_t* addr, const int8_t& x)
{
  int32_t x_int = x;
  auto s1       = __cvta_generic_to_shared(reinterpret_cast<int8_t*>(addr));
  asm volatile("st.shared.s8 [%0], {%1};" : : "l"(s1), "r"(x_int));
}
DI void sts(int8_t* addr, const int8_t (&x)[1])
{
  int32_t x_int[1];
  x_int[0] = x[0];
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<int8_t*>(addr));
  asm volatile("st.shared.s8 [%0], {%1};" : : "l"(s1), "r"(x_int[0]));
}
DI void sts(int8_t* addr, const int8_t (&x)[2])
{
  int32_t x_int[2];
  x_int[0] = x[0];
  x_int[1] = x[1];
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<int8_t*>(addr));
  asm volatile("st.shared.v2.s8 [%0], {%1, %2};" : : "l"(s2), "r"(x_int[0]), "r"(x_int[1]));
}
DI void sts(int8_t* addr, const int8_t (&x)[4])
{
  int32_t x_int[4];
  x_int[0] = x[0];
  x_int[1] = x[1];
  x_int[2] = x[2];
  x_int[3] = x[3];
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<int8_t*>(addr));
  asm volatile("st.shared.v4.s8 [%0], {%1, %2, %3, %4};"
               :
               : "l"(s4), "r"(x_int[0]), "r"(x_int[1]), "r"(x_int[2]), "r"(x_int[3]));
}
// 32-bit unsigned stores; vectorized forms assume the address is 8-/16-byte aligned.
DI void sts(uint32_t* addr, const uint32_t& x)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<uint32_t*>(addr));
  asm volatile("st.shared.u32 [%0], {%1};" : : "l"(s1), "r"(x));
}
DI void sts(uint32_t* addr, const uint32_t (&x)[1])
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<uint32_t*>(addr));
  asm volatile("st.shared.u32 [%0], {%1};" : : "l"(s1), "r"(x[0]));
}
DI void sts(uint32_t* addr, const uint32_t (&x)[2])
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<uint2*>(addr));
  asm volatile("st.shared.v2.u32 [%0], {%1, %2};" : : "l"(s2), "r"(x[0]), "r"(x[1]));
}
DI void sts(uint32_t* addr, const uint32_t (&x)[4])
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<uint4*>(addr));
  asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};"
               :
               : "l"(s4), "r"(x[0]), "r"(x[1]), "r"(x[2]), "r"(x[3]));
}
// 32-bit signed stores; the bit pattern is identical, so the u32 PTX opcode is reused.
DI void sts(int32_t* addr, const int32_t& x)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<int32_t*>(addr));
  asm volatile("st.shared.u32 [%0], {%1};" : : "l"(s1), "r"(x));
}
DI void sts(int32_t* addr, const int32_t (&x)[1])
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<int32_t*>(addr));
  asm volatile("st.shared.u32 [%0], {%1};" : : "l"(s1), "r"(x[0]));
}
DI void sts(int32_t* addr, const int32_t (&x)[2])
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<int2*>(addr));
  asm volatile("st.shared.v2.u32 [%0], {%1, %2};" : : "l"(s2), "r"(x[0]), "r"(x[1]));
}
DI void sts(int32_t* addr, const int32_t (&x)[4])
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<int4*>(addr));
  asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};"
               :
               : "l"(s4), "r"(x[0]), "r"(x[1]), "r"(x[2]), "r"(x[3]));
}
// Single-precision float stores ("f" = 32-bit float register operand).
DI void sts(float* addr, const float& x)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<float*>(addr));
  asm volatile("st.shared.f32 [%0], {%1};" : : "l"(s1), "f"(x));
}
DI void sts(float* addr, const float (&x)[1])
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<float*>(addr));
  asm volatile("st.shared.f32 [%0], {%1};" : : "l"(s1), "f"(x[0]));
}
DI void sts(float* addr, const float (&x)[2])
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<float2*>(addr));
  asm volatile("st.shared.v2.f32 [%0], {%1, %2};" : : "l"(s2), "f"(x[0]), "f"(x[1]));
}
DI void sts(float* addr, const float (&x)[4])
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<float4*>(addr));
  asm volatile("st.shared.v4.f32 [%0], {%1, %2, %3, %4};"
               :
               : "l"(s4), "f"(x[0]), "f"(x[1]), "f"(x[2]), "f"(x[3]));
}
// Double-precision stores ("d" = 64-bit float register operand); no v4 form is provided.
DI void sts(double* addr, const double& x)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<double*>(addr));
  asm volatile("st.shared.f64 [%0], {%1};" : : "l"(s1), "d"(x));
}
DI void sts(double* addr, const double (&x)[1])
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<double*>(addr));
  asm volatile("st.shared.f64 [%0], {%1};" : : "l"(s1), "d"(x[0]));
}
DI void sts(double* addr, const double (&x)[2])
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<double2*>(addr));
  asm volatile("st.shared.v2.f64 [%0], {%1, %2};" : : "l"(s2), "d"(x[0]), "d"(x[1]));
}
/** @} */
/**
* @defgroup SmemLoads Shared memory load operations
* @{
* @brief Loads from shared memory (both vectorized and non-vectorized forms)
requires the given shmem pointer to be aligned by the vector
length, like for float4 lds/sts shmem pointer should be aligned
by 16 bytes else it might silently fail or can also give
runtime error.
* @param[out] x the data to be loaded
* @param[in] addr shared memory address from where to load
* (should be aligned to vector size)
*/
// 8-bit unsigned loads: values arrive in 32-bit temporaries (inline-PTX "r" operands
// are 32-bit registers) and are narrowed back to uint8_t on the way out.
DI void lds(uint8_t& x, const uint8_t* addr)
{
  uint32_t x_int;
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const uint8_t*>(addr));
  asm volatile("ld.shared.u8 {%0}, [%1];" : "=r"(x_int) : "l"(s1));
  x = x_int;
}
DI void lds(uint8_t (&x)[1], const uint8_t* addr)
{
  uint32_t x_int[1];
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const uint8_t*>(addr));
  asm volatile("ld.shared.u8 {%0}, [%1];" : "=r"(x_int[0]) : "l"(s1));
  x[0] = x_int[0];
}
DI void lds(uint8_t (&x)[2], const uint8_t* addr)
{
  uint32_t x_int[2];
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<const uint8_t*>(addr));
  asm volatile("ld.shared.v2.u8 {%0, %1}, [%2];" : "=r"(x_int[0]), "=r"(x_int[1]) : "l"(s2));
  x[0] = x_int[0];
  x[1] = x_int[1];
}
DI void lds(uint8_t (&x)[4], const uint8_t* addr)
{
  uint32_t x_int[4];
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<const uint8_t*>(addr));
  asm volatile("ld.shared.v4.u8 {%0, %1, %2, %3}, [%4];"
               : "=r"(x_int[0]), "=r"(x_int[1]), "=r"(x_int[2]), "=r"(x_int[3])
               : "l"(s4));
  x[0] = x_int[0];
  x[1] = x_int[1];
  x[2] = x_int[2];
  x[3] = x_int[3];
}
// 8-bit signed loads: ld.shared.s8 sign-extends into the 32-bit temporary.
DI void lds(int8_t& x, const int8_t* addr)
{
  int32_t x_int;
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const int8_t*>(addr));
  asm volatile("ld.shared.s8 {%0}, [%1];" : "=r"(x_int) : "l"(s1));
  x = x_int;
}
DI void lds(int8_t (&x)[1], const int8_t* addr)
{
  int32_t x_int[1];
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const int8_t*>(addr));
  asm volatile("ld.shared.s8 {%0}, [%1];" : "=r"(x_int[0]) : "l"(s1));
  x[0] = x_int[0];
}
DI void lds(int8_t (&x)[2], const int8_t* addr)
{
  int32_t x_int[2];
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<const int8_t*>(addr));
  asm volatile("ld.shared.v2.s8 {%0, %1}, [%2];" : "=r"(x_int[0]), "=r"(x_int[1]) : "l"(s2));
  x[0] = x_int[0];
  x[1] = x_int[1];
}
DI void lds(int8_t (&x)[4], const int8_t* addr)
{
  int32_t x_int[4];
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<const int8_t*>(addr));
  asm volatile("ld.shared.v4.s8 {%0, %1, %2, %3}, [%4];"
               : "=r"(x_int[0]), "=r"(x_int[1]), "=r"(x_int[2]), "=r"(x_int[3])
               : "l"(s4));
  x[0] = x_int[0];
  x[1] = x_int[1];
  x[2] = x_int[2];
  x[3] = x_int[3];
}
// 32-bit loads (unsigned, then signed; identical bit pattern so u32 opcodes are reused).
DI void lds(uint32_t (&x)[4], const uint32_t* addr)
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<const uint32_t*>(addr));
  asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];"
               : "=r"(x[0]), "=r"(x[1]), "=r"(x[2]), "=r"(x[3])
               : "l"(s4));
}
DI void lds(uint32_t (&x)[2], const uint32_t* addr)
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<const uint32_t*>(addr));
  asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];" : "=r"(x[0]), "=r"(x[1]) : "l"(s2));
}
DI void lds(uint32_t (&x)[1], const uint32_t* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const uint32_t*>(addr));
  asm volatile("ld.shared.u32 {%0}, [%1];" : "=r"(x[0]) : "l"(s1));
}
DI void lds(uint32_t& x, const uint32_t* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const uint32_t*>(addr));
  asm volatile("ld.shared.u32 {%0}, [%1];" : "=r"(x) : "l"(s1));
}
DI void lds(int32_t (&x)[4], const int32_t* addr)
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<const int32_t*>(addr));
  asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];"
               : "=r"(x[0]), "=r"(x[1]), "=r"(x[2]), "=r"(x[3])
               : "l"(s4));
}
DI void lds(int32_t (&x)[2], const int32_t* addr)
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<const int32_t*>(addr));
  asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];" : "=r"(x[0]), "=r"(x[1]) : "l"(s2));
}
DI void lds(int32_t (&x)[1], const int32_t* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const int32_t*>(addr));
  asm volatile("ld.shared.u32 {%0}, [%1];" : "=r"(x[0]) : "l"(s1));
}
DI void lds(int32_t& x, const int32_t* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const int32_t*>(addr));
  asm volatile("ld.shared.u32 {%0}, [%1];" : "=r"(x) : "l"(s1));
}
// Single-precision loads, const-pointer variants.
DI void lds(float& x, const float* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const float*>(addr));
  asm volatile("ld.shared.f32 {%0}, [%1];" : "=f"(x) : "l"(s1));
}
DI void lds(float (&x)[1], const float* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<const float*>(addr));
  asm volatile("ld.shared.f32 {%0}, [%1];" : "=f"(x[0]) : "l"(s1));
}
DI void lds(float (&x)[2], const float* addr)
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<const float2*>(addr));
  asm volatile("ld.shared.v2.f32 {%0, %1}, [%2];" : "=f"(x[0]), "=f"(x[1]) : "l"(s2));
}
DI void lds(float (&x)[4], const float* addr)
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<const float4*>(addr));
  asm volatile("ld.shared.v4.f32 {%0, %1, %2, %3}, [%4];"
               : "=f"(x[0]), "=f"(x[1]), "=f"(x[2]), "=f"(x[3])
               : "l"(s4));
}
// Single-precision loads, non-const pointer variants (kept for overload-resolution
// convenience at call sites that hold mutable pointers).
DI void lds(float& x, float* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<float*>(addr));
  asm volatile("ld.shared.f32 {%0}, [%1];" : "=f"(x) : "l"(s1));
}
DI void lds(float (&x)[1], float* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<float*>(addr));
  asm volatile("ld.shared.f32 {%0}, [%1];" : "=f"(x[0]) : "l"(s1));
}
DI void lds(float (&x)[2], float* addr)
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<float2*>(addr));
  asm volatile("ld.shared.v2.f32 {%0, %1}, [%2];" : "=f"(x[0]), "=f"(x[1]) : "l"(s2));
}
DI void lds(float (&x)[4], float* addr)
{
  auto s4 = __cvta_generic_to_shared(reinterpret_cast<float4*>(addr));
  asm volatile("ld.shared.v4.f32 {%0, %1, %2, %3}, [%4];"
               : "=f"(x[0]), "=f"(x[1]), "=f"(x[2]), "=f"(x[3])
               : "l"(s4));
}
// Double-precision loads; no v4 form is provided (matches the sts overload set).
DI void lds(double& x, double* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<double*>(addr));
  asm volatile("ld.shared.f64 {%0}, [%1];" : "=d"(x) : "l"(s1));
}
DI void lds(double (&x)[1], double* addr)
{
  auto s1 = __cvta_generic_to_shared(reinterpret_cast<double*>(addr));
  asm volatile("ld.shared.f64 {%0}, [%1];" : "=d"(x[0]) : "l"(s1));
}
DI void lds(double (&x)[2], double* addr)
{
  auto s2 = __cvta_generic_to_shared(reinterpret_cast<double2*>(addr));
  asm volatile("ld.shared.v2.f64 {%0, %1}, [%2];" : "=d"(x[0]), "=d"(x[1]) : "l"(s2));
}
/** @} */
/**
* @defgroup GlobalLoads Global cached load operations
* @{
 * @brief Load from global memory with `.cg` caching (cache at global level: L2 and
 * below, bypassing L1)
* @param[out] x data to be loaded from global memory
* @param[in] addr address in global memory from where to load
*/
// All overloads below use ld.global.cg, which caches at the global (L2) level.
// Single- and double-precision loads.
DI void ldg(float& x, const float* addr)
{
  asm volatile("ld.global.cg.f32 %0, [%1];" : "=f"(x) : "l"(addr));
}
DI void ldg(float (&x)[1], const float* addr)
{
  asm volatile("ld.global.cg.f32 %0, [%1];" : "=f"(x[0]) : "l"(addr));
}
DI void ldg(float (&x)[2], const float* addr)
{
  asm volatile("ld.global.cg.v2.f32 {%0, %1}, [%2];" : "=f"(x[0]), "=f"(x[1]) : "l"(addr));
}
DI void ldg(float (&x)[4], const float* addr)
{
  asm volatile("ld.global.cg.v4.f32 {%0, %1, %2, %3}, [%4];"
               : "=f"(x[0]), "=f"(x[1]), "=f"(x[2]), "=f"(x[3])
               : "l"(addr));
}
DI void ldg(double& x, const double* addr)
{
  asm volatile("ld.global.cg.f64 %0, [%1];" : "=d"(x) : "l"(addr));
}
DI void ldg(double (&x)[1], const double* addr)
{
  asm volatile("ld.global.cg.f64 %0, [%1];" : "=d"(x[0]) : "l"(addr));
}
DI void ldg(double (&x)[2], const double* addr)
{
  asm volatile("ld.global.cg.v2.f64 {%0, %1}, [%2];" : "=d"(x[0]), "=d"(x[1]) : "l"(addr));
}
// 32-bit integer loads (unsigned, then signed; identical bit pattern, u32 opcode reused).
DI void ldg(uint32_t (&x)[4], const uint32_t* const& addr)
{
  asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];"
               : "=r"(x[0]), "=r"(x[1]), "=r"(x[2]), "=r"(x[3])
               : "l"(addr));
}
DI void ldg(uint32_t (&x)[2], const uint32_t* const& addr)
{
  asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];" : "=r"(x[0]), "=r"(x[1]) : "l"(addr));
}
DI void ldg(uint32_t (&x)[1], const uint32_t* const& addr)
{
  asm volatile("ld.global.cg.u32 %0, [%1];" : "=r"(x[0]) : "l"(addr));
}
DI void ldg(uint32_t& x, const uint32_t* const& addr)
{
  asm volatile("ld.global.cg.u32 %0, [%1];" : "=r"(x) : "l"(addr));
}
DI void ldg(int32_t (&x)[4], const int32_t* const& addr)
{
  asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];"
               : "=r"(x[0]), "=r"(x[1]), "=r"(x[2]), "=r"(x[3])
               : "l"(addr));
}
DI void ldg(int32_t (&x)[2], const int32_t* const& addr)
{
  asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];" : "=r"(x[0]), "=r"(x[1]) : "l"(addr));
}
DI void ldg(int32_t (&x)[1], const int32_t* const& addr)
{
  asm volatile("ld.global.cg.u32 %0, [%1];" : "=r"(x[0]) : "l"(addr));
}
DI void ldg(int32_t& x, const int32_t* const& addr)
{
  asm volatile("ld.global.cg.u32 %0, [%1];" : "=r"(x) : "l"(addr));
}
// 8-bit loads: values land in 32-bit temporaries (PTX "r" operands are 32-bit
// registers) and are narrowed back on the way out.
DI void ldg(uint8_t (&x)[4], const uint8_t* const& addr)
{
  uint32_t x_int[4];
  asm volatile("ld.global.cg.v4.u8 {%0, %1, %2, %3}, [%4];"
               : "=r"(x_int[0]), "=r"(x_int[1]), "=r"(x_int[2]), "=r"(x_int[3])
               : "l"(addr));
  x[0] = x_int[0];
  x[1] = x_int[1];
  x[2] = x_int[2];
  x[3] = x_int[3];
}
DI void ldg(uint8_t (&x)[2], const uint8_t* const& addr)
{
  uint32_t x_int[2];
  asm volatile("ld.global.cg.v2.u8 {%0, %1}, [%2];" : "=r"(x_int[0]), "=r"(x_int[1]) : "l"(addr));
  x[0] = x_int[0];
  x[1] = x_int[1];
}
DI void ldg(uint8_t (&x)[1], const uint8_t* const& addr)
{
  uint32_t x_int;
  asm volatile("ld.global.cg.u8 %0, [%1];" : "=r"(x_int) : "l"(addr));
  x[0] = x_int;
}
DI void ldg(uint8_t& x, const uint8_t* const& addr)
{
  uint32_t x_int;
  asm volatile("ld.global.cg.u8 %0, [%1];" : "=r"(x_int) : "l"(addr));
  x = x_int;
}
DI void ldg(int8_t (&x)[4], const int8_t* const& addr)
{
  int x_int[4];
  asm volatile("ld.global.cg.v4.s8 {%0, %1, %2, %3}, [%4];"
               : "=r"(x_int[0]), "=r"(x_int[1]), "=r"(x_int[2]), "=r"(x_int[3])
               : "l"(addr));
  x[0] = x_int[0];
  x[1] = x_int[1];
  x[2] = x_int[2];
  x[3] = x_int[3];
}
DI void ldg(int8_t (&x)[2], const int8_t* const& addr)
{
  int x_int[2];
  asm volatile("ld.global.cg.v2.s8 {%0, %1}, [%2];" : "=r"(x_int[0]), "=r"(x_int[1]) : "l"(addr));
  x[0] = x_int[0];
  x[1] = x_int[1];
}
DI void ldg(int8_t& x, const int8_t* const& addr)
{
  int x_int;
  asm volatile("ld.global.cg.s8 %0, [%1];" : "=r"(x_int) : "l"(addr));
  x = x_int;
}
DI void ldg(int8_t (&x)[1], const int8_t* const& addr)
{
  int x_int;
  asm volatile("ld.global.cg.s8 %0, [%1];" : "=r"(x_int) : "l"(addr));
  x[0] = x_int;
}
/**
 * @brief Executes a 1D block strided copy
 * @param dst destination pointer
 * @param src source pointer
 * @param size number of items to copy
 */
template <typename T>
DI void block_copy(T* dst, const T* src, const size_t size)
{
  // Use a size_t induction variable: `auto i = threadIdx.x` would make `i` an
  // unsigned int, which wraps around (non-terminating / incorrect copy) whenever
  // `size` exceeds UINT_MAX.
  for (size_t i = threadIdx.x; i < size; i += blockDim.x) {
    dst[i] = src[i];
  }
}
/**
 * @brief Executes a 1D block strided copy
 * @param dst span of destination pointer
 * @param src span of source pointer
 * @param size number of items to copy
 */
template <typename T>
DI void block_copy(raft::device_span<T> dst,
                   const raft::device_span<const T> src,
                   const size_t size)
{
  // Debug-only bounds checks; forwards to the raw-pointer implementation.
  assert(src.size() >= size);
  assert(dst.size() >= size);
  block_copy(dst.data(), src.data(), size);
}
/**
 * @brief Executes a 1D block strided copy
 * @param dst span of destination pointer
 * @param src span of source pointer
 * @param size number of items to copy
 */
template <typename T>
DI void block_copy(raft::device_span<T> dst, const raft::device_span<T> src, const size_t size)
{
  // Same as above, but accepts a mutable source span (avoids requiring callers to
  // convert to device_span<const T> explicitly).
  assert(src.size() >= size);
  assert(dst.size() >= size);
  block_copy(dst.data(), src.data(), size);
}
/**
 * @brief Executes a 1D block strided copy
 * @param dst span of destination pointer
 * @param src span of source pointer
 */
template <typename T>
DI void block_copy(raft::device_span<T> dst, const raft::device_span<T> src)
{
  // Convenience overload: copies the entire source span.
  assert(dst.size() >= src.size());
  block_copy(dst, src, src.size());
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/device_utils.cuh | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <utility> // pair
namespace raft {
// TODO move to raft https://github.com/rapidsai/raft/issues/90
/** helper method to get the compute capability version numbers */
inline std::pair<int, int> getDeviceCapability()
{
  // Query the compute capability (major, minor) of the currently active device.
  int device_id = 0;
  RAFT_CUDA_TRY(cudaGetDevice(&device_id));
  int cc_major = 0;
  int cc_minor = 0;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&cc_major, cudaDevAttrComputeCapabilityMajor, device_id));
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&cc_minor, cudaDevAttrComputeCapabilityMinor, device_id));
  return {cc_major, cc_minor};
}
/**
 * @brief Batched warp-level sum reduction
 *
 * @tparam T data type
 * @tparam NThreads Number of threads in the warp doing independent reductions
 *
 * @param[in] val input value
 * @return for the first "group" of threads, the reduced value. All
 *         others will contain unusable values!
 *
 * @note Why not cub? Because cub doesn't seem to allow working with arbitrary
 *       number of warps in a block and also doesn't support this kind of
 *       batched reduction operation
 * @note All threads in the warp must enter this function together
 *
 * @todo Expand this to support arbitrary reduction ops
 */
template <typename T, int NThreads>
DI T batchedWarpReduce(T val)
{
#pragma unroll
  // Tree reduction with a stride that starts at NThreads and doubles each step,
  // so lanes NThreads apart accumulate independent sums.
  for (int i = NThreads; i < raft::WarpSize; i <<= 1) {
    // Pull the partial sum held by the lane `i` positions ahead of this one.
    val += raft::shfl(val, raft::laneId() + i);
  }
  return val;
}
/**
 * @brief 1-D block-level batched sum reduction
 *
 * @tparam T data type
 * @tparam NThreads Number of threads in the warp doing independent reductions
 *
 * @param val input value
 * @param smem shared memory region needed for storing intermediate results. It
 *             must at least be of size: `sizeof(T) * nWarps * NThreads`
 * @return for the first "group" of threads in the block, the reduced value.
 *         All others will contain unusable values!
 *
 * @note Why not cub? Because cub doesn't seem to allow working with arbitrary
 *       number of warps in a block and also doesn't support this kind of
 *       batched reduction operation
 * @note All threads in the block must enter this function together
 *
 * @todo Expand this to support arbitrary reduction ops
 */
template <typename T, int NThreads>
DI T batchedBlockReduce(T val, char* smem)
{
  auto* sTemp = reinterpret_cast<T*>(smem);
  constexpr int nGroupsPerWarp = raft::WarpSize / NThreads;
  static_assert(raft::isPo2(nGroupsPerWarp), "nGroupsPerWarp must be a PO2!");
  // Number of independent NThreads-wide reduction groups covering the block.
  const int nGroups = (blockDim.x + NThreads - 1) / NThreads;
  const int lid = raft::laneId();
  const int lgid = lid % NThreads;  // position within this thread's group
  const int gid = threadIdx.x / NThreads;  // index of this thread's group
  // Write index compacts the per-warp results; read index addresses the previous
  // round's partials laid out one group after another.
  const auto wrIdx = (gid / nGroupsPerWarp) * NThreads + lgid;
  const auto rdIdx = gid * NThreads + lgid;
  // Each iteration reduces the group count by a factor of nGroupsPerWarp,
  // staging intermediate sums through shared memory between rounds.
  for (int i = nGroups; i > 0;) {
    // Round the remaining group count up to a whole number of warps so every
    // warp holding live data participates in the warp-level reduction.
    auto iAligned = ((i + nGroupsPerWarp - 1) / nGroupsPerWarp) * nGroupsPerWarp;
    if (gid < iAligned) {
      val = batchedWarpReduce<T, NThreads>(val);
      // Only the first group of each warp holds the reduced values.
      if (lid < NThreads) sTemp[wrIdx] = val;
    }
    __syncthreads();
    i /= nGroupsPerWarp;
    // Reload the surviving partials for the next round; inactive groups get 0
    // so they contribute nothing to the sum.
    if (i > 0) { val = gid < i ? sTemp[rdIdx] : T(0); }
    __syncthreads();
  }
  return val;
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cutlass_utils.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cutlass/cutlass.h>
#include <raft/core/error.hpp>
namespace raft {
/**
 * @brief Exception thrown when a CUTLASS error is encountered.
 */
struct cutlass_error : public raft::exception {
  // Construct from a null-terminated C string describing the failure.
  explicit cutlass_error(char const* const message) : raft::exception(message) {}
  // Construct from a std::string describing the failure.
  explicit cutlass_error(std::string const& message) : raft::exception(message) {}
};
} // namespace raft
/**
 * @brief Error checking macro for CUTLASS functions.
 *
 * Invokes a CUTLASS function call, if the call does not return cutlass::Status::kSuccess,
 * throws an exception detailing the CUTLASS error that occurred.
 *
 * @param call a CUTLASS API expression that evaluates to a `cutlass::Status`;
 *             it is evaluated exactly once
 * @throws raft::cutlass_error if the returned status is not `cutlass::Status::kSuccess`
 */
#define RAFT_CUTLASS_TRY(call)                        \
  do {                                                \
    cutlass::Status const status = call;              \
    if (status != cutlass::Status::kSuccess) {        \
      std::string msg{};                              \
      SET_ERROR_MSG(msg,                              \
                    "CUTLASS error encountered at: ", \
                    "call='%s', Reason=%s",           \
                    #call,                            \
                    cutlassGetStatusString(status));  \
      throw raft::cutlass_error(msg);                 \
    }                                                 \
  } while (0)
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/bitonic_sort.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#include <raft/util/cuda_utils.cuh>
namespace raft::util {
namespace {
// Exchange the contents of x and y through a temporary copy (device-side helper).
template <typename T>
_RAFT_DEVICE _RAFT_FORCEINLINE void swap(T& x, T& y)
{
  T t = x;
  x   = y;
  y   = t;
}
// Assign x to ptr only when cond is true. Used so that warp-wide shuffle calls can
// stay outside of divergent branches while the assignment itself remains conditional.
template <typename T>
_RAFT_DEVICE _RAFT_FORCEINLINE void conditional_assign(bool cond, T& ptr, T x)
{
  if (cond) { ptr = x; }
}
}  // namespace
/**
* Warp-wide bitonic merge and sort.
* The data is strided among `warp_width` threads,
* e.g. calling `bitonic<4>(ascending=true).sort(arr)` takes a unique 4-element array as input of
* each thread in a warp and sorts them, such that for a fixed i, arr[i] are sorted within the
* threads in a warp, and for any i < j, arr[j] in any thread is not smaller than arr[i] in any
* other thread.
* When `warp_width < WarpSize`, the data is sorted within all subwarps of the warp independently.
*
* As an example, assuming `Size = 4`, `warp_width = 16`, and `WarpSize = 32`, sorting a permutation
* of numbers 0-63 in each subwarp yield the following result:
* `
* arr_i \ laneId()
* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ...
* subwarp_1 subwarp_2
* 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 0 1 2 ...
* 1 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 16 17 18 ...
* 2 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 32 33 34 ...
* 3 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 48 49 50 ...
* `
*
* Here is a small usage example of device code, which sorts the arrays of length 8 (= 4 * 2)
* grouped in pairs of threads in ascending order:
* @code{.cpp}
* // Fill an array of four ints in each thread of a warp.
* int i = laneId();
* int arr[4] = {i+1, i+5, i, i+7};
* // Sort the arrays in groups of two threads.
* bitonic<4>(ascending=true, warp_width=2).sort(arr);
* // As a result,
* // for every even thread (`i == 2j`): arr == {2j, 2j+1, 2j+5, 2j+7}
* // for every odd thread (`i == 2j+1`): arr == {2j+1, 2j+2, 2j+6, 2j+8}
* @endcode
*
* @tparam Size
* number of elements processed in each thread;
* i.e. the total data size is `Size * warp_width`.
* Must be power-of-two.
*
*/
template <int Size = 1>
class bitonic {
static_assert(isPo2(Size));
public:
/**
* Initialize bitonic sort config.
*
* @param ascending
* the resulting order (true: ascending, false: descending).
* @param warp_width
* the number of threads participating in the warp-level primitives;
* the total size of the sorted data is `Size * warp_width`.
* Must be power-of-two, not larger than the WarpSize.
*/
_RAFT_DEVICE _RAFT_FORCEINLINE explicit bitonic(bool ascending, int warp_width = WarpSize)
: ascending_(ascending), warp_width_(warp_width)
{
}
bitonic(bitonic const&) = delete;
bitonic(bitonic&&) = delete;
auto operator=(bitonic const&) -> bitonic& = delete;
auto operator=(bitonic&&) -> bitonic& = delete;
/**
* You can think of this function in two ways:
*
* 1) Sort any bitonic sequence.
* 2) Merge two halves of the input data assuming they're already sorted, and their order is
* opposite (i.e. either ascending+descending or descending+ascending).
*
* The input pointers are unique per-thread.
* See the class description for the description of the data layout.
*
* @param keys
* is a device pointer to a contiguous array of keys, unique per thread; must be at least `Size`
* elements long.
* @param payloads
* are zero or more associated arrays of the same size as keys, which are sorted together with
* the keys; must be at least `Size` elements long.
*/
template <typename KeyT, typename... PayloadTs>
_RAFT_DEVICE _RAFT_FORCEINLINE void merge(KeyT* __restrict__ keys,
PayloadTs* __restrict__... payloads) const
{
return bitonic<Size>::merge_impl(ascending_, warp_width_, keys, payloads...);
}
/**
* Sort the data.
* The input pointers are unique per-thread.
* See the class description for the description of the data layout.
*
* @param keys
* is a device pointer to a contiguous array of keys, unique per thread; must be at least `Size`
* elements long.
* @param payloads
* are zero or more associated arrays of the same size as keys, which are sorted together with
* the keys; must be at least `Size` elements long.
*/
template <typename KeyT, typename... PayloadTs>
_RAFT_DEVICE _RAFT_FORCEINLINE void sort(KeyT* __restrict__ keys,
PayloadTs* __restrict__... payloads) const
{
return bitonic<Size>::sort_impl(ascending_, warp_width_, keys, payloads...);
}
/**
* @brief `merge` variant for the case of one element per thread
* (pass input by a reference instead of a pointer).
*
* @param key
* @param payload
*/
template <typename KeyT, typename... PayloadTs, int S = Size>
_RAFT_DEVICE _RAFT_FORCEINLINE auto merge(KeyT& __restrict__ key,
PayloadTs& __restrict__... payload) const
-> std::enable_if_t<S == 1, void> // SFINAE to enable this for Size == 1 only
{
static_assert(S == Size);
return merge(&key, &payload...);
}
/**
* @brief `sort` variant for the case of one element per thread
* (pass input by a reference instead of a pointer).
*
* @param key
* @param payload
*/
template <typename KeyT, typename... PayloadTs, int S = Size>
_RAFT_DEVICE _RAFT_FORCEINLINE auto sort(KeyT& __restrict__ key,
PayloadTs& __restrict__... payload) const
-> std::enable_if_t<S == 1, void> // SFINAE to enable this for Size == 1 only
{
static_assert(S == Size);
return sort(&key, &payload...);
}
 private:
  // Number of lanes cooperating in the sort (presumably a power of two,
  // <= warp size; set by a constructor outside this view — TODO confirm).
  const int warp_width_;
  // Output ordering used by the public merge()/sort() entry points.
  const bool ascending_;

  // Instantiations of different sizes call into each other's *_impl recursively.
  template <int AnotherSize>
  friend class bitonic;

  /**
   * One bitonic-merge pass over `Size` keys per thread: first compare-exchange
   * within each thread's register array, then resolve the cross-lane part with
   * warp-shuffle-based compare-exchange.
   */
  template <typename KeyT, typename... PayloadTs>
  static _RAFT_DEVICE _RAFT_FORCEINLINE void merge_impl(bool ascending,
                                                        int warp_width,
                                                        KeyT* __restrict__ keys,
                                                        PayloadTs* __restrict__... payloads)
  {
    // In-thread stage: compare-exchange pairs at shrinking strides within the
    // local array.
#pragma unroll
    for (int size = Size; size > 1; size >>= 1) {
      const int stride = size >> 1;
#pragma unroll
      for (int offset = 0; offset < Size; offset += size) {
#pragma unroll
        for (int i = offset + stride - 1; i >= offset; i--) {
          const int other_i = i + stride;
          KeyT& key         = keys[i];
          KeyT& other       = keys[other_i];
          if (ascending ? key > other : key < other) {
            swap(key, other);
            // Keep every payload array in sync with the key permutation.
            (swap(payloads[i], payloads[other_i]), ...);
          }
        }
      }
    }
    // Cross-lane stage: butterfly exchange via shfl_xor at shrinking strides.
    const int lane = laneId();
#pragma unroll
    for (int i = 0; i < Size; i++) {
      KeyT& key = keys[i];
      for (int stride = (warp_width >> 1); stride > 0; stride >>= 1) {
        // Lanes in the "second" half of each pair take the opposite side of
        // the comparison, so exactly one of the two lanes keeps each value.
        const bool is_second = lane & stride;
        const KeyT other     = shfl_xor(key, stride, warp_width);
        const bool do_assign = (ascending != is_second) ? key > other : key < other;

        conditional_assign(do_assign, key, other);
        // NB: don't put shfl_xor in a conditional; it must be called by all threads in a warp.
        (conditional_assign(do_assign, payloads[i], shfl_xor(payloads[i], stride, warp_width)),
         ...);
      }
    }
  }
  /**
   * Full bitonic sort: recursively sort the two halves in opposite directions
   * (producing a bitonic sequence), then merge it in the requested direction.
   */
  template <typename KeyT, typename... PayloadTs>
  static _RAFT_DEVICE _RAFT_FORCEINLINE void sort_impl(bool ascending,
                                                       int warp_width,
                                                       KeyT* __restrict__ keys,
                                                       PayloadTs* __restrict__... payloads)
  {
    if constexpr (Size == 1) {
      // One key per thread: build the bitonic sequence across lanes, doubling
      // the merged width each step; `lane & width` alternates the direction
      // between the halves of each group.
      const int lane = laneId();
      for (int width = 2; width < warp_width; width <<= 1) {
        bitonic<1>::merge_impl(lane & width, width, keys, payloads...);
      }
    } else {
      // Sort the two halves of the in-thread array in opposite directions.
      constexpr int kSize2 = Size / 2;
      bitonic<kSize2>::sort_impl(false, warp_width, keys, payloads...);
      bitonic<kSize2>::sort_impl(true, warp_width, keys + kSize2, (payloads + kSize2)...);
    }
    // Final merge of the now-bitonic data in the requested order.
    bitonic<Size>::merge_impl(ascending, warp_width, keys, payloads...);
  }
};
} // namespace raft::util
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/scatter.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/detail/scatter.cuh>
namespace raft {
/**
* @brief Performs scatter operation based on the input indexing array
* @tparam DataT data type whose array gets scattered
* @tparam IdxT indexing type
* @tparam TPB threads-per-block in the final kernel launched
* @tparam Lambda the device-lambda performing a unary operation on the loaded
* data before it gets scattered
* @param out the output array
* @param in the input array
* @param idx the indexing array
* @param len number of elements in the input array
* @param stream cuda stream where to launch work
* @param op the device-lambda with signature `DataT func(DataT, IdxT);`. This
* will be applied to every element before scattering it to the right location.
* The second param in this method will be the destination index.
*/
template <typename DataT, typename IdxT, typename Lambda = raft::identity_op, int TPB = 256>
void scatter(DataT* out,
             const DataT* in,
             const IdxT* idx,
             IdxT len,
             cudaStream_t stream,
             Lambda op = raft::identity_op())
{
  if (len <= 0) return;
  // The kernel touches both the data and the index arrays, so the vector
  // width is constrained by the larger of the two element sizes.
  constexpr size_t kDataSize   = sizeof(DataT);
  constexpr size_t kIdxSize    = sizeof(IdxT);
  constexpr size_t kMaxPerElem = kDataSize > kIdxSize ? kDataSize : kIdxSize;
  const size_t total_bytes     = len * kMaxPerElem;
  // Dispatch on the widest power-of-two vector (in bytes) that evenly divides
  // the total payload; `W / kMaxPerElem` is the element count per vector.
  if (16 / kMaxPerElem && total_bytes % 16 == 0) {
    detail::scatterImpl<DataT, 16 / kMaxPerElem, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  } else if (8 / kMaxPerElem && total_bytes % 8 == 0) {
    detail::scatterImpl<DataT, 8 / kMaxPerElem, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  } else if (4 / kMaxPerElem && total_bytes % 4 == 0) {
    detail::scatterImpl<DataT, 4 / kMaxPerElem, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  } else if (2 / kMaxPerElem && total_bytes % 2 == 0) {
    detail::scatterImpl<DataT, 2 / kMaxPerElem, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  } else if (1 / kMaxPerElem) {
    detail::scatterImpl<DataT, 1 / kMaxPerElem, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  } else {
    detail::scatterImpl<DataT, 1, Lambda, IdxT, TPB>(out, in, idx, len, op, stream);
  }
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/vectorized.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_fp16.h>
#include <raft/util/cuda_utils.cuh>
namespace raft {
/**
 * Maps a math type plus a vectorization factor (element count) to the POD
 * type that is used for the actual single-instruction load/store.
 */
template <typename math_, int VecLen>
struct IOType {};
// --- bool ---
template <>
struct IOType<bool, 1> {
  static_assert(sizeof(bool) == sizeof(int8_t), "IOType bool size assumption failed");
  using Type = int8_t;
};
template <>
struct IOType<bool, 2> {
  using Type = int16_t;
};
template <>
struct IOType<bool, 4> {
  using Type = int32_t;
};
template <>
struct IOType<bool, 8> {
  using Type = int2;
};
template <>
struct IOType<bool, 16> {
  using Type = int4;
};
// --- int8_t ---
template <>
struct IOType<int8_t, 1> {
  using Type = int8_t;
};
template <>
struct IOType<int8_t, 2> {
  using Type = int16_t;
};
template <>
struct IOType<int8_t, 4> {
  using Type = int32_t;
};
template <>
struct IOType<int8_t, 8> {
  using Type = int2;
};
template <>
struct IOType<int8_t, 16> {
  using Type = int4;
};
// --- uint8_t ---
template <>
struct IOType<uint8_t, 1> {
  using Type = uint8_t;
};
template <>
struct IOType<uint8_t, 2> {
  using Type = uint16_t;
};
template <>
struct IOType<uint8_t, 4> {
  using Type = uint32_t;
};
template <>
struct IOType<uint8_t, 8> {
  using Type = uint2;
};
template <>
struct IOType<uint8_t, 16> {
  using Type = uint4;
};
// --- int16_t ---
template <>
struct IOType<int16_t, 1> {
  using Type = int16_t;
};
template <>
struct IOType<int16_t, 2> {
  using Type = int32_t;
};
template <>
struct IOType<int16_t, 4> {
  using Type = int2;
};
template <>
struct IOType<int16_t, 8> {
  using Type = int4;
};
// --- uint16_t ---
template <>
struct IOType<uint16_t, 1> {
  using Type = uint16_t;
};
template <>
struct IOType<uint16_t, 2> {
  using Type = uint32_t;
};
template <>
struct IOType<uint16_t, 4> {
  using Type = uint2;
};
template <>
struct IOType<uint16_t, 8> {
  using Type = uint4;
};
// --- __half ---
template <>
struct IOType<__half, 1> {
  using Type = __half;
};
template <>
struct IOType<__half, 2> {
  using Type = __half2;
};
template <>
struct IOType<__half, 4> {
  using Type = uint2;
};
template <>
struct IOType<__half, 8> {
  using Type = uint4;
};
// --- __half2 ---
template <>
struct IOType<__half2, 1> {
  using Type = __half2;
};
template <>
struct IOType<__half2, 2> {
  using Type = uint2;
};
template <>
struct IOType<__half2, 4> {
  using Type = uint4;
};
// --- int32_t ---
template <>
struct IOType<int32_t, 1> {
  using Type = int32_t;
};
template <>
struct IOType<int32_t, 2> {
  using Type = uint2;
};
template <>
struct IOType<int32_t, 4> {
  using Type = uint4;
};
// --- uint32_t ---
template <>
struct IOType<uint32_t, 1> {
  using Type = uint32_t;
};
template <>
struct IOType<uint32_t, 2> {
  using Type = uint2;
};
template <>
struct IOType<uint32_t, 4> {
  using Type = uint4;
};
// --- float ---
template <>
struct IOType<float, 1> {
  using Type = float;
};
template <>
struct IOType<float, 2> {
  using Type = float2;
};
template <>
struct IOType<float, 4> {
  using Type = float4;
};
// --- int64_t ---
template <>
struct IOType<int64_t, 1> {
  using Type = int64_t;
};
template <>
struct IOType<int64_t, 2> {
  using Type = uint4;
};
// --- uint64_t ---
template <>
struct IOType<uint64_t, 1> {
  using Type = uint64_t;
};
template <>
struct IOType<uint64_t, 2> {
  using Type = uint4;
};
// --- unsigned long long (may differ from uint64_t as a type) ---
template <>
struct IOType<unsigned long long, 1> {
  using Type = unsigned long long;
};
template <>
struct IOType<unsigned long long, 2> {
  using Type = uint4;
};
// --- double ---
template <>
struct IOType<double, 1> {
  using Type = double;
};
template <>
struct IOType<double, 2> {
  using Type = double2;
};
/**
* @struct TxN_t
*
* @brief Internal data structure that is used to define a facade for vectorized
* loads/stores across the most common POD types. The goal of his file is to
* provide with CUDA programmers, an easy way to have compiler issue vectorized
* load or store instructions to memory (either global or shared). Vectorized
* accesses to memory are important as they'll utilize its resources
* efficiently,
* when compared to their non-vectorized counterparts. Obviously, for whatever
* reasons if one is unable to issue such vectorized operations, one can always
* fallback to using POD types.
*
* Concept of vectorized accesses : Threads process multiple elements
* to speed up processing. These are loaded in a single read thanks
* to type promotion. It is then reinterpreted as a vector elements
* to perform the kernel's work.
*
* Caution : vectorized accesses requires input addresses to be memory aligned
* according not to the input type but to the promoted type used for reading.
*
* Example demonstrating the use of load operations, performing math on such
* loaded data and finally storing it back.
* @code{.cu}
* TxN_t<uint8_t,8> mydata1, mydata2;
* int idx = (threadIdx.x + (blockIdx.x * blockDim.x)) * mydata1.Ratio;
* mydata1.load(ptr1, idx);
* mydata2.load(ptr2, idx);
* #pragma unroll
* for(int i=0;i<mydata1.Ratio;++i) {
* mydata1.val.data[i] += mydata2.val.data[i];
* }
* mydata1.store(ptr1, idx);
* @endcode
*
* By doing as above, the interesting thing is that the code effectively remains
* almost the same, in case one wants to upgrade to TxN_t<uint16_t,16> type.
* Only change required is to replace variable declaration appropriately.
*
* Obviously, it's caller's responsibility to take care of pointer alignment!
*
* @tparam math_ the data-type in which the compute/math needs to happen
* @tparam veclen_ the number of 'math_' types to be loaded/stored per
* instruction
*/
template <typename math_, int veclen_>
struct TxN_t {
  /** underlying math data type */
  typedef math_ math_t;
  /** internal storage data type */
  typedef typename IOType<math_t, veclen_>::Type io_t;
  /** defines the number of 'math_t' types stored by this struct */
  static const int Ratio = veclen_;

  // alignas(io_t) makes reinterpreting `data` as a single io_t word valid.
  struct alignas(io_t) {
    /** the vectorized data that is used for subsequent operations */
    math_t data[Ratio];
  } val;

  /** View the element array as one wide word for single-instruction I/O. */
  __device__ auto* vectorized_data() { return reinterpret_cast<io_t*>(val.data); }

  ///@todo: add default constructor

  /**
   * @brief Fill the contents of this structure with a constant value
   * @param _val the constant to be filled
   */
  DI void fill(math_t _val)
  {
#pragma unroll
    for (int i = 0; i < Ratio; ++i) {
      val.data[i] = _val;
    }
  }

  ///@todo: how to handle out-of-bounds!!?

  /**
   * @defgroup LoadsStores Global/Shared vectored loads or stores
   *
   * @brief Perform vectored loads/stores on this structure
   * @tparam idx_t index data type
   * @param ptr base pointer from where to load (or store) the data. It must
   *  be aligned to 'sizeof(io_t)'!
   * @param idx the offset from the base pointer which will be loaded
   *  (or stored) by the current thread. This must be aligned to 'Ratio'!
   *
   * @note: In case of loads, after a successful execution, the val.data will
   * be populated with the desired data loaded from the pointer location. In
   * case of stores, the data in the val.data will be stored to that location.
   * @{
   */
  template <typename idx_t = int>
  DI void load(const math_t* ptr, idx_t idx)
  {
    // const path: __ldg reads through the read-only data cache.
    const io_t* bptr   = reinterpret_cast<const io_t*>(&ptr[idx]);
    *vectorized_data() = __ldg(bptr);
  }
  template <typename idx_t = int>
  DI void load(math_t* ptr, idx_t idx)
  {
    // non-const path: plain (cached) vector load.
    io_t* bptr         = reinterpret_cast<io_t*>(&ptr[idx]);
    *vectorized_data() = *bptr;
  }
  template <typename idx_t = int>
  DI void store(math_t* ptr, idx_t idx)
  {
    io_t* bptr = reinterpret_cast<io_t*>(&ptr[idx]);
    *bptr      = *vectorized_data();
  }
  /** @} */
};
/** this is just to keep the compiler happy! */
template <typename math_>
struct TxN_t<math_, 0> {
  typedef math_ math_t;
  static const int Ratio = 1;

  struct {
    math_t data[1];
  } val;

  // All operations are deliberate no-ops in the zero-length specialization,
  // so generic code can instantiate TxN_t<T, 0> without special-casing it.
  DI void fill(math_t _val) {}
  template <typename idx_t = int>
  DI void load(const math_t* ptr, idx_t idx)
  {
  }
  template <typename idx_t = int>
  DI void load(math_t* ptr, idx_t idx)
  {
  }
  template <typename idx_t = int>
  DI void store(math_t* ptr, idx_t idx)
  {
  }
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/memory_pool-inl.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <memory>
#include <raft/core/detail/macros.hpp> // RAFT_INLINE_CONDITIONAL
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
namespace raft {
/**
* @defgroup memory_pool Memory Pool
* @{
*/
/**
* @brief Get a pointer to a pooled memory resource within the scope of the lifetime of the returned
* unique pointer.
*
* This function is useful in the code where multiple repeated allocations/deallocations are
* expected.
* Use case example:
* @code{.cpp}
* void my_func(..., size_t n, rmm::mr::device_memory_resource* mr = nullptr) {
* auto pool_guard = raft::get_pool_memory_resource(mr, 2 * n * sizeof(float));
* if (pool_guard){
* RAFT_LOG_INFO("Created a pool");
* } else {
* RAFT_LOG_INFO("Using the current default or explicitly passed device memory resource");
* }
* rmm::device_uvector<float> x(n, stream, mr);
* rmm::device_uvector<float> y(n, stream, mr);
* ...
* }
* @endcode
* Here, the new memory resource would be created within the function scope if the passed `mr` is
* null and the default resource is not a pool. After the call, `mr` contains a valid memory
* resource in any case.
*
* @param[inout] mr if not null do nothing; otherwise get the current device resource and wrap it
* into a `pool_memory_resource` if necessary and return the pointer to the result.
* @param initial_size if a new memory pool is created, this would be its initial size (rounded up
* to 256 bytes).
*
* @return if a new memory pool is created, it returns a unique_ptr to it;
* this managed pointer controls the lifetime of the created memory resource.
*/
RAFT_INLINE_CONDITIONAL std::unique_ptr<rmm::mr::device_memory_resource> get_pool_memory_resource(
  rmm::mr::device_memory_resource*& mr, size_t initial_size)
{
  using pool_res_t = rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>;
  // The guard stays empty (nullptr) unless this call creates a new pool.
  std::unique_ptr<pool_res_t> guard{};
  if (mr != nullptr) { return guard; }  // caller supplied a resource: nothing to do
  mr = rmm::mr::get_current_device_resource();
  // Only wrap the current resource if it is not already one of the known
  // pool resource flavors.
  const bool already_pooled =
    dynamic_cast<pool_res_t*>(mr) != nullptr ||
    dynamic_cast<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>*>(mr) != nullptr ||
    dynamic_cast<rmm::mr::pool_memory_resource<rmm::mr::managed_memory_resource>*>(mr) != nullptr;
  if (!already_pooled) {
    // Round the initial size up to a multiple of 256 bytes.
    guard = std::make_unique<pool_res_t>(mr, (initial_size + 255) & (~255));
    mr    = guard.get();
  }
  return guard;
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/raft_explicit.hpp | /* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
* @brief Prevents a function template from being implicitly instantiated
*
* This macro defines a function body that can be used for function template
* definitions of functions that should not be implicitly instantiated.
*
* When the template is erroneously implicitly instantiated, it provides a
* useful error message that tells the user how to avoid the implicit
* instantiation.
*
* The error message is generated using a static assert. It is generally tricky
* to have a static assert fire only when you want it, as documented in
* P2593: https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p2593r0.html
*
* We use the strategy from paragraph 1.3 here. We define a struct
* `not_allowed`, whose type is dependent on the template parameters of the
* enclosing function instance. We use this struct type to instantiate the
* `implicit_instantiation` template class, whose value is always false. We pass
* this value to static_assert. This way, the static assert only fires when the
* template is instantiated, since `implicit_instantiation` cannot be
* instantiated without all the types in the enclosing function template.
*/
// Implementation note: everything below is one #define — each line must end in
// a backslash; the static_assert only fires on instantiation because
// `implicit_instantiation<not_allowed>` depends on the function's template args.
#define RAFT_EXPLICIT                                                                       \
  {                                                                                         \
    /* Type of `not_allowed` depends on template parameters of enclosing function. */       \
    struct not_allowed {};                                                                  \
    static_assert(                                                                          \
      raft::util::raft_explicit::implicit_instantiation<not_allowed>::value,                \
      "ACCIDENTAL_IMPLICIT_INSTANTIATION\n\n"                                               \
                                                                                            \
      "If you see this error, then you have implicitly instantiated a function\n"           \
      "template. To keep compile times in check, libraft has the policy of\n"               \
      "explicitly instantiating templates. To fix the compilation error, follow\n"          \
      "these steps.\n\n"                                                                    \
                                                                                            \
      "If you scroll up or down a bit, you probably saw a line like the following:\n\n"     \
                                                                                            \
      "detected during instantiation of \"void raft::foo(T) [with T=float]\" at line [..]\n\n" \
                                                                                            \
      "Simplest temporary solution:\n\n"                                                    \
                                                                                            \
      "  Add '#undef RAFT_EXPLICIT_INSTANTIATE_ONLY' at the top of your .cpp/.cu file.\n\n" \
                                                                                            \
      "Best solution:\n\n"                                                                  \
                                                                                            \
      "  1. Add the following line to the file include/raft/foo.hpp:\n\n"                   \
                                                                                            \
      "     extern template void raft::foo<double>(double);\n\n"                            \
                                                                                            \
      "  2. Add the following line to the file src/raft/foo.cpp:\n\n"                       \
                                                                                            \
      "     template void raft::foo<double>(double)\n");                                    \
                                                                                            \
    /* Function may have non-void return type. */                                           \
    /* To prevent warnings/errors about missing returns, throw an exception. */             \
    throw "raft_explicit_error";                                                            \
  }
namespace raft::util::raft_explicit {
/**
* @brief Template that is always false
*
* This template is from paragraph 1.3 of P2593:
* https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p2593r0.html
*
* The value of `value` is always false, but it depends on a template parameter.
*/
/// Dependent-false helper: `value` is always false, but the compiler can only
/// see that once the template is actually instantiated.
template <typename T>
struct implicit_instantiation {
  static constexpr bool value{false};
};
} // namespace raft::util::raft_explicit
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/warp_primitives.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/operators.hpp>
#include <raft/util/cuda_dev_essentials.cuh>
namespace raft {
/** True CUDA alignment of a type (adapted from CUB) */
template <typename T>
struct cuda_alignment {
  // A T plus one extra byte; the struct size rounds up to T's alignment,
  // so the difference from sizeof(T) recovers the alignment itself.
  struct Pad {
    T val;
    char byte;
  };

  static constexpr int bytes = sizeof(Pad) - sizeof(T);
};

/// True when LargeT is a whole number of UnitT words, with compatible alignment.
template <typename LargeT, typename UnitT>
struct is_multiple {
  static constexpr int large_align_bytes = cuda_alignment<LargeT>::bytes;
  static constexpr int unit_align_bytes  = cuda_alignment<UnitT>::bytes;
  static constexpr bool value =
    (large_align_bytes % unit_align_bytes == 0) && (sizeof(LargeT) % sizeof(UnitT) == 0);
};

template <typename LargeT, typename UnitT>
inline constexpr bool is_multiple_v = is_multiple<LargeT, UnitT>::value;
/** apply a warp-wide fence (useful from Volta+ archs) */
DI void warpFence()
{
#if __CUDA_ARCH__ >= 700
  // Volta+ independent thread scheduling: explicitly re-converge the warp.
  __syncwarp();
#endif
}

/**
 * warp-wide any boolean aggregator
 * @param inFlag this thread's predicate
 * @param mask mask of participating threads (CUDA 9+ only)
 * @return true if any participating thread passed a true flag
 */
DI bool any(bool inFlag, uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  inFlag = __any_sync(mask, inFlag);
#else
  // Pre-CUDA-9 fallback: the legacy intrinsic ignores the mask.
  inFlag = __any(inFlag);
#endif
  return inFlag;
}

/**
 * warp-wide all boolean aggregator
 * @param inFlag this thread's predicate
 * @param mask mask of participating threads (CUDA 9+ only)
 * @return true if every participating thread passed a true flag
 */
DI bool all(bool inFlag, uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  inFlag = __all_sync(mask, inFlag);
#else
  // Pre-CUDA-9 fallback: the legacy intrinsic ignores the mask.
  inFlag = __all(inFlag);
#endif
  return inFlag;
}

/** For every thread in the warp, set the corresponding bit to the thread's flag value. */
DI uint32_t ballot(bool inFlag, uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  return __ballot_sync(mask, inFlag);
#else
  return __ballot(inFlag);
#endif
}
/// Trait: true exactly for the types that the CUDA shuffle intrinsics accept
/// directly; everything else goes through the word-wise shfl* overloads.
template <typename T>
struct is_shuffleable {
  static constexpr bool value = std::disjunction_v<std::is_same<T, int>,
                                                   std::is_same<T, unsigned int>,
                                                   std::is_same<T, long>,
                                                   std::is_same<T, unsigned long>,
                                                   std::is_same<T, long long>,
                                                   std::is_same<T, unsigned long long>,
                                                   std::is_same<T, float>,
                                                   std::is_same<T, double>>;
};

template <typename T>
inline constexpr bool is_shuffleable_v = is_shuffleable<T>::value;
/**
 * @brief Shuffle the data inside a warp
 * @tparam T the data type
 * @param val value to be shuffled
 * @param srcLane lane from where to shuffle
 * @param width lane width
 * @param mask mask of participating threads (Volta+)
 * @return the shuffled data
 */
template <typename T>
DI std::enable_if_t<is_shuffleable_v<T>, T> shfl(T val,
                                                 int srcLane,
                                                 int width    = WarpSize,
                                                 uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  return __shfl_sync(mask, val, srcLane, width);
#else
  // Legacy intrinsic; the mask is ignored.
  return __shfl(val, srcLane, width);
#endif
}

/// Overload of shfl for data types not supported by the CUDA intrinsics
template <typename T>
DI std::enable_if_t<!is_shuffleable_v<T>, T> shfl(T val,
                                                  int srcLane,
                                                  int width    = WarpSize,
                                                  uint32_t mask = 0xffffffffu)
{
  // Shuffle word-by-word using the widest unsigned unit T decomposes into.
  using UnitT =
    std::conditional_t<is_multiple_v<T, int>,
                       unsigned int,
                       std::conditional_t<is_multiple_v<T, short>, unsigned short, unsigned char>>;

  constexpr int n_words = sizeof(T) / sizeof(UnitT);

  T output;
  UnitT* output_alias = reinterpret_cast<UnitT*>(&output);
  UnitT* input_alias  = reinterpret_cast<UnitT*>(&val);

  unsigned int shuffle_word;
  shuffle_word    = shfl((unsigned int)input_alias[0], srcLane, width, mask);
  // Narrowing back to UnitT keeps the low bytes, which is the intended behavior.
  output_alias[0] = shuffle_word;
#pragma unroll
  for (int i = 1; i < n_words; ++i) {
    shuffle_word    = shfl((unsigned int)input_alias[i], srcLane, width, mask);
    output_alias[i] = shuffle_word;
  }

  return output;
}
/**
 * @brief Shuffle the data inside a warp from lower lane IDs
 * @tparam T the data type
 * @param val value to be shuffled
 * @param delta lower lane ID delta from where to shuffle
 * @param width lane width
 * @param mask mask of participating threads (Volta+)
 * @return the shuffled data
 */
template <typename T>
DI std::enable_if_t<is_shuffleable_v<T>, T> shfl_up(T val,
                                                    int delta,
                                                    int width    = WarpSize,
                                                    uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  return __shfl_up_sync(mask, val, delta, width);
#else
  // Legacy intrinsic; the mask is ignored.
  return __shfl_up(val, delta, width);
#endif
}

/// Overload of shfl_up for data types not supported by the CUDA intrinsics
template <typename T>
DI std::enable_if_t<!is_shuffleable_v<T>, T> shfl_up(T val,
                                                     int delta,
                                                     int width    = WarpSize,
                                                     uint32_t mask = 0xffffffffu)
{
  // Same word-wise scheme as the generic shfl overload above.
  using UnitT =
    std::conditional_t<is_multiple_v<T, int>,
                       unsigned int,
                       std::conditional_t<is_multiple_v<T, short>, unsigned short, unsigned char>>;

  constexpr int n_words = sizeof(T) / sizeof(UnitT);

  T output;
  UnitT* output_alias = reinterpret_cast<UnitT*>(&output);
  UnitT* input_alias  = reinterpret_cast<UnitT*>(&val);

  unsigned int shuffle_word;
  shuffle_word    = shfl_up((unsigned int)input_alias[0], delta, width, mask);
  output_alias[0] = shuffle_word;
#pragma unroll
  for (int i = 1; i < n_words; ++i) {
    shuffle_word    = shfl_up((unsigned int)input_alias[i], delta, width, mask);
    output_alias[i] = shuffle_word;
  }

  return output;
}
/**
 * @brief Shuffle the data inside a warp
 * @tparam T the data type
 * @param val value to be shuffled
 * @param laneMask mask to be applied in order to perform xor shuffle
 * @param width lane width
 * @param mask mask of participating threads (Volta+)
 * @return the shuffled data
 */
template <typename T>
DI std::enable_if_t<is_shuffleable_v<T>, T> shfl_xor(T val,
                                                     int laneMask,
                                                     int width    = WarpSize,
                                                     uint32_t mask = 0xffffffffu)
{
#if CUDART_VERSION >= 9000
  return __shfl_xor_sync(mask, val, laneMask, width);
#else
  // Legacy intrinsic; the mask is ignored.
  return __shfl_xor(val, laneMask, width);
#endif
}

/// Overload of shfl_xor for data types not supported by the CUDA intrinsics
template <typename T>
DI std::enable_if_t<!is_shuffleable_v<T>, T> shfl_xor(T val,
                                                      int laneMask,
                                                      int width    = WarpSize,
                                                      uint32_t mask = 0xffffffffu)
{
  // Same word-wise scheme as the generic shfl overload above.
  using UnitT =
    std::conditional_t<is_multiple_v<T, int>,
                       unsigned int,
                       std::conditional_t<is_multiple_v<T, short>, unsigned short, unsigned char>>;

  constexpr int n_words = sizeof(T) / sizeof(UnitT);

  T output;
  UnitT* output_alias = reinterpret_cast<UnitT*>(&output);
  UnitT* input_alias  = reinterpret_cast<UnitT*>(&val);

  unsigned int shuffle_word;
  shuffle_word    = shfl_xor((unsigned int)input_alias[0], laneMask, width, mask);
  output_alias[0] = shuffle_word;
#pragma unroll
  for (int i = 1; i < n_words; ++i) {
    shuffle_word    = shfl_xor((unsigned int)input_alias[i], laneMask, width, mask);
    output_alias[i] = shuffle_word;
  }

  return output;
}
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cudart_utils.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/error.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
#include <raft/util/memory_pool.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <chrono>
#include <cstdio>
#include <execinfo.h>
#include <iomanip>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
namespace raft {
/** Helper method to get to know warp size in device code */
__host__ __device__ constexpr inline int warp_size() { return 32; }

/** Full lane mask: all 32 bits set, one per lane of a warp. */
__host__ __device__ constexpr inline unsigned int warp_full_mask() { return 0xffffffff; }
/**
 * @brief A kernel grid configuration construction gadget for simple one-dimensional mapping
 * elements to threads.
 */
class grid_1d_thread_t {
 public:
  /** threads per block, as requested by the caller */
  int const block_size{0};
  /** ceil(elements / (elements_per_thread * block_size)), capped at the given maximum */
  int const num_blocks{0};

  /**
   * @param overall_num_elements The number of elements the kernel needs to handle/process
   * @param num_threads_per_block The grid block size, determined according to the kernel's
   * specific features (amount of shared memory necessary, SM functional units use pattern etc.);
   * this can't be determined generically/automatically (as opposed to the number of blocks)
   * @param max_num_blocks_1d maximum number of blocks in 1d grid
   * @param elements_per_thread Typically, a single kernel thread processes more than a single
   * element; this affects the number of threads the grid must contain
   */
  grid_1d_thread_t(size_t overall_num_elements,
                   size_t num_threads_per_block,
                   size_t max_num_blocks_1d,
                   size_t elements_per_thread = 1)
    : block_size(num_threads_per_block),
      num_blocks(
        std::min((overall_num_elements + (elements_per_thread * num_threads_per_block) - 1) /
                   (elements_per_thread * num_threads_per_block),
                 max_num_blocks_1d))
  {
    // Argument validation (RAFT_EXPECTS throws on failure); note it runs after
    // the members have already been computed in the initializer list.
    RAFT_EXPECTS(overall_num_elements > 0, "overall_num_elements must be > 0");
    RAFT_EXPECTS(num_threads_per_block / warp_size() > 0,
                 "num_threads_per_block / warp_size() must be > 0");
    RAFT_EXPECTS(elements_per_thread > 0, "elements_per_thread must be > 0");
  }
};
/**
 * @brief A kernel grid configuration construction gadget for simple one-dimensional mapping
 * elements to warps.
 */
class grid_1d_warp_t {
 public:
  /** threads per block, as requested by the caller */
  int const block_size{0};
  /** ceil(elements / warps_per_block), capped at the given maximum */
  int const num_blocks{0};

  /**
   * @param overall_num_elements The number of elements the kernel needs to handle/process
   * @param num_threads_per_block The grid block size, determined according to the kernel's
   * specific features (amount of shared memory necessary, SM functional units use pattern etc.);
   * this can't be determined generically/automatically (as opposed to the number of blocks)
   * @param max_num_blocks_1d maximum number of blocks in 1d grid
   */
  grid_1d_warp_t(size_t overall_num_elements,
                 size_t num_threads_per_block,
                 size_t max_num_blocks_1d)
    : block_size(num_threads_per_block),
      num_blocks(std::min((overall_num_elements + (num_threads_per_block / warp_size()) - 1) /
                            (num_threads_per_block / warp_size()),
                          max_num_blocks_1d))
  {
    // Argument validation (RAFT_EXPECTS throws on failure).
    RAFT_EXPECTS(overall_num_elements > 0, "overall_num_elements must be > 0");
    RAFT_EXPECTS(num_threads_per_block / warp_size() > 0,
                 "num_threads_per_block / warp_size() must be > 0");
  }
};
/**
 * @brief A kernel grid configuration construction gadget for simple one-dimensional mapping
 * elements to blocks.
 */
class grid_1d_block_t {
 public:
  /** threads per block, as requested by the caller */
  int const block_size{0};
  /** one block per element, capped at the given maximum */
  int const num_blocks{0};

  /**
   * @param overall_num_elements The number of elements the kernel needs to handle/process
   * @param num_threads_per_block The grid block size, determined according to the kernel's
   * specific features (amount of shared memory necessary, SM functional units use pattern etc.);
   * this can't be determined generically/automatically (as opposed to the number of blocks)
   * @param max_num_blocks_1d maximum number of blocks in 1d grid
   */
  grid_1d_block_t(size_t overall_num_elements,
                  size_t num_threads_per_block,
                  size_t max_num_blocks_1d)
    : block_size(num_threads_per_block),
      num_blocks(std::min(overall_num_elements, max_num_blocks_1d))
  {
    // Argument validation (RAFT_EXPECTS throws on failure).
    RAFT_EXPECTS(overall_num_elements > 0, "overall_num_elements must be > 0");
    RAFT_EXPECTS(num_threads_per_block / warp_size() > 0,
                 "num_threads_per_block / warp_size() must be > 0");
  }
};
/**
 * @brief Generic copy method for all kinds of transfers
 * @tparam Type data type
 * @param dst destination pointer
 * @param src source pointer
 * @param len length of the src/dst buffers in terms of number of elements
 * @param stream cuda stream
 */
template <typename Type>
void copy(Type* dst, const Type* src, size_t len, rmm::cuda_stream_view stream)
{
  // cudaMemcpyDefault lets the driver infer the transfer direction from the
  // pointer values, so this works for H2D, D2H, D2D and H2H alike.
  RAFT_CUDA_TRY(cudaMemcpyAsync(dst, src, len * sizeof(Type), cudaMemcpyDefault, stream));
}
/**
* @defgroup Copy Copy methods
* These are here along with the generic 'copy' method in order to improve
* code readability using explicitly specified function names
* @{
*/
/** performs a host to device copy */
template <typename Type>
void update_device(Type* d_ptr, const Type* h_ptr, size_t len, rmm::cuda_stream_view stream)
{
copy(d_ptr, h_ptr, len, stream);
}
/** performs a device to host copy */
template <typename Type>
void update_host(Type* h_ptr, const Type* d_ptr, size_t len, rmm::cuda_stream_view stream)
{
  // Named wrapper over the generic copy() for readability at call sites.
  copy(h_ptr, d_ptr, len, stream);
}
/** performs an asynchronous device-to-device copy (unlike copy(), the direction is fixed) */
template <typename Type>
void copy_async(Type* d_ptr1, const Type* d_ptr2, size_t len, rmm::cuda_stream_view stream)
{
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(d_ptr1, d_ptr2, len * sizeof(Type), cudaMemcpyDeviceToDevice, stream));
}
/** @} */
/**
* @defgroup Debug Utils for debugging host/device buffers
* @{
*/
/**
 * @brief Print a host-resident array as `name=[a,b,c];` followed by a newline.
 * @param variable_name label printed before the values
 * @param host_mem host-accessible buffer to print
 * @param componentsCount number of elements to print
 * @param out output stream receiving the text
 */
template <class T, class OutStream>
void print_host_vector(const char* variable_name,
                       const T* host_mem,
                       size_t componentsCount,
                       OutStream& out)
{
  out << variable_name << "=[";
  const char* separator = "";
  for (size_t idx = 0; idx < componentsCount; ++idx) {
    out << separator << host_mem[idx];
    separator = ",";  // only between elements, never before the first
  }
  out << "];" << std::endl;
}
template <class T, class OutStream>
void print_device_vector(const char* variable_name,
                         const T* devMem,
                         size_t componentsCount,
                         OutStream& out)
{
  // Stage the device buffer into host memory, then reuse the host printer.
  auto host_mem = std::make_unique<T[]>(componentsCount);
  // Synchronous copy: completes before print_host_vector reads the staging buffer.
  RAFT_CUDA_TRY(
    cudaMemcpy(host_mem.get(), devMem, componentsCount * sizeof(T), cudaMemcpyDeviceToHost));
  print_host_vector(variable_name, host_mem.get(), componentsCount, out);
}
/**
* @brief Print an array given a device or a host pointer.
*
* @param[in] variable_name
* @param[in] ptr any pointer (device/host/managed, etc)
* @param[in] componentsCount array length
* @param out the output stream
*/
template <class T, class OutStream>
void print_vector(const char* variable_name, const T* ptr, size_t componentsCount, OutStream& out)
{
  cudaPointerAttributes attr;
  RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, ptr));
  if (attr.hostPointer != nullptr) {
    // Registered/mapped memory: print through its host-accessible alias.
    print_host_vector(variable_name, reinterpret_cast<T*>(attr.hostPointer), componentsCount, out);
  } else if (attr.type == cudaMemoryTypeUnregistered) {
    // Plain pageable host memory: directly readable.
    print_host_vector(variable_name, ptr, componentsCount, out);
  } else {
    // Device-only memory: needs a copy to host first.
    print_device_vector(variable_name, ptr, componentsCount, out);
  }
}
/** @} */
/**
* Returns the id of the device for which the pointer is located
* @param p pointer to check
* @return id of device for which pointer is located, otherwise -1.
*/
template <typename T>
int get_device_for_address(const T* p)
{
  if (!p) { return -1; }
  cudaPointerAttributes att;
  cudaError_t err = cudaPointerGetAttributes(&att, p);
  if (err == cudaErrorInvalidValue) {
    // Make sure the current thread error status has been reset
    // (cudaGetLastError clears the sticky error so later calls don't see it).
    err = cudaGetLastError();
    return -1;
  }
  // memoryType is deprecated for CUDA 10.0+
  if (att.type == cudaMemoryTypeDevice) {
    return att.device;
  } else {
    // Host, managed, or unregistered memory: no owning device.
    return -1;
  }
}
/** helper method to get max usable shared mem per block parameter */
inline int getSharedMemPerBlock()
{
  // Queries the currently active device.
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int smemPerBlk;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&smemPerBlk, cudaDevAttrMaxSharedMemoryPerBlock, devId));
  return smemPerBlk;
}
/** helper method to get multi-processor count parameter */
inline int getMultiProcessorCount()
{
  // Queries the currently active device.
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int mpCount;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&mpCount, cudaDevAttrMultiProcessorCount, devId));
  return mpCount;
}
/** helper method to get major minor compute capability version */
inline std::pair<int, int> getComputeCapability()
{
  // Queries the currently active device; returns {major, minor}.
  int devId;
  RAFT_CUDA_TRY(cudaGetDevice(&devId));
  int majorVer, minorVer;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&majorVer, cudaDevAttrComputeCapabilityMajor, devId));
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&minorVer, cudaDevAttrComputeCapabilityMinor, devId));
  return std::make_pair(majorVer, minorVer);
}
/** helper method to convert an array on device to a string on host */
/**
 * @brief Copy a device array to host and format it as a string.
 * @param arr device pointer to the array
 * @param size number of elements
 * @param name label prepended to the formatted output
 * @param stream CUDA stream used for the device-to-host copy (synchronized here)
 * @param width field width passed to std::setw for each element
 * @return string of the form "name = [ a, b, c ]\n"
 * @throws raft::cuda_error if a CUDA call fails
 */
template <typename T>
std::string arr2Str(const T* arr, int size, std::string name, cudaStream_t stream, int width = 4)
{
  std::stringstream ss;
  // RAII staging buffer: the previous raw malloc/free pair leaked when
  // RAFT_CUDA_TRY threw; make_unique releases the memory on any exit path.
  auto arr_h = std::make_unique<T[]>(size);
  update_host(arr_h.get(), arr, size, stream);
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  ss << name << " = [ ";
  for (int i = 0; i < size; i++) {
    // Print 8-bit integer types as numbers rather than characters.
    using CastT =
      std::conditional_t<std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>, int, T>;
    ss << std::setw(width) << static_cast<CastT>(arr_h[i]);
    if (i < size - 1) ss << ", ";
  }
  ss << " ]" << std::endl;
  return ss.str();
}
/** this seems to be unused, but may be useful in the future */
template <typename T>
void ASSERT_DEVICE_MEM(T* ptr, std::string name)
{
  // Logs to stdout (does not throw) when `ptr` is not a valid device pointer.
  cudaPointerAttributes s_att;
  cudaError_t s_err = cudaPointerGetAttributes(&s_att, ptr);
  if (s_err != 0 || s_att.device == -1)
    std::cout << "Invalid device pointer encountered in " << name << ". device=" << s_att.device
              << ", err=" << s_err << std::endl;
}
/**
 * @brief Current wall-clock time in milliseconds since the Unix epoch,
 *        truncated to 32 bits (wraps roughly every 49.7 days).
 */
inline uint32_t curTimeMillis()
{
  // system_clock is the only standard clock guaranteed to track wall-clock
  // (epoch-based) time; high_resolution_clock's epoch is unspecified and may
  // alias steady_clock (time since boot) on some platforms.
  auto duration = std::chrono::system_clock::now().time_since_epoch();
  return std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
}
/**
 * @brief Number of elements required to store a dense rows x columns matrix.
 * @param rows number of rows in matrix
 * @param columns number of columns in matrix
 * @return element count to request from allocate()
 * @sa allocate()
 */
inline size_t allocLengthForMatrix(size_t rows, size_t columns)
{
  // Dense storage: one slot per (row, column) pair.
  return rows * columns;
}
/**
 * @brief Check whether a pointer satisfies a byte alignment.
 * @param ptr the pointer to check
 * @param alignment the alignment to check against
 * @return true if the address in bytes is a multiple of alignment
 */
template <typename Type>
bool is_aligned(Type* ptr, size_t alignment)
{
  const auto address = reinterpret_cast<uintptr_t>(ptr);
  return address % alignment == 0;
}
/**
 * @brief Greatest common divisor of two integers via Euclid's algorithm.
 * @param a integer
 * @param b integer
 * @return gcd of a and b
 */
template <typename IntType>
constexpr IntType gcd(IntType a, IntType b)
{
  // Recursive formulation: gcd(a, 0) == a, otherwise recurse on (b, a mod b).
  return b == IntType(0) ? a : gcd(b, a % b);
}
/**
 * @brief Smallest representable value of T; -infinity for signed types that have one.
 */
template <typename T>
constexpr T lower_bound()
{
  using limits = std::numeric_limits<T>;
  if constexpr (limits::has_infinity && limits::is_signed) { return -limits::infinity(); }
  return limits::lowest();
}
/**
 * @brief Largest representable value of T; +infinity for types that have one.
 */
template <typename T>
constexpr T upper_bound()
{
  using limits = std::numeric_limits<T>;
  if constexpr (limits::has_infinity) { return limits::infinity(); }
  return limits::max();
}
namespace { // NOLINT
/**
 * This is a hack to allow constexpr definition of `half` constants.
 *
 * Neither union-based nor reinterpret_cast-based type punning is possible within
 * constexpr; at the same time, all non-default constructors of `half` data type are not constexpr
 * as well.
 *
 * Based on the implementation details in `cuda_fp16.hpp`, we define here a new constructor for
 * `half` data type, that is a proper constexpr.
 *
 * When we switch to C++20, perhaps we can use `bit_cast` for the same purpose.
 */
struct __half_constexpr : __half { // NOLINT
  // Directly sets the raw 16-bit storage `__x` inherited from __half.
  constexpr explicit inline __half_constexpr(uint16_t u) : __half() { __x = u; }
};
} // namespace
template <>
constexpr inline auto lower_bound<half>() -> half
{
  // 0xfc00 is the IEEE 754 binary16 bit pattern for -infinity.
  return static_cast<half>(__half_constexpr{0xfc00u});
}
template <>
constexpr inline auto upper_bound<half>() -> half
{
  // 0x7c00 is the IEEE 754 binary16 bit pattern for +infinity.
  return static_cast<half>(__half_constexpr{0x7c00u});
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cuda_rt_essentials.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// This file provides a few essential functions that wrap the CUDA runtime API.
// The scope is necessarily limited to ensure that compilation times are
// minimized. Please make sure not to include large / expensive files from here.
#include <cuda_runtime.h>
#include <raft/core/error.hpp>
#include <cstdio>
namespace raft {
/**
 * @brief Exception thrown when a CUDA error is encountered.
 */
struct cuda_error : public raft::exception {
  // Construct from a C string; the message is passed to the base exception.
  explicit cuda_error(char const* const message) : raft::exception(message) {}
  // Construct from a std::string message.
  explicit cuda_error(std::string const& message) : raft::exception(message) {}
};
} // namespace raft
/**
 * @brief Error checking macro for CUDA runtime API functions.
 *
 * Invokes a CUDA runtime API function call, if the call does not return
 * cudaSuccess, invokes cudaGetLastError() to clear the error and throws an
 * exception detailing the CUDA error that occurred
 *
 */
#define RAFT_CUDA_TRY(call) \
  do { \
    cudaError_t const status = call; \
    if (status != cudaSuccess) { \
      cudaGetLastError(); \
      std::string msg{}; \
      SET_ERROR_MSG(msg, \
                    "CUDA error encountered at: ", \
                    "call='%s', Reason=%s:%s", \
                    #call, \
                    cudaGetErrorName(status), \
                    cudaGetErrorString(status)); \
      throw raft::cuda_error(msg); \
    } \
  } while (0)
/**
 * @brief Debug macro to check for CUDA errors
 *
 * In a non-release build, this macro will synchronize the specified stream
 * before error checking. In both release and non-release builds, this macro
 * checks for any pending CUDA errors from previous calls. If an error is
 * reported, an exception is thrown detailing the CUDA error that occurred.
 *
 * The intent of this macro is to provide a mechanism for synchronous and
 * deterministic execution for debugging asynchronous CUDA execution. It should
 * be used after any asynchronous CUDA call, e.g., cudaMemcpyAsync, or an
 * asynchronous kernel launch.
 */
// Debug builds synchronize so failures surface at the offending call site;
// release builds only peek at (and do not clear) any pending async error.
#ifndef NDEBUG
#define RAFT_CHECK_CUDA(stream) RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
#else
#define RAFT_CHECK_CUDA(stream) RAFT_CUDA_TRY(cudaPeekAtLastError());
#endif
// /**
// * @brief check for cuda runtime API errors but log error instead of raising
// * exception.
// */
#define RAFT_CUDA_TRY_NO_THROW(call) \
  do { \
    cudaError_t const status = call; \
    if (cudaSuccess != status) { \
      printf("CUDA call='%s' at file=%s line=%d failed with %s\n", \
             #call, \
             __FILE__, \
             __LINE__, \
             cudaGetErrorString(status)); \
    } \
  } while (0)
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/reduction.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdint.h>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/operators.hpp>
#include <raft/util/cuda_dev_essentials.cuh>
#include <raft/util/warp_primitives.cuh>
namespace raft {
/**
* @brief Logical-warp-level reduction
* @tparam logicalWarpSize Logical warp size (2, 4, 8, 16 or 32)
* @tparam T Value type to be reduced
* @tparam ReduceLambda Reduction operation type
* @param val input value
* @param reduce_op Reduction operation
* @return Reduction result. All lanes will have the valid result.
*/
template <int logicalWarpSize, typename T, typename ReduceLambda>
DI T logicalWarpReduce(T val, ReduceLambda reduce_op)
{
#pragma unroll
  for (int i = logicalWarpSize / 2; i > 0; i >>= 1) {
    // Butterfly exchange: combine with the value held by lane (laneId ^ i).
    T tmp = shfl_xor(val, i);
    val = reduce_op(val, tmp);
  }
  return val;
}
/**
* @brief Warp-level reduction
* @tparam T Value type to be reduced
* @tparam ReduceLambda Reduction operation type
* @param val input value
* @param reduce_op Reduction operation
* @return Reduction result. All lanes will have the valid result.
* @note Why not cub? Because cub doesn't seem to allow working with arbitrary
* number of warps in a block. All threads in the warp must enter this
* function together
*/
template <typename T, typename ReduceLambda>
DI T warpReduce(T val, ReduceLambda reduce_op)
{
  // A full warp is just a logical warp of WarpSize lanes.
  return logicalWarpReduce<WarpSize>(val, reduce_op);
}
/**
* @brief Warp-level reduction
* @tparam T Value type to be reduced
* @param val input value
* @return Reduction result. All lanes will have the valid result.
* @note Why not cub? Because cub doesn't seem to allow working with arbitrary
* number of warps in a block. All threads in the warp must enter this
* function together
*/
template <typename T>
DI T warpReduce(T val)
{
  // Default reduction is a sum.
  return warpReduce(val, raft::add_op{});
}
/**
* @brief 1-D block-level reduction
* @param val input value
* @param smem shared memory region needed for storing intermediate results. It
* must alteast be of size: `sizeof(T) * nWarps`
* @param reduce_op a binary reduction operation.
* @return only the thread0 will contain valid reduced result
* @note Why not cub? Because cub doesn't seem to allow working with arbitrary
* number of warps in a block. All threads in the block must enter this
* function together. cub also uses too many registers
*/
template <typename T, typename ReduceLambda = raft::add_op>
DI T blockReduce(T val, char* smem, ReduceLambda reduce_op = raft::add_op{})
{
  auto* sTemp = reinterpret_cast<T*>(smem);
  int nWarps = (blockDim.x + WarpSize - 1) / WarpSize;
  int lid = laneId();
  int wid = threadIdx.x / WarpSize;
  // Stage 1: each warp reduces its own values, lane 0 publishes the partial.
  val = warpReduce(val, reduce_op);
  if (lid == 0) sTemp[wid] = val;
  __syncthreads();
  // Stage 2: the partials are reduced again by the (first) warp.
  // NOTE(review): lanes >= nWarps are padded with T(0), which assumes 0 is an
  // identity of reduce_op (true for the default add_op) — confirm for min/max ops.
  val = lid < nWarps ? sTemp[lid] : T(0);
  return warpReduce(val, reduce_op);
}
/**
* @brief 1-D warp-level ranked reduction which returns the value and rank.
* thread 0 will have valid result and rank(idx).
* @param val input value
* @param idx index to be used as rank
* @param reduce_op a binary reduction operation.
*/
template <typename T, typename ReduceLambda, typename i_t = int>
DI void warpRankedReduce(T& val, i_t& idx, ReduceLambda reduce_op = raft::min_op{})
{
#pragma unroll
  for (i_t offset = WarpSize / 2; offset > 0; offset /= 2) {
    // Pull the candidate value/rank from the lane `offset` positions above.
    T tmpVal = shfl(val, laneId() + offset);
    i_t tmpIdx = shfl(idx, laneId() + offset);
    // Adopt the neighbour's pair when the reduction op prefers its value.
    if (reduce_op(tmpVal, val) == tmpVal) {
      val = tmpVal;
      idx = tmpIdx;
    }
  }
}
/**
* @brief 1-D block-level ranked reduction which returns the value and rank.
* thread 0 will have valid result and rank(idx).
* @param val input value
* @param shbuf shared memory region needed for storing intermediate results. It
* must alteast be of size: `(sizeof(T) + sizeof(i_t)) * WarpSize`
* @param idx index to be used as rank
* @param reduce_op binary min or max operation.
* @return only the thread0 will contain valid reduced result
*/
template <typename T, typename ReduceLambda, typename i_t = int>
DI std::pair<T, i_t> blockRankedReduce(T val,
                                       T* shbuf,
                                       i_t idx = threadIdx.x,
                                       ReduceLambda reduce_op = raft::min_op{})
{
  // shbuf layout: [WarpSize values][WarpSize indices].
  T* values = shbuf;
  i_t* indices = (i_t*)&shbuf[WarpSize];
  i_t wid = threadIdx.x / WarpSize;
  i_t nWarps = (blockDim.x + WarpSize - 1) / WarpSize;
  warpRankedReduce(val, idx, reduce_op);  // Each warp performs partial reduction
  i_t lane = laneId();
  if (lane == 0) {
    values[wid] = val;   // Write reduced value to shared memory
    indices[wid] = idx;  // Write reduced index to shared memory
  }
  __syncthreads();  // Wait for all partial reductions
  // read from shared memory only if that warp existed
  if (lane < nWarps) {
    val = values[lane];
    idx = indices[lane];
  } else {
    // get the min if it is a max op, get the max if it is a min op
    // NOTE(review): numeric_limits<T>::min() is the smallest *positive* value for
    // floating-point T, not the most negative — confirm this padding for float max-ops.
    val = reduce_op(std::numeric_limits<T>::min(), std::numeric_limits<T>::max()) ==
              std::numeric_limits<T>::min()
            ? std::numeric_limits<T>::max()
            : std::numeric_limits<T>::min();
    idx = -1;
  }
  __syncthreads();
  // Final pass over the per-warp partials, done by warp 0 only.
  if (wid == 0) warpRankedReduce(val, idx, reduce_op);
  return std::pair<T, i_t>{val, idx};
}
/**
* @brief Executes a 1d binary block reduce
* @param val binary value to be reduced across the thread block
* @param shmem memory needed for the reduction. It should be at least of size blockDim.x/WarpSize
* @return only the thread0 will contain valid reduced result
*/
template <int BLOCK_SIZE, typename i_t>
DI i_t binaryBlockReduce(i_t val, i_t* shmem)
{
  static_assert(BLOCK_SIZE <= 1024);
  assert(val == 0 || val == 1);
  // Collect the warp's 0/1 votes into a bitmask and count them.
  const uint32_t mask = __ballot_sync(~0, val);
  const uint32_t n_items = __popc(mask);
  // Each first thread of the warp
  if (threadIdx.x % WarpSize == 0) { shmem[threadIdx.x / WarpSize] = n_items; }
  __syncthreads();
  // First warp sums the per-warp counts.
  val = (threadIdx.x < BLOCK_SIZE / WarpSize) ? shmem[threadIdx.x] : 0;
  if (threadIdx.x < WarpSize) {
    return warpReduce(val);
  }
  // Only first warp gets the results
  else {
    return -1;
  }
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/util/cuda_dev_essentials.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// This file provides a few essential functions for use in __device__ code. The
// scope is necessarily limited to ensure that compilation times are minimized.
// Please make sure not to include large / expensive files from here.
namespace raft {
/** helper macro for device inlined functions */
#define DI inline __device__
/** helper macro for host+device inlined functions */
#define HDI inline __host__ __device__
/** helper macro for host+device functions (no inline hint) */
#define HD __host__ __device__
/**
 * @brief Provide a ceiling division operation ie. ceil(a / b)
 * @tparam IntType supposed to be only integers for now!
 */
template <typename IntType>
constexpr HDI IntType ceildiv(IntType a, IntType b)
{
  // Rounds up by biasing the numerator; valid for non-negative a and positive b.
  return (a + b - 1) / b;
}
/**
 * @brief Provide an alignment function ie. ceil(a / b) * b
 * @tparam IntType supposed to be only integers for now!
 */
template <typename IntType>
constexpr HDI IntType alignTo(IntType a, IntType b)
{
  // Round a up to the next multiple of b.
  return ceildiv(a, b) * b;
}
/**
 * @brief Provide an alignment function ie. (a / b) * b
 * @tparam IntType supposed to be only integers for now!
 */
template <typename IntType>
constexpr HDI IntType alignDown(IntType a, IntType b)
{
  // Round a down to the previous multiple of b (integer truncation).
  return (a / b) * b;
}
/**
 * @brief Check if the input is a power of 2
 * @tparam IntType data type (checked only for integers)
 */
template <typename IntType>
constexpr HDI bool isPo2(IntType num)
{
  // Non-zero and clearing the lowest set bit leaves nothing => single bit set.
  return (num && !(num & (num - 1)));
}
/**
 * @brief Give logarithm of the number to base-2
 * @tparam IntType data type (checked only for integers)
 */
template <typename IntType>
constexpr HDI IntType log2(IntType num, IntType ret = IntType(0))
{
  // Tail-recursive count of right shifts until num reaches 1 (floor(log2)).
  return num <= IntType(1) ? ret : log2(num >> IntType(1), ++ret);
}
/** number of threads per warp */
static const int WarpSize = 32;
/** get the laneId of the current thread */
DI int laneId()
{
  int id;
  // PTX special register %laneid holds this thread's index within its warp.
  asm("mov.s32 %0, %%laneid;" : "=r"(id));
  return id;
}
/** Device function to apply the input lambda across threads in the grid */
template <int ItemsPerThread, typename L>
DI void forEach(int num, L lambda)
{
  int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
  const int numThreads = blockDim.x * gridDim.x;
#pragma unroll
  // Each thread processes up to ItemsPerThread elements, advancing by the
  // total number of threads in the grid; lambda receives (element, iteration).
  for (int itr = 0; itr < ItemsPerThread; ++itr, idx += numThreads) {
    if (idx < num) lambda(idx, itr);
  }
}
/**
 * @brief Swap two values
 * @tparam T the datatype of the values
 * @param a first input
 * @param b second input
 */
template <typename T>
HDI void swapVals(T& a, T& b)
{
  // Copy-based swap; usable from both host and device code.
  T tmp = a;
  a = b;
  b = tmp;
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/util | rapidsai_public_repos/raft/cpp/include/raft/util/detail/itertools.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <tuple>
#include <vector>
namespace raft::util::itertools::detail {

/**
 * @brief Materialize the Cartesian product of several vectors as a vector of S.
 *
 * Elements are emitted in lexicographic order with the first vector varying
 * slowest. Each S is brace-initialized from one element of every input vector.
 *
 * @param idx_seq index sequence pairing each input vector with its tuple slot
 * @param vecs    input vectors (one per tuple slot)
 * @return vector of all combinations, of size prod(vecs.size()...)
 */
template <class S, typename... Args, size_t... Is>
inline std::vector<S> product(std::index_sequence<Is...> idx_seq, const std::vector<Args>&... vecs)
{
  // Total number of combinations is the product of all the input sizes.
  const size_t total = (size_t{1} * ... * vecs.size());
  std::vector<S> result;
  result.reserve(total);
  for (size_t flat = 0; flat < total; flat++) {
    std::tuple<Args...> combo;
    // Decode `flat` as a mixed-radix number, one digit per input vector.
    size_t radix = total, next_radix;
    ((next_radix = radix / vecs.size(),
      std::get<Is>(combo) = vecs[(flat % radix) / next_radix],
      radix = next_radix),
     ...);
    result.push_back({std::get<Is>(combo)...});
  }
  return result;
}

}  // namespace raft::util::itertools::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/util | rapidsai_public_repos/raft/cpp/include/raft/util/detail/cub_wrappers.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
/**
* @brief Convenience wrapper over cub's SortPairs method
* @tparam KeyT key type
* @tparam ValueT value type
* @param workspace workspace buffer which will get resized if not enough space
* @param inKeys input keys array
* @param outKeys output keys array
* @param inVals input values array
* @param outVals output values array
* @param len array length
* @param stream cuda stream
*/
template <typename KeyT, typename ValueT>
void sortPairs(rmm::device_uvector<char>& workspace,
               const KeyT* inKeys,
               KeyT* outKeys,
               const ValueT* inVals,
               ValueT* outVals,
               int len,
               cudaStream_t stream)
{
  size_t worksize = 0; // Fix 'worksize' may be used uninitialized in this function.
  // First call (null workspace) only queries the required temporary storage size.
  cub::DeviceRadixSort::SortPairs(
    nullptr, worksize, inKeys, outKeys, inVals, outVals, len, 0, sizeof(KeyT) * 8, stream);
  workspace.resize(worksize, stream);
  // Second call performs the actual ascending radix sort over all key bits.
  cub::DeviceRadixSort::SortPairs(
    workspace.data(), worksize, inKeys, outKeys, inVals, outVals, len, 0, sizeof(KeyT) * 8, stream);
}
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/util | rapidsai_public_repos/raft/cpp/include/raft/util/detail/scatter.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/vectorized.cuh>
namespace raft::detail {
template <typename DataT, int VecLen, typename Lambda, typename IdxT>
RAFT_KERNEL scatterKernel(DataT* out, const DataT* in, const IdxT* idx, IdxT len, Lambda op)
{
  typedef TxN_t<DataT, VecLen> DataVec;
  typedef TxN_t<IdxT, VecLen> IdxVec;
  // Each thread handles one vectorized chunk of VecLen consecutive elements.
  IdxT tid = threadIdx.x + ((IdxT)blockIdx.x * blockDim.x);
  tid *= VecLen;
  if (tid >= len) return;
  IdxVec idxIn;
  idxIn.load(idx, tid);
  DataVec dataIn;
#pragma unroll
  for (int i = 0; i < VecLen; ++i) {
    // out[tid + i] = op(in[idx[tid + i]], tid + i)
    // NOTE(review): reads are indirect and writes contiguous (gather-style),
    // despite the kernel's name — confirm intended semantics with callers.
    auto inPos = idxIn.val.data[i];
    dataIn.val.data[i] = op(in[inPos], tid + i);
  }
  dataIn.store(out, tid);
}
template <typename DataT, int VecLen, typename Lambda, typename IdxT, int TPB>
void scatterImpl(
  DataT* out, const DataT* in, const IdxT* idx, IdxT len, Lambda op, cudaStream_t stream)
{
  // One thread per VecLen elements (VecLen is a compile-time constant; the
  // ternary guards against a VecLen of 0).
  const IdxT nblks = raft::ceildiv(VecLen ? len / VecLen : len, (IdxT)TPB);
  scatterKernel<DataT, VecLen, Lambda, IdxT><<<nblks, TPB, 0, stream>>>(out, in, idx, len, op);
  RAFT_CUDA_TRY(cudaGetLastError());
}
} // namespace raft::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/solver/linear_assignment.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright 2020 KETAN DATE & RAKESH NAGI
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm
* Authors: Ketan Date and Rakesh Nagi
*
* Article reference:
* Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms
* for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72.
*
*/
#ifndef __LAP_H
#define __LAP_H
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <raft/solver/detail/lap_functions.cuh>
#include <raft/solver/linear_assignment_types.hpp>
namespace raft::solver {
/**
* @brief CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm
* @note This is a port to RAFT from original authors Ketan Date and Rakesh Nagi
*
* @see Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms
* for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72.
*
* @tparam vertex_t
* @tparam weight_t
*/
template <typename vertex_t, typename weight_t>
class LinearAssignmentProblem {
private:
vertex_t size_;
vertex_t batchsize_;
weight_t epsilon_;
weight_t const* d_costs_;
Vertices<vertex_t, weight_t> d_vertices_dev;
VertexData<vertex_t> d_row_data_dev, d_col_data_dev;
raft::resources const& handle_;
rmm::device_uvector<int> row_covers_v;
rmm::device_uvector<int> col_covers_v;
rmm::device_uvector<weight_t> row_duals_v;
rmm::device_uvector<weight_t> col_duals_v;
rmm::device_uvector<weight_t> col_slacks_v;
rmm::device_uvector<int> row_is_visited_v;
rmm::device_uvector<int> col_is_visited_v;
rmm::device_uvector<vertex_t> row_parents_v;
rmm::device_uvector<vertex_t> col_parents_v;
rmm::device_uvector<vertex_t> row_children_v;
rmm::device_uvector<vertex_t> col_children_v;
rmm::device_uvector<weight_t> obj_val_primal_v;
rmm::device_uvector<weight_t> obj_val_dual_v;
public:
  /**
   * @brief Constructor
   * @param handle raft handle for managing resources
   * @param size size of square matrix
   * @param batchsize number of independent subproblems solved together
   * @param epsilon tolerance used when comparing costs against zero
   */
  LinearAssignmentProblem(raft::resources const& handle,
                          vertex_t size,
                          vertex_t batchsize,
                          weight_t epsilon)
    : handle_(handle),
      size_(size),
      batchsize_(batchsize),
      epsilon_(epsilon),
      d_costs_(nullptr),
      // All device buffers start empty; they are sized in initializeDevice().
      row_covers_v(0, resource::get_cuda_stream(handle_)),
      col_covers_v(0, resource::get_cuda_stream(handle_)),
      row_duals_v(0, resource::get_cuda_stream(handle_)),
      col_duals_v(0, resource::get_cuda_stream(handle_)),
      col_slacks_v(0, resource::get_cuda_stream(handle_)),
      row_is_visited_v(0, resource::get_cuda_stream(handle_)),
      col_is_visited_v(0, resource::get_cuda_stream(handle_)),
      row_parents_v(0, resource::get_cuda_stream(handle_)),
      col_parents_v(0, resource::get_cuda_stream(handle_)),
      row_children_v(0, resource::get_cuda_stream(handle_)),
      col_children_v(0, resource::get_cuda_stream(handle_)),
      obj_val_primal_v(0, resource::get_cuda_stream(handle_)),
      obj_val_dual_v(0, resource::get_cuda_stream(handle_))
  {
  }
  /**
   * Executes Hungarian algorithm on the input cost matrix.
   * @param d_cost_matrix device pointer to the cost matrix (borrowed, not owned)
   * @param d_row_assignment device pointer receiving the per-row assignments
   * @param d_col_assignment device pointer receiving the per-column assignments
   */
  void solve(weight_t const* d_cost_matrix, vertex_t* d_row_assignment, vertex_t* d_col_assignment)
  {
    initializeDevice();
    d_vertices_dev.row_assignments = d_row_assignment;
    d_vertices_dev.col_assignments = d_col_assignment;
    d_costs_ = d_cost_matrix;
    // State machine over the Hungarian algorithm steps; step 100 means "done".
    int step = 0;
    while (step != 100) {
      switch (step) {
        case 0: step = hungarianStep0(); break;
        case 1: step = hungarianStep1(); break;
        case 2: step = hungarianStep2(); break;
        case 3: step = hungarianStep3(); break;
        case 4: step = hungarianStep4(); break;
        case 5: step = hungarianStep5(); break;
        case 6: step = hungarianStep6(); break;
      }
    }
    // Drop the borrowed cost-matrix pointer once the solve completes.
    d_costs_ = nullptr;
  }
  /**
   * Function for getting optimal row dual vector for subproblem spId.
   * @param spId subproblem index within the batch
   * @return pair of (pointer into the row duals for spId, vector length)
   */
  std::pair<const weight_t*, vertex_t> getRowDualVector(int spId) const
  {
    return std::make_pair(row_duals_v.data() + spId * size_, size_);
  }
/**
* Function for getting optimal col dual vector for subproblem spId.
* @param spId
* @return
*/
std::pair<const weight_t*, vertex_t> getColDualVector(int spId)
{
return std::make_pair(col_duals_v.data() + spId * size_, size_);
}
  /**
   * Function for getting optimal primal objective value for subproblem spId.
   * @param spId subproblem index within the batch
   * @return primal objective value, copied from device to host (synchronizes)
   */
  weight_t getPrimalObjectiveValue(int spId)
  {
    weight_t result;
    raft::update_host(
      &result, obj_val_primal_v.data() + spId, 1, resource::get_cuda_stream(handle_));
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle_));
    return result;
  }
  /**
   * Function for getting optimal dual objective value for subproblem spId.
   * @param spId subproblem index within the batch
   * @return dual objective value, copied from device to host (synchronizes)
   */
  weight_t getDualObjectiveValue(int spId)
  {
    weight_t result;
    raft::update_host(&result, obj_val_dual_v.data() + spId, 1, resource::get_cuda_stream(handle_));
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle_));
    return result;
  }
private:
  // Helper function for initializing global variables and arrays on a single host.
  void initializeDevice()
  {
    cudaStream_t stream = resource::get_cuda_stream(handle_);
    // Size all per-batch working buffers (batchsize_ subproblems of size_ each).
    row_covers_v.resize(batchsize_ * size_, stream);
    col_covers_v.resize(batchsize_ * size_, stream);
    row_duals_v.resize(batchsize_ * size_, stream);
    col_duals_v.resize(batchsize_ * size_, stream);
    col_slacks_v.resize(batchsize_ * size_, stream);
    row_is_visited_v.resize(batchsize_ * size_, stream);
    col_is_visited_v.resize(batchsize_ * size_, stream);
    row_parents_v.resize(batchsize_ * size_, stream);
    col_parents_v.resize(batchsize_ * size_, stream);
    row_children_v.resize(batchsize_ * size_, stream);
    col_children_v.resize(batchsize_ * size_, stream);
    obj_val_primal_v.resize(batchsize_, stream);
    obj_val_dual_v.resize(batchsize_, stream);
    // Wire the raw pointers used by the device kernels to the owning vectors.
    d_vertices_dev.row_covers = row_covers_v.data();
    d_vertices_dev.col_covers = col_covers_v.data();
    d_vertices_dev.row_duals = row_duals_v.data();
    d_vertices_dev.col_duals = col_duals_v.data();
    d_vertices_dev.col_slacks = col_slacks_v.data();
    d_row_data_dev.is_visited = row_is_visited_v.data();
    d_col_data_dev.is_visited = col_is_visited_v.data();
    d_row_data_dev.parents = row_parents_v.data();
    d_row_data_dev.children = row_children_v.data();
    d_col_data_dev.parents = col_parents_v.data();
    d_col_data_dev.children = col_children_v.data();
    // NOTE(review): thrust::device is the default execution policy and does not
    // use the handle's stream — confirm this is intentional.
    thrust::fill(thrust::device, row_covers_v.begin(), row_covers_v.end(), int{0});
    thrust::fill(thrust::device, col_covers_v.begin(), col_covers_v.end(), int{0});
    thrust::fill(thrust::device, row_duals_v.begin(), row_duals_v.end(), weight_t{0});
    thrust::fill(thrust::device, col_duals_v.begin(), col_duals_v.end(), weight_t{0});
  }
  // Step 0: subtract row and column minima from each element to create initial zeros.
  int hungarianStep0()
  {
    detail::initialReduction(handle_, d_costs_, d_vertices_dev, batchsize_, size_);
    return 1;
  }
  // Step 1: greedily build an initial assignment on zero-slack edges, then
  // iterate Steps 2-4 until Step 2 reports optimality (6) or Step 3 requests a
  // dual update (5). The returned step number tells the caller what to run
  // next (the dispatch loop is outside this class section).
  // (Previous comment here was a copy-paste of Step 0's description.)
  int hungarianStep1()
  {
    detail::computeInitialAssignments(
      handle_, d_costs_, d_vertices_dev, batchsize_, size_, epsilon_);
    int next = 2;
    while (true) {
      if ((next = hungarianStep2()) == 6) break;  // all rows covered -> optimal
      if ((next = hungarianStep3()) == 5) break;  // no augmenting path -> dual update
      hungarianStep4();                           // augment, then re-cover (Step 2)
    }
    return next;
  }
  // Step 2: cover all assigned rows and mark unassigned rows as alternating-
  // tree roots. Returns 6 (optimal) when every row of every subproblem is
  // covered, otherwise 3 (grow trees).
  int hungarianStep2()
  {
    int cover_count = detail::computeRowCovers(
      handle_, d_vertices_dev, d_row_data_dev, d_col_data_dev, batchsize_, size_);
    int next = (cover_count == batchsize_ * size_) ? 6 : 3;
    return next;
  }
  // Step 3: grow alternating trees rooted at the unassigned rows (zero cover).
  // The device-side flag is raised by the kernels when an unassigned column is
  // reached, i.e. an augmenting path exists. Returns 4 (augment) if the flag
  // was set, else 5 (dual update needed).
  int hungarianStep3()
  {
    int next;
    rmm::device_scalar<bool> flag_v(resource::get_cuda_stream(handle_));
    bool h_flag = false;
    flag_v.set_value_async(h_flag, resource::get_cuda_stream(handle_));
    detail::executeZeroCover(handle_,
                             d_costs_,
                             d_vertices_dev,
                             d_row_data_dev,
                             d_col_data_dev,
                             flag_v.data(),
                             batchsize_,
                             size_,
                             epsilon_);
    // Synchronous read-back of the flag (value() blocks on the stream).
    h_flag = flag_v.value(resource::get_cuda_stream(handle_));
    next = h_flag ? 4 : 5;
    return next;
  }
  // Step 4: trace the discovered alternating paths backwards (reverse pass),
  // then flip assignments along each node-disjoint path (augmentation pass).
  // Always returns to Step 2.
  int hungarianStep4()
  {
    detail::reversePass(handle_, d_row_data_dev, d_col_data_dev, batchsize_, size_);
    detail::augmentationPass(
      handle_, d_vertices_dev, d_row_data_dev, d_col_data_dev, batchsize_, size_);
    return 2;
  }
  // Step 5: shift the dual variables by the minimum uncovered slack per
  // subproblem, creating new zero-cost arcs so tree growth (Step 3) can resume.
  int hungarianStep5()
  {
    detail::dualUpdate(
      handle_, d_vertices_dev, d_row_data_dev, d_col_data_dev, batchsize_, size_, epsilon_);
    return 3;
  }
  // Step 6: at optimality, compute the primal objective (sum of assigned
  // costs) and the dual objective (sum of duals) per subproblem into the
  // device-side result arrays. Returns the terminal step code 100.
  int hungarianStep6()
  {
    detail::calcObjValPrimal(handle_,
                             obj_val_primal_v.data(),
                             d_costs_,
                             d_vertices_dev.row_assignments,
                             batchsize_,
                             size_);
    detail::calcObjValDual(handle_, obj_val_dual_v.data(), d_vertices_dev, batchsize_, size_);
    return 100;
  }
};
} // namespace raft::solver
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/solver/linear_assignment_types.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
* Copyright 2020 KETAN DATE & RAKESH NAGI
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm
* Authors: Ketan Date and Rakesh Nagi
*
* Article reference:
* Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms
* for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72.
*
*/
#pragma once
namespace raft::solver {
// Non-owning bundle of device pointers holding the per-vertex LAP state for a
// batch of subproblems. Each array is flattened as (subproblem * N + vertex).
template <typename vertex_t, typename weight_t>
struct Vertices {
  vertex_t* row_assignments;  // column assigned to each row (-1 when unassigned)
  vertex_t* col_assignments;  // row assigned to each column (-1 when unassigned)
  int* row_covers;            // 1 when the row is covered, else 0
  int* col_covers;            // 1 when the column is covered, else 0
  weight_t* row_duals;        // dual variable per row
  weight_t* col_duals;        // dual variable per column
  weight_t* col_slacks;       // running minimum slack per column during tree growth
};
// Non-owning alternating-tree bookkeeping for one vertex side (rows or columns).
template <typename vertex_t>
struct VertexData {
  vertex_t* parents;   // flattened index of the tree parent (-1 when none)
  vertex_t* children;  // flattened index recorded while tracing paths backwards
  int* is_visited;     // state flag: DORMANT/ACTIVE/VISITED/REVERSE/AUGMENT/MODIFIED
};
} // namespace raft::solver
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/solver | rapidsai_public_repos/raft/cpp/include/raft/solver/detail/lap_functions.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright 2020 KETAN DATE & RAKESH NAGI
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm
* Authors: Ketan Date and Rakesh Nagi
*
* Article reference:
* Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms
* for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72.
*
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/solver/linear_assignment_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/solver/detail/lap_kernels.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <cstddef>
namespace raft::solver::detail {
const int BLOCKDIMX{64};
const int BLOCKDIMY{1};
// Computes a 1-D launch configuration for `size` work items: a full block of
// BLOCKDIMX * BLOCKDIMY threads and just enough blocks to cover `size`.
inline void calculateLinearDims(dim3& blocks_per_grid,
                                dim3& threads_per_block,
                                int& total_blocks,
                                int size)
{
  int const block_threads = BLOCKDIMX * BLOCKDIMY;
  threads_per_block.x     = block_threads;
  // ceil(size / block_threads) without floating point.
  int const num_blocks = (size + block_threads - 1) / block_threads;
  total_blocks         = num_blocks;
  blocks_per_grid.x    = num_blocks;
}
// Computes a 2-D launch configuration that covers a square of side
// ceil(sqrt(size)) with BLOCKDIMX x BLOCKDIMY thread blocks.
inline void calculateSquareDims(dim3& blocks_per_grid,
                                dim3& threads_per_block,
                                int& total_blocks,
                                int size)
{
  threads_per_block.x = BLOCKDIMX;
  threads_per_block.y = BLOCKDIMY;
  // Side length of the smallest square holding `size` items.
  int sq_size = (int)ceil(sqrt(size));
  int valuex  = (int)ceil((float)(sq_size) / BLOCKDIMX);
  int valuey  = (int)ceil((float)(sq_size) / BLOCKDIMY);
  total_blocks      = valuex * valuey;
  blocks_per_grid.x = valuex;
  blocks_per_grid.y = valuey;
}
// Computes a 2-D launch configuration covering an xsize-by-ysize rectangle
// with BLOCKDIMX x BLOCKDIMY thread blocks.
inline void calculateRectangularDims(
  dim3& blocks_per_grid, dim3& threads_per_block, int& total_blocks, int xsize, int ysize)
{
  threads_per_block.x = BLOCKDIMX;
  threads_per_block.y = BLOCKDIMY;
  // Ceiling division in each dimension.
  int const grid_x  = (xsize + BLOCKDIMX - 1) / BLOCKDIMX;
  int const grid_y  = (ysize + BLOCKDIMY - 1) / BLOCKDIMY;
  total_blocks      = grid_x * grid_y;
  blocks_per_grid.x = grid_x;
  blocks_per_grid.y = grid_y;
}
// Computes the initial duals for all SP subproblems of dimension N: row duals
// become per-row cost minima, column duals become per-column minima of the
// row-reduced costs. Both kernels run on the handle's stream.
template <typename vertex_t, typename weight_t>
inline void initialReduction(raft::resources const& handle,
                             weight_t const* d_costs,
                             Vertices<vertex_t, weight_t>& d_vertices_dev,
                             int SP,
                             vertex_t N)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks = 0;
  detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
  kernel_rowReduction<<<blocks_per_grid, threads_per_block, 0, resource::get_cuda_stream(handle)>>>(
    d_costs, d_vertices_dev.row_duals, SP, N, std::numeric_limits<weight_t>::max());
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  // Column reduction consumes the row duals produced above.
  kernel_columnReduction<<<blocks_per_grid,
                           threads_per_block,
                           0,
                           resource::get_cuda_stream(handle)>>>(
    d_costs,
    d_vertices_dev.row_duals,
    d_vertices_dev.col_duals,
    SP,
    N,
    std::numeric_limits<weight_t>::max());
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
// Greedily matches rows to columns along zero-slack edges. Assignments are
// reset to -1 first; per-row/column lock arrays serialize concurrent claims
// inside the kernel (via atomicCAS).
template <typename vertex_t, typename weight_t>
inline void computeInitialAssignments(raft::resources const& handle,
                                      weight_t const* d_costs,
                                      Vertices<vertex_t, weight_t>& d_vertices,
                                      int SP,
                                      vertex_t N,
                                      weight_t epsilon)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks = 0;
  std::size_t size = SP * N;
  rmm::device_uvector<int> row_lock_v(size, resource::get_cuda_stream(handle));
  rmm::device_uvector<int> col_lock_v(size, resource::get_cuda_stream(handle));
  // -1 marks "unassigned"; locks start open (0).
  thrust::fill_n(thrust::device, d_vertices.row_assignments, size, -1);
  thrust::fill_n(thrust::device, d_vertices.col_assignments, size, -1);
  thrust::fill_n(thrust::device, row_lock_v.data(), size, 0);
  thrust::fill_n(thrust::device, col_lock_v.data(), size, 0);
  detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
  kernel_computeInitialAssignments<<<blocks_per_grid,
                                     threads_per_block,
                                     0,
                                     resource::get_cuda_stream(handle)>>>(
    d_costs,
    d_vertices.row_duals,
    d_vertices.col_duals,
    d_vertices.row_assignments,
    d_vertices.col_assignments,
    row_lock_v.data(),
    col_lock_v.data(),
    SP,
    N,
    epsilon);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
// Resets covers, slacks and tree state, covers every assigned row, and marks
// unassigned rows ACTIVE (alternating-tree roots). Returns the total number of
// covered rows across the batch (== SP * N at optimality).
template <typename vertex_t, typename weight_t>
inline int computeRowCovers(raft::resources const& handle,
                            Vertices<vertex_t, weight_t>& d_vertices,
                            VertexData<vertex_t>& d_row_data,
                            VertexData<vertex_t>& d_col_data,
                            int SP,
                            vertex_t N)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks = 0;
  std::size_t size = SP * N;
  // Fresh tree: no covers, infinite slacks, all vertices dormant, no links.
  thrust::fill_n(thrust::device, d_vertices.row_covers, size, int{0});
  thrust::fill_n(thrust::device, d_vertices.col_covers, size, int{0});
  thrust::fill_n(thrust::device, d_vertices.col_slacks, size, std::numeric_limits<weight_t>::max());
  thrust::fill_n(thrust::device, d_row_data.is_visited, size, DORMANT);
  thrust::fill_n(thrust::device, d_col_data.is_visited, size, DORMANT);
  thrust::fill_n(thrust::device, d_row_data.parents, size, vertex_t{-1});
  thrust::fill_n(thrust::device, d_row_data.children, size, vertex_t{-1});
  thrust::fill_n(thrust::device, d_col_data.parents, size, vertex_t{-1});
  thrust::fill_n(thrust::device, d_col_data.children, size, vertex_t{-1});
  detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
  kernel_computeRowCovers<<<blocks_per_grid,
                            threads_per_block,
                            0,
                            resource::get_cuda_stream(handle)>>>(
    d_vertices.row_assignments, d_vertices.row_covers, d_row_data.is_visited, SP, N);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  return thrust::reduce(thrust::device, d_vertices.row_covers, d_vertices.row_covers + size);
}
// Covers the zeros reachable from the frontier rows (given in CSR form via
// d_rows_csr_ptrs / d_rows_csr_neighbors) and expands the alternating-tree
// frontier by one level. The device flag d_flag is set when an unassigned
// column is reached, i.e. an augmenting path exists.
template <typename vertex_t, typename weight_t>
inline void coverZeroAndExpand(raft::resources const& handle,
                               weight_t const* d_costs_dev,
                               vertex_t const* d_rows_csr_neighbors,
                               vertex_t const* d_rows_csr_ptrs,
                               Vertices<vertex_t, weight_t>& d_vertices_dev,
                               VertexData<vertex_t>& d_row_data_dev,
                               VertexData<vertex_t>& d_col_data_dev,
                               bool* d_flag,
                               int SP,
                               vertex_t N,
                               weight_t epsilon)
{
  int total_blocks = 0;
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
  kernel_coverAndExpand<<<blocks_per_grid,
                          threads_per_block,
                          0,
                          resource::get_cuda_stream(handle)>>>(d_flag,
                                                               d_rows_csr_ptrs,
                                                               d_rows_csr_neighbors,
                                                               d_costs_dev,
                                                               d_vertices_dev,
                                                               d_row_data_dev,
                                                               d_col_data_dev,
                                                               SP,
                                                               N,
                                                               epsilon);
  // Every other launch in this file checks for launch errors; this one was
  // previously missing the check (consistency fix).
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
// One frontier-expansion iteration of the zero-cover procedure: compacts the
// ACTIVE rows into a CSR list (one row segment per subproblem) and, if any
// exist, expands the alternating trees through them. Returns M, the number of
// ACTIVE rows processed (0 terminates the outer loop in executeZeroCover).
template <typename vertex_t, typename weight_t>
inline vertex_t zeroCoverIteration(raft::resources const& handle,
                                   weight_t const* d_costs_dev,
                                   Vertices<vertex_t, weight_t>& d_vertices_dev,
                                   VertexData<vertex_t>& d_row_data_dev,
                                   VertexData<vertex_t>& d_col_data_dev,
                                   bool* d_flag,
                                   int SP,
                                   vertex_t N,
                                   weight_t epsilon)
{
  vertex_t M;
  rmm::device_uvector<vertex_t> csr_ptrs_v(0, resource::get_cuda_stream(handle));
  rmm::device_uvector<vertex_t> csr_neighbors_v(0, resource::get_cuda_stream(handle));
  {
    dim3 blocks_per_grid;
    dim3 threads_per_block;
    int total_blocks = 0;
    rmm::device_uvector<bool> predicates_v(SP * N, resource::get_cuda_stream(handle));
    rmm::device_uvector<vertex_t> addresses_v(SP * N, resource::get_cuda_stream(handle));
    thrust::fill_n(thrust::device, predicates_v.data(), SP * N, false);
    thrust::fill_n(thrust::device, addresses_v.data(), SP * N, vertex_t{0});
    csr_ptrs_v.resize(SP + 1, resource::get_cuda_stream(handle));
    thrust::fill_n(thrust::device, csr_ptrs_v.data(), (SP + 1), vertex_t{-1});
    detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
    // construct predicate matrix for edges (1 for each ACTIVE row).
    kernel_rowPredicateConstructionCSR<<<blocks_per_grid,
                                         threads_per_block,
                                         0,
                                         resource::get_cuda_stream(handle)>>>(
      predicates_v.data(), addresses_v.data(), d_row_data_dev.is_visited, SP, N);
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
    // M = total ACTIVE rows; scan turns counts into scatter addresses.
    M = thrust::reduce(thrust::device, addresses_v.begin(), addresses_v.end());
    thrust::exclusive_scan(
      thrust::device, addresses_v.begin(), addresses_v.end(), addresses_v.begin());
    if (M > 0) {
      csr_neighbors_v.resize(M, resource::get_cuda_stream(handle));
      kernel_rowScatterCSR<<<blocks_per_grid,
                             threads_per_block,
                             0,
                             resource::get_cuda_stream(handle)>>>(predicates_v.data(),
                                                                  addresses_v.data(),
                                                                  csr_neighbors_v.data(),
                                                                  csr_ptrs_v.data(),
                                                                  M,
                                                                  SP,
                                                                  N);
      RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
    }
  }
  if (M > 0) {
    coverZeroAndExpand(handle,
                       d_costs_dev,
                       csr_neighbors_v.data(),
                       csr_ptrs_v.data(),
                       d_vertices_dev,
                       d_row_data_dev,
                       d_col_data_dev,
                       d_flag,
                       SP,
                       N,
                       epsilon);
  }
  return M;
}
// Drives the zero-cover procedure to a fixed point: keeps expanding the
// alternating-tree frontier until no ACTIVE rows remain. On exit, *d_flag
// (device memory) tells the caller whether an augmenting path was found
// (Step 4) or a dual update is required (Step 5).
template <typename vertex_t, typename weight_t>
inline void executeZeroCover(raft::resources const& handle,
                             weight_t const* d_costs_dev,
                             Vertices<vertex_t, weight_t>& d_vertices_dev,
                             VertexData<vertex_t>& d_row_data_dev,
                             VertexData<vertex_t>& d_col_data_dev,
                             bool* d_flag,
                             int SP,
                             vertex_t N,
                             weight_t epsilon)
{
  vertex_t M = 1;
  while (M > 0) {
    M = zeroCoverIteration(
      handle, d_costs_dev, d_vertices_dev, d_row_data_dev, d_col_data_dev, d_flag, SP, N, epsilon);
  }
}
// Reverse pass: compacts the columns marked REVERSE (tree leaves that reached
// an unassigned column) and traces each alternating path back to its root,
// recording child links and flagging root rows AUGMENT.
template <typename vertex_t>
inline void reversePass(raft::resources const& handle,
                        VertexData<vertex_t>& d_row_data_dev,
                        VertexData<vertex_t>& d_col_data_dev,
                        int SP,
                        int N)
{
  int total_blocks = 0;
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  std::size_t size = SP * N;
  detail::calculateLinearDims(blocks_per_grid, threads_per_block, total_blocks, size);
  rmm::device_uvector<bool> predicates_v(size, resource::get_cuda_stream(handle));
  rmm::device_uvector<vertex_t> addresses_v(size, resource::get_cuda_stream(handle));
  thrust::fill_n(thrust::device, predicates_v.data(), size, false);
  thrust::fill_n(thrust::device, addresses_v.data(), size, vertex_t{0});
  // compact the reverse pass column vertices (REVERSE/AUGMENT states).
  kernel_augmentPredicateConstruction<<<blocks_per_grid,
                                        threads_per_block,
                                        0,
                                        resource::get_cuda_stream(handle)>>>(
    predicates_v.data(), addresses_v.data(), d_col_data_dev.is_visited, size);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  // calculate total number of vertices.
  std::size_t csr_size = thrust::reduce(thrust::device, addresses_v.begin(), addresses_v.end());
  // exclusive scan for calculating the scatter addresses.
  thrust::exclusive_scan(
    thrust::device, addresses_v.begin(), addresses_v.end(), addresses_v.begin());
  if (csr_size > 0) {
    int total_blocks_1 = 0;
    dim3 blocks_per_grid_1;
    dim3 threads_per_block_1;
    // Second config sized for the compacted list; the scatter below still
    // iterates the full `size` range and uses the first config.
    detail::calculateLinearDims(blocks_per_grid_1, threads_per_block_1, total_blocks_1, csr_size);
    rmm::device_uvector<vertex_t> elements_v(csr_size, resource::get_cuda_stream(handle));
    kernel_augmentScatter<<<blocks_per_grid,
                            threads_per_block,
                            0,
                            resource::get_cuda_stream(handle)>>>(
      elements_v.data(), predicates_v.data(), addresses_v.data(), size);
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
    kernel_reverseTraversal<<<blocks_per_grid_1,
                              threads_per_block_1,
                              0,
                              resource::get_cuda_stream(handle)>>>(
      elements_v.data(), d_row_data_dev, d_col_data_dev, csr_size);
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  }
}
// Augmentation pass: compacts the rows marked AUGMENT (roots of completed
// alternating paths) and flips row/column assignments along each recorded
// path, increasing the matching size by one per path.
template <typename vertex_t, typename weight_t>
inline void augmentationPass(raft::resources const& handle,
                             Vertices<vertex_t, weight_t>& d_vertices_dev,
                             VertexData<vertex_t>& d_row_data_dev,
                             VertexData<vertex_t>& d_col_data_dev,
                             int SP,
                             int N)
{
  int total_blocks = 0;
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  detail::calculateLinearDims(blocks_per_grid, threads_per_block, total_blocks, SP * N);
  rmm::device_uvector<bool> predicates_v(SP * N, resource::get_cuda_stream(handle));
  rmm::device_uvector<vertex_t> addresses_v(SP * N, resource::get_cuda_stream(handle));
  thrust::fill_n(thrust::device, predicates_v.data(), SP * N, false);
  thrust::fill_n(thrust::device, addresses_v.data(), SP * N, vertex_t{0});
  // compact the augmentation-candidate row vertices (REVERSE/AUGMENT states).
  kernel_augmentPredicateConstruction<<<blocks_per_grid,
                                        threads_per_block,
                                        0,
                                        resource::get_cuda_stream(handle)>>>(
    predicates_v.data(), addresses_v.data(), d_row_data_dev.is_visited, SP * N);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  // calculate total number of vertices.
  // TODO: should be vertex_t
  vertex_t row_ids_csr_size =
    thrust::reduce(thrust::device, addresses_v.begin(), addresses_v.end());
  // exclusive scan for calculating the scatter addresses.
  thrust::exclusive_scan(
    thrust::device, addresses_v.begin(), addresses_v.end(), addresses_v.begin());
  if (row_ids_csr_size > 0) {
    int total_blocks_1 = 0;
    dim3 blocks_per_grid_1;
    dim3 threads_per_block_1;
    detail::calculateLinearDims(
      blocks_per_grid_1, threads_per_block_1, total_blocks_1, row_ids_csr_size);
    rmm::device_uvector<vertex_t> elements_v(row_ids_csr_size, resource::get_cuda_stream(handle));
    kernel_augmentScatter<<<blocks_per_grid,
                            threads_per_block,
                            0,
                            resource::get_cuda_stream(handle)>>>(
      elements_v.data(), predicates_v.data(), addresses_v.data(), vertex_t{SP * N});
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
    kernel_augmentation<<<blocks_per_grid_1,
                          threads_per_block_1,
                          0,
                          resource::get_cuda_stream(handle)>>>(d_vertices_dev.row_assignments,
                                                               d_vertices_dev.col_assignments,
                                                               elements_v.data(),
                                                               d_row_data_dev,
                                                               d_col_data_dev,
                                                               vertex_t{N},
                                                               row_ids_csr_size);
    RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  }
}
// Dual update (Step 5): phase 1 finds, per subproblem, the minimum slack over
// uncovered columns; phase 2 shifts duals/slacks by that amount and reactivates
// the parents of columns whose slack just reached zero.
template <typename vertex_t, typename weight_t>
inline void dualUpdate(raft::resources const& handle,
                       Vertices<vertex_t, weight_t>& d_vertices_dev,
                       VertexData<vertex_t>& d_row_data_dev,
                       VertexData<vertex_t>& d_col_data_dev,
                       int SP,
                       vertex_t N,
                       weight_t epsilon)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks;
  // One minimum-slack value per subproblem.
  rmm::device_uvector<weight_t> sp_min_v(SP, resource::get_cuda_stream(handle));
  detail::calculateLinearDims(blocks_per_grid, threads_per_block, total_blocks, SP);
  kernel_dualUpdate_1<<<blocks_per_grid, threads_per_block, 0, resource::get_cuda_stream(handle)>>>(
    sp_min_v.data(),
    d_vertices_dev.col_slacks,
    d_vertices_dev.col_covers,
    SP,
    N,
    std::numeric_limits<weight_t>::max());
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
  detail::calculateRectangularDims(blocks_per_grid, threads_per_block, total_blocks, N, SP);
  kernel_dualUpdate_2<<<blocks_per_grid, threads_per_block, 0, resource::get_cuda_stream(handle)>>>(
    sp_min_v.data(),
    d_vertices_dev.row_duals,
    d_vertices_dev.col_duals,
    d_vertices_dev.col_slacks,
    d_vertices_dev.row_covers,
    d_vertices_dev.col_covers,
    d_row_data_dev.is_visited,
    d_col_data_dev.parents,
    SP,
    N,
    std::numeric_limits<weight_t>::max(),
    epsilon);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
// Computes, per subproblem, the dual objective value (sum of row and column
// duals) into d_obj_val (device array of length SP).
template <typename vertex_t, typename weight_t>
inline void calcObjValDual(raft::resources const& handle,
                           weight_t* d_obj_val,
                           Vertices<vertex_t, weight_t>& d_vertices_dev,
                           int SP,
                           int N)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks = 0;
  detail::calculateLinearDims(blocks_per_grid, threads_per_block, total_blocks, SP);
  kernel_calcObjValDual<<<blocks_per_grid,
                          threads_per_block,
                          0,
                          resource::get_cuda_stream(handle)>>>(
    d_obj_val, d_vertices_dev.row_duals, d_vertices_dev.col_duals, SP, N);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
// Computes, per subproblem, the primal objective value (sum of costs of the
// assigned edges) into d_obj_val (device array of length SP).
template <typename vertex_t, typename weight_t>
inline void calcObjValPrimal(raft::resources const& handle,
                             weight_t* d_obj_val,
                             weight_t const* d_costs,
                             vertex_t const* d_row_assignments,
                             int SP,
                             vertex_t N)
{
  dim3 blocks_per_grid;
  dim3 threads_per_block;
  int total_blocks = 0;
  detail::calculateLinearDims(blocks_per_grid, threads_per_block, total_blocks, SP);
  kernel_calcObjValPrimal<<<blocks_per_grid,
                            threads_per_block,
                            0,
                            resource::get_cuda_stream(handle)>>>(
    d_obj_val, d_costs, d_row_assignments, SP, N);
  RAFT_CHECK_CUDA(resource::get_cuda_stream(handle));
}
} // namespace raft::solver::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/solver | rapidsai_public_repos/raft/cpp/include/raft/solver/detail/lap_kernels.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright 2020 KETAN DATE & RAKESH NAGI
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* CUDA Implementation of O(n^3) alternating tree Hungarian Algorithm
* Authors: Ketan Date and Rakesh Nagi
*
* Article reference:
* Date, Ketan, and Rakesh Nagi. "GPU-accelerated Hungarian algorithms
* for the Linear Assignment Problem." Parallel Computing 57 (2016): 52-72.
*
*/
#pragma once
#include "../linear_assignment_types.hpp"
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <cstddef>
namespace raft::solver::detail {
const int DORMANT{0};
const int ACTIVE{1};
const int VISITED{2};
const int REVERSE{3};
const int AUGMENT{4};
const int MODIFIED{5};
// True when |w| < epsilon, i.e. the slack counts as a "zero" edge.
template <typename weight_t>
bool __device__ near_zero(weight_t w, weight_t epsilon)
{
  return ((w > -epsilon) && (w < epsilon));
}
// Integer weights use exact zero; epsilon is intentionally ignored.
template <>
bool __device__ near_zero<int32_t>(int32_t w, int32_t epsilon)
{
  return (w == 0);
}
template <>
bool __device__ near_zero<int64_t>(int64_t w, int64_t epsilon)
{
  return (w == 0);
}
// Device function: relaxes one (row, column) edge during tree growth. Updates
// the column's minimum slack/parent, and when the slack is (near) zero either
// extends the tree through the row currently assigned to that column, or — if
// the column is unassigned — marks it REVERSE and raises *d_flag to signal
// that an augmenting path exists (triggering Step 4).
template <typename vertex_t, typename weight_t>
__device__ void cover_and_expand_row(weight_t const* d_elements,
                                     weight_t const* d_row_duals,
                                     weight_t const* d_col_duals,
                                     weight_t* d_col_slacks,
                                     int* d_row_covers,
                                     int* d_col_covers,
                                     vertex_t const* d_col_assignments,
                                     bool* d_flag,
                                     vertex_t* d_row_parents,
                                     vertex_t* d_col_parents,
                                     int* d_row_visited,
                                     int* d_col_visited,
                                     vertex_t rowid,
                                     int spid,
                                     int colid,
                                     vertex_t N,
                                     weight_t epsilon)
{
  // Flattened (subproblem, vertex) indices.
  int ROWID = spid * N + rowid;
  int COLID = spid * N + colid;
  // Reduced cost of the edge under the current duals.
  weight_t slack =
    d_elements[spid * N * N + rowid * N + colid] - d_row_duals[ROWID] - d_col_duals[COLID];
  int nxt_rowid = d_col_assignments[COLID];
  int NXT_ROWID = spid * N + nxt_rowid;
  if (rowid != nxt_rowid && d_col_covers[COLID] == 0) {
    // Track the tightest edge into this column.
    if (slack < d_col_slacks[COLID]) {
      d_col_slacks[COLID] = slack;
      d_col_parents[COLID] = ROWID;
    }
    if (near_zero(d_col_slacks[COLID], epsilon)) {
      if (nxt_rowid != -1) {
        // Column is matched: pull its row into the tree and cover the column.
        d_row_parents[NXT_ROWID] = COLID;  // update parent info
        d_row_covers[NXT_ROWID] = 0;
        d_col_covers[COLID] = 1;
        if (d_row_visited[NXT_ROWID] != VISITED) d_row_visited[NXT_ROWID] = ACTIVE;
      } else {
        // Unassigned column reached: augmenting path found.
        d_col_visited[COLID] = REVERSE;
        *d_flag = true;
      }
    }
  }
  d_row_visited[ROWID] = VISITED;
}
// Device function: walks parent links from an unassigned column back to the
// tree root, recording child links along the way, and marks the root row
// AUGMENT. NOTE(review): if cur_colid's chain were empty (-1 immediately),
// d_row_visited[-1] would be written — presumably callers only pass columns
// marked REVERSE, which have valid parents; confirm.
template <typename vertex_t>
__device__ void __reverse_traversal(int* d_row_visited,
                                    vertex_t* d_row_children,
                                    vertex_t* d_col_children,
                                    vertex_t const* d_row_parents,
                                    vertex_t const* d_col_parents,
                                    int cur_colid)
{
  int cur_rowid = -1;
  while (cur_colid != -1) {
    d_col_children[cur_colid] = cur_rowid;
    cur_rowid = d_col_parents[cur_colid];
    d_row_children[cur_rowid] = cur_colid;
    cur_colid = d_row_parents[cur_rowid];
  }
  d_row_visited[cur_rowid] = AUGMENT;
}
// Device function: follows the recorded child links from the root row and
// flips assignments along the path. Indices are flattened (spid * N + id);
// the % N converts them back to local row/column ids.
template <typename vertex_t>
__device__ void __augment(vertex_t* d_row_assignments,
                          vertex_t* d_col_assignments,
                          vertex_t const* d_row_children,
                          vertex_t const* d_col_children,
                          vertex_t cur_rowid,
                          vertex_t N)
{
  int cur_colid = -1;
  while (cur_rowid != -1) {
    cur_colid = d_row_children[cur_rowid];
    d_row_assignments[cur_rowid] = cur_colid % N;
    d_col_assignments[cur_colid] = cur_rowid % N;
    cur_rowid = d_col_children[cur_colid];
  }
}
// One thread per (subproblem, row): scans the row and stores its minimum cost
// as the row dual (the costs themselves are left untouched; the subtraction is
// implicit in later slack computations).
// FIXME: Once cuda 10.2 is the standard should replace passing infinity
// here with using cuda::std::numeric_limits<weight_t>::max()
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_rowReduction(
  weight_t const* d_costs, weight_t* d_row_duals, int SP, vertex_t N, weight_t infinity)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int rowid = blockIdx.x * blockDim.x + threadIdx.x;
  weight_t min = infinity;
  if (spid < SP && rowid < N) {
    for (int colid = 0; colid < N; colid++) {
      weight_t slack = d_costs[spid * N * N + rowid * N + colid];
      if (slack < min) { min = slack; }
    }
    d_row_duals[spid * N + rowid] = min;
  }
}
// One thread per (subproblem, column): stores the minimum row-reduced cost
// (cost - row dual) of the column as the column dual.
// FIXME: Once cuda 10.2 is the standard should replace passing infinity
// here with using cuda::std::numeric_limits<weight_t>::max()
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_columnReduction(weight_t const* d_costs,
                                   weight_t const* d_row_duals,
                                   weight_t* d_col_duals,
                                   int SP,
                                   vertex_t N,
                                   weight_t infinity)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int colid = blockIdx.x * blockDim.x + threadIdx.x;
  weight_t min = infinity;
  if (spid < SP && colid < N) {
    for (int rowid = 0; rowid < N; rowid++) {
      weight_t cost = d_costs[spid * N * N + rowid * N + colid];
      weight_t row_dual = d_row_duals[spid * N + rowid];
      weight_t slack = cost - row_dual;
      if (slack < min) { min = slack; }
    }
    d_col_duals[spid * N + colid] = min;
  }
}
// One thread per (subproblem, column): scans rows for a zero-slack edge and
// claims the first available row via atomicCAS on the row lock; winning
// threads record the mutual assignment and close the column lock so other
// rows stop contending for this column.
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_computeInitialAssignments(weight_t const* d_costs,
                                             weight_t const* d_row_duals,
                                             weight_t const* d_col_duals,
                                             vertex_t* d_row_assignments,
                                             vertex_t* d_col_assignments,
                                             int* d_row_lock,
                                             int* d_col_lock,
                                             int SP,
                                             vertex_t N,
                                             weight_t epsilon)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int colid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP && colid < N) {
    int overall_colid = spid * N + colid;
    weight_t col_dual = d_col_duals[overall_colid];
    for (vertex_t rowid = 0; rowid < N; rowid++) {
      int overall_rowid = spid * N + rowid;
      // Column already matched (possibly by this thread on an earlier row).
      if (d_col_lock[overall_colid] == 1) break;
      weight_t cost = d_costs[spid * N * N + rowid * N + colid];
      weight_t row_dual = d_row_duals[overall_rowid];
      weight_t slack = cost - row_dual - col_dual;
      if (near_zero(slack, epsilon)) {
        // Atomically claim the row; only one column can win it.
        if (atomicCAS(&d_row_lock[overall_rowid], 0, 1) == 0) {
          d_row_assignments[overall_rowid] = colid;
          d_col_assignments[overall_colid] = rowid;
          d_col_lock[overall_colid] = 1;
        }
      }
    }
  }
}
// One thread per (subproblem, row): assigned rows get cover = 1; unassigned
// rows are marked ACTIVE, making them roots of the alternating trees.
template <typename vertex_t>
RAFT_KERNEL kernel_computeRowCovers(
  vertex_t* d_row_assignments, int* d_row_covers, int* d_row_visited, int SP, vertex_t N)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int rowid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP && rowid < N) {
    int index = spid * N + rowid;
    if (d_row_assignments[index] != -1) {
      d_row_covers[index] = 1;
    } else {
      d_row_visited[index] = ACTIVE;
    }
  }
}
// One thread per (subproblem, row): writes predicate/count = 1 for ACTIVE
// rows (the current tree frontier), 0 otherwise. The counts are later scanned
// into scatter addresses for stream compaction.
template <typename vertex_t>
RAFT_KERNEL kernel_rowPredicateConstructionCSR(
  bool* d_predicates, vertex_t* d_addresses, int* d_row_visited, int SP, vertex_t N)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int rowid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP && rowid < N) {
    int index = spid * N + rowid;
    if (d_row_visited[index] == ACTIVE) {
      d_predicates[index] = true;
      d_addresses[index] = 1;
    } else {
      d_predicates[index] = false;
      d_addresses[index] = 0;
    }
  }
}
// One thread per (subproblem, row): scatters the ACTIVE row ids into the CSR
// neighbor list at their scanned addresses. The thread handling rowid 0 also
// writes the subproblem's CSR row pointer and the terminating d_ptrs[SP] = M.
template <typename vertex_t>
RAFT_KERNEL kernel_rowScatterCSR(bool const* d_predicates,
                                 vertex_t const* d_addresses,
                                 vertex_t* d_neighbors,
                                 vertex_t* d_ptrs,
                                 vertex_t M,
                                 int SP,
                                 vertex_t N)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int rowid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP && rowid < N) {
    int index = spid * N + rowid;
    bool predicate = d_predicates[index];
    vertex_t compid = d_addresses[index];
    if (predicate) { d_neighbors[compid] = rowid; }
    if (rowid == 0) {
      d_ptrs[spid] = compid;
      d_ptrs[SP] = M;
    }
  }
}
// One thread per (subproblem, column): iterates the subproblem's frontier
// rows (CSR segment d_ptrs[spid]..d_ptrs[spid+1]) and relaxes each
// (row, column) edge via cover_and_expand_row, growing the alternating tree.
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_coverAndExpand(bool* d_flag,
                                  vertex_t const* d_ptrs,
                                  vertex_t const* d_neighbors,
                                  weight_t const* d_elements,
                                  Vertices<vertex_t, weight_t> d_vertices,
                                  VertexData<vertex_t> d_row_data,
                                  VertexData<vertex_t> d_col_data,
                                  int SP,
                                  vertex_t N,
                                  weight_t epsilon)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int colid = blockIdx.x * blockDim.x + threadIdx.x;
  // Load values into local memory
  if (spid < SP && colid < N) {
    thrust::for_each(
      thrust::seq,
      d_neighbors + d_ptrs[spid],
      d_neighbors + d_ptrs[spid + 1],
      [d_elements, d_vertices, d_flag, d_row_data, d_col_data, spid, colid, N, epsilon] __device__(
        vertex_t rowid) {
        cover_and_expand_row(d_elements,
                             d_vertices.row_duals,
                             d_vertices.col_duals,
                             d_vertices.col_slacks,
                             d_vertices.row_covers,
                             d_vertices.col_covers,
                             d_vertices.col_assignments,
                             d_flag,
                             d_row_data.parents,
                             d_col_data.parents,
                             d_row_data.is_visited,
                             d_col_data.is_visited,
                             rowid,
                             spid,
                             colid,
                             N,
                             epsilon);
      });
  }
}
// One thread per vertex: flags vertices whose visit state is REVERSE or
// AUGMENT (candidates for path tracing / augmentation) for stream compaction.
template <typename vertex_t>
RAFT_KERNEL kernel_augmentPredicateConstruction(bool* d_predicates,
                                                vertex_t* d_addresses,
                                                int* d_visited,
                                                int size)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < size) {
    int visited = d_visited[id];
    if ((visited == REVERSE) || (visited == AUGMENT)) {
      d_predicates[id] = true;
      d_addresses[id] = 1;
    } else {
      d_predicates[id] = false;
      d_addresses[id] = 0;
    }
  }
}
// One thread per vertex: writes each flagged vertex's flattened id into the
// compacted output at its exclusive-scan address.
template <typename vertex_t>
RAFT_KERNEL kernel_augmentScatter(vertex_t* d_elements,
                                  bool const* d_predicates,
                                  vertex_t const* d_addresses,
                                  std::size_t size)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < size) {
    if (d_predicates[id]) { d_elements[d_addresses[id]] = id; }
  }
}
// One thread per REVERSE column (compacted list): traces the alternating path
// back to its root row, recording child links and marking the root AUGMENT.
template <typename vertex_t>
RAFT_KERNEL kernel_reverseTraversal(vertex_t* d_elements,
                                    VertexData<vertex_t> d_row_data,
                                    VertexData<vertex_t> d_col_data,
                                    int size)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < size) {
    __reverse_traversal(d_row_data.is_visited,
                        d_row_data.children,
                        d_col_data.children,
                        d_row_data.parents,
                        d_col_data.parents,
                        d_elements[id]);
  }
}
// One thread per AUGMENT root row (compacted list): flips row/column
// assignments along the recorded child chain, growing the matching.
template <typename vertex_t>
RAFT_KERNEL kernel_augmentation(vertex_t* d_row_assignments,
                                vertex_t* d_col_assignments,
                                vertex_t const* d_row_elements,
                                VertexData<vertex_t> d_row_data,
                                VertexData<vertex_t> d_col_data,
                                vertex_t N,
                                vertex_t size)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < size) {
    __augment(d_row_assignments,
              d_col_assignments,
              d_row_data.children,
              d_col_data.children,
              d_row_elements[id],
              N);
  }
}
// Dual update, phase 1: one thread per subproblem scans the uncovered columns
// and records the minimum slack (theta) into d_sp_min.
// FIXME: Once cuda 10.2 is the standard should replace passing infinity
// here with using cuda::std::numeric_limits<weight_t>::max()
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_dualUpdate_1(weight_t* d_sp_min,
                                weight_t const* d_col_slacks,
                                int const* d_col_covers,
                                int SP,
                                vertex_t N,
                                weight_t infinity)
{
  int spid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP) {
    weight_t min = infinity;
    for (int colid = 0; colid < N; colid++) {
      int index = spid * N + colid;
      weight_t slack = d_col_slacks[index];
      int col_cover = d_col_covers[index];
      if (col_cover == 0)
        if (slack < min) min = slack;
    }
    d_sp_min[spid] = min;
  }
}
// Kernel for updating the dual values in Step 5.
// 2D launch: y indexes the subproblem (spid), x indexes the vertex (id). Each
// thread adjusts one (subproblem, vertex) dual/slack entry by theta, the
// subproblem's minimum uncovered slack computed in kernel_dualUpdate_1.
// FIXME: Once cuda 10.2 is the standard should replace passing infinity
// here with using cuda::std::numeric_limits<weight_t>::max()
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_dualUpdate_2(weight_t const* d_sp_min,
                                weight_t* d_row_duals,
                                weight_t* d_col_duals,
                                weight_t* d_col_slacks,
                                int const* d_row_covers,
                                int const* d_col_covers,
                                int* d_row_visited,
                                vertex_t* d_col_parents,
                                int SP,
                                vertex_t N,
                                weight_t infinity,
                                weight_t epsilon)
{
  int spid = blockIdx.y * blockDim.y + threadIdx.y;
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP && id < N) {
    int index = spid * N + id;
    // No update when kernel_dualUpdate_1 found no uncovered column (min stayed
    // at infinity) for this subproblem.
    if (d_sp_min[spid] < infinity) {
      weight_t theta = d_sp_min[spid];
      int row_cover = d_row_covers[index];
      int col_cover = d_col_covers[index];
      if (row_cover == 0) // Row vertex is reachable from source.
        d_row_duals[index] += theta;
      if (col_cover == 1) // Col vertex is reachable from source.
        d_col_duals[index] -= theta;
      else {
        // Col vertex is unreachable from source.
        d_col_slacks[index] -= d_sp_min[spid];
        // A slack that just reached (near) zero exposes a newly admissible
        // edge: reactivate the recorded parent row so the search resumes there.
        if (near_zero(d_col_slacks[index], epsilon)) {
          int par_rowid = d_col_parents[index];
          if (par_rowid != -1) d_row_visited[par_rowid] = ACTIVE;
        }
      }
    }
  }
}
// Kernel for calculating optimal objective function value using dual variables.
// One thread per subproblem: sums the row and column duals of that subproblem
// into d_obj_val_dual[spid].
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_calcObjValDual(weight_t* d_obj_val_dual,
                                  weight_t const* d_row_duals,
                                  weight_t const* d_col_duals,
                                  int SP,
                                  vertex_t N)
{
  int spid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid < SP) {
    // Accumulate in weight_t (was hard-coded `float`), so double-precision
    // duals are not truncated; matches kernel_calcObjValPrimal.
    weight_t val = 0;
    for (int i = 0; i < N; i++)
      val += (d_row_duals[spid * N + i] + d_col_duals[spid * N + i]);
    d_obj_val_dual[spid] = val;
  }
}
// Kernel for calculating the optimal objective function value from the primal
// assignments: one thread per subproblem sums the cost of each row's assigned
// column using that subproblem's N x N slice of d_costs.
template <typename vertex_t, typename weight_t>
RAFT_KERNEL kernel_calcObjValPrimal(weight_t* d_obj_val_primal,
                                    weight_t const* d_costs,
                                    vertex_t const* d_row_assignments,
                                    int SP,
                                    vertex_t N)
{
  int const spid = blockIdx.x * blockDim.x + threadIdx.x;
  if (spid >= SP) { return; }
  weight_t total = 0;
  for (int row = 0; row < N; row++) {
    vertex_t const col = d_row_assignments[spid * N + row];
    total += d_costs[spid * N * N + row * N + col];
  }
  d_obj_val_primal[spid] = total;
}
} // namespace raft::solver::detail | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/mdspan.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/error.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/memory_type.hpp>
#include <raft/core/detail/macros.hpp>
#include <raft/core/detail/mdspan_util.cuh>
#include <raft/core/host_device_accessor.hpp>
#include <raft/thirdparty/mdspan/include/experimental/mdspan>
namespace raft {
/**
 * @brief raft's mdspan alias over the reference implementation
 * (std::experimental::mdspan), defaulting to a C-contiguous (row-major)
 * layout and the default accessor.
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy = layout_c_contiguous,
          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using mdspan = std::experimental::mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
namespace detail {
/**
 * @brief Number of elements per padded unit so that each padded row/column
 * starts on a ByteAlignment-byte boundary for the given element type.
 *
 * keeping ByteAlignment as optional to allow testing
 */
template <class ValueType, size_t ByteAlignment = 128>
struct padding {
  // Fixed diagnostics: these asserts previously misattributed the trait to
  // "std::experimental::padding"; it lives in raft::detail.
  static_assert(std::is_same<std::remove_cv_t<ValueType>, ValueType>::value,
                "raft::detail::padding ValueType has to be provided without "
                "const or volatile specifiers.");
  static_assert(ByteAlignment % sizeof(ValueType) == 0 || sizeof(ValueType) % ByteAlignment == 0,
                "raft::detail::padding sizeof(ValueType) has to be multiple or "
                "divider of ByteAlignment.");
  // At least one element per unit, even when sizeof(ValueType) > ByteAlignment.
  static constexpr size_t value = std::max(ByteAlignment / sizeof(ValueType), 1ul);
};

// alignment fixed to 128 bytes
struct alignment {
  static constexpr size_t value = 128;
};
}  // namespace detail
// Padded layout aliases: each padded row (right) or column (left) starts on a
// 128-byte boundary for the given element type (see detail::padding above).
template <typename ElementType>
using layout_right_padded = std::experimental::layout_right_padded<
  detail::padding<std::remove_cv_t<std::remove_reference_t<ElementType>>>::value>;

template <typename ElementType>
using layout_left_padded = std::experimental::layout_left_padded<
  detail::padding<std::remove_cv_t<std::remove_reference_t<ElementType>>>::value>;

// SFINAE helper: enabled only for the two padded layouts above.
template <typename ElementType, typename LayoutPolicy>
using enable_if_layout_padded =
  std::enable_if_t<std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
                   std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value>;

/**
 * Ensure all types listed in the parameter pack `Extents` are integral types.
 * Usage:
 *   put it as the last nameless template parameter of a function:
 *   `typename = ensure_integral_extents<Extents...>`
 */
template <typename... Extents>
using ensure_integral_extents = std::enable_if_t<std::conjunction_v<std::is_integral<Extents>...>>;
/**
 * @brief Template checks and helpers to determine if type T is an std::mdspan
 * or a derived type
 */
// Detection helper: overload resolution succeeds only if T* converts to a
// pointer to some mdspan instantiation (declared, never defined).
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
void __takes_an_mdspan_ptr(mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>*);

// Primary template: T is not an mdspan.
template <typename T, typename = void>
struct is_mdspan : std::false_type {};
// Specialization selected when the detection helper is callable with T*.
template <typename T>
struct is_mdspan<T, std::void_t<decltype(__takes_an_mdspan_ptr(std::declval<T*>()))>>
  : std::true_type {};

template <typename T, typename = void>
struct is_input_mdspan : std::false_type {};
// An "input" mdspan views const elements (read-only).
template <typename T>
struct is_input_mdspan<T, std::void_t<decltype(__takes_an_mdspan_ptr(std::declval<T*>()))>>
  : std::bool_constant<std::is_const_v<typename T::element_type>> {};

template <typename T, typename = void>
struct is_output_mdspan : std::false_type {};
// An "output" mdspan views non-const elements (writable).
template <typename T>
struct is_output_mdspan<T, std::void_t<decltype(__takes_an_mdspan_ptr(std::declval<T*>()))>>
  : std::bool_constant<not std::is_const_v<typename T::element_type>> {};

template <typename T>
using is_mdspan_t = is_mdspan<std::remove_const_t<T>>;

template <typename T>
using is_input_mdspan_t = is_input_mdspan<T>;

template <typename T>
using is_output_mdspan_t = is_output_mdspan<T>;

/**
 * @brief Boolean to determine if variadic template types Tn are either
 * raft::host_mdspan/raft::device_mdspan or their derived types
 */
template <typename... Tn>
inline constexpr bool is_mdspan_v = std::conjunction_v<is_mdspan_t<Tn>...>;

template <typename... Tn>
using enable_if_mdspan = std::enable_if_t<is_mdspan_v<Tn...>>;

template <typename... Tn>
inline constexpr bool is_input_mdspan_v = std::conjunction_v<is_input_mdspan_t<Tn>...>;

template <typename... Tn>
using enable_if_input_mdspan = std::enable_if_t<is_input_mdspan_v<Tn...>>;

template <typename... Tn>
inline constexpr bool is_output_mdspan_v = std::conjunction_v<is_output_mdspan_t<Tn>...>;

template <typename... Tn>
using enable_if_output_mdspan = std::enable_if_t<is_output_mdspan_v<Tn...>>;
// uint division optimization inspired by the CIndexer in cupy. Division operation is
// slow on both CPU and GPU, especially 64 bit integer. So here we first try to avoid 64
// bit when the index is smaller, then try to avoid division when it's exp of 2.
template <typename I, typename IndexType, size_t... Extents>
RAFT_INLINE_FUNCTION auto unravel_index_impl(
  I idx, std::experimental::extents<IndexType, Extents...> shape)
{
  constexpr auto kRank = static_cast<int32_t>(shape.rank());
  std::size_t index[shape.rank()]{0}; // NOLINT
  static_assert(std::is_signed<decltype(kRank)>::value,
                "Don't change the type without changing the for loop.");
  // Peel off dimensions from the innermost (last) to dimension 1; dimension 0
  // receives whatever quotient remains.
  for (int32_t dim = kRank; --dim > 0;) {
    auto s = static_cast<std::remove_const_t<std::remove_reference_t<I>>>(shape.extent(dim));
    // `s & (s - 1)` is nonzero iff s is NOT a power of two.
    if (s & (s - 1)) {
      // General case: one division; remainder via multiply-subtract.
      auto t = idx / s;
      index[dim] = idx - t * s;
      idx = t;
    } else { // exp of 2
      // Power-of-two extent: mask for the remainder, shift for the quotient.
      index[dim] = idx & (s - 1);
      idx >>= detail::popc(s - 1);
    }
  }
  index[0] = idx;
  return detail::arr_to_tup(index);
}
/**
 * @brief Create a raft::mdspan
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @tparam is_host_accessible whether the data is accessible on host
 * @tparam is_device_accessible whether the data is accessible on device
 * @param ptr Pointer to the data
 * @param exts dimensionality of the array (series of integers)
 * @return raft::mdspan
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          bool is_host_accessible = false,
          bool is_device_accessible = true,
          size_t... Extents>
constexpr auto make_mdspan(ElementType* ptr, extents<IndexType, Extents...> exts)
{
  // Wrap the default accessor so the resulting mdspan type records host/device
  // accessibility.
  using accessor_type = host_device_accessor<
    std::experimental::default_accessor<ElementType>,
    detail::memory_type_from_access<is_host_accessible, is_device_accessible>()>;
  return mdspan<ElementType, decltype(exts), LayoutPolicy, accessor_type>{ptr, exts};
}
/**
 * @brief Create a layout_stride mapping from extents and strides
 * @tparam Extents the extents type (raft::extents)
 * @tparam Strides an array-like of per-dimension strides
 * @param[in] extents the dimensionality of the layout
 * @param[in] strides the strides between elements in the layout
 * @return raft::layout_stride::mapping<Extents>
 */
template <typename Extents, typename Strides>
auto make_strided_layout(Extents extents, Strides strides)
{
  return layout_stride::mapping<Extents>{extents, strides};
}
/**
 * @brief Create raft::extents to specify dimensionality
 *
 * @tparam IndexType The type of each dimension of the extents
 * @tparam Extents Dimensions (a series of integers)
 * @param exts The desired dimensions
 * @return raft::extents
 */
template <typename IndexType, typename... Extents, typename = ensure_integral_extents<Extents...>>
constexpr auto make_extents(Extents... exts)
{
  // `((void)exts, dynamic_extent)` expands to one dynamic_extent per argument,
  // yielding an all-dynamic extents type whose runtime sizes are `exts...`.
  return extents<IndexType, ((void)exts, dynamic_extent)...>{exts...};
}
/**
 * @defgroup mdspan_reshape Flatten and reshape mdspan
 * @{
 */
/**
 * @brief Flatten raft::mdspan into a 1-dim array view
 *
 * @tparam mdspan_type Expected type raft::host_mdspan or raft::device_mdspan
 * @param mds raft::host_mdspan or raft::device_mdspan object
 * @return raft::host_mdspan or raft::device_mdspan with vector_extent
 * depending on AccessorPolicy
 */
template <typename mdspan_type, typename = enable_if_mdspan<mdspan_type>>
auto flatten(mdspan_type mds)
{
  // A flattened view is only meaningful over contiguous storage.
  RAFT_EXPECTS(mds.is_exhaustive(), "Input must be contiguous.");
  // 1-D extent covering all elements; element/layout/accessor types unchanged.
  vector_extent<typename mdspan_type::size_type> ext{mds.size()};
  return std::experimental::mdspan<typename mdspan_type::element_type,
                                   decltype(ext),
                                   typename mdspan_type::layout_type,
                                   typename mdspan_type::accessor_type>(mds.data_handle(), ext);
}
/**
 * @brief Reshape raft::host_mdspan or raft::device_mdspan
 *
 * @tparam mdspan_type Expected type raft::host_mdspan or raft::device_mdspan
 * @tparam IndexType the index type of the extents
 * @tparam Extents raft::extents for dimensions
 * @param mds raft::host_mdspan or raft::device_mdspan object
 * @param new_shape Desired new shape of the input
 * @return raft::host_mdspan or raft::device_mdspan, depending on AccessorPolicy
 */
template <typename mdspan_type,
          typename IndexType = std::uint32_t,
          size_t... Extents,
          typename = enable_if_mdspan<mdspan_type>>
auto reshape(mdspan_type mds, extents<IndexType, Extents...> new_shape)
{
  // Reshaping only reinterprets contiguous storage; no data is moved.
  RAFT_EXPECTS(mds.is_exhaustive(), "Input must be contiguous.");
  // The new shape must cover exactly the same number of elements.
  size_t new_size = 1;
  for (size_t i = 0; i < new_shape.rank(); ++i) {
    new_size *= new_shape.extent(i);
  }
  RAFT_EXPECTS(new_size == mds.size(), "Cannot reshape array with size mismatch");
  return std::experimental::mdspan<typename mdspan_type::element_type,
                                   decltype(new_shape),
                                   typename mdspan_type::layout_type,
                                   typename mdspan_type::accessor_type>(mds.data_handle(),
                                                                        new_shape);
}
/* @} */
/**
* @defgroup mdspan_unravel Unravel mdspan
* @{
*/
/**
 * \brief Turns linear index into coordinate. Similar to numpy unravel_index.
 *
 * \code
 *   auto m   = make_host_matrix<float>(7, 6);
 *   auto m_v = m.view();
 *   auto coord = unravel_index(2, m.extents(), typename decltype(m)::layout_type{});
 *   std::apply(m_v, coord) = 2;
 * \endcode
 *
 * \param idx The linear index.
 * \param shape The shape of the array to use.
 * \param layout Must be `layout_c_contiguous` (row-major) in current implementation.
 *
 * \return A std::tuple that represents the coordinate.
 */
template <typename Idx, typename IndexType, typename LayoutPolicy, size_t... Exts>
RAFT_INLINE_FUNCTION auto unravel_index(Idx idx,
                                        extents<IndexType, Exts...> shape,
                                        LayoutPolicy const& layout)
{
  static_assert(std::is_same_v<std::remove_cv_t<std::remove_reference_t<decltype(layout)>>,
                               layout_c_contiguous>,
                "Only C layout is supported.");
  static_assert(std::is_integral_v<Idx>, "Index must be integral.");
  // Prefer 32-bit arithmetic whenever the index fits: 64-bit integer division
  // is substantially slower (see unravel_index_impl).
  auto constexpr kIs64 = sizeof(std::remove_cv_t<std::remove_reference_t<Idx>>) == sizeof(uint64_t);
  if (kIs64 && static_cast<uint64_t>(idx) > std::numeric_limits<uint32_t>::max()) {
    return unravel_index_impl<uint64_t>(static_cast<uint64_t>(idx), shape);
  } else {
    return unravel_index_impl<uint32_t>(static_cast<uint32_t>(idx), shape);
  }
}
/** @} */
/**
* @defgroup mdspan_contiguous Whether the strides imply a contiguous layout.
* @{
*/
/**
 * @brief Whether the strides imply a c-contiguous (row-major) layout.
 *
 * Walks dimensions from the innermost (last) outward, checking that each
 * stride equals the product of all faster-varying extents.
 */
template <typename Extents, typename Strides>
[[nodiscard]] auto is_c_contiguous(Extents const& extents, Strides const& strides) -> bool
{
  typename Extents::index_type expected = 1;
  auto dim = extents.rank();
  while (dim != 0) {
    --dim;
    if (strides[dim] != expected) { return false; }
    expected *= extents.extent(dim);
  }
  return true;
}
/**
 * @brief Whether the strides imply an f-contiguous (column-major) layout.
 *
 * Walks dimensions from the outermost (first) inward, checking that each
 * stride equals the product of all preceding extents.
 */
template <typename Extents, typename Strides>
[[nodiscard]] auto is_f_contiguous(Extents const& extents, Strides const& strides) -> bool
{
  using rank_t = typename Extents::rank_type;
  typename Extents::index_type expected = 1;
  rank_t const n_dims = extents.rank();
  for (rank_t dim = 0; dim != n_dims; ++dim) {
    if (strides[dim] != expected) { return false; }
    expected *= extents.extent(dim);
  }
  return true;
}
/** @} */
/**
 * @brief Const accessor specialization for default_accessor
 *
 * @tparam ElementType the (non-const) element type of the input accessor
 * @param a the accessor to convert
 * @return std::experimental::default_accessor<std::add_const_t<ElementType>>
 */
template <class ElementType>
std::experimental::default_accessor<std::add_const_t<ElementType>> accessor_of_const(
  std::experimental::default_accessor<ElementType> a)
{
  return {a};
}
/**
 * @brief Const accessor specialization for host_device_accessor
 *
 * @tparam ElementType the data type of the mdspan elements
 * @tparam MemType the type of memory where the elements are stored.
 * @param a host_device_accessor to convert; the memory type is preserved
 * @return host_device_accessor<std::experimental::default_accessor<std::add_const_t<ElementType>>,
 * MemType>
 */
template <class ElementType, memory_type MemType>
host_device_accessor<std::experimental::default_accessor<std::add_const_t<ElementType>>, MemType>
accessor_of_const(host_device_accessor<std::experimental::default_accessor<ElementType>, MemType> a)
{
  return {a};
}
/**
* @defgroup mdspan_make_const Convert an mdspan to a const type
* @{
*/
/**
 * @brief Create a copy of the given mdspan with const element type
 *
 * @tparam ElementType the const-qualified data type of the mdspan elements
 * @tparam Extents raft::extents for dimensions
 * @tparam Layout policy for strides and layout ordering
 * @tparam Accessor Accessor policy for the input and output
 * @param mds raft::mdspan object
 * @return raft::mdspan
 */
template <class ElementType, class Extents, class Layout, class Accessor>
auto make_const_mdspan(mdspan<ElementType, Extents, Layout, Accessor> mds)
{
  // Convert the accessor to its const counterpart (see accessor_of_const
  // overloads above); data pointer and mapping are reused unchanged.
  auto acc_c = accessor_of_const(mds.accessor());
  return mdspan<std::add_const_t<ElementType>, Extents, Layout, decltype(acc_c)>{
    mds.data_handle(), mds.mapping(), acc_c};
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/operators.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#include <raft/util/device_atomics.cuh>
namespace raft {
/**
* @defgroup DeviceFunctors Commonly used device-only functors.
* @{
*/
/**
 * @brief Device functor wrapping `atomicAdd`: atomically adds `val` to
 * `*address` and returns the value previously stored at `address`.
 */
struct atomic_add_op {
  template <typename Type>
  _RAFT_DEVICE _RAFT_FORCEINLINE Type operator()(Type* address, const Type& val)
  {
    return atomicAdd(address, val);
  }
};
/**
 * @brief Device functor wrapping `atomicMax`: atomically stores
 * `max(*address, val)` at `address` and returns the previous value.
 */
struct atomic_max_op {
  template <typename Type>
  _RAFT_DEVICE _RAFT_FORCEINLINE Type operator()(Type* address, const Type& val)
  {
    return atomicMax(address, val);
  }
};
/**
 * @brief Device functor wrapping `atomicMin`: atomically stores
 * `min(*address, val)` at `address` and returns the previous value.
 */
struct atomic_min_op {
  template <typename Type>
  _RAFT_DEVICE _RAFT_FORCEINLINE Type operator()(Type* address, const Type& val)
  {
    return atomicMin(address, val);
  }
};
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/coo_matrix.hpp |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/span.hpp>
#include <raft/core/sparse_types.hpp>
namespace raft {
/**
* \defgroup coo_matrix COO Matrix
* @{
*/
// Abstract base for COO (coordinate-format) structures: holds the matrix
// shape/sparsity and declares accessors for the rows and cols arrays.
template <typename RowType, typename ColType, typename NZType, int is_device>
class coordinate_structure_t : public sparse_structure<RowType, ColType, NZType, is_device> {
 public:
  /**
   * Constructor when the sparsity is already known
   * @param n_rows total number of rows in the matrix
   * @param n_cols total number of columns in the matrix
   * @param nnz number of structural nonzeros
   */
  coordinate_structure_t(RowType n_rows, ColType n_cols, NZType nnz)
    : sparse_structure<RowType, ColType, NZType, is_device>(n_rows, n_cols, nnz){};

  /**
   * Return span containing underlying rows array
   * @return span containing underlying rows array
   */
  virtual span<RowType, is_device> get_rows() = 0;

  /**
   * Return span containing underlying cols array
   * @return span containing underlying cols array
   */
  virtual span<ColType, is_device> get_cols() = 0;
};
/**
 * A non-owning view into a coordinate structure
 *
 * The structure representation does not have a value/weight
 * component so that its const-ness can be varied from it.
 *
 * The rows and cols spans each hold one entry per structural nonzero;
 * the sparsity is derived from rows.size().
 *
 * @tparam RowType
 * @tparam ColType
 */
template <typename RowType, typename ColType, typename NZType, bool is_device>
class coordinate_structure_view
  : public coordinate_structure_t<RowType, ColType, NZType, is_device> {
 public:
  // Views never change the number of nonzeros of the structure they wrap.
  static constexpr SparsityType sparsity_type = PRESERVING;
  using view_type = coordinate_structure_view<RowType, ColType, NZType, is_device>;
  using row_type = typename sparse_structure<RowType, ColType, NZType, is_device>::row_type;
  using col_type = typename sparse_structure<RowType, ColType, NZType, is_device>::col_type;
  using nnz_type = typename sparse_structure<RowType, ColType, NZType, is_device>::nnz_type;

  coordinate_structure_view(span<row_type, is_device> rows,
                            span<col_type, is_device> cols,
                            row_type n_rows,
                            col_type n_cols)
    : coordinate_structure_t<RowType, ColType, NZType, is_device>(n_rows, n_cols, rows.size()),
      rows_{rows},
      cols_{cols}
  {
  }

  /**
   * Return span containing underlying rows array
   * @return span containing underlying rows array
   */
  span<row_type, is_device> get_rows() override { return rows_; }

  /**
   * Return span containing underlying cols array
   * @return span containing underlying cols array
   */
  span<col_type, is_device> get_cols() override { return cols_; }

 protected:
  raft::span<row_type, is_device> rows_;
  raft::span<col_type, is_device> cols_;
};
/**
 * Represents a sparse coordinate structure (or edge list)
 * which can be used to model a COO matrix.
 *
 * The structure representation does not have a value/weight
 * component so that its const-ness can be varied from it.
 *
 * Both the rows and cols arrays hold exactly `nnz` entries (one
 * (row, col) pair per structural nonzero).
 *
 * @tparam RowType
 * @tparam ColType
 * @tparam ContainerPolicy
 */
template <typename RowType,
          typename ColType,
          typename NZType,
          bool is_device,
          template <typename T>
          typename ContainerPolicy>
class coordinate_structure : public coordinate_structure_t<RowType, ColType, NZType, is_device> {
 public:
  static constexpr SparsityType sparsity_type = OWNING;
  using sparse_structure_type = coordinate_structure_t<RowType, ColType, NZType, is_device>;
  using row_type = typename sparse_structure_type::row_type;
  using col_type = typename sparse_structure_type::col_type;
  using nnz_type = typename sparse_structure_type::nnz_type;
  using view_type = coordinate_structure_view<row_type, col_type, nnz_type, is_device>;
  using row_container_policy_type = ContainerPolicy<RowType>;
  using col_container_policy_type = ContainerPolicy<ColType>;
  using row_container_type = typename row_container_policy_type::container_type;
  using col_container_type = typename col_container_policy_type::container_type;

  /**
   * Construct an owning coordinate structure, allocating `nnz` entries for
   * both the rows and cols arrays. (Previously the arrays were created with
   * size 0 regardless of `nnz`, leaving the spans returned by view() and
   * get_rows()/get_cols() pointing past the end of the buffers until
   * initialize_sparsity() was called — matches compressed_structure, which
   * allocates its indices array with `nnz` at construction.)
   */
  coordinate_structure(
    raft::resources const& handle,
    row_type n_rows,
    col_type n_cols,
    nnz_type nnz = 0) noexcept(std::is_nothrow_default_constructible_v<row_container_type>)
    : coordinate_structure_t<RowType, ColType, NZType, is_device>(n_rows, n_cols, nnz),
      cp_rows_{},
      cp_cols_{},
      c_rows_{cp_rows_.create(handle, nnz)},
      c_cols_{cp_cols_.create(handle, nnz)} {};

  coordinate_structure(coordinate_structure const&) noexcept(
    std::is_nothrow_copy_constructible_v<row_container_type>) = default;
  coordinate_structure(coordinate_structure&&) noexcept(
    std::is_nothrow_move_constructible<row_container_type>::value) = default;

  constexpr auto operator=(coordinate_structure const&) noexcept(
    std::is_nothrow_copy_assignable<row_container_type>::value) -> coordinate_structure& = default;
  constexpr auto operator=(coordinate_structure&&) noexcept(
    std::is_nothrow_move_assignable<row_container_type>::value) -> coordinate_structure& = default;

  ~coordinate_structure() noexcept(std::is_nothrow_destructible<row_container_type>::value) =
    default;

  /**
   * Return a view of the coordinate structure. Structural views are sparsity-preserving
   * so while the structural elements can be updated in a non-const view, the sparsity
   * itself (number of nonzeros) cannot be changed.
   * @return coordinate structure view
   */
  view_type view()
  {
    if (this->get_nnz() == 0) {
      RAFT_LOG_WARN(
        "Cannot create coordinate_structure.view() because it has not been initialized "
        "(sparsity is 0)");
    }
    auto row_span = raft::span<row_type, is_device>(c_rows_.data(), this->get_nnz());
    auto col_span = raft::span<col_type, is_device>(c_cols_.data(), this->get_nnz());
    return view_type(row_span, col_span, this->get_n_rows(), this->get_n_cols());
  }

  /**
   * Return span containing underlying rows array.
   *
   * The rows array holds one entry per nonzero, so the span length is
   * get_nnz(). (Previously sized with get_n_rows(), which mis-sized the span
   * relative to the allocated buffer whenever nnz != n_rows; view() already
   * used get_nnz().)
   * @return span containing underlying rows array
   */
  span<row_type, is_device> get_rows() override
  {
    return raft::span<row_type, is_device>(c_rows_.data(), this->get_nnz());
  }

  /**
   * Return span containing underlying cols array (one entry per nonzero,
   * previously mis-sized with get_n_cols()).
   * @return span containing underlying cols array
   */
  span<col_type, is_device> get_cols() override
  {
    return raft::span<col_type, is_device>(c_cols_.data(), this->get_nnz());
  }

  /**
   * Change the sparsity of the current structure. This will
   * resize the underlying data arrays.
   * @param nnz new sparsity
   */
  void initialize_sparsity(nnz_type nnz)
  {
    sparse_structure_type::initialize_sparsity(nnz);
    c_rows_.resize(nnz);
    c_cols_.resize(nnz);
  }

 protected:
  row_container_policy_type cp_rows_;
  col_container_policy_type cp_cols_;
  row_container_type c_rows_;
  col_container_type c_cols_;
};
/**
 * A non-owning view of a COO matrix: pairs a span of element values with a
 * coordinate_structure_view describing the (row, col) coordinates.
 */
template <typename ElementType, typename RowType, typename ColType, typename NZType, bool is_device>
class coo_matrix_view
  : public sparse_matrix_view<ElementType,
                              coordinate_structure_view<RowType, ColType, NZType, is_device>,
                              is_device> {
 public:
  using element_type = ElementType;
  using row_type = RowType;
  using col_type = ColType;
  using nnz_type = NZType;

  coo_matrix_view(raft::span<ElementType, is_device> element_span,
                  coordinate_structure_view<RowType, ColType, NZType, is_device> structure_view)
    : sparse_matrix_view<ElementType,
                         coordinate_structure_view<RowType, ColType, NZType, is_device>,
                         is_device>(element_span, structure_view)
  {
  }
};
/**
 * A COO (coordinate-format) sparse matrix. Depending on `sparsity_type` it
 * either owns its structure (OWNING: `structure_type` is a
 * coordinate_structure) or wraps a caller-supplied structure view
 * (PRESERVING: `structure_type` is a coordinate_structure_view); the element
 * values are always owned.
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          bool is_device,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type = SparsityType::OWNING,
          typename structure_type = std::conditional_t<
            sparsity_type == SparsityType::OWNING,
            coordinate_structure<RowType, ColType, NZType, is_device, ContainerPolicy>,
            coordinate_structure_view<RowType, ColType, NZType, is_device>>>
class coo_matrix
  : public sparse_matrix<ElementType,
                         structure_type,
                         coo_matrix_view<ElementType, RowType, ColType, NZType, is_device>,
                         is_device,
                         ContainerPolicy> {
 public:
  using element_type = ElementType;
  using row_type = RowType;
  using col_type = ColType;
  using nnz_type = NZType;
  using structure_view_type = typename structure_type::view_type;
  using container_type = typename ContainerPolicy<ElementType>::container_type;
  using sparse_matrix_type =
    sparse_matrix<ElementType,
                  structure_type,
                  coo_matrix_view<ElementType, RowType, ColType, NZType, is_device>,
                  is_device,
                  ContainerPolicy>;
  static constexpr auto get_sparsity_type() { return sparsity_type; }

  // Constructor for the OWNING case: allocates both values and structure.
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::OWNING>>
  coo_matrix(raft::resources const& handle,
             RowType n_rows,
             ColType n_cols,
             NZType nnz = 0) noexcept(std::is_nothrow_default_constructible_v<container_type>)
    : sparse_matrix_type(handle, n_rows, n_cols, nnz){};

  // Constructor that owns the data but not the structure (PRESERVING case).
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::PRESERVING>>
  coo_matrix(raft::resources const& handle, structure_type structure) noexcept(
    std::is_nothrow_default_constructible_v<container_type>)
    : sparse_matrix_type(handle, structure){};

  /**
   * Initialize the sparsity on this instance if it was not known upon construction
   * Please note this will resize the underlying memory buffers
   * @param nnz new sparsity to initialize.
   */
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::OWNING>>
  void initialize_sparsity(NZType nnz)
  {
    sparse_matrix_type::initialize_sparsity(nnz);
    this->structure_.initialize_sparsity(nnz);
  }

  /**
   * Return a view of the structure underlying this matrix
   * @return structure view (materialized from the owning structure, or the
   * stored view itself in the sparsity-preserving case)
   */
  structure_view_type structure_view()
  {
    if constexpr (get_sparsity_type() == SparsityType::OWNING) {
      return this->structure_.view();
    } else {
      return this->structure_;
    }
  }
};
/** @} */
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/csr_matrix.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/span.hpp>
#include <raft/core/sparse_types.hpp>
namespace raft {
/**
* \defgroup csr_matrix CSR Matrix
* @{
*/
// Abstract base for compressed (CSR/CSC) structures: holds the matrix
// shape/sparsity and declares accessors for the indptr and indices arrays.
template <typename IndptrType, typename IndicesType, typename NZType, int is_device>
class compressed_structure_t : public sparse_structure<IndptrType, IndicesType, NZType, is_device> {
 public:
  /**
   * Constructor when sparsity is already known
   * @param n_rows total number of rows in matrix
   * @param n_cols total number of columns in matrix
   * @param nnz sparsity of matrix
   */
  compressed_structure_t(IndptrType n_rows, IndicesType n_cols, NZType nnz)
    : sparse_structure<IndptrType, IndicesType, NZType, is_device>(n_rows, n_cols, nnz){};

  /**
   * Return span containing underlying indptr array
   * @return span containing underlying indptr array
   */
  virtual span<IndptrType, is_device> get_indptr() = 0;

  /**
   * Return span containing underlying indices array
   * @return span containing underlying indices array
   */
  virtual span<IndicesType, is_device> get_indices() = 0;
};
/**
 * A non-owning view into a compressed sparse structure
 *
 * The structure representation does not have a value/weight
 * component so that its const-ness can be varied from it.
 *
 * The number of rows is derived as indptr.size() - 1 and the sparsity as
 * indices.size().
 *
 * @tparam IndptrType
 * @tparam IndicesType
 */
template <typename IndptrType, typename IndicesType, typename NZType, bool is_device>
class compressed_structure_view
  : public compressed_structure_t<IndptrType, IndicesType, NZType, is_device> {
 public:
  using sparse_structure_type = compressed_structure_t<IndptrType, IndicesType, NZType, is_device>;
  using view_type = compressed_structure_view<IndptrType, IndicesType, NZType, is_device>;
  using indptr_type = typename sparse_structure_type::row_type;
  using indices_type = typename sparse_structure_type::col_type;
  using nnz_type = typename sparse_structure_type::nnz_type;

  compressed_structure_view(span<indptr_type, is_device> indptr,
                            span<indices_type, is_device> indices,
                            indices_type n_cols)
    : sparse_structure_type(indptr.size() - 1, n_cols, indices.size()),
      indptr_(indptr),
      indices_(indices)
  {
  }

  /**
   * Return span containing underlying indptr array
   * @return span containing underlying indptr array
   */
  span<indptr_type, is_device> get_indptr() override { return indptr_; }

  /**
   * Return span containing underlying indices array
   * @return span containing underlying indices array
   */
  span<indices_type, is_device> get_indices() override { return indices_; }

 protected:
  raft::span<indptr_type, is_device> indptr_;
  raft::span<indices_type, is_device> indices_;
};
/**
 * Represents a sparse compressed structure (or adjacency list)
 * which can be used to model both a CSR and CSC matrix.
 *
 * The structure representation does not have a value/weight
 * component so that its const-ness can be varied from it.
 *
 * @tparam IndptrType type of the offsets (indptr) array elements
 * @tparam IndicesType type of the indices array elements
 * @tparam NZType type used to represent the number of nonzeros
 * @tparam is_device whether the underlying arrays reside in device memory
 * @tparam ContainerPolicy policy used to allocate the indptr/indices containers
 */
template <typename IndptrType,
          typename IndicesType,
          typename NZType,
          bool is_device,
          template <typename T>
          typename ContainerPolicy>
class compressed_structure
  : public compressed_structure_t<IndptrType, IndicesType, NZType, is_device> {
 public:
  using sparse_structure_type = compressed_structure_t<IndptrType, IndicesType, NZType, is_device>;
  using indptr_type           = typename sparse_structure_type::row_type;
  using indices_type          = typename sparse_structure_type::col_type;
  using nnz_type              = typename sparse_structure_type::nnz_type;
  using view_type = compressed_structure_view<IndptrType, IndicesType, NZType, is_device>;
  using indptr_container_policy_type  = ContainerPolicy<IndptrType>;
  using indices_container_policy_type = ContainerPolicy<IndicesType>;
  using indptr_container_type  = typename indptr_container_policy_type::container_type;
  using indices_container_type = typename indices_container_policy_type::container_type;

  /**
   * Construct a sparsity-owning compressed structure. The indices array is
   * allocated with `nnz` elements, which may be 0 if the sparsity is not yet
   * known; call `initialize_sparsity()` later in that case.
   *
   * @param[in] handle raft handle used by the container policy for allocations
   * @param[in] n_rows total number of rows in the matrix
   * @param[in] n_cols total number of columns in the matrix
   * @param[in] nnz number of nonzeros, if known up front [optional]
   */
  constexpr compressed_structure(
    raft::resources const& handle,
    IndptrType n_rows,
    IndicesType n_cols,
    NZType nnz = 0) noexcept(std::is_nothrow_default_constructible_v<indptr_container_type>)
    : sparse_structure_type{n_rows, n_cols, nnz},
      cp_indptr_{},
      cp_indices_{},
      c_indptr_{cp_indptr_.create(handle, n_rows + 1)},
      c_indices_{cp_indices_.create(handle, nnz)}
  {
  }

  // NOTE: noexcept specifications consistently use the C++17 `_v` trait
  // variable templates, matching the default-constructibility check above.
  compressed_structure(compressed_structure const&) noexcept(
    std::is_nothrow_copy_constructible_v<indptr_container_type>) = default;
  compressed_structure(compressed_structure&&) noexcept(
    std::is_nothrow_move_constructible_v<indptr_container_type>) = default;

  constexpr auto operator=(compressed_structure const&) noexcept(
    std::is_nothrow_copy_assignable_v<indptr_container_type>)
    -> compressed_structure& = default;
  constexpr auto operator=(compressed_structure&&) noexcept(
    std::is_nothrow_move_assignable_v<indptr_container_type>)
    -> compressed_structure& = default;

  /**
   * Return span containing underlying indptr array
   * @return span containing underlying indptr array
   */
  span<IndptrType, is_device> get_indptr() override
  {
    return raft::span<IndptrType, is_device>(c_indptr_.data(), this->get_n_rows() + 1);
  }

  /**
   * Return span containing underlying indices array
   * @return span containing underlying indices array
   */
  span<IndicesType, is_device> get_indices() override
  {
    if (this->get_nnz() == 0) {
      RAFT_LOG_WARN("Indices requested for structure that has uninitialized sparsity.");
    }
    return raft::span<IndicesType, is_device>(c_indices_.data(), this->get_nnz());
  }

  ~compressed_structure() noexcept(std::is_nothrow_destructible_v<indptr_container_type>) =
    default;

  /**
   * Return a view of the compressed structure. Structural views are sparsity-preserving
   * so while the structural elements can be updated in a non-const view, the sparsity
   * itself (number of nonzeros) cannot be changed.
   * @return compressed structure view
   */
  view_type view()
  {
    if (this->get_nnz() == 0) {
      RAFT_LOG_WARN(
        "Cannot create compressed_structure.view() because it has not been initialized (sparsity "
        "is 0)");
    }
    auto indptr_span  = raft::span<IndptrType, is_device>(c_indptr_.data(), this->get_n_rows() + 1);
    auto indices_span = raft::span<IndicesType, is_device>(c_indices_.data(), this->get_nnz());
    return view_type(indptr_span, indices_span, this->get_n_cols());
  }

  /**
   * Change the sparsity of the current compressed structure. This will
   * resize the underlying data arrays.
   * @param nnz new sparsity
   */
  void initialize_sparsity(NZType nnz) override
  {
    sparse_structure_type::initialize_sparsity(nnz);
    c_indptr_.resize(this->get_n_rows() + 1);
    c_indices_.resize(nnz);
  }

 protected:
  indptr_container_policy_type cp_indptr_;
  indices_container_policy_type cp_indices_;
  indptr_container_type c_indptr_;
  indices_container_type c_indices_;
};
/**
 * A non-owning view of a CSR matrix, pairing a span of nonzero values with a
 * non-owning compressed structural view that describes the sparsity pattern.
 *
 * @tparam ElementType type of the nonzero matrix elements
 * @tparam IndptrType type of the offsets (indptr) array elements
 * @tparam IndicesType type of the indices array elements
 * @tparam NZType type used to represent the number of nonzeros
 * @tparam is_device whether the underlying arrays reside in device memory
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          bool is_device>
class csr_matrix_view
  : public sparse_matrix_view<ElementType,
                              compressed_structure_view<IndptrType, IndicesType, NZType, is_device>,
                              is_device> {
 public:
  using element_type = ElementType;
  using indptr_type  = IndptrType;
  using indices_type = IndicesType;
  using nnz_type     = NZType;

  /**
   * @param[in] element_span span over the nonzero values (size nnz)
   * @param[in] structure_view compressed structural view describing the sparsity
   */
  csr_matrix_view(
    raft::span<ElementType, is_device> element_span,
    compressed_structure_view<IndptrType, IndicesType, NZType, is_device> structure_view)
    : sparse_matrix_view<ElementType,
                        compressed_structure_view<IndptrType, IndicesType, NZType, is_device>,
                        is_device>(element_span, structure_view)
  {
  }
};
/**
 * A CSR matrix which can either own its structure (SparsityType::OWNING) or
 * reference a structural view owned elsewhere (SparsityType::PRESERVING).
 *
 * @tparam ElementType type of the nonzero matrix elements
 * @tparam IndptrType type of the offsets (indptr) array elements
 * @tparam IndicesType type of the indices array elements
 * @tparam NZType type used to represent the number of nonzeros
 * @tparam is_device whether the underlying arrays reside in device memory
 * @tparam ContainerPolicy policy used to allocate the underlying containers
 * @tparam sparsity_type whether this matrix owns its sparsity (default) or preserves it
 * @tparam structure_type derived from sparsity_type; owning structure or structural view
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          bool is_device,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type = SparsityType::OWNING,
          typename structure_type    = std::conditional_t<
            sparsity_type == SparsityType::OWNING,
            compressed_structure<IndptrType, IndicesType, NZType, is_device, ContainerPolicy>,
            compressed_structure_view<IndptrType, IndicesType, NZType, is_device>>>
class csr_matrix
  : public sparse_matrix<ElementType,
                         structure_type,
                         csr_matrix_view<ElementType, IndptrType, IndicesType, NZType, is_device>,
                         is_device,
                         ContainerPolicy> {
 public:
  using element_type        = ElementType;
  using indptr_type         = IndptrType;
  using indices_type        = IndicesType;
  using nnz_type            = NZType;
  using structure_view_type = typename structure_type::view_type;
  static constexpr auto get_sparsity_type() { return sparsity_type; }
  using sparse_matrix_type =
    sparse_matrix<ElementType,
                  structure_type,
                  csr_matrix_view<ElementType, IndptrType, IndicesType, NZType, is_device>,
                  is_device,
                  ContainerPolicy>;
  using container_type = typename ContainerPolicy<ElementType>::container_type;

  /**
   * Construct a sparsity-owning csr matrix. Only participates in overload
   * resolution when this specialization is sparsity-owning.
   *
   * @param[in] handle raft handle for managing expensive resources
   * @param[in] n_rows total number of rows
   * @param[in] n_cols total number of columns
   * @param[in] nnz number of nonzeros, if known up front [optional]
   */
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::OWNING>>
  csr_matrix(raft::resources const& handle,
             IndptrType n_rows,
             IndicesType n_cols,
             NZType nnz = 0) noexcept(std::is_nothrow_default_constructible_v<container_type>)
    : sparse_matrix_type(handle, n_rows, n_cols, nnz)
  {
  }

  /**
   * Construct a csr matrix that owns its data but not its structure. Only
   * participates in overload resolution for sparsity-preserving specializations.
   *
   * @param[in] handle raft handle for managing expensive resources
   * @param[in] structure structural view describing the (fixed) sparsity
   */
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::PRESERVING>>
  csr_matrix(raft::resources const& handle, structure_type structure) noexcept(
    std::is_nothrow_default_constructible_v<container_type>)
    : sparse_matrix_type(handle, structure)
  {
  }

  /**
   * Initialize the sparsity on this instance if it was not known upon construction
   * Please note this will resize the underlying memory buffers
   * @param nnz new sparsity to initialize.
   */
  // Same deferred-SFINAE pattern as the constructors: the previous
  // `std::enable_if<...>` (without `_t`) was always a valid type and never
  // actually restricted this member to sparsity-owning matrices.
  template <SparsityType sparsity_type_ = get_sparsity_type(),
            typename = typename std::enable_if_t<sparsity_type_ == SparsityType::OWNING>>
  void initialize_sparsity(NZType nnz)
  {
    sparse_matrix_type::initialize_sparsity(nnz);
    this->structure_.initialize_sparsity(nnz);
  }

  /**
   * Return a view of the structure underlying this matrix
   * @return structural view (materialized from the owned structure when OWNING)
   */
  structure_view_type structure_view()
  {
    if constexpr (get_sparsity_type() == SparsityType::OWNING) {
      return this->structure_.view();
    } else {
      return this->structure_;
    }
  }
};
/** @} */
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/mdspan_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/thirdparty/mdspan/include/experimental/mdspan>
namespace raft {

// Re-export the reference mdspan implementation's extent machinery.
using std::experimental::dynamic_extent;
using std::experimental::extents;

/**
 * @defgroup mdspan_layout C- and F-contiguous mdspan layouts
 * @{
 */
using std::experimental::layout_right;
using layout_c_contiguous = layout_right;
using row_major           = layout_right;

using std::experimental::layout_left;
using layout_f_contiguous = layout_left;
using col_major           = layout_left;
/** @} */

/** 1-d extents with one dynamic dimension (vector shape). */
template <typename IndexType>
using vector_extent = std::experimental::extents<IndexType, dynamic_extent>;

/** 2-d extents with two dynamic dimensions (matrix shape). */
template <typename IndexType>
using matrix_extent = std::experimental::extents<IndexType, dynamic_extent, dynamic_extent>;

/** 1-d extents statically fixed to a single element (scalar shape). */
template <typename IndexType>
using scalar_extent = std::experimental::extents<IndexType, 1>;

/**
 * @brief Strided layout for non-contiguous memory.
 */
using std::experimental::layout_stride;

template <typename IndexType>
using extent_1d = vector_extent<IndexType>;

template <typename IndexType>
using extent_2d = matrix_extent<IndexType>;

template <typename IndexType>
using extent_3d =
  std::experimental::extents<IndexType, dynamic_extent, dynamic_extent, dynamic_extent>;

template <typename IndexType>
using extent_4d = std::experimental::
  extents<IndexType, dynamic_extent, dynamic_extent, dynamic_extent, dynamic_extent>;

template <typename IndexType>
using extent_5d = std::experimental::extents<IndexType,
                                             dynamic_extent,
                                             dynamic_extent,
                                             dynamic_extent,
                                             dynamic_extent,
                                             dynamic_extent>;

}  // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/interruptible.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RAFT_RT_INTERRUPTIBLE_H
#define __RAFT_RT_INTERRUPTIBLE_H
#pragma once
#include <memory>
#include <mutex>
#include <optional>
#include <raft/core/error.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thread>
#include <unordered_map>
namespace raft {
/**
* @defgroup interruptible definitions and classes related to the interruptible API
* @{
*/
/**
* @brief Exception thrown during `interruptible::synchronize` call when it detects a request
* to cancel the work performed in this CPU thread.
*/
struct interrupted_exception : public raft::exception {
  // Inherit raft::exception's message-taking constructors.
  using raft::exception::exception;
};
/**
* @brief Cooperative-style interruptible execution.
*
* This class provides facilities for interrupting execution of a C++ thread at designated points
* in code from outside of the thread. In particular, it provides an interruptible version of the
* blocking CUDA synchronization function, that allows dropping a long-running GPU work.
*
*
* **Important:** Although CUDA synchronize calls serve as cancellation points, the interruptible
* machinery has nothing to do with CUDA streams or events. In other words, when you call `cancel`,
* it’s the CPU waiting function what is interrupted, not the GPU stream work. This means, when the
* `interrupted_exception` is raised, any unfinished GPU stream work continues to run. It’s the
* responsibility of the developer then to make sure the unfinished stream work does not affect the
* program in an undesirable way.
*
*
* What can happen to CUDA stream when the `synchronize` is cancelled? If you catch the
* `interrupted_exception` immediately, you can safely wait on the stream again.
* Otherwise, some of the allocated resources may be released before the active kernel finishes
* using them, which will result in writing into deallocated or reallocated memory and undefined
* behavior in general. A dead-locked kernel may never finish (or may crash if you’re lucky). In
* practice, the outcome is usually acceptable for the use case of emergency program interruption
* (e.g., CTRL+C), but extra effort on the use side is required to allow safe interrupting and
* resuming of the GPU stream work.
*/
class interruptible {
 public:
  /**
   * @brief Synchronize the CUDA stream, subject to being interrupted by `interruptible::cancel`
   * called on this CPU thread.
   *
   * @param [in] stream a CUDA stream.
   *
   * @throw raft::interrupted_exception if interruptible::cancel() was called on the current CPU
   * thread before the currently captured work has been finished.
   * @throw raft::cuda_error if another CUDA error happens.
   */
  static inline void synchronize(rmm::cuda_stream_view stream)
  {
    get_token()->synchronize_impl(cudaStreamQuery, stream);
  }

  /**
   * @brief Synchronize the CUDA event, subject to being interrupted by `interruptible::cancel`
   * called on this CPU thread.
   *
   * @param [in] event a CUDA event.
   *
   * @throw raft::interrupted_exception if interruptible::cancel() was called on the current CPU
   * thread before the currently captured work has been finished.
   * @throw raft::cuda_error if another CUDA error happens.
   */
  static inline void synchronize(cudaEvent_t event)
  {
    get_token()->synchronize_impl(cudaEventQuery, event);
  }

  /**
   * @brief Check the thread state, whether the thread can continue execution or is interrupted by
   * `interruptible::cancel`.
   *
   * This is a cancellation point for an interruptible thread. It's called in the internals of
   * `interruptible::synchronize` in a loop. If two synchronize calls are far apart, it's
   * recommended to call `interruptible::yield()` in between to make sure the thread does not become
   * unresponsive for too long.
   *
   * Both `yield` and `yield_no_throw` reset the state to non-cancelled after execution.
   *
   * @throw raft::interrupted_exception if interruptible::cancel() was called on the current CPU
   * thread.
   */
  static inline void yield() { get_token()->yield_impl(); }

  /**
   * @brief Check the thread state, whether the thread can continue execution or is interrupted by
   * `interruptible::cancel`.
   *
   * Same as `interruptible::yield`, but does not throw an exception if the thread is cancelled.
   *
   * Both `yield` and `yield_no_throw` reset the state to non-cancelled after execution.
   *
   * @return whether the thread can continue, i.e. `true` means continue, `false` means cancelled.
   */
  static inline auto yield_no_throw() -> bool { return get_token()->yield_no_throw_impl(); }

  /**
   * @brief Get a cancellation token for this CPU thread.
   *
   * @return an object that can be used to cancel the GPU work waited on this CPU thread.
   */
  static inline auto get_token() -> std::shared_ptr<interruptible>
  {
    // NB: using static thread-local storage to keep the token alive once it is initialized
    static thread_local std::shared_ptr<interruptible> s(
      get_token_impl<true>(std::this_thread::get_id()));
    return s;
  }

  /**
   * @brief Get a cancellation token for a CPU thread given by its id.
   *
   * The returned token may live longer than the associated thread. In that case, using its
   * `cancel` method has no effect.
   *
   * @param [in] thread_id an id of a C++ CPU thread.
   * @return an object that can be used to cancel the GPU work waited on the given CPU thread.
   */
  static inline auto get_token(std::thread::id thread_id) -> std::shared_ptr<interruptible>
  {
    return get_token_impl<false>(thread_id);
  }

  /**
   * @brief Cancel any current or next call to `interruptible::synchronize` performed on the
   * CPU thread given by the `thread_id`
   *
   * Note, this function uses a mutex to safely get a cancellation token that may be shared
   * among multiple threads. If you plan to use it from a signal handler, consider the non-static
   * `cancel()` instead.
   *
   * @param [in] thread_id a CPU thread, in which the work should be interrupted.
   */
  static inline void cancel(std::thread::id thread_id) { get_token(thread_id)->cancel(); }

  /**
   * @brief Cancel any current or next call to `interruptible::synchronize` performed on the
   * CPU thread given by this `interruptible` token.
   *
   * Note, this function does not involve thread synchronization/locks and does not throw any
   * exceptions, so it's safe to call from a signal handler.
   */
  inline void cancel() noexcept { continue_.clear(std::memory_order_relaxed); }

  // don't allow the token to leave the shared_ptr
  interruptible(interruptible const&)                    = delete;
  interruptible(interruptible&&)                         = delete;
  auto operator=(interruptible const&) -> interruptible& = delete;
  auto operator=(interruptible&&) -> interruptible&      = delete;

 private:
  /** Global registry of thread-local cancellation stores. */
  using registry_t =
    std::tuple<std::mutex, std::unordered_map<std::thread::id, std::weak_ptr<interruptible>>>;

  /**
   * The registry "garbage collector": a custom deleter for the interruptible tokens that removes
   * the token from the registry, if the registry still exists.
   */
  struct registry_gc_t {
    std::weak_ptr<registry_t> weak_registry;
    std::thread::id thread_id;

    inline void operator()(interruptible* thread_store) const noexcept
    {
      // the deleter kicks in at thread/program exit; in some cases, the registry_ (static variable)
      // may have been destructed by this point of time.
      // Hence, we use a weak pointer to check if the registry still exists.
      auto registry = weak_registry.lock();
      if (registry) {
        std::lock_guard<std::mutex> guard_erase(std::get<0>(*registry));
        auto& map  = std::get<1>(*registry);
        auto found = map.find(thread_id);
        if (found != map.end()) {
          auto stored = found->second.lock();
          // thread_store is not moveable, thus retains its original location.
          // Not equal pointers below imply the new store has been already placed
          // in the registry by the same std::thread::id
          if (!stored || stored.get() == thread_store) { map.erase(found); }
        }
      }
      delete thread_store;
    }
  };

  /**
   * The registry itself is stored in the static memory, in a shared pointer.
   * This is to safely access it from the destructors of the thread-local tokens.
   */
  static inline std::shared_ptr<registry_t> registry_{new registry_t{}};

  /**
   * Create a new interruptible token or get an existing from the global registry_.
   *
   * Presumptions:
   *
   * 1. get_token_impl<true> must be called at most once per thread.
   * 2. When `Claim == true`, thread_id must be equal to std::this_thread::get_id().
   * 3. get_token_impl<false> can be called as many times as needed, producing a valid
   *    token for any input thread_id, independent of whether a C++ thread with this
   *    id exists or not.
   *
   * @tparam Claim whether to bind the token to the given thread.
   * @param [in] thread_id the id of the associated C++ thread.
   * @return new or existing interruptible token.
   */
  template <bool Claim>
  static auto get_token_impl(std::thread::id thread_id) -> std::shared_ptr<interruptible>
  {
    // Make a local copy of the shared pointer to make sure the registry is not destroyed,
    // if, for any reason, this function is called at program exit.
    std::shared_ptr<registry_t> shared_registry = registry_;
    // If the registry is not available, create a lone token that cannot be accessed from
    // the outside of the thread.
    if (!shared_registry) { return std::shared_ptr<interruptible>{new interruptible()}; }
    // Otherwise, proceed with the normal logic
    std::lock_guard<std::mutex> guard_get(std::get<0>(*shared_registry));
    // the following two lines construct an empty shared_ptr if the key does not exist.
    auto& weak_store  = std::get<1>(*shared_registry)[thread_id];
    auto thread_store = weak_store.lock();
    if (!thread_store || (Claim && thread_store->claimed_)) {
      // Create a new thread_store in two cases:
      //  1. It does not exist in the map yet
      //  2. The previous store in the map has not yet been deleted
      thread_store.reset(new interruptible(), registry_gc_t{shared_registry, thread_id});
      std::weak_ptr<interruptible>(thread_store).swap(weak_store);
    }
    // The thread_store is "claimed" by the thread
    if constexpr (Claim) { thread_store->claimed_ = true; }
    return thread_store;
  }

  /**
   * Communicate whether the thread is in a cancelled state or can continue execution.
   *
   * `yield` checks this flag and always resets it to the signalled state; `cancel` clears it.
   * These are the only two places where it's used.
   */
  std::atomic_flag continue_;
  /** This flag is set to true when the created token is placed into a thread-local storage. */
  bool claimed_ = false;

  // The constructor immediately sets continue_ (via test_and_set inside
  // yield_no_throw_impl), so a fresh token always starts in the non-cancelled state.
  interruptible() noexcept { yield_no_throw_impl(); }

  void yield_impl()
  {
    if (!yield_no_throw_impl()) {
      throw interrupted_exception("The work in this thread was cancelled.");
    }
  }

  // test_and_set returns the previous value: `false` (i.e. cancelled) only if
  // `cancel()` cleared the flag since the last yield; the flag is re-armed either way.
  auto yield_no_throw_impl() noexcept -> bool
  {
    return continue_.test_and_set(std::memory_order_relaxed);
  }

  // Poll the CUDA query function (cudaStreamQuery/cudaEventQuery) in a loop,
  // checking the cancellation flag on every iteration; yields the CPU between polls.
  template <typename Query, typename Object>
  inline void synchronize_impl(Query query, Object object)
  {
    cudaError_t query_result;
    while (true) {
      yield_impl();  // cancellation point: throws interrupted_exception if cancelled
      query_result = query(object);
      if (query_result != cudaErrorNotReady) { break; }
      std::this_thread::yield();
    }
    RAFT_CUDA_TRY(query_result);
  }
};
/**
* @} // end doxygen group interruptible
*/
} // namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/stream_view.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/cuda_support.hpp>
#include <raft/core/error.hpp>
#include <raft/core/logger.hpp>
#ifndef RAFT_DISABLE_CUDA
#include <raft/core/interruptible.hpp>
#include <rmm/cuda_stream_view.hpp>
#endif
namespace raft {
namespace detail {
/**
 * Stand-in for a CUDA stream view in CUDA-free builds: trivially copyable,
 * but every operation that would require an actual stream throws (or logs,
 * for the no-throw variant) at runtime.
 */
struct fail_stream_view {
  constexpr fail_stream_view()                        = default;
  constexpr fail_stream_view(fail_stream_view const&) = default;
  constexpr fail_stream_view(fail_stream_view&&)      = default;
  auto constexpr operator=(fail_stream_view const&) -> fail_stream_view& = default;
  auto constexpr operator=(fail_stream_view&&) -> fail_stream_view& = default;
  // There is no underlying stream to return; always throws.
  auto value() { throw non_cuda_build_error{"Attempted to access CUDA stream in non-CUDA build"}; }
  [[nodiscard]] auto is_per_thread_default() const { return false; }
  [[nodiscard]] auto is_default() const { return false; }
  void synchronize() const
  {
    throw non_cuda_build_error{"Attempted to sync CUDA stream in non-CUDA build"};
  }
  // No-throw variant: report the misuse through the logger instead of throwing.
  void synchronize_no_throw() const
  {
    RAFT_LOG_ERROR("Attempted to sync CUDA stream in non-CUDA build");
  }
};
} // namespace detail
/** A lightweight wrapper around rmm::cuda_stream_view that can be used in
* CUDA-free builds
*
* While CUDA-free builds should never actually make use of a CUDA stream at
* runtime, it is sometimes useful to have a symbol that can stand in place of
* a CUDA stream to avoid excessive ifdef directives interspersed with other
* logic. This struct's methods invoke the underlying rmm::cuda_stream_view in
* CUDA-enabled builds but throw runtime exceptions if any non-trivial method
* is called from a CUDA-free build */
struct stream_view {
#ifndef RAFT_DISABLE_CUDA
  using underlying_view_type = rmm::cuda_stream_view;
#else
  // CUDA-free builds substitute a stub that fails loudly on any real stream use.
  using underlying_view_type = detail::fail_stream_view;
#endif

  /** Wrap an underlying stream view; defaults to the per-thread default stream. */
  constexpr stream_view(
    underlying_view_type base_view = stream_view::get_underlying_per_thread_default())
    : base_view_{base_view}
  {
  }
  constexpr stream_view(stream_view const&) = default;
  constexpr stream_view(stream_view&&)      = default;
  auto operator=(stream_view const&) -> stream_view& = default;
  auto operator=(stream_view&&) -> stream_view& = default;
  /** Return the raw stream value (throws in CUDA-free builds). */
  auto value() { return base_view_.value(); }
  /** Implicit conversion so a stream_view can be passed wherever the underlying type is expected. */
  operator underlying_view_type() const noexcept { return base_view_; }
  [[nodiscard]] auto is_per_thread_default() const { return base_view_.is_per_thread_default(); }
  [[nodiscard]] auto is_default() const { return base_view_.is_default(); }
  /** Block until all work on the stream completes; throws on error. */
  void synchronize() const { base_view_.synchronize(); }
  /** Block until all work on the stream completes; logs instead of throwing on error. */
  void synchronize_no_throw() const { base_view_.synchronize_no_throw(); }
  /** Synchronize through raft::interruptible so the wait can be cancelled (CUDA builds). */
  void interruptible_synchronize() const
  {
#ifndef RAFT_DISABLE_CUDA
    interruptible::synchronize(base_view_);
#else
    synchronize();
#endif
  }
  /** Access the wrapped underlying view directly. */
  auto underlying() { return base_view_; }
  /** Synchronize only when compiled with CUDA support; no-op otherwise. */
  void synchronize_if_cuda_enabled()
  {
    if constexpr (raft::CUDA_ENABLED) { base_view_.synchronize(); }
  }

 private:
  underlying_view_type base_view_;
  // Default stream used when no view is supplied: the per-thread default
  // stream in CUDA builds, or a shared fail-stub otherwise.
  auto static get_underlying_per_thread_default() -> underlying_view_type
  {
#ifndef RAFT_DISABLE_CUDA
    return rmm::cuda_stream_per_thread;
#else
    auto static constexpr const default_fail_stream = underlying_view_type{};
    return default_fail_stream;
#endif
  }
};
/** Convenience global: a stream_view wrapping the per-thread default stream. */
auto static const stream_view_per_thread = stream_view{};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_coo_matrix.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/coo_matrix.hpp>
#include <raft/core/device_container_policy.hpp>
#include <raft/core/device_span.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/sparse_types.hpp>
namespace raft {
/**
* \defgroup device_coo_matrix Device COO Matrix
* @{
*/
/**
 * Specialization for a sparsity-preserving coordinate structure view which uses device memory
 */
template <typename RowType, typename ColType, typename NZType>
using device_coordinate_structure_view = coordinate_structure_view<RowType, ColType, NZType, true>;

/**
 * Specialization for a sparsity-owning coordinate structure which uses device memory
 */
template <typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_coordinate_structure =
  coordinate_structure<RowType, ColType, NZType, true, ContainerPolicy>;

/**
 * Specialization for a coo matrix view which uses device memory
 */
template <typename ElementType, typename RowType, typename ColType, typename NZType>
using device_coo_matrix_view = coo_matrix_view<ElementType, RowType, ColType, NZType, true>;

/**
 * Specialization for a coo matrix which uses device memory; sparsity ownership
 * is selected via the sparsity_type parameter (OWNING by default).
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy,
          SparsityType sparsity_type                     = SparsityType::OWNING>
using device_coo_matrix =
  coo_matrix<ElementType, RowType, ColType, NZType, true, ContainerPolicy, sparsity_type>;

/**
 * Specialization for a sparsity-owning coo matrix which uses device memory
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_sparsity_owning_coo_matrix =
  coo_matrix<ElementType, RowType, ColType, NZType, true, ContainerPolicy>;

/**
 * Specialization for a sparsity-preserving coo matrix which uses device memory
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_sparsity_preserving_coo_matrix = coo_matrix<ElementType,
                                                         RowType,
                                                         ColType,
                                                         NZType,
                                                         true,
                                                         ContainerPolicy,
                                                         SparsityType::PRESERVING>;

/** Trait: true iff T is a device_coo_matrix_view specialization. */
template <typename T>
struct is_device_coo_matrix_view : std::false_type {};

template <typename ElementType, typename RowType, typename ColType, typename NZType>
struct is_device_coo_matrix_view<device_coo_matrix_view<ElementType, RowType, ColType, NZType>>
  : std::true_type {};

template <typename T>
constexpr bool is_device_coo_matrix_view_v = is_device_coo_matrix_view<T>::value;

/** Trait: true iff T is a device_coo_matrix specialization (any sparsity type). */
template <typename T>
struct is_device_coo_matrix : std::false_type {};

template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type>
struct is_device_coo_matrix<
  device_coo_matrix<ElementType, RowType, ColType, NZType, ContainerPolicy, sparsity_type>>
  : std::true_type {};

template <typename T>
constexpr bool is_device_coo_matrix_v = is_device_coo_matrix<T>::value;

/** Trait: true iff T is a sparsity-owning device coo matrix. */
template <typename T>
constexpr bool is_device_coo_sparsity_owning_v =
  is_device_coo_matrix<T>::value and T::get_sparsity_type() == OWNING;

/** Trait: true iff T is a sparsity-preserving device coo matrix. */
template <typename T>
constexpr bool is_device_coo_sparsity_preserving_v =
  is_device_coo_matrix<T>::value and T::get_sparsity_type() == PRESERVING;
/**
* Create a sparsity-owning sparse matrix in the coordinate format. sparsity-owning means that
* all of the underlying vectors (data, indptr, indices) are owned by the coo_matrix instance. If
* not known up front, the sparsity can be ignored in this factory function and `resize()` invoked
* on the instance once the sparsity is known.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/device_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
*
* raft::resources handle;
* coo_matrix = raft::make_device_coo_matrix(handle, n_rows, n_cols);
* ...
* // compute expected sparsity
* ...
* int nnz = 5000;
* coo_matrix.initialize_sparsity(nnz);
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] handle a raft handle for managing expensive device resources
* @param[in] n_rows total number of rows in the matrix
* @param[in] n_cols total number of columns in the matrix
* @param[in] nnz number of non-zeros in the matrix if known [optional]
* @return a sparsity-owning sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_device_coo_matrix(raft::resources const& handle,
                            RowType n_rows,
                            ColType n_cols,
                            NZType nnz = 0)
{
  // Build a sparsity-owning matrix; arrays are allocated for `nnz` nonzeros
  // (possibly zero, in which case initialize_sparsity() can be called later).
  using matrix_type = device_sparsity_owning_coo_matrix<ElementType, RowType, ColType, NZType>;
  return matrix_type(handle, n_rows, n_cols, nnz);
}
/**
* Create a sparsity-preserving sparse matrix in the coordinate format. sparsity-preserving means
* that a view of the coo sparsity is supplied, allowing the values in the sparsity to change but
* not the sparsity itself. The csr_matrix instance does not own the sparsity, the sparsity must
* be known up front, and cannot be resized later.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/device_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
*
* raft::resources handle;
* coo_structure = raft::make_device_coordinate_structure(handle, n_rows, n_cols);
* ...
* // compute expected sparsity
* ...
* coo_structure.initialize_sparsity(nnz);
* coo_matrix = raft::make_device_coo_matrix(handle, coo_structure.view());
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] handle raft handle for managing expensive device resources
* @param[in] structure a sparsity-preserving coordinate structural view
* @return a sparsity-preserving sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_device_coo_matrix(raft::resources const& handle,
device_coordinate_structure_view<RowType, ColType, NZType> structure)
{
return device_sparsity_preserving_coo_matrix<ElementType, RowType, ColType, NZType>(handle,
structure);
}
/**
* Create a non-owning sparse matrix view in the coordinate format. This is sparsity-preserving,
* meaning that the underlying sparsity is known and cannot be changed. Use the sparsity-owning
* coo_matrix if sparsity needs to be mutable.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/device_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* // The following pointer is assumed to reference device memory for a size of nnz
* float* d_elm_ptr = ...;
*
* raft::resources handle;
* coo_structure = raft::make_device_coordinate_structure(handle, n_rows, n_cols, nnz);
* coo_matrix_view = raft::make_device_coo_matrix_view(handle, d_elm_ptr, coo_structure.view());
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] ptr a pointer to array of nonzero matrix elements on device (size nnz)
* @param[in] structure a sparsity-preserving coordinate structural view
* @return a sparsity-preserving sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_device_coo_matrix_view(
  ElementType* ptr, device_coordinate_structure_view<RowType, ColType, NZType> structure)
{
  // Wrap the raw device pointer in a span sized by the structure's nonzero count,
  // then pair it with the (non-owning) structural view.
  auto elements = raft::device_span<ElementType>(ptr, structure.get_nnz());
  return device_coo_matrix_view<ElementType, RowType, ColType, NZType>(elements, structure);
}
/**
 * Create a non-owning sparse matrix view in the coordinate format. This is sparsity-preserving,
 * meaning that the underlying sparsity is known and cannot be changed. Use the sparsity-owning
 * coo_matrix if sparsity needs to be mutable.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_span.hpp>
 * #include <raft/core/device_coo_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following span is assumed to be of size nnz
 * raft::device_span<float> d_elm_ptr;
 *
 * raft::resources handle;
 * coo_structure = raft::make_device_coordinate_structure(handle, n_rows, n_cols, nnz);
 * coo_matrix_view = raft::make_device_coo_matrix_view(d_elm_ptr, coo_structure.view());
 * @endcode
 *
 * @tparam ElementType type of the nonzero matrix elements
 * @tparam RowType type of the row indices
 * @tparam ColType type of the column indices
 * @tparam NZType type used to represent the number of non-zeros
 * @param[in] elements a device span containing nonzero matrix elements (size nnz)
 * @param[in] structure a sparsity-preserving coordinate structural view
 * @return a sparsity-preserving sparse matrix view in coordinate (coo) format
 */
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_device_coo_matrix_view(
  raft::device_span<ElementType> elements,
  device_coordinate_structure_view<RowType, ColType, NZType> structure)
{
  // Enforce the documented size contract before constructing the view.
  RAFT_EXPECTS(elements.size() == structure.get_nnz(),
               "Size of elements must be equal to the nnz from the structure");
  return device_coo_matrix_view<ElementType, RowType, ColType, NZType>(elements, structure);
}
/**
 * Create a sparsity-owning coordinate structure object. If not known up front, the sparsity can
 * be supplied later via initialize_sparsity() once the number of non-zeros is known, postponing
 * the allocation of the underlying data arrays.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_coo_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * raft::resources handle;
 * coo_structure = raft::make_device_coordinate_structure(handle, n_rows, n_cols);
 * ...
 * // compute expected sparsity
 * ...
 * coo_structure.initialize_sparsity(nnz);
 * @endcode
 *
 * @tparam RowType type of the row indices
 * @tparam ColType type of the column indices
 * @tparam NZType type used to represent the number of non-zeros
 * @param[in] handle raft handle for managing expensive resources on device
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of cols
 * @param[in] nnz number of non-zeros (optional; defaults to 0 and may be set later)
 * @return a sparsity-owning coordinate structure instance
 */
template <typename RowType, typename ColType, typename NZType>
auto make_device_coordinate_structure(raft::resources const& handle,
                                      RowType n_rows,
                                      ColType n_cols,
                                      NZType nnz = 0)
{
  return device_coordinate_structure<RowType, ColType, NZType>(handle, n_rows, n_cols, nnz);
}
/**
 * Create a non-owning sparsity-preserved coordinate structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_coo_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following pointers are assumed to reference device memory of size nnz
 * int *rows = ...;
 * int *cols = ...;
 *
 * coo_structure = raft::make_device_coordinate_structure_view(rows, cols, n_rows, n_cols,
 *                                                             nnz);
 * @endcode
 *
 * @tparam RowType type of the row indices
 * @tparam ColType type of the column indices
 * @tparam NZType type used to represent the number of non-zeros
 * @param[in] rows pointer to row indices array on device (size nnz)
 * @param[in] cols pointer to column indices array on device (size nnz)
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of columns
 * @param[in] nnz number of non-zeros
 * @return a sparsity-preserving coordinate structural view
 */
template <typename RowType, typename ColType, typename NZType>
auto make_device_coordinate_structure_view(
  RowType* rows, ColType* cols, RowType n_rows, ColType n_cols, NZType nnz)
{
  // Both index arrays are wrapped in spans of length nnz; nothing is owned by the view.
  return device_coordinate_structure_view<RowType, ColType, NZType>(
    raft::device_span<RowType>(rows, nnz), raft::device_span<ColType>(cols, nnz), n_rows, n_cols);
}
/**
 * Create a non-owning sparsity-preserved coordinate structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * Note: NZType does not appear in the function parameters and therefore cannot be deduced;
 * callers must supply all three template arguments explicitly.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_coo_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following device spans are assumed to be of size nnz
 * raft::device_span<int> rows;
 * raft::device_span<int> cols;
 *
 * coo_structure = raft::make_device_coordinate_structure_view<int, int, int>(rows, cols, n_rows,
 *                                                                            n_cols);
 * @endcode
 *
 * @tparam RowType type of the row indices
 * @tparam ColType type of the column indices
 * @tparam NZType type used to represent the number of non-zeros
 * @param[in] rows a device span containing row indices (size nnz)
 * @param[in] cols a device span containing column indices (size nnz)
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of columns
 * @return a sparsity-preserving coordinate structural view
 */
template <typename RowType, typename ColType, typename NZType>
auto make_device_coordinate_structure_view(raft::device_span<RowType> rows,
                                           raft::device_span<ColType> cols,
                                           RowType n_rows,
                                           ColType n_cols)
{
  return device_coordinate_structure_view<RowType, ColType, NZType>(rows, cols, n_rows, n_cols);
}
/** @} */
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_device_accessor.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/memory_type.hpp>
#include <type_traits>
namespace raft {
/**
* @brief A mixin to distinguish host and device memory. This is the primary
* accessor used throughout RAFT's APIs to denote whether an underlying pointer
* is accessible from device, host, or both.
*/
template <typename AccessorPolicy, memory_type MemType>
struct host_device_accessor : public AccessorPolicy {
  using accessor_type = AccessorPolicy;
  // The memory type this accessor advertises, fixed at compile time.
  auto static constexpr const mem_type = MemType;
  // true_type/false_type tags derived from mem_type, usable for tag dispatch.
  using is_host_type =
    std::conditional_t<raft::is_host_accessible(mem_type), std::true_type, std::false_type>;
  using is_device_type =
    std::conditional_t<raft::is_device_accessible(mem_type), std::true_type, std::false_type>;
  using is_managed_type =
    std::conditional_t<raft::is_host_device_accessible(mem_type), std::true_type, std::false_type>;
  // Boolean constants mirroring the tag types above.
  static constexpr bool is_host_accessible = raft::is_host_accessible(mem_type);
  static constexpr bool is_device_accessible = raft::is_device_accessible(mem_type);
  static constexpr bool is_managed_accessible = raft::is_host_device_accessible(mem_type);
  // make sure the explicit ctor can fall through
  using AccessorPolicy::AccessorPolicy;
  using offset_policy = host_device_accessor;
  // Allow implicit construction from the plain accessor policy.
  host_device_accessor(AccessorPolicy const& that) : AccessorPolicy{that} {}  // NOLINT
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/operators.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <tuple>
#include <type_traits>
#include <utility>
#include <raft/core/detail/macros.hpp>
#include <raft/core/math.hpp>
namespace raft {
/**
* @defgroup operators Commonly used functors.
* The optional unused arguments are useful for kernels that pass the index along with the value.
* @{
*/
/** Passes its first argument through unchanged; any extra arguments are ignored. */
struct identity_op {
  template <typename Type, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const Type& in, UnusedArgs...) const
  {
    return in;
  }
};
/** Accepts any arguments and does nothing. */
struct void_op {
  template <typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION void operator()(UnusedArgs...) const
  {
    return;
  }
};
/** Casts the input to OutT via static_cast; extra arguments are ignored. */
template <typename OutT>
struct cast_op {
  template <typename InT, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(InT in, UnusedArgs...) const
  {
    return static_cast<OutT>(in);
  }
};
/** Extracts the `key` member of a key-value pair. */
struct key_op {
  template <typename KVP, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const KVP& p, UnusedArgs...) const
  {
    return p.key;
  }
};
/** Extracts the `value` member of a key-value pair. */
struct value_op {
  template <typename KVP, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const KVP& p, UnusedArgs...) const
  {
    return p.value;
  }
};
/** Square root, forwarding to raft::sqrt. */
struct sqrt_op {
  template <typename Type, typename... UnusedArgs>
  RAFT_INLINE_FUNCTION auto operator()(const Type& in, UnusedArgs...) const
  {
    return raft::sqrt(in);
  }
};
/** Non-zero indicator: Type(1) if the input is non-zero, Type(0) otherwise. */
struct nz_op {
  template <typename Type, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const Type& in, UnusedArgs...) const
  {
    return in != Type(0) ? Type(1) : Type(0);
  }
};
/** Absolute value, forwarding to raft::abs. */
struct abs_op {
  template <typename Type, typename... UnusedArgs>
  RAFT_INLINE_FUNCTION auto operator()(const Type& in, UnusedArgs...) const
  {
    return raft::abs(in);
  }
};
/** Square of the input: in * in. */
struct sq_op {
  template <typename Type, typename... UnusedArgs>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const Type& in, UnusedArgs...) const
  {
    return in * in;
  }
};
/** Binary addition: a + b. */
struct add_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a + b;
  }
};
/** Binary subtraction: a - b. */
struct sub_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a - b;
  }
};
/** Binary multiplication: a * b. */
struct mul_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a * b;
  }
};
/** Binary division: a / b. */
struct div_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a / b;
  }
};
/** Division that yields zero when the divisor is zero, instead of dividing by zero. */
struct div_checkzero_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    // T1{0} / T2{1} keeps the same result type as the a / b branch below.
    if (b == T2{0}) { return T1{0} / T2{1}; }
    return a / b;
  }
};
/** Power, forwarding to raft::pow. Both operands must have the same type. */
struct pow_op {
  template <typename Type>
  RAFT_INLINE_FUNCTION auto operator()(const Type& a, const Type& b) const
  {
    return raft::pow(a, b);
  }
};
/** Modulo: a % b. */
struct mod_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a % b;
  }
};
/** Minimum of the arguments, forwarding to raft::min. */
struct min_op {
  template <typename... Args>
  RAFT_INLINE_FUNCTION auto operator()(Args&&... args) const
  {
    return raft::min(std::forward<Args>(args)...);
  }
};
/** Maximum of the arguments, forwarding to raft::max. */
struct max_op {
  template <typename... Args>
  RAFT_INLINE_FUNCTION auto operator()(Args&&... args) const
  {
    return raft::max(std::forward<Args>(args)...);
  }
};
/** Selects the key-value pair with the smaller value; ties are broken by the smaller key. */
struct argmin_op {
  template <typename KVP>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const KVP& a, const KVP& b) const
  {
    if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) { return b; }
    return a;
  }
};
/** Selects the key-value pair with the larger value; ties are broken by the smaller key. */
struct argmax_op {
  template <typename KVP>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const KVP& a, const KVP& b) const
  {
    if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) { return b; }
    return a;
  }
};
/** Binary comparison: a > b. */
struct greater_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a > b;
  }
};
/** Binary comparison: a < b. */
struct less_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a < b;
  }
};
/** Binary comparison: a >= b. */
struct greater_or_equal_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a >= b;
  }
};
/** Binary comparison: a <= b. */
struct less_or_equal_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a <= b;
  }
};
/** Binary comparison: a == b. */
struct equal_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a == b;
  }
};
/** Binary comparison: a != b. */
struct notequal_op {
  template <typename T1, typename T2>
  constexpr RAFT_INLINE_FUNCTION auto operator()(const T1& a, const T2& b) const
  {
    return a != b;
  }
};
/** Returns a fixed scalar regardless of the arguments it is called with. */
template <typename ScalarT>
struct const_op {
  const ScalarT scalar;
  constexpr explicit const_op(const ScalarT& s) : scalar{s} {}
  template <typename... Args>
  constexpr RAFT_INLINE_FUNCTION auto operator()(Args...) const
  {
    return scalar;
  }
};
/**
* @brief Wraps around a binary operator, passing a constant on the right-hand side.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/operators.hpp>
*
* raft::plug_const_op<float, raft::mul_op> op(2.0f);
* std::cout << op(2.1f) << std::endl; // 4.2
* @endcode
*
* @tparam ConstT
* @tparam BinaryOpT
*/
template <typename ConstT, typename BinaryOpT>
struct plug_const_op {
  const ConstT c;
  const BinaryOpT composed_op;
  // Available only when BinaryOpT is default-constructible (e.g. the stateless ops above).
  template <typename OpT = BinaryOpT,
            typename UnusedT = std::enable_if_t<std::is_default_constructible_v<OpT>>>
  constexpr explicit plug_const_op(const ConstT& s)
    : c{s}, composed_op{}  // The compiler complains if composed_op is not initialized explicitly
  {
  }
  constexpr plug_const_op(const ConstT& s, BinaryOpT o) : c{s}, composed_op{o} {}
  // Applies the wrapped operator with the stored constant on the right: composed_op(a, c).
  template <typename InT>
  constexpr RAFT_INLINE_FUNCTION auto operator()(InT a) const
  {
    return composed_op(a, c);
  }
};
// Convenience aliases binding each binary op to a right-hand-side constant.
// Note: the original code defined mod_const_op twice; unlike a typedef, an alias
// template may not be redefined, so the duplicate definition has been removed.
template <typename Type>
using add_const_op = plug_const_op<Type, add_op>;
template <typename Type>
using sub_const_op = plug_const_op<Type, sub_op>;
template <typename Type>
using mul_const_op = plug_const_op<Type, mul_op>;
template <typename Type>
using div_const_op = plug_const_op<Type, div_op>;
template <typename Type>
using div_checkzero_const_op = plug_const_op<Type, div_checkzero_op>;
template <typename Type>
using pow_const_op = plug_const_op<Type, pow_op>;
template <typename Type>
using mod_const_op = plug_const_op<Type, mod_op>;
template <typename Type>
using equal_const_op = plug_const_op<Type, equal_op>;
/**
* @brief Constructs an operator by composing a chain of operators.
*
* Note that all arguments are passed to the innermost operator.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/operators.hpp>
*
* auto op = raft::compose_op(raft::sqrt_op(), raft::abs_op(), raft::cast_op<float>(),
* raft::add_const_op<int>(8));
* std::cout << op(-50) << std::endl; // 6.48074
* @endcode
*
* @tparam OpsT Any number of operation types.
*/
template <typename... OpsT>
struct compose_op {
  const std::tuple<OpsT...> ops;
  // Available only when every composed op is default-constructible.
  template <typename TupleT = std::tuple<OpsT...>,
            typename CondT = std::enable_if_t<std::is_default_constructible_v<TupleT>>>
  constexpr compose_op()
  {
  }
  constexpr explicit compose_op(OpsT... ops) : ops{ops...} {}
  // Ops are applied from last to first: the last op in the list receives the
  // original arguments, and each preceding op receives the previous result.
  template <typename... Args>
  constexpr RAFT_INLINE_FUNCTION auto operator()(Args&&... args) const
  {
    return compose<sizeof...(OpsT)>(std::forward<Args>(args)...);
  }
 private:
  // Recursive application; the base case forwards through identity_op.
  template <size_t RemOps, typename... Args>
  constexpr RAFT_INLINE_FUNCTION auto compose(Args&&... args) const
  {
    if constexpr (RemOps > 0) {
      return compose<RemOps - 1>(std::get<RemOps - 1>(ops)(std::forward<Args>(args)...));
    } else {
      return identity_op{}(std::forward<Args>(args)...);
    }
  }
};
/** Absolute difference: abs(a - b). */
using absdiff_op = compose_op<abs_op, sub_op>;
/** Squared difference: (a - b)^2. */
using sqdiff_op  = compose_op<sq_op, sub_op>;
/**
* @brief Constructs an operator by composing an outer op with one inner op for each of its inputs.
*
* Usage example:
* @code{.cpp}
* #include <raft/core/operators.hpp>
*
* raft::map_args_op<raft::add_op, raft::sqrt_op, raft::cast_op<float>> op;
* std::cout << op(42.0f, 10) << std::endl; // 16.4807
* @endcode
*
* @tparam OuterOpT Outer operation type
* @tparam ArgOpsT Operation types for each input of the outer operation
*/
template <typename OuterOpT, typename... ArgOpsT>
struct map_args_op {
  const OuterOpT outer_op;
  const std::tuple<ArgOpsT...> arg_ops;
  // Available only when the outer op and all argument ops are default-constructible.
  template <typename T1 = OuterOpT,
            typename T2 = std::tuple<ArgOpsT...>,
            typename CondT = std::enable_if_t<std::is_default_constructible_v<T1> &&
                                              std::is_default_constructible_v<T2>>>
  constexpr map_args_op()
    : outer_op{}  // The compiler complains if outer_op is not initialized explicitly
  {
  }
  constexpr explicit map_args_op(OuterOpT outer_op, ArgOpsT... arg_ops)
    : outer_op{outer_op}, arg_ops{arg_ops...}
  {
  }
  // Applies the i-th argument op to the i-th input, then the outer op to all results.
  // The number of inputs must equal the number of argument ops (checked at compile time).
  template <typename... Args>
  constexpr RAFT_INLINE_FUNCTION auto operator()(Args&&... args) const
  {
    constexpr size_t kNumOps = sizeof...(ArgOpsT);
    static_assert(kNumOps == sizeof...(Args),
                  "The number of arguments does not match the number of mapping operators");
    return map_args(std::make_index_sequence<kNumOps>{}, std::forward<Args>(args)...);
  }
 private:
  template <size_t... I, typename... Args>
  constexpr RAFT_INLINE_FUNCTION auto map_args(std::index_sequence<I...>, Args&&... args) const
  {
    return outer_op(std::get<I>(arg_ops)(std::forward<Args>(args))...);
  }
};
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/logger-inl.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stdarg.h>

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "logger-macros.hpp"
// The logger-ext.hpp file contains the class declaration of the logger class.
// In this case, it is okay to include the logger-ext.hpp file because it
// contains no RAFT_EXPLICIT template instantiations.
#include "logger-ext.hpp"
#define SPDLOG_HEADER_ONLY
#include <raft/core/detail/callback_sink.hpp>
#include <raft/core/detail/macros.hpp> // RAFT_INLINE_CONDITIONAL
#include <spdlog/sinks/stdout_color_sinks.h> // NOLINT
#include <spdlog/spdlog.h> // NOLINT
namespace raft {
namespace detail {
/**
 * @brief printf-style formatting of a va_list into a std::string.
 *
 * Performs a two-pass vsnprintf: first to measure the formatted length, then to
 * render into an appropriately sized buffer.
 *
 * @param[in] fmt printf-style format string
 * @param[in] vl  variadic arguments matching fmt (consumed by the second pass)
 * @return the formatted string; empty on a formatting/encoding error
 */
inline std::string format(const char* fmt, va_list& vl)
{
  va_list vl_copy;
  va_copy(vl_copy, vl);
  int length = std::vsnprintf(nullptr, 0, fmt, vl_copy);
  // Each va_copy must be matched by va_end (C standard); the original leaked vl_copy.
  va_end(vl_copy);
  assert(length >= 0);
  // In release builds (assert disabled), a negative length previously led to
  // undefined behavior via an empty buffer; fail safe with an empty string instead.
  if (length < 0) { return std::string(); }
  std::vector<char> buf(length + 1);
  std::vsnprintf(buf.data(), length + 1, fmt, vl);
  return std::string(buf.data(), length);
}
/**
 * @brief printf-style formatting into a std::string.
 * @param[in] fmt printf-style format string followed by its arguments
 * @return the formatted string
 */
RAFT_INLINE_CONDITIONAL std::string format(const char* fmt, ...)
{
  va_list vl;
  va_start(vl, fmt);
  std::string str = format(fmt, vl);
  va_end(vl);
  return str;
}
// Translates a RAFT logging level into spdlog's level scale: the input is clamped
// to [RAFT_LEVEL_OFF, RAFT_LEVEL_TRACE] and then inverted, since the two libraries
// order their level enums in opposite directions.
inline int convert_level_to_spdlog(int level)
{
  level = std::max(RAFT_LEVEL_OFF, std::min(RAFT_LEVEL_TRACE, level));
  return RAFT_LEVEL_TRACE - level;
}
} // namespace detail
class logger::impl {  // defined privately here
  // ... all private data and functions: all of these
  // can now change without recompiling callers ...
 public:
  // Callback-based sink that all log records are routed through.
  std::shared_ptr<spdlog::sinks::callback_sink_mt> sink;
  // The wrapped spdlog logger writing to `sink`.
  std::shared_ptr<spdlog::logger> spdlogger;
  // Last pattern passed to set_pattern(), cached for get_pattern().
  std::string cur_pattern;
  // NOTE(review): cur_level is never initialized or read in this translation
  // unit -- confirm whether it is still needed.
  int cur_level;
  impl(std::string const& name_ = "")
    : sink{std::make_shared<spdlog::sinks::callback_sink_mt>()},
      spdlogger{std::make_shared<spdlog::logger>(name_, sink)},
      cur_pattern()
  {
  }
};  // class logger::impl
// Construct a named logger using the default pattern and compile-time default level.
RAFT_INLINE_CONDITIONAL logger::logger(std::string const& name_) : pimpl(new impl(name_))
{
  set_pattern(default_log_pattern);
  set_level(RAFT_ACTIVE_LEVEL);
}
// Return the logger registered under `name`, creating it on first use.
// NOTE(review): log_map access is unsynchronized -- confirm loggers are only
// created before any concurrent use.
RAFT_INLINE_CONDITIONAL logger& logger::get(std::string const& name)
{
  if (log_map.find(name) == log_map.end()) { log_map[name] = std::make_shared<raft::logger>(name); }
  return *log_map[name];
}
// Set the logging level (RAFT convention), translating to spdlog's inverted scale.
RAFT_INLINE_CONDITIONAL void logger::set_level(int level)
{
  level = raft::detail::convert_level_to_spdlog(level);
  pimpl->spdlogger->set_level(static_cast<spdlog::level::level_enum>(level));
}
// Set the message pattern (spdlog pattern syntax); cached so get_pattern() can return it.
RAFT_INLINE_CONDITIONAL void logger::set_pattern(const std::string& pattern)
{
  pimpl->cur_pattern = pattern;
  pimpl->spdlogger->set_pattern(pattern);
}
// Install a callback invoked by the sink for every emitted record.
RAFT_INLINE_CONDITIONAL void logger::set_callback(void (*callback)(int lvl, const char* msg))
{
  pimpl->sink->set_callback(callback);
}
// Install a flush callback on the sink.
RAFT_INLINE_CONDITIONAL void logger::set_flush(void (*flush)()) { pimpl->sink->set_flush(flush); }
// True if a message at `level` (RAFT convention) would currently be emitted.
RAFT_INLINE_CONDITIONAL bool logger::should_log_for(int level) const
{
  level        = raft::detail::convert_level_to_spdlog(level);
  auto level_e = static_cast<spdlog::level::level_enum>(level);
  return pimpl->spdlogger->should_log(level_e);
}
// Current level, converted back from spdlog's scale to the RAFT convention.
RAFT_INLINE_CONDITIONAL int logger::get_level() const
{
  auto level_e = pimpl->spdlogger->level();
  return RAFT_LEVEL_TRACE - static_cast<int>(level_e);
}
RAFT_INLINE_CONDITIONAL std::string logger::get_pattern() const { return pimpl->cur_pattern; }
// printf-style logging entry point; the message is formatted lazily, only when enabled.
RAFT_INLINE_CONDITIONAL void logger::log(int level, const char* fmt, ...)
{
  level        = raft::detail::convert_level_to_spdlog(level);
  auto level_e = static_cast<spdlog::level::level_enum>(level);
  // explicit check to make sure that we only expand messages when required
  if (pimpl->spdlogger->should_log(level_e)) {
    va_list vl;
    va_start(vl, fmt);
    auto msg = raft::detail::format(fmt, vl);
    va_end(vl);
    pimpl->spdlogger->log(level_e, msg);
  }
}
// Flush any buffered records in the underlying spdlog logger.
RAFT_INLINE_CONDITIONAL void logger::flush() { pimpl->spdlogger->flush(); }
RAFT_INLINE_CONDITIONAL logger::~logger() {}
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/span.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cinttypes> // size_t
#include <cstddef> // std::byte
#include <raft/core/mdspan_types.hpp>
#include <raft/core/detail/macros.hpp>
#include <raft/core/detail/span.hpp>
// TODO (cjnolet): Remove thrust dependencies here so host_span can be used without CUDA Toolkit
// being installed. Reference: https://github.com/rapidsai/raft/issues/812.
#include <thrust/distance.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h> // _RAFT_HOST_DEVICE
#include <thrust/iterator/reverse_iterator.h>
#include <type_traits>
namespace raft {
/**
* @defgroup span one-dimensional span type
* @{
*/
/**
* @brief The span class defined in ISO C++20. Iterator is defined as plain pointer and
* most of the methods have bound check on debug build.
*
* @code
* rmm::device_uvector<float> uvec(10, rmm::cuda_stream_default);
* auto view = device_span<float>{uvec.data(), uvec.size()};
* @endcode
*/
template <typename T, bool is_device, std::size_t Extent = dynamic_extent>
class span {
 public:
  using element_type           = T;
  using value_type             = typename std::remove_cv<T>::type;
  using size_type              = std::size_t;
  using difference_type        = std::ptrdiff_t;
  using pointer                = T*;
  using const_pointer          = T const*;
  using reference              = T&;
  using const_reference        = T const&;
  using iterator               = pointer;
  using const_iterator         = const_pointer;
  using reverse_iterator       = thrust::reverse_iterator<iterator>;
  using const_reverse_iterator = thrust::reverse_iterator<const_iterator>;
  /**
   * @brief Default constructor that constructs a span with size 0 and nullptr.
   */
  constexpr span() noexcept = default;
  /**
   * @brief Constructs a span that is a view over the range [first, first + count);
   */
  constexpr span(pointer ptr, size_type count) noexcept : storage_{ptr, count}
  {
    // Debug-only checks: a fixed-extent span requires exactly Extent elements,
    // and a non-empty span requires a non-null pointer.
    assert(!(Extent != dynamic_extent && count != Extent));
    assert(ptr || count == 0);
  }
  /**
   * @brief Constructs a span that is a view over the range [first, last)
   */
  constexpr span(pointer first, pointer last) noexcept
    : span{first, static_cast<size_type>(thrust::distance(first, last))}
  {
  }
  /**
   * @brief Constructs a span that is a view over the array arr.
   */
  template <std::size_t N>
  constexpr span(element_type (&arr)[N]) noexcept : span{&arr[0], N}
  {
  }
  /**
   * @brief Initialize a span class from another one who's underlying type is convertible
   * to element_type.
   */
  template <class U,
            std::size_t OtherExtent,
            class = typename std::enable_if<
              detail::is_allowed_element_type_conversion_t<U, T>::value &&
              detail::is_allowed_extent_conversion_t<OtherExtent, Extent>::value>>
  constexpr span(const span<U, is_device, OtherExtent>& other) noexcept
    : span{other.data(), other.size()}
  {
  }
  constexpr span(span const& other) noexcept = default;
  constexpr span(span&& other) noexcept = default;
  constexpr auto operator=(span const& other) noexcept -> span& = default;
  constexpr auto operator=(span&& other) noexcept -> span& = default;
  constexpr auto begin() const noexcept -> iterator { return data(); }
  constexpr auto end() const noexcept -> iterator { return data() + size(); }
  constexpr auto cbegin() const noexcept -> const_iterator { return data(); }
  constexpr auto cend() const noexcept -> const_iterator { return data() + size(); }
  _RAFT_HOST_DEVICE constexpr auto rbegin() const noexcept -> reverse_iterator
  {
    return reverse_iterator{end()};
  }
  _RAFT_HOST_DEVICE constexpr auto rend() const noexcept -> reverse_iterator
  {
    return reverse_iterator{begin()};
  }
  _RAFT_HOST_DEVICE constexpr auto crbegin() const noexcept -> const_reverse_iterator
  {
    return const_reverse_iterator{cend()};
  }
  _RAFT_HOST_DEVICE constexpr auto crend() const noexcept -> const_reverse_iterator
  {
    return const_reverse_iterator{cbegin()};
  }
  // element access
  // First element; precondition (unchecked here): the span is non-empty.
  constexpr auto front() const -> reference { return (*this)[0]; }
  // Last element; precondition (unchecked here): the span is non-empty.
  constexpr auto back() const -> reference { return (*this)[size() - 1]; }
  // Unchecked in release builds; bounds are asserted in debug builds only.
  template <typename Index>
  constexpr auto operator[](Index _idx) const -> reference
  {
    assert(static_cast<size_type>(_idx) < size());
    return data()[_idx];
  }
  constexpr auto data() const noexcept -> pointer { return storage_.data(); }
  // Observers
  [[nodiscard]] constexpr auto size() const noexcept -> size_type { return storage_.size(); }
  [[nodiscard]] constexpr auto size_bytes() const noexcept -> size_type
  {
    return size() * sizeof(T);
  }
  constexpr auto empty() const noexcept { return size() == 0; }
  // Subviews
  // View over the first Count elements (compile-time count).
  template <std::size_t Count>
  constexpr auto first() const -> span<element_type, is_device, Count>
  {
    assert(Count <= size());
    return {data(), Count};
  }
  // View over the first _count elements (runtime count).
  constexpr auto first(std::size_t _count) const -> span<element_type, is_device, dynamic_extent>
  {
    assert(_count <= size());
    return {data(), _count};
  }
  // View over the last Count elements (compile-time count).
  template <std::size_t Count>
  constexpr auto last() const -> span<element_type, is_device, Count>
  {
    assert(Count <= size());
    return {data() + size() - Count, Count};
  }
  // View over the last _count elements (runtime count).
  constexpr auto last(std::size_t _count) const -> span<element_type, is_device, dynamic_extent>
  {
    assert(_count <= size());
    return subspan(size() - _count, _count);
  }
  /*!
   * If Count is std::dynamic_extent, r.size() == this->size() - Offset;
   * Otherwise r.size() == Count.
   */
  template <std::size_t Offset, std::size_t Count = dynamic_extent>
  constexpr auto subspan() const
    -> span<element_type, is_device, detail::extent_value_t<Extent, Offset, Count>::value>
  {
    assert((Count == dynamic_extent) ? (Offset <= size()) : (Offset + Count <= size()));
    return {data() + Offset, Count == dynamic_extent ? size() - Offset : Count};
  }
  // Runtime-offset subview; _count == dynamic_extent means "to the end".
  constexpr auto subspan(size_type _offset, size_type _count = dynamic_extent) const
    -> span<element_type, is_device, dynamic_extent>
  {
    assert((_count == dynamic_extent) ? (_offset <= size()) : (_offset + _count <= size()));
    return {data() + _offset, _count == dynamic_extent ? size() - _offset : _count};
  }
 private:
  detail::span_storage<T, Extent> storage_;
};
// Element-wise equality: true iff both spans have the same size and equal elements in order.
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator==(span<T, is_device, X> l, span<U, is_device, Y> r) -> bool
{
  if (l.size() != r.size()) { return false; }
  for (auto l_beg = l.cbegin(), r_beg = r.cbegin(); l_beg != l.cend(); ++l_beg, ++r_beg) {
    if (*l_beg != *r_beg) { return false; }
  }
  return true;
}
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator!=(span<T, is_device, X> l, span<U, is_device, Y> r)
{
  return !(l == r);
}
// Lexicographical ordering over the elements, using thrust::less.
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator<(span<T, is_device, X> l, span<U, is_device, Y> r)
{
  return detail::lexicographical_compare<
    typename span<T, is_device, X>::iterator,
    typename span<U, is_device, Y>::iterator,
    thrust::less<typename span<T, is_device, X>::element_type>>(
    l.begin(), l.end(), r.begin(), r.end());
}
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator<=(span<T, is_device, X> l, span<U, is_device, Y> r)
{
  return !(l > r);
}
// Lexicographical ordering over the elements, using thrust::greater.
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator>(span<T, is_device, X> l, span<U, is_device, Y> r)
{
  return detail::lexicographical_compare<
    typename span<T, is_device, X>::iterator,
    typename span<U, is_device, Y>::iterator,
    thrust::greater<typename span<T, is_device, X>::element_type>>(
    l.begin(), l.end(), r.begin(), r.end());
}
template <class T, std::size_t X, class U, std::size_t Y, bool is_device>
constexpr auto operator>=(span<T, is_device, X> l, span<U, is_device, Y> r)
{
  return !(l < r);
}
/**
 * @brief Converts a span into a view of its underlying bytes
 * @return a read-only byte span covering size_bytes() bytes of the input
 */
template <class T, bool is_device, std::size_t E>
auto as_bytes(span<T, is_device, E> s) noexcept
  -> span<const std::byte, is_device, detail::extent_as_bytes_value_t<T, E>::value>
{
  return {reinterpret_cast<const std::byte*>(s.data()), s.size_bytes()};
}
/**
 * @brief Converts a span into a mutable view of its underlying bytes
 * @return a writable byte span covering size_bytes() bytes of the input
 */
template <class T, bool is_device, std::size_t E>
auto as_writable_bytes(span<T, is_device, E> s) noexcept
  -> span<std::byte, is_device, detail::extent_as_bytes_value_t<T, E>::value>
{
  return {reinterpret_cast<std::byte*>(s.data()), s.size_bytes()};
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/cublas_macros.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RAFT_RT_CUBLAS_MACROS_H
#define __RAFT_RT_CUBLAS_MACROS_H
#pragma once
#include <cublas_v2.h>
#include <raft/core/error.hpp>
///@todo: enable this once we have logger enabled
// #include <cuml/common/logger.hpp>
#include <cstdint>
#define _CUBLAS_ERR_TO_STR(err) \
case err: return #err
namespace raft {
/**
* @ingroup error_handling
* @{
*/
/**
 * @brief Exception type raised whenever a cuBLAS API call fails.
 *
 * Thrown by RAFT_CUBLAS_TRY; carries the formatted error message produced at
 * the failing call site. Derives from raft::exception so callers can catch
 * either the specific or the generic RAFT error type.
 */
struct cublas_error : public raft::exception {
  explicit cublas_error(std::string const& message) : raft::exception(message) {}
  explicit cublas_error(char const* const message) : raft::exception(message) {}
};
/**
* @}
*/
namespace linalg {
namespace detail {
/**
 * @brief Maps a cuBLAS status code to the symbolic name of the enumerator.
 *
 * @param[in] err status code returned by a cuBLAS API call
 * @return string literal naming the status, or "CUBLAS_STATUS_UNKNOWN" for
 *         values not covered by the known enumerators
 */
inline const char* cublas_error_to_string(cublasStatus_t err)
{
  // Explicit case labels; each returns the enumerator's own spelling, exactly
  // as the _CUBLAS_ERR_TO_STR stringification would produce.
  switch (err) {
    case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS";
    case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED";
    case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED";
    case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE";
    case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH";
    case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR";
    case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
    case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR";
    case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED";
    case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR";
    default: return "CUBLAS_STATUS_UNKNOWN";
  }
}
} // namespace detail
} // namespace linalg
} // namespace raft
#undef _CUBLAS_ERR_TO_STR
/**
* @ingroup assertion
* @{
*/
/**
* @brief Error checking macro for cuBLAS runtime API functions.
*
* Invokes a cuBLAS runtime API function call, if the call does not return
* CUBLAS_STATUS_SUCCESS, throws an exception detailing the cuBLAS error that occurred
*/
#define RAFT_CUBLAS_TRY(call) \
do { \
cublasStatus_t const status = (call); \
if (CUBLAS_STATUS_SUCCESS != status) { \
std::string msg{}; \
SET_ERROR_MSG(msg, \
"cuBLAS error encountered at: ", \
"call='%s', Reason=%d:%s", \
#call, \
status, \
raft::linalg::detail::cublas_error_to_string(status)); \
throw raft::cublas_error(msg); \
} \
} while (0)
// FIXME: Remove after consumers rename
#ifndef CUBLAS_TRY
#define CUBLAS_TRY(call) RAFT_CUBLAS_TRY(call)
#endif
/**
 * @brief Error checking macro for cuBLAS API functions that logs instead of
 * throwing.
 *
 * On failure the error is printed to stdout (call site, file, line, and the
 * symbolic status name) and execution continues. Intended for contexts where
 * throwing is not an option, e.g. destructors.
 */
#define RAFT_CUBLAS_TRY_NO_THROW(call)                               \
  do {                                                               \
    cublasStatus_t const status = call;                              \
    if (CUBLAS_STATUS_SUCCESS != status) {                           \
      printf("CUBLAS call='%s' at file=%s line=%d failed with %s\n", \
             #call,                                                  \
             __FILE__,                                               \
             __LINE__,                                               \
             raft::linalg::detail::cublas_error_to_string(status));  \
    }                                                                \
  } while (0)
/**
* @}
*/
/** FIXME: remove after cuml rename */
#ifndef CUBLAS_CHECK
#define CUBLAS_CHECK(call) CUBLAS_TRY(call)
#endif
/** FIXME: remove after cuml rename */
#ifndef CUBLAS_CHECK_NO_THROW
#define CUBLAS_CHECK_NO_THROW(call) RAFT_CUBLAS_TRY_NO_THROW(call)
#endif
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/logger-ext.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory> // std::unique_ptr
#include <raft/core/detail/macros.hpp> // RAFT_INLINE_CONDITIONAL
#include <string> // std::string
#include <unordered_map> // std::unordered_map
namespace raft {
static const std::string RAFT_NAME = "raft";
static const std::string default_log_pattern("[%L] [%H:%M:%S.%f] %v");
namespace detail {
RAFT_INLINE_CONDITIONAL std::string format(const char* fmt, ...);
}
/**
* @brief The main Logging class for raft library.
*
* This class acts as a thin wrapper over the underlying `spdlog` interface. The
* design is done in this way in order to avoid us having to also ship `spdlog`
* header files in our installation.
*
* @todo This currently only supports logging to stdout. Need to add support in
* future to add custom loggers as well [Issue #2046]
*/
class logger {
 public:
  /**
   * @brief Construct (or configure) the named logger.
   *
   * @param[in] name_ name identifying this logger; the empty string selects
   *                  the default logger
   *
   * @todo setting the logger once per process — NOTE(review): the original
   *       comment was truncated here; confirm intended per-process semantics.
   */
  logger(std::string const& name_ = "");
  /**
   * @brief Singleton method to get the underlying logger object
   *
   * @param[in] name name of the logger instance to fetch; instances are
   *                 cached per name in `log_map` below
   * @return the singleton logger object
   */
  static logger& get(std::string const& name = "");

  /**
   * @brief Set the logging level.
   *
   * Only messages with level equal or above this will be printed
   *
   * @param[in] level logging level
   *
   * @note The log level will actually be set only if the input is within the
   *       range [RAFT_LEVEL_TRACE, RAFT_LEVEL_OFF]. If it is not, then it'll
   *       be ignored. See documentation of decisiontree for how this gets used
   */
  void set_level(int level);

  /**
   * @brief Set the logging pattern
   *
   * @param[in] pattern the pattern to be set. Refer this link
   *                    https://github.com/gabime/spdlog/wiki/3.-Custom-formatting
   *                    to know the right syntax of this pattern
   */
  void set_pattern(const std::string& pattern);

  /**
   * @brief Register a callback function to be run in place of usual log call
   *
   * @param[in] callback the function to be run on all logged messages
   */
  void set_callback(void (*callback)(int lvl, const char* msg));

  /**
   * @brief Register a flush function compatible with the registered callback
   *
   * @param[in] flush the function to use when flushing logs
   */
  void set_flush(void (*flush)());

  /**
   * @brief Tells whether messages will be logged for the given log level
   *
   * @param[in] level log level to be checked for
   * @return true if messages will be logged for this level, else false
   */
  bool should_log_for(int level) const;

  /**
   * @brief Query for the current log level
   *
   * @return the current log level
   */
  int get_level() const;

  /**
   * @brief Get the current logging pattern
   * @return the pattern
   */
  std::string get_pattern() const;

  /**
   * @brief Main logging method
   *
   * @param[in] level logging level of this message
   * @param[in] fmt C-like format string, followed by respective params
   */
  void log(int level, const char* fmt, ...);

  /**
   * @brief Flush logs by calling flush on underlying logger
   */
  void flush();

  ~logger();

 private:
  // Default constructor is private; instances are obtained via get().
  logger();
  // pimpl pattern (hides the spdlog-backed implementation from this header):
  // https://learn.microsoft.com/en-us/cpp/cpp/pimpl-for-compile-time-encapsulation-modern-cpp?view=msvc-170
  class impl;
  std::unique_ptr<impl> pimpl;
  // Cache of named logger instances returned by get(), keyed by name.
  static inline std::unordered_map<std::string, std::shared_ptr<raft::logger>> log_map;
};  // class logger
/**
* @brief An object used for scoped log level setting
*
* Instances of `raft::log_level_setter` will set RAFT logging to the level
* indicated on construction and will revert to the previous set level on
* destruction.
*/
struct log_level_setter {
  /**
   * @brief Save the current RAFT log level and switch to @p level.
   *
   * @param[in] level log level to apply for the lifetime of this object
   */
  explicit log_level_setter(int level)
  {
    auto& raft_logger = logger::get(RAFT_NAME);
    prev_level_       = raft_logger.get_level();
    raft_logger.set_level(level);
  }

  /** @brief Restore the log level that was active at construction time. */
  ~log_level_setter() { logger::get(RAFT_NAME).set_level(prev_level_); }

 private:
  // Log level in effect before this setter was constructed.
  int prev_level_;
};  // class log_level_setter
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/handle.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_resources.hpp>
namespace raft {
/**
* raft::handle_t is being kept around for backwards
* compatibility and will be removed in a future version.
*
* Extending the `raft::handle_t` instead of `using` to
* minimize needed changes downstream
* (e.g. existing forward declarations, etc...)
*
* Use of `raft::resources` or `raft::handle_t` is preferred.
*/
class handle_t : public raft::device_resources {
public:
handle_t(const handle_t& handle,
std::shared_ptr<rmm::mr::device_memory_resource> workspace_resource)
: device_resources(handle, workspace_resource)
{
}
handle_t(const handle_t& handle) : device_resources{handle} {}
handle_t(handle_t&&) = delete;
handle_t& operator=(handle_t&&) = delete;
/**
* @brief Construct a resources instance with a stream view and stream pool
*
* @param[in] stream_view the default stream (which has the default per-thread stream if
* unspecified)
* @param[in] stream_pool the stream pool used (which has default of nullptr if unspecified)
* @param[in] workspace_resource an optional resource used by some functions for allocating
* temporary workspaces.
*/
handle_t(rmm::cuda_stream_view stream_view = rmm::cuda_stream_per_thread,
std::shared_ptr<rmm::cuda_stream_pool> stream_pool = {nullptr},
std::shared_ptr<rmm::mr::device_memory_resource> workspace_resource = {nullptr})
: device_resources{stream_view, stream_pool, workspace_resource}
{
}
/** Destroys all held-up resources */
~handle_t() override {}
};
} // end NAMESPACE raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_resources.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RAFT_DEVICE_RESOURCES
#define __RAFT_DEVICE_RESOURCES
#pragma once
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cusolverSp.h>
#include <cusparse.h>
#include <raft/core/comms.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/core/resource/comms.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_event.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <raft/core/resource/cusolver_dn_handle.hpp>
#include <raft/core/resource/cusolver_sp_handle.hpp>
#include <raft/core/resource/cusparse_handle.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resource/sub_comms.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
namespace raft {
/**
* @brief Main resource container object that stores all necessary resources
* used for calling necessary device functions, cuda kernels and/or libraries
*/
class device_resources : public resources {
 public:
  /**
   * @brief Copy-construct from an existing instance, overriding its workspace resource.
   *
   * @param[in] handle existing instance whose underlying resources are shared
   * @param[in] workspace_resource resource used by some functions for allocating
   *            temporary workspaces
   * @param[in] allocation_limit total amount of memory in bytes available to the
   *            temporary workspace resource
   */
  device_resources(const device_resources& handle,
                   std::shared_ptr<rmm::mr::device_memory_resource> workspace_resource,
                   std::optional<std::size_t> allocation_limit = std::nullopt)
    : resources{handle}
  {
    // replace the resource factory for the workspace_resources
    resource::set_workspace_resource(*this, workspace_resource, allocation_limit);
  }

  /** @brief Copy constructor: the new instance shares the underlying resources. */
  device_resources(const device_resources& handle) : resources{handle} {}

  // Move construction/assignment are intentionally disabled.
  device_resources(device_resources&&)            = delete;
  device_resources& operator=(device_resources&&) = delete;

  /**
   * @brief Construct a resources instance with a stream view and stream pool
   *
   * @param[in] stream_view the default stream (which has the default per-thread stream if
   * unspecified)
   * @param[in] stream_pool the stream pool used (which has default of nullptr if unspecified)
   * @param[in] workspace_resource an optional resource used by some functions for allocating
   * temporary workspaces.
   * @param[in] allocation_limit the total amount of memory in bytes available to the temporary
   * workspace resources.
   */
  device_resources(rmm::cuda_stream_view stream_view = rmm::cuda_stream_per_thread,
                   std::shared_ptr<rmm::cuda_stream_pool> stream_pool = {nullptr},
                   std::shared_ptr<rmm::mr::device_memory_resource> workspace_resource = {nullptr},
                   std::optional<std::size_t> allocation_limit = std::nullopt)
    : resources{}
  {
    resources::add_resource_factory(std::make_shared<resource::device_id_resource_factory>());
    resources::add_resource_factory(
      std::make_shared<resource::cuda_stream_resource_factory>(stream_view));
    resources::add_resource_factory(
      std::make_shared<resource::cuda_stream_pool_resource_factory>(stream_pool));
    // Only install a workspace resource when one was explicitly provided;
    // otherwise the resource subsystem's default applies.
    if (workspace_resource) {
      resource::set_workspace_resource(*this, workspace_resource, allocation_limit);
    }
  }

  /** Destroys all held-up resources */
  virtual ~device_resources() {}

  /** @brief returns the id of the device associated with this instance */
  int get_device() const { return resource::get_device_id(*this); }

  /** @brief returns the cuBLAS handle held by this instance */
  cublasHandle_t get_cublas_handle() const { return resource::get_cublas_handle(*this); }

  /** @brief returns the cuSOLVER dense handle held by this instance */
  cusolverDnHandle_t get_cusolver_dn_handle() const
  {
    return resource::get_cusolver_dn_handle(*this);
  }

  /** @brief returns the cuSOLVER sparse handle held by this instance */
  cusolverSpHandle_t get_cusolver_sp_handle() const
  {
    return resource::get_cusolver_sp_handle(*this);
  }

  /** @brief returns the cuSPARSE handle held by this instance */
  cusparseHandle_t get_cusparse_handle() const { return resource::get_cusparse_handle(*this); }

  /** @brief returns the Thrust execution policy held by this instance */
  rmm::exec_policy& get_thrust_policy() const { return resource::get_thrust_policy(*this); }

  /**
   * @brief synchronize a stream on the current container
   */
  void sync_stream(rmm::cuda_stream_view stream) const { resource::sync_stream(*this, stream); }

  /**
   * @brief synchronize main stream on the current container
   */
  void sync_stream() const { resource::sync_stream(*this); }

  /**
   * @brief returns main stream on the current container
   */
  rmm::cuda_stream_view get_stream() const { return resource::get_cuda_stream(*this); }

  /**
   * @brief returns whether stream pool was initialized on the current container
   */
  bool is_stream_pool_initialized() const { return resource::is_stream_pool_initialized(*this); }

  /**
   * @brief returns stream pool on the current container
   */
  const rmm::cuda_stream_pool& get_stream_pool() const
  {
    return resource::get_cuda_stream_pool(*this);
  }

  /** @brief returns the number of streams in the stream pool */
  std::size_t get_stream_pool_size() const { return resource::get_stream_pool_size(*this); }

  /**
   * @brief return stream from pool
   */
  rmm::cuda_stream_view get_stream_from_stream_pool() const
  {
    return resource::get_stream_from_stream_pool(*this);
  }

  /**
   * @brief return stream from pool at index
   */
  rmm::cuda_stream_view get_stream_from_stream_pool(std::size_t stream_idx) const
  {
    return resource::get_stream_from_stream_pool(*this, stream_idx);
  }

  /**
   * @brief return stream from pool if size > 0, else main stream on current container
   */
  rmm::cuda_stream_view get_next_usable_stream() const
  {
    return resource::get_next_usable_stream(*this);
  }

  /**
   * @brief return stream from pool at index if size > 0, else main stream on current container
   *
   * @param[in] stream_idx the required index of the stream in the stream pool if available
   */
  rmm::cuda_stream_view get_next_usable_stream(std::size_t stream_idx) const
  {
    return resource::get_next_usable_stream(*this, stream_idx);
  }

  /**
   * @brief synchronize the stream pool on the current container
   */
  void sync_stream_pool() const { return resource::sync_stream_pool(*this); }

  /**
   * @brief synchronize subset of stream pool
   *
   * @param[in] stream_indices the indices of the streams in the stream pool to synchronize
   */
  // Fixed: previously took the vector by (const) value, copying it on every
  // call; a const reference is sufficient and backward-compatible.
  void sync_stream_pool(const std::vector<std::size_t>& stream_indices) const
  {
    return resource::sync_stream_pool(*this, stream_indices);
  }

  /**
   * @brief ask stream pool to wait on last event in main stream
   */
  void wait_stream_pool_on_stream() const { return resource::wait_stream_pool_on_stream(*this); }

  /** @brief registers the communicator returned by subsequent get_comms() calls */
  void set_comms(std::shared_ptr<comms::comms_t> communicator)
  {
    resource::set_comms(*this, communicator);
  }

  /** @brief returns the communicator previously registered via set_comms() */
  const comms::comms_t& get_comms() const { return resource::get_comms(*this); }

  /** @brief registers a sub-communicator under the given key */
  void set_subcomm(std::string key, std::shared_ptr<comms::comms_t> subcomm)
  {
    resource::set_subcomm(*this, key, subcomm);
  }

  /** @brief returns the sub-communicator previously registered under @p key */
  const comms::comms_t& get_subcomm(std::string key) const
  {
    return resource::get_subcomm(*this, key);
  }

  /** @brief returns the resource used for temporary workspace allocations */
  rmm::mr::device_memory_resource* get_workspace_resource() const
  {
    return resource::get_workspace_resource(*this);
  }

  /** @brief returns whether a communicator has been registered on this instance */
  bool comms_initialized() const { return resource::comms_initialized(*this); }

  /** @brief returns the CUDA device properties for this instance's device */
  const cudaDeviceProp& get_device_properties() const
  {
    return resource::get_device_properties(*this);
  }
};  // class device_resources
/**
* @brief RAII approach to synchronizing across all streams in the current container
*/
class stream_syncer {
public:
explicit stream_syncer(const device_resources& handle) : handle_(handle)
{
resource::sync_stream(handle_);
}
~stream_syncer()
{
handle_.wait_stream_pool_on_stream();
handle_.sync_stream_pool();
}
stream_syncer(const stream_syncer& other) = delete;
stream_syncer& operator=(const stream_syncer& other) = delete;
private:
const device_resources& handle_;
}; // class stream_syncer
} // namespace raft
#endif
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.