repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/sampling.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing import SimpleImputer
from cuml.internals.input_utils import (
determine_array_type,
get_supported_input_type,
)
from cuml import KMeans
import cuml
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
issparse = cpu_only_import_from("scipy.sparse", "issparse")
@cuml.internals.api_return_generic()
def kmeans_sampling(X, k, round_values=True, detailed=False, random_state=0):
    """
    Adapted from :
    https://github.com/slundberg/shap/blob/9411b68e8057a6c6f3621765b89b24d82bee13d4/shap/utils/_legacy.py

    Summarize a dataset (X) using weighted k-means.

    Parameters
    ----------
    X : cuDF or Pandas DataFrame/Series, numpy arrays or cuda_array_interface
        compliant device array.
        Data to be summarized, shape (n_samples, n_features)
    k : int
        Number of means to use for approximation.
    round_values : bool; default=True
        For all i, round the ith dimension of each mean sample to match the
        nearest value from X[:,i]. This ensures discrete features always get
        a valid value.
    detailed: bool; default=False
        To return details of group names and cluster labels of all data points
    random_state: int; default=0
        Sets the random state.

    Returns
    -------
    summary : Summary of the data, shape (k, n_features)
    group_names : Names of the features
    labels : Cluster labels of the data points in the original dataset,
             shape (n_samples, 1)
    """
    output_dtype = get_supported_input_type(X)
    _output_dtype_str = determine_array_type(X)
    cuml.internals.set_api_output_type(_output_dtype_str)
    if output_dtype is None:
        raise TypeError(
            f"Type of input {type(X)} is not supported. Supported \
            dtypes: cuDF DataFrame, cuDF Series, cupy, numba,\
            numpy, pandas DataFrame, pandas Series"
        )

    # BUGFIX: these branches must be mutually exclusive. The Series check was
    # previously a bare `if`, so DataFrame input also executed the trailing
    # `else` and had its real column names overwritten with numeric strings.
    if "DataFrame" in str(output_dtype):
        group_names = X.columns
        X = cp.array(X.values, copy=False)
    elif "Series" in str(output_dtype):
        group_names = X.name
        X = cp.array(X.values.reshape(-1, 1), copy=False)
    else:
        # it's either numpy, cupy or numba
        X = cp.array(X, copy=False)
        try:
            # more than one column
            group_names = [str(i) for i in range(X.shape[1])]
        except IndexError:
            # one column
            X = X.reshape(-1, 1)
            group_names = ["0"]

    # in case there are any missing values in data impute them
    imp = SimpleImputer(
        missing_values=cp.nan, strategy="mean", output_type=_output_dtype_str
    )
    X = imp.fit_transform(X)

    kmeans = KMeans(
        n_clusters=k, random_state=random_state, output_type=_output_dtype_str
    ).fit(X)

    if round_values:
        # Snap each cluster-center coordinate to the nearest observed value in
        # that column so discrete features keep valid (observed) values.
        for i in range(k):
            for j in range(X.shape[1]):
                xj = (
                    X[:, j].toarray().flatten() if issparse(X) else X[:, j]
                )  # sparse support courtesy of @PrimozGodec
                ind = cp.argmin(cp.abs(xj - kmeans.cluster_centers_[i, j]))
                kmeans.cluster_centers_[i, j] = X[ind, j]

    summary = kmeans.cluster_centers_
    labels = kmeans.labels_
    if detailed:
        return summary, group_names, labels
    else:
        return summary
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Sources are accumulated into ${cython_sources} by add_module_gpu_default,
# which only registers a .pyx when at least one of the given algorithm
# toggles is enabled for this build configuration.
set(cython_sources "")
add_module_gpu_default("base.pyx" ${kernel_shap_algo} ${permutation_shap_algo} ${explainer_algo})
add_module_gpu_default("kernel_shap.pyx" ${kernel_shap_algo} ${explainer_algo})
add_module_gpu_default("permutation_shap.pyx" ${permutation_shap_algo} ${explainer_algo})
add_module_gpu_default("tree_shap.pyx" ${tree_shap_algo} ${explainer_algo})

# All explainer extension modules link the single-GPU cuML libraries plus the
# Treelite target (needed by tree_shap).
set(linked_libraries
    "${cuml_sg_libraries}"
    "${CUML_PYTHON_TREELITE_TARGET}"
)

rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}"
  MODULE_PREFIX explainer_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/tree_shap.pyx | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import input_to_cuml_array
from cuml.internals.array import CumlArray
from cuml.internals.import_utils import has_sklearn
from cuml.internals.input_utils import determine_array_type
from cuml.fil.fil import TreeliteModel
from cuml.ensemble import RandomForestRegressor as curfr
from cuml.ensemble import RandomForestClassifier as curfc
from libc.stdint cimport uintptr_t
import re
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
import treelite
# scikit-learn is optional: when it is unavailable, bind the RF aliases to
# `object` so the isinstance() checks in TreeExplainer.__init__ simply never
# match (no ImportError at module load time).
if has_sklearn():
    from sklearn.ensemble import RandomForestRegressor as sklrfr
    from sklearn.ensemble import RandomForestClassifier as sklrfc
else:
    sklrfr = object
    sklrfc = object
# --- C/C++ interfaces -------------------------------------------------------
cdef extern from "treelite/c_api.h":
    # Opaque handle to a loaded Treelite model.
    ctypedef void * ModelHandle
    # Returns 0 on success; number of output groups written to *out.
    cdef int TreeliteQueryNumClass(ModelHandle handle, size_t * out)

cdef extern from "treelite/c_api_common.h":
    # Human-readable description of the last Treelite error.
    cdef const char * TreeliteGetLastError()

cdef extern from "cuml/explainer/tree_shap.hpp" namespace "ML::Explainer":
    # Opaque, pre-extracted root-to-leaf path information for GPUTreeShap.
    cdef cppclass TreePathHandle:
        pass
    # Type-erased device pointer (float* or double*); see
    # type_erase_float_ptr below.
    cdef cppclass FloatPointer:
        pass
    cdef TreePathHandle extract_path_info(ModelHandle model) except +
    # Tree-path-dependent SHAP values.
    cdef void gpu_treeshap(TreePathHandle path_info,
                           const FloatPointer data,
                           size_t n_rows,
                           size_t n_cols,
                           FloatPointer out_preds,
                           size_t out_preds_size) except +
    # Interventional SHAP values (marginalises over a background dataset).
    cdef void gpu_treeshap_interventional(TreePathHandle path_info,
                                          const FloatPointer data,
                                          size_t n_rows,
                                          size_t n_cols,
                                          const FloatPointer background_data,
                                          size_t background_n_rows,
                                          size_t background_n_cols,
                                          FloatPointer out_preds,
                                          size_t out_preds_size) except +
    # SHAP interaction values (Lundberg et al. variant).
    cdef void gpu_treeshap_interactions(TreePathHandle path_info,
                                        const FloatPointer data,
                                        size_t n_rows,
                                        size_t n_cols,
                                        FloatPointer out_preds,
                                        size_t out_preds_size) except +
    # Shapley-Taylor interaction index variant.
    cdef void gpu_treeshap_taylor_interactions(TreePathHandle path_info,
                                               const FloatPointer data,
                                               size_t n_rows,
                                               size_t n_cols,
                                               FloatPointer out_preds,
                                               size_t out_preds_size) except +
cdef FloatPointer type_erase_float_ptr(array):
    # Wrap the device pointer of a CumlArray (float32 or float64 only) into
    # the type-erased FloatPointer variant consumed by the C++ TreeSHAP API.
    cdef FloatPointer ptr
    if array.dtype == np.float32:
        ptr = <FloatPointer > <float*> < uintptr_t > array.ptr
    elif array.dtype == np.float64:
        ptr = <FloatPointer > <double*> < uintptr_t > array.ptr
    else:
        raise ValueError("Unsupported dtype")
    return ptr
cdef class TreeExplainer:
    """
    Model explainer that calculates Shapley values for the predictions of
    tree-based models. Shapley values are a method of attributing various input
    features to a given model prediction.
    Uses GPUTreeShap [1]_ as a back-end to accelerate computation using GPUs.

    Different variants of Shapley values exist based on different
    interpretations of marginalising out (or conditioning on) features. For the
    "tree_path_dependent" approach, see [2]_.
    For the "interventional" approach, see [3]_.
    We also provide two variants of feature interactions. For the
    "shapley-interactions" variant of interactions, see [2]_, for
    the "shapley-taylor" variant, see [4]_.

    .. [1] Mitchell, Rory, Eibe Frank, and Geoffrey Holmes. "GPUTreeShap:
        massively parallel exact calculation of SHAP scores for tree
        ensembles." PeerJ Computer Science 8 (2022): e880.
    .. [2] Lundberg, Scott M., et al. "From local explanations to global
        understanding with explainable AI for trees." Nature machine
        intelligence 2.1 (2020): 56-67.
    .. [3] Janzing, Dominik, Lenon Minorics, and Patrick Blöbaum. "Feature
        relevance quantification in explainable AI: A causal problem."
        International Conference on artificial intelligence and statistics.
        PMLR, 2020.
    .. [4] Sundararajan, Mukund, Kedar Dhamdhere, and Ashish Agarwal.
        "The Shapley Taylor Interaction Index." International Conference
        on Machine Learning. PMLR, 2020.

    Parameters
    ----------
    model : model object
        The tree based machine learning model. XGBoost, LightGBM, cuml random
        forest and sklearn random forest models are supported. Categorical
        features in XGBoost or LightGBM models are natively supported.
    data : array or DataFrame
        Optional background dataset to use for marginalising out features.
        If this argument is supplied, an "interventional" approach is used.
        Computation time increases with the size of this background data set,
        consider starting with between 100-1000 examples. If this argument is
        not supplied, statistics from the tree model are used to marginalise
        out features ("tree_path_dependent").

    Attributes
    ----------
    expected_value :
        Model prediction when all input features are marginalised out. Is a
        vector for multiclass problems.

    Examples
    --------
    .. code-block:: python

        >>> import numpy as np
        >>> import cuml
        >>> from cuml.explainer import TreeExplainer
        >>> X = np.array([[0.0, 2.0], [1.0, 0.5]])
        >>> y = np.array([0, 1])
        >>> model = cuml.ensemble.RandomForestRegressor().fit(X, y)
        >>> explainer = TreeExplainer(model=model)
        >>> shap_values = explainer.shap_values(X)
    """
    # Bias term(s); populated by shap_values / shap_interaction_values.
    cdef public object expected_value
    # Pre-extracted root-to-leaf path info, reused by every query.
    cdef TreePathHandle path_info
    # Number of output groups reported by Treelite (1 for regression/binary).
    cdef size_t num_class
    # Optional background dataset (CumlArray) for the interventional variant.
    cdef object data

    def __init__(self, *, model, data=None):
        if data is not None:
            self.data, _, _, _ = self._prepare_input(data)
        else:
            self.data = None
        # Handle various kinds of tree model objects
        cls = model.__class__
        cls_module, cls_name = cls.__module__, cls.__name__
        # XGBoost model object
        if re.match(
                r'xgboost.*$', cls_module):
            if cls_name != 'Booster':
                model = model.get_booster()
            model = treelite.Model.from_xgboost(model)
            handle = model.handle.value
        # LightGBM model object
        # FIX: this used to be a bare `if`, which restarted the dispatch
        # chain; an XGBoost model (already converted to treelite.Model above)
        # then fell through to the `isinstance(model, treelite.Model)` branch
        # below and re-derived the handle. `elif` keeps a single chain.
        elif re.match(
                r'lightgbm.*$', cls_module):
            if cls_name != 'Booster':
                model = model.booster_
            model = treelite.Model.from_lightgbm(model)
            handle = model.handle.value
        # cuML RF model object
        elif isinstance(model, (curfr, curfc)):
            model = model.convert_to_treelite_model()
            handle = model.handle
        # scikit-learn RF model object
        elif isinstance(model, (sklrfr, sklrfc)):
            model = treelite.sklearn.import_model(model)
            handle = model.handle.value
        elif isinstance(model, treelite.Model):
            handle = model.handle.value
        elif isinstance(model, TreeliteModel):
            handle = model.handle
        else:
            raise ValueError('Unrecognized model object type')
        cdef ModelHandle model_ptr = <ModelHandle > <uintptr_t > handle
        self.num_class = 0
        if TreeliteQueryNumClass(model_ptr, & self.num_class) != 0:
            raise RuntimeError('Treelite error: {}'.format(
                TreeliteGetLastError()))
        # Extract all decision paths once; shared by all subsequent queries.
        self.path_info = extract_path_info(model_ptr)

    def _prepare_input(self, X):
        # Coerce X to a C-order CumlArray of float32/float64.
        try:
            return input_to_cuml_array(
                X, order='C', check_dtype=[np.float32, np.float64])
        except ValueError:
            # input can be a DataFrame with mixed types
            # in this case coerce to 64-bit
            return input_to_cuml_array(
                X, order='C', convert_to_dtype=np.float64)

    def _determine_output_type(self, X):
        X_type = determine_array_type(X)
        # Coerce to CuPy / NumPy because we may need to return 3D array
        return 'numpy' if X_type == 'numpy' else 'cupy'

    def shap_values(self, X) -> CumlArray:
        """
        Estimate the SHAP values for a set of samples. For a given row, the
        SHAP values plus the `expected_value` attribute sum up to the raw
        model prediction. 'Raw model prediction' means before the application
        of a link function, for example, the SHAP values of an XGBoost binary
        classification will be in the additive logit space as opposed to
        probability space.

        Parameters
        ----------
        X :
            A matrix of samples (# samples x # features) on which to explain
            the model's output.

        Returns
        -------
        array
            Returns a matrix of SHAP values of shape
            (# classes x # samples x # features).
        """
        X_m, n_rows, n_cols, dtype = self._prepare_input(X)
        # Storing a C-order 3D array in a CumlArray leads to cryptic error
        # ValueError: len(shape) != len(strides)
        # So we use 2D array here
        pred_shape = (n_rows, self.num_class * (n_cols + 1))
        preds = CumlArray.empty(
            shape=pred_shape, dtype=dtype, order='C')
        if self.data is None:
            # Tree-path-dependent marginalisation
            gpu_treeshap(self.path_info, type_erase_float_ptr(X_m),
                         < size_t > n_rows, < size_t > n_cols,
                         type_erase_float_ptr(preds), preds.size)
        else:
            if self.data.dtype != dtype:
                raise ValueError(
                    "Expected background data to have the same dtype as X.")
            gpu_treeshap_interventional(
                self.path_info,
                type_erase_float_ptr(X_m),
                < size_t > n_rows, < size_t > n_cols,
                type_erase_float_ptr(self.data),
                < size_t > self.data.shape[0], < size_t > self.data.shape[1],
                type_erase_float_ptr(preds), preds.size)
        # Reshape to 3D as appropriate
        # To follow the convention of the SHAP package:
        # 1. Store the bias term in the 'expected_value' attribute.
        # 2. Transpose SHAP values in dimension (group_id, row_id, feature_id)
        preds = preds.to_output(
            output_type=self._determine_output_type(X))
        if self.num_class > 1:
            preds = preds.reshape(
                (n_rows, self.num_class, n_cols + 1))
            preds = preds.transpose((1, 0, 2))
            self.expected_value = preds[:, 0, -1]
            return preds[:, :, :-1]
        else:
            assert self.num_class == 1
            self.expected_value = preds[0, -1]
            return preds[:, :-1]

    def shap_interaction_values(
            self, X, method='shapley-interactions') -> CumlArray:
        """
        Estimate the SHAP interaction values for a set of samples. For a
        given row, the SHAP values plus the `expected_value` attribute sum
        up to the raw model prediction. 'Raw model prediction' means before
        the application of a link function, for example, the SHAP values of
        an XGBoost binary classification are in the additive logit space as
        opposed to probability space.
        Interventional feature marginalisation is not supported.

        Parameters
        ----------
        X :
            A matrix of samples (# samples x # features) on which to explain
            the model's output.
        method :
            One of ['shapley-interactions', 'shapley-taylor']

        Returns
        -------
        array
            Returns a matrix of SHAP values of shape
            (# classes x # samples x # features x # features).
        """
        X_m, n_rows, n_cols, dtype = self._prepare_input(X)
        # Storing a C-order 3D array in a CumlArray leads to cryptic error
        # ValueError: len(shape) != len(strides)
        # So we use 2D array here
        pred_shape = (n_rows, self.num_class * (n_cols + 1)**2)
        preds = CumlArray.empty(
            shape=pred_shape, dtype=dtype, order='C')
        if self.data is None:
            if method == 'shapley-interactions':
                gpu_treeshap_interactions(
                    self.path_info,
                    type_erase_float_ptr(X_m),
                    < size_t > n_rows, < size_t > n_cols,
                    type_erase_float_ptr(preds), preds.size)
            elif method == 'shapley-taylor':
                gpu_treeshap_taylor_interactions(
                    self.path_info, type_erase_float_ptr(X_m),
                    < size_t > n_rows, < size_t > n_cols,
                    type_erase_float_ptr(preds), preds.size)
            else:
                raise ValueError("Unknown interactions method.")
        else:
            raise ValueError(
                "Interventional algorithm not supported for interactions."
                " Please specify data as None in constructor.")
        preds = preds.to_output(
            output_type=self._determine_output_type(X))
        if self.num_class > 1:
            preds = preds.reshape(
                (n_rows, self.num_class, n_cols + 1, n_cols + 1))
            preds = preds.transpose((1, 0, 2, 3))
            self.expected_value = preds[:, 0, -1, -1]
            return preds[:, :, :-1, :-1]
        else:
            assert self.num_class == 1
            preds = preds.reshape(
                (n_rows, n_cols + 1, n_cols + 1))
            self.expected_value = preds[0, -1, -1]
            return preds[:, :-1, :-1]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/kernel_shap.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
import time
from cuml.internals.import_utils import has_sklearn
from cuml.internals.input_utils import input_to_cupy_array
from cuml.explainer.base import SHAPBase
from cuml.explainer.common import get_cai_ptr
from cuml.explainer.common import model_func_call
from cuml.linear_model import Lasso
from cuml.linear_model import LinearRegression
from functools import lru_cache
from itertools import combinations
from numbers import Number
from random import randint
from pylibraft.common.handle cimport handle_t
from libc.stdint cimport uintptr_t
from libc.stdint cimport uint64_t
cdef extern from "cuml/explainer/kernel_shap.hpp" namespace "ML":
    # Generates the synthetic evaluation dataset on device by overlaying each
    # mask row of X onto the background dataset. Two overloads: float32 and
    # float64 background/observation buffers (the mask X is always float32).
    void kernel_dataset "ML::Explainer::kernel_dataset"(
        handle_t& handle,
        float* X,
        int nrows_X,
        int ncols,
        float* background,
        int nrows_background,
        float* combinations,
        float* observation,
        int* nsamples,
        int len_nsamples,
        int maxsample,
        uint64_t seed) except +

    void kernel_dataset "ML::Explainer::kernel_dataset"(
        handle_t& handle,
        float* X,
        int nrows_X,
        int ncols,
        double* background,
        int nrows_background,
        double* combinations,
        double* observation,
        int* nsamples,
        int len_nsamples,
        int maxsample,
        uint64_t seed) except +
class KernelExplainer(SHAPBase):
    """
    GPU accelerated of SHAP's kernel explainer.

    cuML's SHAP based explainers accelerate the algorithmic part of SHAP.
    They are optimized to be used with fast GPU based models, like those in
    cuML. By creating the datasets and internal calculations,
    alongside minimizing data copies and transfers, they can accelerate
    explanations significantly. But they can also be used with
    CPU based models, where speedups can still be achieved, but those can be
    capped by factors like data transfers and the speed of the models.

    KernelExplainer is based on the Python SHAP
    package's KernelExplainer class:
    https://github.com/slundberg/shap/blob/master/shap/explainers/_kernel.py

    Current characteristics of the GPU version:

     * Unlike the SHAP package, ``nsamples`` is a parameter at the
       initialization of the explainer and there is a small initialization
       time.
     * Only tabular data is supported for now, via passing the background
       dataset explicitly.
     * Sparse data support is planned for the near future.
     * Further optimizations are in progress. For example, if the background
       dataset has constant value columns and the observation has the same
       value in some entries, the number of evaluations of the function can
       be reduced (this will come in the next version).

    Parameters
    ----------
    model : function
        Function that takes a matrix of samples (n_samples, n_features) and
        computes the output for those samples with shape (n_samples). Function
        must use either CuPy or NumPy arrays as input/output.
    data : Dense matrix containing floats or doubles.
        cuML's kernel SHAP supports tabular data for now, so it expects
        a background dataset, as opposed to a shap.masker object.
        The background dataset to use for integrating out features.
        To determine the impact of a feature, that feature is set to "missing"
        and the change in the model output is observed.
        Acceptable formats: CUDA array interface compliant objects like
        CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
        DataFrame/Series.
    nsamples : int (default = 2 * data.shape[1] + 2048)
        Number of times to re-evaluate the model when explaining each
        prediction. More samples lead to lower variance estimates of the SHAP
        values. The "auto" setting uses ``nsamples = 2 * X.shape[1] + 2048``.
    link : function or str (default = 'identity')
        The link function used to map between the output units of the
        model and the SHAP value units. From the SHAP package: The link
        function used to map between the output units of the model and the
        SHAP value units. By default it is identity, but logit can be useful
        so that expectations are computed in probability units while
        explanations remain in the (more naturally additive) log-odds units.
        For more details on how link functions work see any overview of link
        functions for generalized linear models.
    random_state: int, RandomState instance or None (default = None)
        Seed for the random number generator for dataset creation. Note: due to
        the design of the sampling algorithm the concurrency can affect
        results, so currently 100% deterministic execution is not guaranteed.
    is_gpu_model : bool or None (default = None)
        If None Explainer will try to infer whether `model` can take GPU data
        (as CuPy arrays), otherwise it will use NumPy arrays to call `model`.
        Set to True to force the explainer to use GPU data, set to False to
        force the Explainer to use NumPy data.
    handle : pylibraft.common.handle (default = None)
        Specifies the handle that holds internal CUDA state for
        computations in this model, a new one is created if it is None.
        Most importantly, this specifies the CUDA stream that will be used for
        the model's computations, so users can run different models
        concurrently in different streams by creating handles in several
        streams.
    dtype : np.float32 or np.float64 (default = None)
        Parameter to specify the precision of data to generate to call the
        model. If not specified, the explainer will try to get the dtype
        of the model, if it cannot be queried, then it will default to
        np.float32.
    output_type : 'cupy' or 'numpy' (default = 'numpy')
        Parameter to specify the type of data to output.
        If not specified, the explainer will default to 'numpy' for the time
        being to improve compatibility.

    Examples
    --------
    .. code-block:: python

        >>> from cuml import SVR
        >>> from cuml import make_regression
        >>> from cuml import train_test_split
        >>>
        >>> from cuml.explainer import KernelExplainer
        >>>
        >>> X, y = make_regression(
        ...     n_samples=102,
        ...     n_features=10,
        ...     noise=0.1,
        ...     random_state=42)
        >>>
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X,
        ...     y,
        ...     test_size=2,
        ...     random_state=42)
        >>>
        >>> model = SVR().fit(X_train, y_train)
        >>>
        >>> cu_explainer = KernelExplainer(
        ...     model=model.predict,
        ...     data=X_train,
        ...     is_gpu_model=True,
        ...     random_state=42)
        >>>
        >>> cu_shap_values = cu_explainer.shap_values(X_test)
        >>> cu_shap_values # doctest: +SKIP
        array([[-0.41163236, -0.29839307, -0.31082764, -0.21910861, 0.20798518,
              1.525831 , -0.07726735, -0.23897147, -0.5901833 , -0.03319931],
            [-0.37491834, -0.22581327, -1.2146976 , 0.03793442, -0.24420738,
              -0.4875331 , -0.05438256, 0.16568947, -1.9978098 , -0.19110584]],
            dtype=float32)
    """

    def __init__(self,
                 *,
                 model,
                 data,
                 nsamples='auto',
                 link='identity',
                 verbose=False,
                 random_state=None,
                 is_gpu_model=None,
                 handle=None,
                 dtype=None,
                 output_type=None):
        super().__init__(
            model=model,
            background=data,
            order='C',
            link=link,
            verbose=verbose,
            random_state=random_state,
            is_gpu_model=is_gpu_model,
            handle=handle,
            dtype=dtype,
            output_type=output_type
        )

        # default value matching SHAP package
        if nsamples == 'auto':
            self.nsamples = 2 * self.ncols + 2**11
        else:
            self.nsamples = nsamples

        # Maximum number of samples that user can set
        max_samples = 2 ** 32

        # restricting maximum number of samples
        if self.ncols <= 32:
            max_samples = 2 ** self.ncols - 2

            # if the user requested more samples than there are subsets in the
            # _powerset, we set nsamples to max_samples
            if self.nsamples > max_samples:
                self.nsamples = max_samples

        # Check the ratio between samples we evaluate divided by
        # all possible samples to check for need for l1
        self.ratio_evaluated = self.nsamples / max_samples

        self.nsamples_exact, self.nsamples_random, self.randind = \
            _get_number_of_exact_random_samples(ncols=self.ncols,
                                                nsamples=self.nsamples)

        # using numpy for powerset and shapley kernel weight calculations
        # cost is incurred only once, and generally we only generate
        # very few samples of the powerset if M is big.
        mat, weight = _powerset(self.ncols, self.randind, self.nsamples_exact,
                                full_powerset=(self.nsamples_random == 0),
                                dtype=self.dtype)

        # Store the mask and weights as device arrays
        # Mask dtype can be independent of Explainer dtype, since model
        # is not called on it.
        self._mask = cp.zeros((self.nsamples, self.ncols), dtype=np.float32)
        self._mask[:self.nsamples_exact] = cp.array(mat)

        self._weights = cp.ones(self.nsamples, dtype=self.dtype)
        self._weights[:self.nsamples_exact] = cp.array(weight)

    def shap_values(self,
                    X,
                    l1_reg='auto',
                    as_list=True):
        """
        Interface to estimate the SHAP values for a set of samples.
        Corresponds to the SHAP package's legacy interface, and is our main
        API currently.

        Parameters
        ----------
        X : Dense matrix containing floats or doubles.
            Acceptable formats: CUDA array interface compliant objects like
            CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
            DataFrame/Series.
        l1_reg : str (default: 'auto')
            The l1 regularization to use for feature selection.
        as_list : bool (default = True)
            Set to True to return a list of arrays for multi-dimensional
            models (like predict_proba functions) to match the SHAP package
            behavior. Set to False to return them as an array of arrays.

        Returns
        -------
        shap_values : array or list
        """
        return self._explain(X,
                             synth_data_shape=(self.nrows * self.nsamples,
                                               self.ncols),
                             return_as_list=as_list,
                             l1_reg=l1_reg)

    def _explain_single_observation(self,
                                    shap_values,
                                    row,
                                    idx,
                                    l1_reg):
        # Explains one observation `row`, writing results into
        # shap_values[dim][idx, :] for each model output dimension.
        total_timer = time.time()
        # Call the model to get the value f(row)
        fx = cp.array(
            model_func_call(X=row,
                            model_func=self.model,
                            gpu_model=self.is_gpu_model))

        self.model_call_time = \
            self.model_call_time + (time.time() - total_timer)

        # Reset the random part of the mask; the exact rows are reused.
        self._mask[self.nsamples_exact:self.nsamples] = \
            cp.zeros((self.nsamples_random, self.ncols), dtype=cp.float32)

        # If we need sampled rows, then we call the function that generates
        # the samples array with how many samples each row will have
        # and its corresponding weight
        if self.nsamples_random > 0:
            samples, self._weights[self.nsamples_exact:self.nsamples] = \
                _generate_nsamples_weights(self.ncols,
                                           self.nsamples,
                                           self.nsamples_exact,
                                           int(self.nsamples_random / 2),
                                           self.randind,
                                           self.dtype)

        row, _, _, _ = \
            input_to_cupy_array(row, order=self.order)

        cdef handle_t* handle_ = \
            <handle_t*><size_t>self.handle.getHandle()
        cdef uintptr_t row_ptr, bg_ptr, ds_ptr, x_ptr, smp_ptr

        row_ptr = get_cai_ptr(row)
        bg_ptr = get_cai_ptr(self.background)
        ds_ptr = get_cai_ptr(self._synth_data)
        if self.nsamples_random > 0:
            smp_ptr = get_cai_ptr(samples)
        else:
            smp_ptr = <uintptr_t> NULL
        maxsample = 0
        x_ptr = get_cai_ptr(self._mask)

        if self.random_state is None:
            # FIX: random.randint requires integer bounds; the previous
            # randint(0, 1e18) passed a float, which raises TypeError on
            # Python >= 3.12 (randrange no longer accepts floats).
            self.random_state = randint(0, int(1e18))

        # we default to float32 unless self.dtype is specifically np.float64
        if self.dtype == np.float64:
            kernel_dataset(
                handle_[0],
                <float*> x_ptr,
                <int> self._mask.shape[0],
                <int> self._mask.shape[1],
                <double*> bg_ptr,
                <int> self.background.shape[0],
                <double*> ds_ptr,
                <double*> row_ptr,
                <int*> smp_ptr,
                <int> self.nsamples_random,
                <int> maxsample,
                <uint64_t> self.random_state)
        else:
            kernel_dataset(
                handle_[0],
                <float*> x_ptr,
                <int> self._mask.shape[0],
                <int> self._mask.shape[1],
                <float*> bg_ptr,
                <int> self.background.shape[0],
                <float*> ds_ptr,
                <float*> row_ptr,
                <int*> smp_ptr,
                <int> self.nsamples_random,
                <int> maxsample,
                <uint64_t> self.random_state)

        self.handle.sync()

        model_timer = time.time()
        # evaluate model on combinations
        y = model_func_call(X=self._synth_data,
                            model_func=self.model,
                            gpu_model=self.is_gpu_model)

        self.model_call_time = \
            self.model_call_time + (time.time() - model_timer)

        for i in range(self.model_dimensions):
            if self.model_dimensions == 1:
                y_hat = y - self._expected_value
                exp_val_param = self._expected_value
                fx_param = fx[0]
            else:
                y_hat = y[:, i] - self._expected_value[i]
                fx_param = fx[0][i]
                exp_val_param = self._expected_value[i]

            # get average of each combination of X
            y_hat = cp.mean(
                cp.array(y_hat).reshape((self.nsamples,
                                         self.background.shape[0])),
                axis=1
            )

            # we need to do l1 regularization if user left it as auto and we
            # evaluated less than 20% of the space, or if the user set it
            # and we did not evaluate all the space (i.e. nsamples_random == 0)
            nonzero_inds = None
            if ((self.ratio_evaluated < 0.2 and l1_reg == "auto") or
                    (self.ratio_evaluated < 1.0 and l1_reg != "auto")):
                reg_timer = time.time()
                nonzero_inds = _l1_regularization(self._mask,
                                                  y_hat,
                                                  self._weights,
                                                  exp_val_param,
                                                  fx_param,
                                                  self.link_fn,
                                                  l1_reg)
                self.l1_reg_time = \
                    self.l1_reg_time + (time.time() - reg_timer)

                # in case all indexes become zero
                if len(nonzero_inds) == 0:
                    return None

            reg_timer = time.time()
            shap_values[i][idx, :-1] = _weighted_linear_regression(
                self._mask,
                y_hat,
                self._weights,
                exp_val_param,
                fx_param,
                nonzero_inds=nonzero_inds,
                handle=self.handle)

            # add back the variable that was removed in the weighted
            # linear regression preprocessing
            if nonzero_inds is None:
                shap_values[i][idx, -1] = \
                    (fx_param - exp_val_param) - cp.sum(
                        shap_values[i][idx, :-1])
            else:
                shap_values[i][idx, nonzero_inds[-1]] = \
                    (fx_param - exp_val_param) - cp.sum(
                        shap_values[i][idx, :-1])

            self.linear_model_time = \
                self.linear_model_time + (time.time() - reg_timer)

        self.total_time = self.total_time + (time.time() - total_timer)

    def _reset_timers(self):
        # Zero all timing accumulators, including the ones specific to this
        # explainer (l1 regularization and the final linear model solve).
        super()._reset_timers()
        self.l1_reg_time = 0
        self.linear_model_time = 0
def _get_number_of_exact_random_samples(ncols, nsamples):
"""
Function calculates how many rows will be from the powerset (exact)
and how many will be from random samples, based on the nsamples
of the explainer.
"""
cur_nsamples = 0
nsamples_exact = 0
r = 0
# we check how many subsets of the _powerset of self.ncols we can fit
# in self.nsamples. This sets of the powerset are used as indexes
# to generate the mask matrix
while cur_nsamples <= nsamples / 2:
r += 1
nsamples_exact = cur_nsamples
cur_nsamples += int(_binomCoef(ncols, r))
# if we are going to generate a full powerset (i.e. we reached
# bincoef bincoef(ncols, r/2)) we return 2**ncols - 2
if r >= ncols / 2:
nsamples_exact = 2**ncols - 2
else:
nsamples_exact *= 2
# see if we need to have randomly sampled entries in our mask
# and combinations matrices
nsamples_random = \
nsamples - nsamples_exact if r < ncols / 2 else 0
# we save r so we can generate random samples later
return nsamples_exact, nsamples_random, r
@lru_cache(maxsize=None)
def _binomCoef(n, k):
"""
Binomial coefficient function with cache
"""
res = 1
if(k > n - k):
k = n - k
for i in range(k):
res *= (n - i)
res /= (i + 1)
return res
@lru_cache(maxsize=None)
def _shapley_kernel(M, s):
"""
Function that calculates shapley kernel, cached.
"""
# To avoid infinite values
# Based on reference implementation
if(s == 0 or s == M):
return 10000
res = (M - 1) / (_binomCoef(M, s) * s * (M - s))
return res
def _powerset(n, r, nrows, full_powerset=False, dtype=np.float32):
    """
    Enumerate subsets of ``range(n)`` (sizes 1..r-1, or 1..n-1 when
    ``full_powerset`` is set) as binary mask rows, together with the
    Shapley kernel weight of each row.

    When ``full_powerset`` is False, each enumerated mask is immediately
    followed by its complement row, mirroring the paired sampling scheme.
    """
    features = np.arange(n)
    weights = np.zeros(nrows, dtype=dtype)
    masks = np.zeros((nrows, n), dtype=dtype)
    row = 0
    last_size = n if full_powerset else r
    for size in range(1, last_size):
        # All subsets of the same size share one kernel weight.
        kernel_weight = _shapley_kernel(n, size)
        for subset in combinations(features, size):
            masks[row, subset] = 1
            weights[row] = kernel_weight
            if not full_powerset:
                # Paired complement of the mask just written.
                masks[row + 1] = 1 - masks[row]
                weights[row + 1] = kernel_weight
                row += 1
            row += 1
    return masks, weights
def _generate_nsamples_weights(ncols,
                               nsamples,
                               nsamples_exact,
                               nsamples_random,
                               randind,
                               dtype):
    """
    Draw the subset sizes for the randomly sampled mask rows and compute
    the kernel weight of each draw.

    Returns device arrays ``(samples, w)`` where ``w`` carries two entries
    per drawn size — one for the mask row and one for its complement.
    """
    # Subset sizes are drawn uniformly from {randind, randind + 1}.
    drawn = np.random.choice(np.arange(randind,
                                       randind + 2),
                             nsamples_random)
    paired_w = np.empty(nsamples_random * 2, dtype=dtype)
    for pos, size in enumerate(drawn):
        kw = _shapley_kernel(ncols, size)
        # A mask row and its complement share the same weight.
        paired_w[2 * pos] = kw
        paired_w[2 * pos + 1] = kw
    return cp.array(drawn, dtype=np.int32), cp.array(paired_w)
def _l1_regularization(X,
                       y,
                       weights,
                       expected_value,
                       fx,
                       link_fn,
                       l1_reg='auto'):
    """
    Select features via L1 regularization (LASSO / LARS), returning the
    indices of the features whose coefficients are nonzero.

    Parameters
    ----------
    X : cupy.ndarray
        Mask matrix of shape (nsamples, ncols).
    y : cupy.ndarray
        Model outputs for the masked samples.
    weights : cupy.ndarray
        Shapley kernel weight of each row of X.
    expected_value : float
        Expected model output over the background dataset.
    fx : float
        Model output for the row being explained.
    link_fn : callable
        Link function mapping model output into SHAP value units.
    l1_reg : 'auto', number or str (default = 'auto')
        Mirrors mainline SHAP's `l1_reg`: 'auto', 'aic', 'bic',
        'num_features(k)', or a numeric Lasso alpha.

    Returns
    -------
    nonzero_inds : cupy.ndarray
        Indices of the selected features.
    """
    # create augmented dataset for feature selection: each mask row is
    # paired with its complement so the regression sees both sides of
    # every coalition
    s = cp.sum(X, axis=1)
    w_aug = cp.hstack(
        (weights * (X.shape[1] - s), weights * s))
    # np.sqrt dispatches to CuPy through __array_ufunc__
    w_sqrt_aug = np.sqrt(w_aug)
    y = cp.hstack(
        (y, y - (link_fn(fx) - link_fn(expected_value))))
    y *= w_sqrt_aug
    X = cp.transpose(
        w_sqrt_aug * cp.transpose(cp.vstack((X, X - 1))))

    # Use cuML's Lasso if Scikit-learn is not present
    if not has_sklearn():
        if l1_reg == 'auto':
            l1_reg = 0.2
        elif not isinstance(l1_reg, Number):
            raise ImportError("Scikit-learn is required for l1 "
                              "regularization that is not Lasso.")
        nonzero_inds = cp.nonzero(Lasso(alpha=l1_reg).fit(X, y).coef_)[0]

    # Else match default behavior of mainline SHAP
    elif l1_reg == 'auto':
        from sklearn.linear_model import LassoLarsIC
        nonzero_inds = np.nonzero(
            LassoLarsIC(criterion="aic").fit(cp.asnumpy(X),
                                             cp.asnumpy(y)).coef_)[0]
    elif isinstance(l1_reg, str):
        if l1_reg.startswith("num_features("):
            from sklearn.linear_model import lars_path
            # requested feature count, parsed out of "num_features(k)"
            r = int(l1_reg[len("num_features("):-1])
            nonzero_inds = lars_path(cp.asnumpy(X),
                                     cp.asnumpy(y), max_iter=r)[1]
        elif l1_reg in ["aic", "bic"]:
            from sklearn.linear_model import LassoLarsIC
            nonzero_inds = np.nonzero(
                LassoLarsIC(criterion=l1_reg).fit(cp.asnumpy(X),
                                                  cp.asnumpy(y)).coef_)[0]
    else:
        # Bug fix: a user-supplied numeric l1_reg was previously ignored
        # here and silently replaced by a hard-coded alpha of 0.2; honor
        # it, matching both mainline SHAP and the no-sklearn branch above.
        alpha = l1_reg if isinstance(l1_reg, Number) else 0.2
        nonzero_inds = cp.nonzero(Lasso(alpha=alpha).fit(X, y).coef_)[0]

    return cp.asarray(nonzero_inds)
def _weighted_linear_regression(X,
                                y,
                                weights,
                                expected_value,
                                fx,
                                nonzero_inds=None,
                                handle=None):
    """
    Function performs weighted linear regression, the shap values
    are the coefficients.

    Parameters
    ----------
    X : cupy.ndarray
        Mask matrix of shape (nsamples, ncols).
    y : cupy.ndarray
        Model outputs for the masked samples.
    weights : cupy.ndarray
        Shapley kernel weight of each row of X.
    expected_value : float
        Expected model output over the background dataset.
    fx : float
        Model output for the row being explained.
    nonzero_inds : cupy.ndarray or None (default = None)
        If not None, restrict the regression to these feature indices
        (the result of the optional L1 feature-selection step).
    handle : pylibraft.common.handle or None
        Handle used by cuML's LinearRegression.

    Returns
    -------
    shap_vals : cupy.ndarray
        One SHAP value per feature except the eliminated one, whose value
        is implied by the additivity constraint and restored by the caller.
    """
    if nonzero_inds is None:
        # taken from main SHAP package:
        # eliminate one variable with the constraint that all features
        # sum to the output, improves result accuracy significantly
        y = y - X[:, -1] * (fx - expected_value)
        Xw = cp.transpose(
            cp.transpose(X[:, :-1]) - X[:, -1])

        # multiply both sides by sqrt(weight) so the weighted
        # least-squares problem becomes ordinary least squares
        Xw = Xw * cp.sqrt(weights[:, cp.newaxis])
        y = y * cp.sqrt(weights)
        shap_vals = LinearRegression(fit_intercept=False,
                                     output_type='cupy',
                                     handle=handle).fit(Xw, y).coef_
    else:
        # mathematically the same as above, but we need to use the indexes
        # from nonzero_inds and some additional arrays
        # nonzero_inds tells us which cols of X to use
        y = y - X[:, nonzero_inds[-1]] * (fx - expected_value)

        if len(nonzero_inds) == 1:
            # when only one index is nonzero, use that column
            Xw = X[:, nonzero_inds]
        else:
            # eliminate the last selected feature (additivity constraint)
            Xw = cp.transpose(
                cp.transpose(
                    X[:, nonzero_inds[:-1]]) - X[:, nonzero_inds[-1]])

        Xw = Xw * cp.sqrt(weights[:, cp.newaxis])
        y = y * cp.sqrt(weights)
        X_t = LinearRegression(fit_intercept=False,
                               output_type='cupy',
                               handle=handle).fit(Xw, y).coef_

        # scatter the coefficients back into a dense vector; features
        # dropped by the L1 step keep a SHAP value of zero
        shap_vals = cp.zeros(X.shape[1] - 1)
        shap_vals[nonzero_inds[:-1]] = X_t
    return shap_vals
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/base.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import('cudf')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
pandas = cpu_only_import('pandas')
import cuml.internals.logger as logger
from cuml.internals.import_utils import has_shap
from cuml.internals.input_utils import input_to_cupy_array
from cuml.internals.input_utils import input_to_host_array
from cuml.internals.logger import warn
from cuml.explainer.common import get_dtype_from_model_func
from cuml.explainer.common import get_handle_from_cuml_model_func
from cuml.explainer.common import get_link_fn_from_str_or_fn
from cuml.explainer.common import get_tag_from_model_func
from cuml.explainer.common import model_func_call
from cuml.explainer.common import output_list_shap_values
from pylibraft.common.handle cimport handle_t
from libcpp cimport bool
from libc.stdint cimport uintptr_t
cdef extern from "cuml/explainer/permutation_shap.hpp" namespace "ML":

    # C++ kernel that fills `dataset` with the synthetic rows used for
    # main-effect computation; float32 and float64 overloads of the same
    # symbol.
    void shap_main_effect_dataset "ML::Explainer::shap_main_effect_dataset"(
        const handle_t& handle,
        float* dataset,
        const float* background,
        int nrows,
        int ncols,
        const float* row,
        int* idx,
        bool rowMajor) except +

    void shap_main_effect_dataset "ML::Explainer::shap_main_effect_dataset"(
        const handle_t& handle,
        double* dataset,
        const double* background,
        int nrows,
        int ncols,
        const double* row,
        int* idx,
        bool rowMajor) except +
class SHAPBase():
    """
    Base class for SHAP based explainers.

    Parameters
    ----------
    model : function
        Function that takes a matrix of samples (n_samples, n_features) and
        computes the output for those samples with shape (n_samples). Function
        must use either CuPy or NumPy arrays as input/output.
    data : Dense matrix containing floats or doubles.
        Background dataset. Dense arrays are supported.
    order : 'F', 'C' or None (default = None)
        Set to override detection of row ('C') or column ('F') major order,
        if None it will be attempted to be inferred from model.
    order_default : 'F' or 'C' (default = 'C')
        Used when `order` is None. If the order cannot be inferred from the
        model, then order is set to `order_default`.
    link : function or str (default = 'identity')
        The link function used to map between the output units of the
        model and the SHAP value units.
    random_state: int, RandomState instance or None (default = None)
        Seed for the random number generator for dataset creation.
    is_gpu_model : bool or None (default = None)
        If None Explainer will try to infer whether `model` can take GPU data
        (as CuPy arrays), otherwise it will use NumPy arrays to call `model`.
        Set to True to force the explainer to use GPU data, set to False to
        force the Explainer to use NumPy data.
    handle : pylibraft.common.handle
        Specifies the handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    dtype : np.float32 or np.float64 (default = None)
        Parameter to specify the precision of data to generate to call the
        model. If not specified, the explainer will try to get the dtype
        of the model, if it cannot be queried, then it will default to
        np.float32.
    output_type : 'cupy' or 'numpy' (default = None)
        Parameter to specify the type of data to output.
        If not specified, the explainer will try to see if model is gpu based,
        if so it will be set to `cupy`, otherwise it will be set to `numpy`.
        For compatibility with SHAP's graphing libraries, specify `numpy`.
    """

    def __init__(self,
                 *,
                 model,
                 background,
                 order=None,
                 order_default='C',
                 link='identity',
                 verbose=False,
                 random_state=None,
                 is_gpu_model=None,
                 handle=None,
                 dtype=None,
                 output_type=None):
        # Map the boolean convenience values onto logger levels; any other
        # value is assumed to already be a logger level.
        if verbose is True:
            self.verbose = logger.level_debug
        elif verbose is False:
            self.verbose = logger.level_error
        else:
            self.verbose = verbose

        # Timing instrumentation is only gathered when debugging.
        self.time_performance = self.verbose >= logger.level_debug

        if handle is None:
            self.handle = get_handle_from_cuml_model_func(model,
                                                          create_new=True)
        else:
            self.handle = handle

        if order is None:
            self.order = get_tag_from_model_func(func=model,
                                                 tag='preferred_input_order',
                                                 default=order_default)
        else:
            self.order = order

        self.link = link
        self.link_fn = get_link_fn_from_str_or_fn(link)
        self.model = model
        if is_gpu_model is None:
            # todo (dgd): when sparse support is added, use this tag to see if
            # model can accept sparse data
            self.is_gpu_model = \
                get_tag_from_model_func(func=model,
                                        tag='X_types_gpu',
                                        default=None) is not None
        else:
            self.is_gpu_model = is_gpu_model

        # we are defaulting to numpy for now for compatibility
        self.output_type = 'numpy' if output_type is None else output_type

        # if no dtype is specified, we try to get it from the model
        if dtype is None:
            self.dtype = get_dtype_from_model_func(func=model,
                                                   default=np.float32)
        else:
            self.dtype = np.dtype(dtype)
            # Bug fix: validate the normalized dtype instead of the raw
            # argument, so valid string specs like 'float32' are accepted.
            if self.dtype not in [np.float32, np.float64]:
                raise ValueError("dtype must be either np.float32 or "
                                 "np.float64.")

        self.background, self.nrows, self.ncols, _ = \
            input_to_cupy_array(background, order=self.order,
                                convert_to_dtype=self.dtype)

        self.random_state = random_state

        if isinstance(background, (pandas.DataFrame, cudf.DataFrame)):
            self.feature_names = background.columns.to_list()
        else:
            # Bug fix: there is one feature name per column; previously
            # this used len(background), which is the number of rows.
            self.feature_names = [None] * self.ncols

        # evaluate the model in background to get the expected_value
        self._expected_value = self.link_fn(
            cp.mean(
                model_func_call(X=self.background,
                                model_func=self.model,
                                gpu_model=self.is_gpu_model),
                axis=0
            )
        )

        # public attribute saved as NumPy for compatibility with the legacy
        # SHAP plotting functions
        self.expected_value = cp.asnumpy(self._expected_value)

        # Calculate the dimension of the model. For example, `predict_proba`
        # functions typically return n values for n classes as opposed to
        # 1 value for a typical `predict`
        if len(self._expected_value.shape) == 0:
            self.model_dimensions = 1
            self.expected_value = float(self.expected_value)
        else:
            self.model_dimensions = self._expected_value.shape[0]

        self._reset_timers()

    def _explain(self,
                 X,
                 testing=False,
                 synth_data_shape=None,
                 free_synth_data=True,
                 return_as_list=True,
                 **kwargs):
        """
        Function that calls inheriting explainers _explain_single_observation
        in each row of X.

        Parameters
        ----------
        X : Dense matrix containing floats or doubles.
            Acceptable formats: CUDA array interface compliant objects like
            CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
            DataFrame/Series.
        testing : bool (default: False)
            Flag to control random behaviors used by some explainers for
            running pytests. Might be removed in a future version, meant only
            for testing code.
        synth_data_shape : tuple (default: None)
            Shape of temporary data needed by inheriting explainer.
        free_synth_data : bool (default: True)
            Whether to free temporary memory after the call. Useful in case a
            workflow requires multiple calls to shap_values with small data
            as opposed to fewer calls with bigger data.
        **kwargs: dictionary
            Specific parameters that the _explain_single_observation of
            inheriting classes need.

        Returns
        -------
        shap_values : array
            Array with the shap values, using cuml.internals output type
            logic.
        """
        self._reset_timers()
        X = input_to_cupy_array(X,
                                order=self.order,
                                convert_to_dtype=self.dtype)[0]

        if X.ndim == 1:
            X = X.reshape((1, self.ncols))

        # shap_values is a list so we can return a list in the case that
        # model is a multidimensional-output function
        shap_values = [cp.zeros(X.shape, dtype=self.dtype)
                       for _ in range(self.model_dimensions)]

        # Allocate synthetic dataset array once for multiple explanations
        if getattr(self, "_synth_data", None) is None and \
                synth_data_shape is not None:
            self._synth_data = cp.zeros(
                shape=synth_data_shape,
                dtype=self.dtype,
                order=self.order
            )

        # Explain each observation
        for idx, x in enumerate(X):
            # use mutability of lists and cupy arrays to get all shap values
            self._explain_single_observation(
                shap_values=shap_values,
                row=x.reshape(1, self.ncols),
                idx=idx,
                **kwargs
            )

        # Bug fix: the attribute is `_synth_data`; the previous check on
        # "synth_data" never matched, so the buffer was never released.
        if free_synth_data and getattr(self, "_synth_data", None) is not None:
            del self._synth_data

        if return_as_list:
            shap_values = output_list_shap_values(
                X=shap_values,
                dimensions=self.model_dimensions,
                output_type=self.output_type
            )

        return shap_values

    def __call__(self,
                 X,
                 main_effects=False,
                 **kwargs):
        """
        Experimental interface mirroring SHAP >= 0.37's `Explanation` API.
        Requires the `shap` package to be installed.
        """
        if not has_shap(min_version="0.37"):
            raise ImportError("SHAP >= 0.37 was not found, please install it "
                              " or use the explainer.shap_values function "
                              "instead. ")
        else:
            warn("Support for the new API is in experimental state, tested "
                 "with SHAP 0.37, but changes in further versions could "
                 "affect its functioning. The functions explainer.shap_values "
                 " and explainer.main_effects are the stable calls currently.")
        from shap import Explanation

        shap_values = self.shap_values(X,
                                       as_list=False,
                                       **kwargs)

        # reshaping of arrays to match SHAP's behavior for building
        # Explanation objects
        if self.model_dimensions > 1:
            # Bug fix: this used `==` (a no-op comparison), so the reshape
            # for multi-output models was silently discarded.
            shap_values = cp.asnumpy(cp.array(shap_values)).reshape(
                len(X), X.shape[1], self.model_dimensions
            )
            base_values = np.tile(self.expected_value, (len(X), 1))
        else:
            shap_values = cp.asnumpy(shap_values[0])
            base_values = np.tile(self.expected_value, len(X))

        if main_effects:
            main_effect_values = self.main_effects(X)
        else:
            main_effect_values = None

        out = Explanation(
            values=shap_values,
            base_values=base_values,
            data=input_to_host_array(X).array,
            feature_names=self.feature_names,
            main_effects=main_effect_values
        )
        return out

    def main_effects(self,
                     X):
        """
        A utility method to compute the main effects of a model.
        """
        # NOTE(review): _calculate_main_effects takes (main_effect_values,
        # row, inds=None) but is called here with a single positional
        # argument — confirm the intended signature before relying on this
        # path.
        main_effects = []
        for idx, x in enumerate(X):
            main_effects.append(self._calculate_main_effects(x))

        return main_effects

    def _calculate_main_effects(self,
                                main_effect_values,
                                row,
                                inds=None):
        # NOTE(review): this method reads `self.masker`, which is not set
        # anywhere in this class — presumably a subclass provides it;
        # verify before use.
        if inds is None:
            inds = cp.arange(len(self.masker), dtype=np.float32)

        masked_inputs = cp.empty(
            shape=((self.nrows * self.ncols + self.nrows), self.ncols),
            dtype=self.dtype,
            order=self.masker.order
        )

        cdef handle_t* handle_ = \
            <handle_t*><size_t>self.handle.getHandle()

        cdef uintptr_t row_ptr, bg_ptr, idx_ptr, masked_ptr

        masked_ptr = masked_inputs.__cuda_array_interface__['data'][0]
        bg_ptr = self.masker.ptr
        row_ptr = row.ptr
        idx_ptr = inds.__cuda_array_interface__['data'][0]
        row_major = self.masker.order == "C"

        # Bug fix: the dispatch previously tested `self.masker.order.dtype`
        # — `.order` is a string and has no dtype attribute; dispatch on
        # the explainer's dtype, as the other explainers do.
        if self.dtype == cp.float32:
            shap_main_effect_dataset(handle_[0],
                                     <float*> masked_ptr,
                                     <float*> bg_ptr,
                                     <int> self.nrows,
                                     <int> self.ncols,
                                     <float*> row_ptr,
                                     <int*> idx_ptr,
                                     <bool> row_major)
        else:
            shap_main_effect_dataset(handle_[0],
                                     <double*> masked_ptr,
                                     <double*> bg_ptr,
                                     <int> self.nrows,
                                     <int> self.ncols,
                                     <double*> row_ptr,
                                     <int*> idx_ptr,
                                     <bool> row_major)

        self.handle.sync()

        # NOTE(review): model_func_call is invoked positionally with only
        # the masked inputs here, unlike the keyword form used elsewhere in
        # this class — confirm the helper's defaults cover this call.
        main_effects = model_func_call(masked_inputs) - self._expected_value
        return main_effects

    def _reset_timers(self):
        """Reset the performance counters gathered during explanation."""
        self.total_time = 0
        self.model_call_time = 0
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/permutation_shap.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
import time
from cuml.internals.safe_imports import gpu_only_import_from
cu_df = gpu_only_import_from('cudf', 'DataFrame')
from cuml.explainer.base import SHAPBase
from cuml.explainer.common import get_cai_ptr
from cuml.explainer.common import model_func_call
cuda = gpu_only_import_from('numba', 'cuda')
from cuml.internals.safe_imports import cpu_only_import_from
pd_df = cpu_only_import_from('pandas', 'DataFrame')
from pylibraft.common.handle cimport handle_t
from libcpp cimport bool
from libc.stdint cimport uintptr_t
cdef extern from "cuml/explainer/permutation_shap.hpp" namespace "ML":

    # Builds the synthetic dataset for one feature permutation; float32
    # and float64 overloads of the same C++ symbol.
    void permutation_shap_dataset "ML::Explainer::permutation_shap_dataset"(
        const handle_t& handle,
        float* dataset,
        const float* background,
        int n_rows,
        int n_cols,
        const float* row,
        int* idx,
        bool rowMajor) except +

    void permutation_shap_dataset "ML::Explainer::permutation_shap_dataset"(
        const handle_t& handle,
        double* dataset,
        const double* background,
        int n_rows,
        int n_cols,
        const double* row,
        int* idx,
        bool rowMajor) except +

    # Accumulates the model-output deltas of one permutation pass into the
    # running SHAP values; float32 and float64 overloads.
    void update_perm_shap_values "ML::Explainer::update_perm_shap_values"(
        const handle_t& handle,
        float* shap_values,
        const float* y_hat,
        const int ncols,
        const int* idx) except +

    void update_perm_shap_values "ML::Explainer::update_perm_shap_values"(
        const handle_t& handle,
        double* shap_values,
        const double* y_hat,
        const int ncols,
        const int* idx) except +
class PermutationExplainer(SHAPBase):
    """
    GPU accelerated version of SHAP's PermutationExplainer

    cuML's SHAP based explainers accelerate the algorithmic part of SHAP.
    They are optimized to be used with fast GPU based models, like those in
    cuML. By creating the datasets and internal calculations,
    alongside minimizing data copies and transfers, they can accelerate
    explanations significantly. But they can also be used with
    CPU based models, where speedups can still be achieved, but those can be
    capped by factors like data transfers and the speed of the models.

    PermutationExplainer is algorithmically similar and based on the Python
    SHAP package kernel explainer:
    https://github.com/slundberg/shap/blob/master/shap/explainers/_kernel.py

    This method approximates the Shapley values by iterating through
    permutations of the inputs. From the SHAP library docs: it guarantees
    local accuracy (additivity) by iterating completely through entire
    permutations of the features in both forward and reverse directions.

    Current characteristics of the GPU version:

    * Only tabular data is supported for now, via passing the background
      dataset explicitly.
    * Hierarchical clustering for Owen values are planned for the near
      future.
    * Sparse data support is planned for the near future.

    **Setting the random seed**:

    This explainer uses CuPy to generate the permutations that are used, so
    to have reproducible results use `CuPy's seeding mechanism
    <https://docs.cupy.dev/en/stable/reference/generated/cupy.random.seed.html>`_.

    Parameters
    ----------
    model : function
        A callable python object that executes the model given a set of input
        data samples.
    masker : Dense matrix containing floats or doubles.
        cuML's permutation SHAP supports tabular data for now, so it expects
        a background dataset, as opposed to a shap.masker object. To respect
        a hierarchical structure of the data, use the (temporary) parameter
        `masker_type`
        Acceptable formats: CUDA array interface compliant objects like
        CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
        DataFrame/Series.
    masker_type: {'independent', 'partition'} default = 'independent'
        If 'independent' is used, then this is equivalent to SHAP's
        independent masker and the algorithm is fully GPU accelerated.
        If 'partition' then it is equivalent to SHAP's Partition masker,
        which respects a hierarchical structure in the background data.
    link : function or str (default = 'identity')
        The link function used to map between the output units of the
        model and the SHAP value units. From the SHAP package: The link
        function used to map between the output units of the model and the
        SHAP value units. By default it is identity, but logit can be useful
        so that expectations are computed in probability units while
        explanations remain in the (more naturally additive) log-odds units.
        For more details on how link functions work see any overview of link
        functions for generalized linear models.
    gpu_model : bool or None (default = None)
        If None Explainer will try to infer whether `model` can take GPU data
        (as CuPy arrays), otherwise it will use NumPy arrays to call `model`.
        Set to True to force the explainer to use GPU data, set to False to
        force the Explainer to use NumPy data.
    handle : pylibraft.common.handle (default = None)
        Specifies the handle that holds internal CUDA state for
        computations in this model, a new one is created if it is None.
        Most importantly, this specifies the CUDA stream that will be used for
        the model's computations, so users can run different models
        concurrently in different streams by creating handles in several
        streams.
    dtype : np.float32 or np.float64 (default = None)
        Parameter to specify the precision of data to generate to call the
        model. If not specified, the explainer will try to get the dtype
        of the model, if it cannot be queried, then it will default to
        np.float32.
    output_type : 'cupy' or 'numpy' (default = 'numpy')
        Parameter to specify the type of data to output.
        If not specified, the explainer will default to 'numpy' for the time
        being to improve compatibility.

    Examples
    --------

    .. code-block:: python

        >>> from cuml import SVR
        >>> from cuml import make_regression
        >>> from cuml import train_test_split

        >>> from cuml.explainer import PermutationExplainer

        >>> X, y = make_regression(
        ...     n_samples=102,
        ...     n_features=10,
        ...     noise=0.1,
        ...     random_state=42)

        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X,
        ...     y,
        ...     test_size=2,
        ...     random_state=42)

        >>> model = SVR().fit(X_train, y_train)

        >>> cu_explainer = PermutationExplainer(
        ...     model=model.predict,
        ...     data=X_train,
        ...     random_state=42)

        >>> cu_shap_values = cu_explainer.shap_values(X_test)
        >>> cu_shap_values  # doctest: +SKIP
        array([[ 0.16611198, 0.74156773, 0.05906528,  0.30015892, 2.5425286 ,
                 0.0970122 , 0.12258395, 2.1998262 , -0.02968234, -0.8669155 ],
               [-0.10587756, 0.77705824, -0.08259875, -0.71874434, 1.781551  ,
                -0.05454511, 0.11826539, -1.1734306 , -0.09629871, 0.4571011]],
              dtype=float32)
    """

    def __init__(self,
                 *,
                 model,
                 data,
                 masker_type='independent',
                 link='identity',
                 handle=None,
                 is_gpu_model=None,
                 random_state=None,
                 dtype=None,
                 output_type=None,
                 verbose=False,):
        # NOTE(review): `masker_type` is accepted but not currently used or
        # forwarded anywhere in this method — confirm whether 'partition'
        # support is still pending.
        super().__init__(
            order='C',
            model=model,
            background=data,
            link=link,
            verbose=verbose,
            is_gpu_model=is_gpu_model,
            handle=handle,
            dtype=dtype,
            output_type=output_type
        )

    def shap_values(self,
                    X,
                    npermutations=10,
                    as_list=True,
                    **kwargs):
        """
        Interface to estimate the SHAP values for a set of samples.
        Corresponds to the SHAP package's legacy interface, and is our main
        API currently.

        Parameters
        ----------
        X : Dense matrix containing floats or doubles.
            Acceptable formats: CUDA array interface compliant objects like
            CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
            DataFrame/Series.
        npermutations : int (default = 10)
            Number of times to cycle through all the features, re-evaluating
            the model at each step. Each cycle evaluates the model function
            2 * (# features + 1) times on a data matrix of (# background
            data samples) rows. An exception to this is when
            PermutationExplainer can avoid evaluating the model because a
            feature's value is the same in X and the background dataset
            (which is common for example with sparse features).
        as_list : bool (default = True)
            Set to True to return a list of arrays for multi-dimensional
            models (like predict_proba functions) to match the SHAP package
            shap_values API behavior.
            Set to False to return them as an array of arrays.

        Returns
        -------
        shap_values : array or list
        """
        # The synthetic buffer holds, for every feature toggle of the
        # forward and reverse passes, one copy of the background dataset.
        return self._explain(X,
                             synth_data_shape=(
                                 (2 * self.ncols * self.nrows + self.nrows),
                                 self.ncols
                             ),
                             npermutations=npermutations,
                             return_as_list=as_list,
                             **kwargs)

    def _explain_single_observation(self,
                                    shap_values,
                                    row,
                                    idx,
                                    npermutations=10,
                                    testing=False):
        # Accumulates SHAP values for one row (in place, into
        # shap_values[i][idx]) over `npermutations` permutation cycles.
        total_timer = time.time()

        inds = cp.arange(self.ncols, dtype=cp.int32)

        cdef handle_t* handle_ = \
            <handle_t*><size_t>self.handle.getHandle()
        cdef uintptr_t row_ptr, bg_ptr, idx_ptr, ds_ptr, shap_ptr, y_hat_ptr

        if self.random_state is not None:
            cp.random.seed(seed=self.random_state)

        for _ in range(npermutations):
            # `testing` keeps the identity permutation so pytest runs are
            # deterministic.
            if not testing:
                cp.random.shuffle(inds)

            ds_ptr = get_cai_ptr(self._synth_data)
            bg_ptr = get_cai_ptr(self.background)
            row_ptr = get_cai_ptr(row)
            idx_ptr = get_cai_ptr(inds)
            row_major = self.order == "C"

            # Build the synthetic dataset for this permutation on device;
            # float and double overloads of the same kernel.
            if self.dtype == cp.float32:
                permutation_shap_dataset(handle_[0],
                                         <float*> ds_ptr,
                                         <float*> bg_ptr,
                                         <int> self.nrows,
                                         <int> self.ncols,
                                         <float*> row_ptr,
                                         <int*> idx_ptr,
                                         <bool> row_major)
            else:
                permutation_shap_dataset(handle_[0],
                                         <double*> ds_ptr,
                                         <double*> bg_ptr,
                                         <int> self.nrows,
                                         <int> self.ncols,
                                         <double*> row_ptr,
                                         <int*> idx_ptr,
                                         <bool> row_major)

            self.handle.sync()

            # evaluate model on combinations
            model_timer = time.time()
            y = model_func_call(X=self._synth_data,
                                model_func=self.model,
                                gpu_model=self.is_gpu_model)
            self.model_call_time = \
                self.model_call_time + (time.time() - model_timer)

            for i in range(self.model_dimensions):
                # reshape the results to coincide with each entry of the
                # permutation
                if self.model_dimensions == 1:
                    y_hat = y.reshape(2 * self.ncols + 1, len(self.background))
                else:
                    y_hat = y[:, i].reshape(2 * self.ncols + 1,
                                            len(self.background))

                # we get the average of each entry
                y_hat = cp.mean(cp.asarray(self.link_fn(y_hat)),
                                axis=1).astype(self.dtype)

                shap_ptr = get_cai_ptr(shap_values[i][idx])
                y_hat_ptr = get_cai_ptr(y_hat)

                # Accumulate the per-feature output deltas into the running
                # SHAP values for this row.
                if self.dtype == cp.float32:
                    update_perm_shap_values(handle_[0],
                                            <float*> shap_ptr,
                                            <float*> y_hat_ptr,
                                            <int> self.ncols,
                                            <int*> idx_ptr)
                else:
                    update_perm_shap_values(handle_[0],
                                            <double*> shap_ptr,
                                            <double*> y_hat_ptr,
                                            <int> self.ncols,
                                            <int*> idx_ptr)

                self.handle.sync()

        for i in range(self.model_dimensions):
            # each cycle visits every feature twice (forward and reverse
            # pass), hence the normalization by 2 * npermutations
            shap_values[i][idx] = shap_values[i][idx] / (2 * npermutations)

        self.total_time = self.total_time + (time.time() - total_timer)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/__init__.py | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.explainer.kernel_shap import KernelExplainer
from cuml.explainer.permutation_shap import PermutationExplainer
from cuml.explainer.sampling import kmeans_sampling
from cuml.explainer.tree_shap import TreeExplainer
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources that are always built; MG-only sources are appended below.
set(cython_sources "")

# Register the GPU-default extension modules; the *_algo variables gate
# which GPU algorithm implementations each extension is compiled against.
add_module_gpu_default("ann.pyx" ${ann_algo} ${neighbors_algo})
add_module_gpu_default("kneighbors_classifier.pyx" ${kneighbors_classifier_algo} ${neighbors_algo})
add_module_gpu_default("kneighbors_regressor.pyx" ${kneighbors_regressor_algo} ${neighbors_algo})
add_module_gpu_default("nearest_neighbors.pyx" ${nearest_neighbors_algo} ${neighbors_algo})

# Multi-node multi-GPU (OPG) extensions are skipped in single-GPU builds.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
       kneighbors_classifier_mg.pyx
       kneighbors_regressor_mg.pyx
       nearest_neighbors_mg.pyx
  )
endif()

rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX neighbors_
  ASSOCIATED_TARGETS cuml
)

# NOTE(review): targets_using_numpy is not defined in this file; it is
# presumably populated by add_module_gpu_default or an enclosing scope —
# confirm it is set before this point.
foreach(target IN LISTS targets_using_numpy)
  target_include_directories(${target} PRIVATE "${Python_NumPy_INCLUDE_DIRS}")
endforeach()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/kneighbors_regressor_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.internals.array import CumlArray
import cuml.internals.logger as logger
from cuml.internals import api_base_return_generic_skipall
from cuml.neighbors.nearest_neighbors_mg import NearestNeighborsMG
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from libcpp cimport bool
from libcpp.vector cimport vector
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
from libc.stdlib cimport free
cdef extern from "cuml/neighbors/knn_mg.hpp" namespace \
        "ML::KNN::opg":

    # Distributed (OPG: one-process-per-GPU) k-NN regression primitive;
    # writes the per-partition predictions into `out`.
    cdef void knn_regress(
        handle_t &handle,
        vector[floatData_t*] *out,
        vector[floatData_t*] &idx_data,
        PartDescriptor &idx_desc,
        vector[floatData_t*] &query_data,
        PartDescriptor &query_desc,
        vector[float_ptr_vector] &y,
        bool rowMajorIndex,
        bool rowMajorQuery,
        int k,
        int n_outputs,
        size_t batch_size,
        bool verbose
    ) except +
class KNeighborsRegressorMG(NearestNeighborsMG):
"""
Multi-node Multi-GPU K-Nearest Neighbors Regressor Model.
K-Nearest Neighbors Regressor is an instance-based learning technique,
that keeps training samples around for prediction, rather than trying
to learn a generalizable set of model parameters.
"""
def __init__(self, **kwargs):
    """Forward all keyword arguments to the multi-GPU NN base class."""
    super().__init__(**kwargs)
@api_base_return_generic_skipall
def predict(
self,
index,
index_parts_to_ranks,
index_nrows,
query,
query_parts_to_ranks,
query_nrows,
ncols,
n_outputs,
rank,
convert_dtype
) -> typing.List[CumlArray]:
"""
Predict outputs for a query from previously stored index
and index labels.
The process is done in a multi-node multi-GPU fashion.
Parameters
----------
index: [__cuda_array_interface__] of local index partitions
index_parts_to_ranks: mappings of index partitions to ranks
index_nrows: number of index rows
query: [__cuda_array_interface__] of local query partitions
query_parts_to_ranks: mappings of query partitions to ranks
query_nrows: number of query rows
ncols: number of columns
n_outputs: number of outputs columns
rank: rank of current worker
convert_dtype: since only float32 inputs are supported, should
the input be automatically converted?
Returns
-------
predictions : labels
"""
# Detect type
self.get_out_type(index, query)
# Build input arrays and descriptors for native code interfacing
input = type(self).gen_local_input(
index, index_parts_to_ranks, index_nrows, query,
query_parts_to_ranks, query_nrows, ncols, rank, convert_dtype)
# Build input labels arrays and descriptors for native code interfacing
labels = type(self).gen_local_labels(index, convert_dtype, dtype='float32')
query_cais = input['cais']['query']
local_query_rows = list(map(lambda x: x.shape[0], query_cais))
# Build labels output array for native code interfacing
cdef vector[floatData_t*] *out_result_local_parts \
= new vector[floatData_t*]()
output_cais = []
for n_rows in local_query_rows:
o_cai = CumlArray.zeros(shape=(n_rows, n_outputs),
order="C", dtype='float32')
output_cais.append(o_cai)
out_result_local_parts.push_back(new floatData_t(
<float*><uintptr_t>o_cai.ptr, n_rows * n_outputs))
cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
is_verbose = logger.should_log_for(logger.level_debug)
# Launch distributed operations
knn_regress(
handle_[0],
out_result_local_parts,
deref(<vector[floatData_t*]*><uintptr_t>
input['index']['local_parts']),
deref(<PartDescriptor*><uintptr_t>input['index']['desc']),
deref(<vector[floatData_t*]*><uintptr_t>
input['query']['local_parts']),
deref(<PartDescriptor*><uintptr_t>input['query']['desc']),
deref(<vector[float_ptr_vector]*><uintptr_t>labels['labels']),
<bool>False, # column-major index
<bool>False, # column-major query
<int>self.n_neighbors,
<int>n_outputs,
<size_t>self.batch_size,
<bool>is_verbose
)
self.handle.sync()
# Release memory
type(self).free_mem(input)
free(<void*><uintptr_t>labels['labels'])
for i in range(out_result_local_parts.size()):
free(<void*>out_result_local_parts.at(i))
free(<void*><uintptr_t>out_result_local_parts)
return output_cais
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/kneighbors_regressor.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.neighbors.nearest_neighbors import NearestNeighbors
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cython.operator cimport dereference as deref
from libcpp.vector cimport vector
from pylibraft.common.handle cimport handle_t
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t, int64_t
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
import rmm
cimport cuml.common.cuda
# C++ prototype of the single-GPU KNN regression primitive from
# cuml/neighbors/knn.hpp: for each query row, averages the labels of its
# k nearest neighbors given precomputed neighbor indices.
cdef extern from "cuml/neighbors/knn.hpp" namespace "ML":
    void knn_regress(
        handle_t &handle,
        float *out,             # result buffer, one row per query sample
        int64_t *knn_indices,   # (n_samples, k) neighbor indices into y
        vector[float *] &y,     # one label-column pointer per output
        size_t n_rows,          # number of index (training) rows
        size_t n_samples,       # number of query rows
        int k,
    ) except +
class KNeighborsRegressor(RegressorMixin,
                          FMajorInputTagMixin,
                          NearestNeighbors):
    """
    K-Nearest Neighbors Regressor is an instance-based learning technique,
    that keeps training samples around for prediction, rather than trying
    to learn a generalizable set of model parameters.
    The K-Nearest Neighbors Regressor will compute the average of the
    labels for the k closest neighbors and use it as the label.
    Parameters
    ----------
    n_neighbors : int (default=5)
        Default number of neighbors to query
    algorithm : string (default='auto')
        The query algorithm to use. Valid options are:
        - ``'auto'``: to automatically select brute-force or
          random ball cover based on data shape and metric
        - ``'rbc'``: for the random ball algorithm, which partitions
          the data space and uses the triangle inequality to lower the
          number of potential distances. Currently, this algorithm
          supports 2d Euclidean and Haversine.
        - ``'brute'``: for brute-force, slow but produces exact results
        - ``'ivfflat'``: for inverted file, divide the dataset in partitions
          and perform search on relevant partitions only
        - ``'ivfpq'``: for inverted file and product quantization,
          same as inverted list, in addition the vectors are broken
          in n_features/M sub-vectors that will be encoded thanks
          to intermediary k-means clusterings. This encoding provide
          partial information allowing faster distances calculations
    metric : string (default='euclidean').
        Distance metric to use.
    weights : string (default='uniform')
        Sample weights to use. Currently, only the uniform strategy is
        supported.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    Examples
    --------
    .. code-block:: python
        >>> from cuml.neighbors import KNeighborsRegressor
        >>> from cuml.datasets import make_regression
        >>> from cuml.model_selection import train_test_split
        >>> X, y = make_regression(n_samples=100, n_features=10,
        ...                        random_state=5)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...   X, y, train_size=0.80, random_state=5)
        >>> knn = KNeighborsRegressor(n_neighbors=10)
        >>> knn.fit(X_train, y_train)
        KNeighborsRegressor()
        >>> knn.predict(X_test) # doctest: +SKIP
        array([ 14.770798 ,  51.8834   ,  66.15657  ,  46.978275 ,
            21.589611 , -14.519918 , -60.25534  , -20.856869 ,
            29.869623 , -34.83317  ,   0.45447388, 120.39675  ,
            109.94834  ,  63.57794  , -17.956171 ,  78.77663  ,
            30.412262 ,  32.575233 ,  74.72834  , 122.276855 ],
            dtype=float32)
    Notes
    -----
    For additional docs, see `scikitlearn's KNeighborsClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_.
    """
    # Training labels, stored column-major as float32 (set in fit()).
    y = CumlArrayDescriptor()
    def __init__(self, *, weights="uniform", handle=None, verbose=False,
                 output_type=None, **kwargs):
        super().__init__(
            handle=handle,
            verbose=verbose,
            output_type=output_type,
            **kwargs)
        self.y = None
        self.weights = weights
        # Only uniform neighbor weighting is implemented in the native code.
        if weights != "uniform":
            raise ValueError("Only uniform weighting strategy "
                             "is supported currently.")
    @generate_docstring(convert_dtype_cast='np.float32')
    def fit(self, X, y, convert_dtype=True) -> "KNeighborsRegressor":
        """
        Fit a GPU index for k-nearest neighbors regression model.
        """
        # Remember y's original dtype so predict() can restore it on output.
        self._set_target_dtype(y)
        super(KNeighborsRegressor, self).fit(X, convert_dtype=convert_dtype)
        self.y, _, _, _ = \
            input_to_cuml_array(y, order='F', check_dtype=np.float32,
                                convert_to_dtype=(np.float32
                                                  if convert_dtype
                                                  else None))
        return self
    @generate_docstring(convert_dtype_cast='np.float32',
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, n_features)'})
    def predict(self, X, convert_dtype=True) -> CumlArray:
        """
        Use the trained k-nearest neighbors regression model to
        predict the labels for X
        """
        if (convert_dtype):
            cuml.internals.set_api_output_dtype(self._get_target_dtype())
        # First resolve the k nearest training indices for every query row.
        knn_indices = self.kneighbors(X, return_distance=False,
                                      convert_dtype=convert_dtype)
        inds, n_rows, _n_cols, _dtype = \
            input_to_cuml_array(knn_indices, order='C', check_dtype=np.int64,
                                convert_to_dtype=(np.int64
                                                  if convert_dtype
                                                  else None))
        cdef uintptr_t inds_ctype = inds.ptr
        # Support both single-output (1d y) and multi-output (2d y) targets.
        res_cols = 1 if len(self.y.shape) == 1 else self.y.shape[1]
        res_shape = n_rows if res_cols == 1 else (n_rows, res_cols)
        results = CumlArray.zeros(res_shape, dtype=np.float32,
                                  order="C",
                                  index=inds.index)
        cdef uintptr_t results_ptr = results.ptr
        cdef uintptr_t y_ptr
        # NOTE(review): y_vec is allocated with `new` and never deleted —
        # a small per-call leak of the vector object itself; confirm.
        cdef vector[float*] *y_vec = new vector[float*]()
        for col_num in range(res_cols):
            col = self.y if res_cols == 1 else self.y[:, col_num]
            y_ptr = col.ptr
            y_vec.push_back(<float*>y_ptr)
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        knn_regress(
            handle_[0],
            <float*>results_ptr,
            <int64_t*>inds_ctype,
            deref(y_vec),
            <size_t>self.n_samples_fit_,
            <size_t>n_rows,
            <int>self.n_neighbors
        )
        # Ensure native work has finished before results are handed out.
        self.handle.sync()
        return results
    def get_param_names(self):
        return super().get_param_names() + ["weights"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/ann.pxd | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
from libcpp cimport bool
# Declarations mirroring RAFT's approximate-nearest-neighbor (ANN) index
# configuration types from raft/spatial/knn/ann_common.h.
cdef extern from "raft/spatial/knn/ann_common.h" \
        namespace "raft::spatial::knn":
    # Opaque handle to a built ANN index.
    cdef cppclass knnIndex:
        pass
    # Base type of all per-algorithm parameter structs.
    cdef cppclass knnIndexParam:
        pass
    # Scalar quantizer variants (bit widths / encodings).
    ctypedef enum QuantizerType:
        QT_8bit,
        QT_4bit,
        QT_8bit_uniform,
        QT_4bit_uniform,
        QT_fp16,
        QT_8bit_direct,
        QT_6bit
    # Shared inverted-file (IVF) parameters.
    cdef cppclass IVFParam(knnIndexParam):
        int nlist    # number of inverted lists (coarse clusters)
        int nprobe   # number of lists probed at query time
    cdef cppclass IVFFlatParam(IVFParam):
        pass
    cdef cppclass IVFPQParam(IVFParam):
        int M                      # number of sub-vectors per vector
        int n_bits                 # bits per sub-vector code
        bool usePrecomputedTables
# Helper declarations (implemented in ann.pyx) that validate user-supplied
# parameter dicts and translate them into the C++ parameter structs above.
cdef check_algo_params(algo, params)
cdef build_ivfflat_algo_params(params, automated)
cdef build_ivfpq_algo_params(params, automated, additional_info)
cdef build_algo_params(algo, params, additional_info)
cdef destroy_algo_params(ptr)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/kernel_density.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.exceptions import NotFittedError
from cuml.internals.import_utils import has_scipy
from cuml.metrics import pairwise_distances
from cuml.internals.base import Base
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.input_utils import input_to_cupy_array
from cuml.internals.safe_imports import gpu_only_import_from
import math
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
if has_scipy():
from scipy.special import gammainc
# Kernel names accepted by KernelDensity (validated in its __init__).
VALID_KERNELS = [
    "gaussian",
    "tophat",
    "epanechnikov",
    "exponential",
    "linear",
    "cosine",
]
@cp.fuse()
def gaussian_log_kernel(x, h):
    """Elementwise log of the (unnormalized) Gaussian kernel: -x^2/(2 h^2)."""
    return -(x * x) / (2 * h * h)
@cp.fuse()
def tophat_log_kernel(x, h):
    """
    if x < h:
        return 0.0
    else:
        return -FLOAT_MIN
    """
    # (x >= h) evaluates to 0/1, so inside the bandwidth this is exactly
    # 0.0 and outside it is the most negative representable float
    # (i.e. effectively log(0)).
    y = (x >= h) * np.finfo(x.dtype).min
    return y
@cp.fuse()
def epanechnikov_log_kernel(x, h):
    """Elementwise log of the (unnormalized) Epanechnikov kernel."""
    # don't call log(0) otherwise we get NaNs
    z = cp.maximum(1.0 - (x * x) / (h * h), 1e-30)
    # Inside the bandwidth: log(1 - (x/h)^2); outside: float minimum.
    y = (x < h) * cp.log(z)
    y += (x >= h) * np.finfo(y.dtype).min
    return y
@cp.fuse()
def exponential_log_kernel(x, h):
    """Elementwise log of the (unnormalized) exponential kernel: -x/h."""
    return -x / h
@cp.fuse()
def linear_log_kernel(x, h):
    """Elementwise log of the (unnormalized) linear (triangular) kernel."""
    # don't call log(0) otherwise we get NaNs
    z = cp.maximum(1.0 - x / h, 1e-30)
    # Inside the bandwidth: log(1 - x/h); outside: float minimum.
    y = (x < h) * cp.log(z)
    y += (x >= h) * np.finfo(y.dtype).min
    return y
@cp.fuse()
def cosine_log_kernel(x, h):
    """Elementwise log of the (unnormalized) cosine kernel."""
    # don't call log(0) otherwise we get NaNs
    z = cp.maximum(cp.cos(0.5 * np.pi * x / h), 1e-30)
    # Inside the bandwidth: log(cos(pi*x/(2h))); outside: float minimum.
    y = (x < h) * cp.log(z)
    y += (x >= h) * np.finfo(y.dtype).min
    return y
# Maps kernel name -> fused log-kernel, applied elementwise to the pairwise
# distance matrix in KernelDensity.score_samples.
log_probability_kernels_ = {
    "gaussian": gaussian_log_kernel,
    "tophat": tophat_log_kernel,
    "epanechnikov": epanechnikov_log_kernel,
    "exponential": exponential_log_kernel,
    "linear": linear_log_kernel,
    "cosine": cosine_log_kernel,
}
def logVn(n):
    """Log volume of the unit n-ball: (n/2)*log(pi) - lgamma(n/2 + 1)."""
    half_n = 0.5 * n
    return half_n * np.log(np.pi) - math.lgamma(half_n + 1)
def logSn(n):
    """Log surface area of the unit n-sphere: log(2*pi) + logVn(n - 1)."""
    return logVn(n - 1) + np.log(2 * np.pi)
def norm_log_probabilities(log_probabilities, kernel, h, d):
    """Turn summed log-kernel values into normalized log-densities.

    Subtracts the log normalization constant of `kernel` in `d` dimensions
    with bandwidth `h` from every entry of `log_probabilities`.
    Raises ValueError for an unknown kernel name.
    """
    if kernel == "gaussian":
        log_norm = 0.5 * d * np.log(2 * np.pi)
    elif kernel == "tophat":
        log_norm = logVn(d)
    elif kernel == "epanechnikov":
        log_norm = logVn(d) + np.log(2.0 / (d + 2.0))
    elif kernel == "exponential":
        log_norm = logSn(d - 1) + math.lgamma(d)
    elif kernel == "linear":
        log_norm = logVn(d) - np.log(d + 1.0)
    elif kernel == "cosine":
        # Alternating series for the cosine kernel's normalization constant.
        log_norm = 0.0
        term = 2.0 / np.pi
        for j in range(1, d + 1, 2):
            log_norm += term
            term *= -(d - j) * (d - j - 1) * (2.0 / np.pi) ** 2
        log_norm = np.log(log_norm) + logSn(d - 1)
    else:
        raise ValueError("Unsupported kernel.")
    # Bandwidth scaling contributes d*log(h) to the normalization.
    return log_probabilities - (log_norm + d * np.log(h))
@cuda.jit()
def logsumexp_kernel(distances, log_probabilities):
    # Numerically stable log-sum-exp over each row of `distances`;
    # one CUDA thread computes one entry of `log_probabilities`.
    i = cuda.grid(1)
    if i >= log_probabilities.size:
        return
    # Find the row maximum first and subtract it before exponentiating,
    # so exp() never overflows.
    max_exp = distances[i, 0]
    for j in range(1, distances.shape[1]):
        if distances[i, j] > max_exp:
            max_exp = distances[i, j]
    sum = 0.0
    for j in range(0, distances.shape[1]):
        sum += math.exp(distances[i, j] - max_exp)
    # log(sum_j exp(d_ij)) = log(sum_j exp(d_ij - max)) + max
    log_probabilities[i] = math.log(sum) + max_exp
class KernelDensity(Base):
    """
    Kernel Density Estimation. Computes a non-parametric density estimate
    from a finite data sample, smoothing the estimate according to a
    bandwidth parameter.
    Parameters
    ----------
    bandwidth : float, default=1.0
        The bandwidth of the kernel.
    kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
        'cosine'}, default='gaussian'
        The kernel to use.
    metric : str, default='euclidean'
        The distance metric to use. Note that not all metrics are
        valid with all algorithms. Note that the normalization of the density
        output is correct only for the Euclidean distance metric. Default
        is 'euclidean'.
    metric_params : dict, default=None
        Additional parameters to be passed to the tree for use with the
        metric.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    Examples
    --------
    .. code-block:: python
        >>> from cuml.neighbors import KernelDensity
        >>> import cupy as cp
        >>> rng = cp.random.RandomState(42)
        >>> X = rng.random_sample((100, 3))
        >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
        >>> log_density = kde.score_samples(X[:3])
    """
    def __init__(
        self,
        *,
        bandwidth=1.0,
        kernel="gaussian",
        metric="euclidean",
        metric_params=None,
        output_type=None,
        handle=None,
        verbose=False,
    ):
        super(KernelDensity, self).__init__(
            verbose=verbose, handle=handle, output_type=output_type
        )
        self.bandwidth = bandwidth
        self.kernel = kernel
        self.metric = metric
        self.metric_params = metric_params
        # Fail fast on invalid hyperparameters.
        if bandwidth <= 0:
            raise ValueError("bandwidth must be positive")
        if kernel not in VALID_KERNELS:
            raise ValueError("invalid kernel: '{0}'".format(kernel))
    def get_param_names(self):
        return super().get_param_names() + [
            "bandwidth",
            "kernel",
            "metric",
            "metric_params",
        ]
    def fit(self, X, y=None, sample_weight=None):
        """Fit the Kernel Density model on the data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : None
            Ignored.
        sample_weight : array-like of shape (n_samples,), default=None
            List of sample weights attached to the data X.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Weights are kept on device and must be strictly positive.
        if sample_weight is not None:
            self.sample_weight_ = input_to_cupy_array(
                sample_weight, check_dtype=[cp.float32, cp.float64]
            ).array
            if self.sample_weight_.min() <= 0:
                raise ValueError("sample_weight must have positive values")
        else:
            self.sample_weight_ = None
        # Fitting is lazy: just keep the training data; all work happens
        # at scoring/sampling time.
        self.X_ = input_to_cupy_array(
            X, order="C", check_dtype=[cp.float32, cp.float64]
        ).array
        return self
    def score_samples(self, X):
        """Compute the log-likelihood of each sample under the model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            An array of points to query. Last dimension should match dimension
            of training data (n_features).
        Returns
        -------
        density : ndarray of shape (n_samples,)
            Log-likelihood of each sample in `X`. These are normalized to be
            probability densities, so values will be low for high-dimensional
            data.
        """
        if not hasattr(self, "X_"):
            raise NotFittedError()
        X_cuml = input_to_cuml_array(X)
        # Pairwise distances between query points and training points;
        # metric_params may carry a single extra metric argument.
        if self.metric_params:
            if len(self.metric_params) != 1:
                raise ValueError(
                    "Cuml only supports metrics with a single arg."
                )
            metric_arg = list(self.metric_params.values())[0]
            distances = pairwise_distances(
                X_cuml.array,
                self.X_,
                metric=self.metric,
                metric_arg=metric_arg,
            )
        else:
            distances = pairwise_distances(
                X_cuml.array, self.X_, metric=self.metric
            )
        distances = cp.asarray(distances)
        h = self.bandwidth
        # Apply the elementwise log-kernel to the distance matrix.
        if self.kernel in log_probability_kernels_:
            distances = log_probability_kernels_[self.kernel](distances, h)
        else:
            raise ValueError("Unsupported kernel.")
        log_probabilities = cp.zeros(distances.shape[0])
        # In log space, weighting each training point multiplies its kernel
        # contribution, i.e. adds log(weight) before the log-sum-exp.
        if self.sample_weight_ is not None:
            distances += cp.log(self.sample_weight_)
        logsumexp_kernel.forall(log_probabilities.size)(
            distances, log_probabilities
        )
        # Note that sklearns user guide is wrong
        # It says the (unnormalised) probability output for
        # the kernel density is sum(K(x,h)).
        # In fact what they implement is (1/n)*sum(K(x,h))
        # Here we divide by n in normal probability space
        # Which becomes -log(n) in log probability space
        sum_weights = (
            cp.sum(self.sample_weight_)
            if self.sample_weight_ is not None
            else distances.shape[1]
        )
        log_probabilities -= np.log(sum_weights)
        # norm
        if len(X_cuml.array.shape) == 1:
            # if X is one dimensional, we have 1 feature
            dimension = 1
        else:
            dimension = X_cuml.array.shape[1]
        # Subtract the kernel's log normalization constant so the result is
        # a proper log probability density.
        log_probabilities = norm_log_probabilities(
            log_probabilities, self.kernel, h, dimension
        )
        return log_probabilities
    def score(self, X, y=None):
        """Compute the total log-likelihood under the model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : None
            Ignored.
        Returns
        -------
        logprob : float
            Total log-likelihood of the data in X. This is normalized to be a
            probability density, so the value will be low for high-dimensional
            data.
        """
        return cp.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """
        Generate random samples from the model.
        Currently, this is implemented only for gaussian and tophat kernels,
        and the Euclidean metric.
        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.
        random_state : int, cupy RandomState instance or None, default=None
        Returns
        -------
        X : cupy array of shape (n_samples, n_features)
            List of samples.
        """
        if not hasattr(self, "X_"):
            raise NotFittedError()
        supported_kernels = ["gaussian", "tophat"]
        if self.kernel not in supported_kernels or self.metric != "euclidean":
            raise NotImplementedError(
                "Only {} kernels, and the euclidean"
                " metric are supported.".format(supported_kernels)
            )
        if isinstance(random_state, cp.random.RandomState):
            rng = random_state
        else:
            rng = cp.random.RandomState(random_state)
        # Pick training points to center each sample on: uniformly when
        # unweighted, otherwise by inverse-CDF sampling of the weights.
        u = rng.uniform(0, 1, size=n_samples)
        if self.sample_weight_ is None:
            i = (u * self.X_.shape[0]).astype(np.int64)
        else:
            cumsum_weight = cp.cumsum(self.sample_weight_)
            sum_weight = cumsum_weight[-1]
            i = cp.searchsorted(cumsum_weight, u * sum_weight)
        if self.kernel == "gaussian":
            return cp.atleast_2d(rng.normal(self.X_[i], self.bandwidth))
        elif self.kernel == "tophat":
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            has_scipy(raise_if_unavailable=True)
            dim = self.X_.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = cp.einsum("ij,ij->i", X, X).get()
            # do this on the CPU because we don't have
            # a gammainc function readily available
            correction = cp.array(
                gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
                * self.bandwidth
                / np.sqrt(s_sq)
            )
            return self.X_[i] + X * correction[:, np.newaxis]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/kneighbors_classifier.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.neighbors.nearest_neighbors import NearestNeighbors
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.mixins import ClassifierMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cython.operator cimport dereference as deref
from pylibraft.common.handle cimport handle_t
from libcpp.vector cimport vector
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t, int64_t
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
cimport cuml.common.cuda
# C++ prototypes from cuml/neighbors/knn.hpp for single-GPU KNN
# classification over precomputed neighbor indices: knn_classify performs
# a majority vote per query row, knn_class_proba fills one probability
# matrix per output column.
cdef extern from "cuml/neighbors/knn.hpp" namespace "ML":
    void knn_classify(
        handle_t &handle,
        int* out,               # predicted labels, one row per query
        int64_t *knn_indices,   # (n_samples, k) neighbor indices into y
        vector[int*] &y,        # one label-column pointer per output
        size_t n_index_rows,    # number of index (training) rows
        size_t n_samples,       # number of query rows
        int k
    ) except +
    void knn_class_proba(
        handle_t &handle,
        vector[float*] &out,    # one (n_samples, n_classes) buffer per output
        int64_t *knn_indices,
        vector[int*] &y,
        size_t n_index_rows,
        size_t n_samples,
        int k
    ) except +
class KNeighborsClassifier(ClassifierMixin,
                           FMajorInputTagMixin,
                           NearestNeighbors):
    """
    K-Nearest Neighbors Classifier is an instance-based learning technique,
    that keeps training samples around for prediction, rather than trying
    to learn a generalizable set of model parameters.
    Parameters
    ----------
    n_neighbors : int (default=5)
        Default number of neighbors to query
    algorithm : string (default='auto')
        The query algorithm to use. Currently, only 'brute' is supported.
    metric : string (default='euclidean').
        Distance metric to use.
    weights : string (default='uniform')
        Sample weights to use. Currently, only the uniform strategy is
        supported.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    Examples
    --------
    .. code-block:: python
        >>> from cuml.neighbors import KNeighborsClassifier
        >>> from cuml.datasets import make_blobs
        >>> from cuml.model_selection import train_test_split
        >>> X, y = make_blobs(n_samples=100, centers=5,
        ...                   n_features=10, random_state=5)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...   X, y, train_size=0.80, random_state=5)
        >>> knn = KNeighborsClassifier(n_neighbors=10)
        >>> knn.fit(X_train, y_train)
        KNeighborsClassifier()
        >>> knn.predict(X_test) # doctest: +SKIP
        array([1., 2., 2., 3., 4., 2., 4., 4., 2., 3., 1., 4., 3., 1., 3., 4., 3., # noqa: E501
            4., 1., 3.], dtype=float32)
    Notes
    -----
    For additional docs, see `scikitlearn's KNeighborsClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_.
    """
    # Training labels (int32, column-major) and the unique class labels,
    # both set in fit().
    y = CumlArrayDescriptor()
    classes_ = CumlArrayDescriptor()
    def __init__(self, *, weights="uniform", handle=None, verbose=False,
                 output_type=None, **kwargs):
        super().__init__(
            handle=handle,
            verbose=verbose,
            output_type=output_type,
            **kwargs)
        self.y = None
        self.classes_ = None
        self.weights = weights
        # Only uniform neighbor weighting is implemented in the native code.
        if weights != "uniform":
            raise ValueError("Only uniform weighting strategy is "
                             "supported currently.")
    @generate_docstring(convert_dtype_cast='np.float32')
    @cuml.internals.api_base_return_any(set_output_dtype=True)
    def fit(self, X, y, convert_dtype=True) -> "KNeighborsClassifier":
        """
        Fit a GPU index for k-nearest neighbors classifier model.
        """
        super(KNeighborsClassifier, self).fit(X, convert_dtype)
        self.y, _, _, _ = \
            input_to_cuml_array(y, order='F', check_dtype=np.int32,
                                convert_to_dtype=(np.int32
                                                  if convert_dtype
                                                  else None))
        self.classes_ = cp.unique(self.y)
        return self
    @generate_docstring(convert_dtype_cast='np.float32',
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Labels predicted',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_array(get_output_dtype=True)
    def predict(self, X, convert_dtype=True) -> CumlArray:
        """
        Use the trained k-nearest neighbors classifier to
        predict the labels for X
        """
        # First resolve the k nearest training indices for every query row.
        knn_indices = self.kneighbors(X, return_distance=False,
                                      convert_dtype=convert_dtype)
        inds, n_rows, _, _ = \
            input_to_cuml_array(knn_indices, order='C', check_dtype=np.int64,
                                convert_to_dtype=(np.int64
                                                  if convert_dtype
                                                  else None))
        cdef uintptr_t inds_ctype = inds.ptr
        # Support multilabel targets: one output column per label column.
        out_cols = self.y.shape[1] if len(self.y.shape) == 2 else 1
        out_shape = (n_rows, out_cols) if out_cols > 1 else n_rows
        classes = CumlArray.zeros(out_shape, dtype=np.int32, order="C",
                                  index=inds.index)
        # NOTE(review): y_vec is allocated with `new` and never deleted —
        # a small per-call leak of the vector object itself; confirm.
        cdef vector[int*] *y_vec = new vector[int*]()
        # If necessary, separate columns of y to support multilabel
        # classification
        cdef uintptr_t y_ptr
        for i in range(out_cols):
            col = self.y[:, i] if out_cols > 1 else self.y
            y_ptr = col.ptr
            y_vec.push_back(<int*>y_ptr)
        cdef uintptr_t classes_ptr = classes.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        knn_classify(
            handle_[0],
            <int*> classes_ptr,
            <int64_t*>inds_ctype,
            deref(y_vec),
            <size_t>self.n_samples_fit_,
            <size_t>n_rows,
            <int>self.n_neighbors
        )
        # Ensure native work has finished before results are handed out.
        self.handle.sync()
        return classes
    @generate_docstring(convert_dtype_cast='np.float32',
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Labels probabilities',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_generic()
    def predict_proba(
            self,
            X,
            convert_dtype=True) -> typing.Union[CumlArray, typing.Tuple]:
        """
        Use the trained k-nearest neighbors classifier to
        predict the label probabilities for X
        """
        knn_indices = self.kneighbors(X, return_distance=False,
                                      convert_dtype=convert_dtype)
        inds, n_rows, _, _ = \
            input_to_cuml_array(knn_indices, order='C',
                                check_dtype=np.int64,
                                convert_to_dtype=(np.int64
                                                  if convert_dtype
                                                  else None))
        cdef uintptr_t inds_ctype = inds.ptr
        out_cols = self.y.shape[1] if len(self.y.shape) == 2 else 1
        # NOTE(review): y_vec/out_vec allocated with `new` are never
        # deleted — small per-call leaks of the vector objects; confirm.
        cdef vector[int*] *y_vec = new vector[int*]()
        cdef vector[float*] *out_vec = new vector[float*]()
        out_classes = []
        cdef uintptr_t classes_ptr
        cdef uintptr_t y_ptr
        # One (n_rows, n_classes) probability matrix per output column; the
        # class count is the number of unique labels in that column.
        for out_col in range(out_cols):
            col = self.y[:, out_col] if out_cols > 1 else self.y
            classes = CumlArray.zeros((n_rows,
                                       len(cp.unique(cp.asarray(col)))),
                                      dtype=np.float32,
                                      order="C",
                                      index=inds.index)
            out_classes.append(classes)
            classes_ptr = classes.ptr
            out_vec.push_back(<float*>classes_ptr)
            y_ptr = col.ptr
            y_vec.push_back(<int*>y_ptr)
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        knn_class_proba(
            handle_[0],
            deref(out_vec),
            <int64_t*>inds_ctype,
            deref(y_vec),
            <size_t>self.n_samples_fit_,
            <size_t>n_rows,
            <int>self.n_neighbors
        )
        self.handle.sync()
        final_classes = []
        for out_class in out_classes:
            final_classes.append(out_class)
        # Single-output callers get a bare array; multilabel gets a tuple.
        return final_classes[0] \
            if len(final_classes) == 1 else tuple(final_classes)
    def get_param_names(self):
        return super().get_param_names() + ["weights"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/__init__.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
from cuml.neighbors.nearest_neighbors import NearestNeighbors
from cuml.neighbors.nearest_neighbors import kneighbors_graph
from cuml.neighbors.kneighbors_classifier import KNeighborsClassifier
from cuml.neighbors.kneighbors_regressor import KNeighborsRegressor
from cuml.neighbors.kernel_density import (
KernelDensity,
VALID_KERNELS,
logsumexp_kernel,
)
VALID_METRICS = {
"brute": set(
[
"l2",
"euclidean",
"l1",
"cityblock",
"manhattan",
"taxicab",
# TODO: add "braycurtis" after https://github.com/rapidsai/raft/issues/1285
"canberra",
"minkowski",
"lp",
"chebyshev",
"linf",
"jensenshannon",
"cosine",
"correlation",
"inner_product",
"sqeuclidean",
"haversine",
]
),
"rbc": set(["euclidean", "haversine", "l2"]),
"ivfflat": set(
[
"l2",
"euclidean",
"sqeuclidean",
"inner_product",
"cosine",
"correlation",
]
),
"ivfpq": set(
[
"l2",
"euclidean",
"sqeuclidean",
"inner_product",
"cosine",
"correlation",
]
),
}
# Distance metrics accepted for sparse inputs; only the brute-force
# backend supports sparse data.
VALID_METRICS_SPARSE = {
    "brute": {
        "euclidean",
        "l2",
        "inner_product",
        "l1",
        "cityblock",
        "manhattan",
        "taxicab",
        "canberra",
        "linf",
        "chebyshev",
        "jaccard",
        "minkowski",
        "lp",
        "cosine",
        "hellinger",
    }
}
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/kneighbors_classifier_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.internals.array import CumlArray
import cuml.internals.logger as logger
from cuml.internals import api_base_return_generic_skipall
from cuml.neighbors.nearest_neighbors_mg import NearestNeighborsMG
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.common import input_to_cuml_array
from libcpp cimport bool
from libcpp.vector cimport vector
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
from libc.stdlib cimport free
# C++ entry point for the multi-node multi-GPU k-NN classifier.
# Depending on `probas_only`, the primitive fills either `out`
# (predicted labels) or `probas` (per-class probabilities); the unused
# output pointer is passed as NULL by the callers below.
cdef extern from "cuml/neighbors/knn_mg.hpp" namespace \
        "ML::KNN::opg":

    cdef void knn_classify(
        handle_t &handle,
        vector[intData_t*] *out,            # output labels (predict)
        vector[float_ptr_vector] *probas,   # output probabilities (predict_proba)
        vector[floatData_t*] &idx_data,     # local index partitions
        PartDescriptor &idx_desc,
        vector[floatData_t*] &query_data,   # local query partitions
        PartDescriptor &query_desc,
        vector[int_ptr_vector] &y,          # index labels, one entry per partition
        vector[int*] &uniq_labels,          # possible labels per output column
        vector[int] &n_unique,              # label count per output column
        bool rowMajorIndex,
        bool rowMajorQuery,
        bool probas_only,                   # True -> fill probas, False -> fill out
        int k,
        size_t batch_size,
        bool verbose
    ) except +
class KNeighborsClassifierMG(NearestNeighborsMG):
    """
    Multi-node Multi-GPU K-Nearest Neighbors Classifier Model.

    K-Nearest Neighbors Classifier is an instance-based learning technique,
    that keeps training samples around for prediction, rather than trying
    to learn a generalizable set of model parameters.

    Both ``predict`` and ``predict_proba`` hand raw C++ vectors to the
    ``knn_classify`` primitive and are responsible for freeing every
    heap allocation they make before returning.
    """
    def __init__(self, **kwargs):
        super(KNeighborsClassifierMG, self).__init__(**kwargs)

    @api_base_return_generic_skipall
    def predict(
        self,
        index,
        index_parts_to_ranks,
        index_nrows,
        query,
        query_parts_to_ranks,
        query_nrows,
        uniq_labels,
        n_unique,
        ncols,
        rank,
        convert_dtype
    ) -> typing.List[CumlArray]:
        """
        Predict labels for a query from previously stored index
        and index labels.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        index: [__cuda_array_interface__] of local index partitions
        index_parts_to_ranks: mappings of index partitions to ranks
        index_nrows: number of index rows
        query: [__cuda_array_interface__] of local query partitions
        query_parts_to_ranks: mappings of query partitions to ranks
        query_nrows: number of query rows
        uniq_labels: array of arrays of possible labels for columns
        n_unique: array with number of possible labels for each columns
        ncols: number of columns
        rank: rank of current worker
        convert_dtype: since only float32 inputs are supported, should
            the input be automatically converted?

        Returns
        -------
        predictions : labels
        """
        # Detect type
        self.get_out_type(index, query)

        # Build input arrays and descriptors for native code interfacing
        input = type(self).gen_local_input(
            index, index_parts_to_ranks, index_nrows, query,
            query_parts_to_ranks, query_nrows, ncols, rank, convert_dtype)

        # Build input labels arrays and descriptors for native code interfacing
        labels = type(self).gen_local_labels(index, convert_dtype, 'int32')

        query_cais = input['cais']['query']
        local_query_rows = list(map(lambda x: x.shape[0], query_cais))

        # Build uniq_labels_vec vector for native code interfacing.
        # Each entry points at one row of the 2D uniq_labels array, so the
        # raw pointer advances by one row width per output column.
        uniq_labels_d, _, _, _ = \
            input_to_cuml_array(uniq_labels, order='C', check_dtype='int32',
                                convert_to_dtype='int32')
        cdef int* ptr = <int*><uintptr_t>uniq_labels_d.ptr
        cdef vector[int*] *uniq_labels_vec = new vector[int*]()
        for i in range(uniq_labels_d.shape[0]):
            uniq_labels_vec.push_back(<int*>ptr)
            ptr += <int>uniq_labels_d.shape[1]

        # Build n_unique_vec vector for native code interfacing
        cdef vector[int] *n_unique_vec = \
            new vector[int]()
        for uniq_label in n_unique:
            n_unique_vec.push_back(uniq_label)

        n_outputs = len(n_unique)

        # Build labels output array for native code interfacing: one
        # (n_rows, n_outputs) int32 buffer per local query partition.
        cdef vector[intData_t*] *out_result_local_parts \
            = new vector[intData_t*]()
        output_cais = []
        for n_rows in local_query_rows:
            o_cai = CumlArray.zeros(shape=(n_rows, n_outputs),
                                    order="C", dtype='int32')
            output_cais.append(o_cai)
            out_result_local_parts.push_back(new intData_t(
                <int*><uintptr_t>o_cai.ptr, n_rows * n_outputs))

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        is_verbose = logger.should_log_for(logger.level_debug)

        # Launch distributed operations; probas output is NULL because
        # probas_only is False (labels mode).
        knn_classify(
            handle_[0],
            out_result_local_parts,
            <vector[float_ptr_vector]*>0,
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['index']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['index']['desc']),
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['query']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['query']['desc']),
            deref(<vector[int_ptr_vector]*><uintptr_t>labels['labels']),
            deref(<vector[int*]*><uintptr_t>uniq_labels_vec),
            deref(<vector[int]*><uintptr_t>n_unique_vec),
            <bool>False,  # column-major index
            <bool>False,  # column-major query
            <bool>False,  # probas_only: return labels, not probabilities
            <int>self.n_neighbors,
            <size_t>self.batch_size,
            <bool>is_verbose
        )
        self.handle.sync()

        # Release memory: every heap structure built above, but not the
        # CumlArray-backed device buffers that are returned.
        type(self).free_mem(input)
        free(<void*><uintptr_t>labels['labels'])
        type(self)._free_unique(
            <uintptr_t>uniq_labels_vec, <uintptr_t>n_unique_vec)
        for i in range(out_result_local_parts.size()):
            free(<void*>out_result_local_parts.at(i))
        free(<void*><uintptr_t>out_result_local_parts)

        return output_cais

    @api_base_return_generic_skipall
    def predict_proba(self, index, index_parts_to_ranks, index_nrows,
                      query, query_parts_to_ranks, query_nrows,
                      uniq_labels, n_unique, ncols, rank,
                      convert_dtype) -> tuple:
        """
        Predict class probabilities for a query from previously stored
        index and index labels.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        index: [__cuda_array_interface__] of local index and labels partitions
        index_parts_to_ranks: mappings of index partitions to ranks
        index_nrows: number of total index rows
        query: [__cuda_array_interface__] of local query partitions
        query_parts_to_ranks: mappings of query partitions to ranks
        query_nrows: number of total query rows
        uniq_labels: array of labels of a column
        n_unique: array with number of possible labels for each columns
        ncols: number of columns
        rank: int rank of current worker
        convert_dtype: since only float32 inputs are supported, should
            the input be automatically converted?

        Returns
        -------
        probabilities : tuple of arrays, one (n_rows, n_classes) array
            per output column
        """
        # Detect type
        self.get_out_type(index, query)

        # Build input arrays and descriptors for native code interfacing
        input = type(self).gen_local_input(
            index, index_parts_to_ranks, index_nrows, query,
            query_parts_to_ranks, query_nrows, ncols, rank, convert_dtype)

        # Build input labels arrays and descriptors for native code interfacing
        labels = type(self).gen_local_labels(index, convert_dtype, dtype='int32')

        # Build uniq_labels_vec vector for native code interfacing
        # (one row pointer per output column, as in predict()).
        uniq_labels_d, _, _, _ = \
            input_to_cuml_array(uniq_labels, order='C', check_dtype='int32',
                                convert_to_dtype='int32')
        cdef int* ptr = <int*><uintptr_t>uniq_labels_d.ptr
        cdef vector[int*] *uniq_labels_vec = new vector[int*]()
        for i in range(uniq_labels_d.shape[0]):
            uniq_labels_vec.push_back(<int*>ptr)
            ptr += <int>uniq_labels_d.shape[1]

        # Build n_unique_vec vector for native code interfacing
        cdef vector[int] *n_unique_vec = \
            new vector[int]()
        for uniq_label in n_unique:
            n_unique_vec.push_back(uniq_label)

        query_cais = input['cais']['query']
        local_query_rows = list(map(lambda x: x.shape[0], query_cais))
        n_local_queries = len(local_query_rows)

        cdef vector[float_ptr_vector] *probas_local_parts \
            = new vector[float_ptr_vector](n_local_queries)

        n_outputs = len(n_unique)

        # Build probas output array for native code interfacing: for each
        # local query partition, one (n_rows, n_classes) float32 buffer
        # per output column. proba_cais groups the buffers by output.
        proba_cais = [[] for i in range(n_outputs)]
        for query_idx, n_rows in enumerate(local_query_rows):
            for target_idx, n_classes in enumerate(n_unique):
                p_cai = CumlArray.zeros(shape=(n_rows, n_classes),
                                        order="C", dtype='float32')
                proba_cais[target_idx].append(p_cai)
                probas_local_parts.at(query_idx).push_back(<float*><uintptr_t>
                                                           p_cai.ptr)

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        is_verbose = logger.should_log_for(logger.level_debug)

        # Launch distributed operations; labels output is NULL because
        # probas_only is True (probabilities mode).
        knn_classify(
            handle_[0],
            <vector[intData_t*]*>0,
            probas_local_parts,
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['index']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['index']['desc']),
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['query']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['query']['desc']),
            deref(<vector[int_ptr_vector]*><uintptr_t>labels['labels']),
            deref(<vector[int*]*><uintptr_t>uniq_labels_vec),
            deref(<vector[int]*><uintptr_t>n_unique_vec),
            <bool>False,  # column-major index
            <bool>False,  # column-major query
            <bool>True,   # probas_only
            <int>self.n_neighbors,
            <size_t>self.batch_size,
            <bool>is_verbose
        )
        self.handle.sync()

        # Release memory
        type(self).free_mem(input)
        free(<void*><uintptr_t>labels['labels'])
        type(self)._free_unique(
            <uintptr_t>uniq_labels_vec, <uintptr_t>n_unique_vec)
        free(<void*><uintptr_t>probas_local_parts)

        return tuple(proba_cais)

    @staticmethod
    def _free_unique(uniq_labels, n_unique):
        # Free the heap-allocated label-bookkeeping vectors built in
        # predict()/predict_proba(); arguments are raw pointer values.
        free(<void*><uintptr_t>uniq_labels)
        free(<void*><uintptr_t>n_unique)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/nearest_neighbors_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.internals.safe_imports import gpu_only_import_from
cudfDataFrame = gpu_only_import_from('cudf', 'DataFrame')
from cuml.internals.array import CumlArray
from cuml.common import input_to_cuml_array
from cuml.internals import api_base_return_generic_skipall
import cuml.internals.logger as logger
from cuml.neighbors import NearestNeighbors
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.common.opg_data_utils_mg import _build_part_inputs
from libcpp cimport bool
from libcpp.vector cimport vector
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
from libc.stdlib cimport free
# C++ entry point for the multi-node multi-GPU brute-force kNN query:
# fills per-partition output buffers with neighbor indices and distances.
cdef extern from "cuml/neighbors/knn_mg.hpp" namespace \
        "ML::KNN::opg":

    cdef void knn(
        handle_t &handle,
        vector[int64Data_t*] *out_I,        # output neighbor indices
        vector[floatData_t*] *out_D,        # output neighbor distances
        vector[floatData_t*] &idx_data,     # local index partitions
        PartDescriptor &idx_desc,
        vector[floatData_t*] &query_data,   # local query partitions
        PartDescriptor &query_desc,
        bool rowMajorIndex,
        bool rowMajorQuery,
        int k,
        size_t batch_size,
        bool verbose
    ) except +
class NearestNeighborsMG(NearestNeighbors):
    """
    Multi-node multi-GPU Nearest Neighbors kneighbors query.

    NOTE: This implementation of NearestNeighbors is meant to be
    used with an initialized cumlCommunicator instance inside an
    existing distributed system. Refer to the Dask NearestNeighbors
    implementation in `cuml.dask.neighbors.nearest_neighbors`.

    The end-user API for multi-node multi-GPU NearestNeighbors is
    `cuml.dask.neighbors.NearestNeighbors`
    """
    def __init__(self, *, batch_size=2000000, **kwargs):
        super().__init__(**kwargs)
        # Number of query rows processed per batch by the native
        # MNMG primitive.
        self.batch_size = batch_size

    @api_base_return_generic_skipall
    def kneighbors(
        self,
        index,
        index_parts_to_ranks,
        index_nrows,
        query,
        query_parts_to_ranks,
        query_nrows,
        ncols,
        rank,
        n_neighbors,
        convert_dtype
    ) -> typing.Tuple[typing.List[CumlArray], typing.List[CumlArray]]:
        """
        Query the kneighbors of an index

        Parameters
        ----------
        index: [__cuda_array_interface__] of local index partitions
        index_parts_to_ranks: mappings of index partitions to ranks
        index_nrows: number of index rows
        query: [__cuda_array_interface__] of local query partitions
        query_parts_to_ranks: mappings of query partitions to ranks
        query_nrows: number of query rows
        ncols: number of columns
        rank: rank of current worker
        n_neighbors: number of nearest neighbors to query
        convert_dtype: since only float32 inputs are supported, should
            the input be automatically converted?

        Returns
        -------
        predictions : indices and distances
        """
        # Detect type
        self.get_out_type(index, query)

        # A caller-supplied n_neighbors overrides the constructor value.
        self.n_neighbors = self.n_neighbors if n_neighbors is None \
            else n_neighbors

        # Build input arrays and descriptors for native code interfacing
        input = type(self).gen_local_input(
            index, index_parts_to_ranks, index_nrows, query,
            query_parts_to_ranks, query_nrows, ncols, rank, convert_dtype)

        query_cais = input['cais']['query']
        local_query_rows = list(map(lambda x: x.shape[0], query_cais))

        # Build indices and distances outputs for native code interfacing
        result = type(self).alloc_local_output(local_query_rows, self.n_neighbors)

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        is_verbose = logger.should_log_for(logger.level_debug)

        # Launch distributed operations
        knn(
            handle_[0],
            <vector[int64Data_t*]*><uintptr_t>result['indices'],
            <vector[floatData_t*]*><uintptr_t>result['distances'],
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['index']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['index']['desc']),
            deref(<vector[floatData_t*]*><uintptr_t>
                  input['query']['local_parts']),
            deref(<PartDescriptor*><uintptr_t>input['query']['desc']),
            <bool>False,  # column-major index
            <bool>False,  # column-major query
            <int>self.n_neighbors,
            <size_t>self.batch_size,
            <bool>is_verbose
        )
        self.handle.sync()

        # Release memory (heap structures only; the CumlArray outputs in
        # result['cais'] are returned to the caller).
        type(self).free_mem(input, result)

        return result['cais']['distances'], result['cais']['indices']

    def get_out_type(self, index, query):
        # Record the desired output type from the first available
        # partition; query (if present) takes precedence over index.
        if len(index) > 0:
            self._set_base_attributes(output_type=index[0])
        if len(query) > 0:
            self._set_base_attributes(output_type=query[0])

    @staticmethod
    def gen_local_input(index, index_parts_to_ranks, index_nrows,
                        query, query_parts_to_ranks, query_nrows,
                        ncols, rank, convert_dtype):
        # Build native-side partition vectors and descriptors for both
        # index and query. Pointers are returned as integers; free_mem()
        # must be called to release them. Index entries may be
        # (data, labels) tuples, in which case only the data is used here.
        index_dask = [d[0] if isinstance(d, (list, tuple))
                      else d for d in index]

        index_cai, index_local_parts, index_desc = \
            _build_part_inputs(index_dask, index_parts_to_ranks, index_nrows,
                               ncols, rank, convert_dtype)

        query_cai, query_local_parts, query_desc = \
            _build_part_inputs(query, query_parts_to_ranks, query_nrows,
                               ncols, rank, convert_dtype)

        return {
            'index': {
                'local_parts': <uintptr_t>index_local_parts,
                'desc': <uintptr_t>index_desc
            },
            'query': {
                'local_parts': <uintptr_t>query_local_parts,
                'desc': <uintptr_t>query_desc
            },
            # Keep CumlArray references alive so the device buffers
            # behind the raw pointers are not freed prematurely.
            'cais': {
                'index': index_cai,
                'query': query_cai
            },
        }

    @staticmethod
    def gen_local_labels(index, convert_dtype, dtype):
        # Flatten the label columns of each (data, labels) index partition
        # into per-partition C++ pointer vectors of the requested dtype.
        cdef vector[int_ptr_vector] *out_local_parts_i32
        cdef vector[float_ptr_vector] *out_local_parts_f32

        outputs = [d[1] for d in index]
        n_out = len(outputs)

        if dtype == 'int32':
            out_local_parts_i32 = new vector[int_ptr_vector](<int>n_out)
        elif dtype == 'float32':
            out_local_parts_f32 = new vector[float_ptr_vector](<int>n_out)
        else:
            raise ValueError('Wrong dtype')

        def to_cupy(data):
            data, _, _, _ = input_to_cuml_array(data)
            return data.to_output('cupy')

        outputs_cai = []
        for i, arr in enumerate(outputs):
            arr = to_cupy(arr)
            n_features = arr.shape[1] if arr.ndim != 1 else 1
            for j in range(n_features):
                col = arr[:, j] if n_features != 1 else arr
                out_ai, _, _, _ = \
                    input_to_cuml_array(col, order="F",
                                        convert_to_dtype=(dtype
                                                          if convert_dtype
                                                          else None),
                                        check_dtype=[dtype])
                outputs_cai.append(out_ai)
                if dtype == 'int32':
                    out_local_parts_i32.at(i).push_back(<int*><uintptr_t>
                                                        out_ai.ptr)
                else:
                    out_local_parts_f32.at(i).push_back(<float*><uintptr_t>
                                                        out_ai.ptr)

        return {
            # Raw pointer (as int) to the heap vector; caller frees it.
            'labels':
                <uintptr_t>out_local_parts_i32 if dtype == 'int32'
                else <uintptr_t>out_local_parts_f32,
            # Keep the CumlArrays alive alongside the raw pointers.
            'cais': [outputs_cai]
        }

    @staticmethod
    def alloc_local_output(local_query_rows, n_neighbors):
        # Allocate one (n_rows, n_neighbors) indices/distances output
        # pair per local query partition, plus the native wrappers the
        # knn() primitive writes through.
        cdef vector[int64Data_t*] *indices_local_parts \
            = new vector[int64Data_t*]()
        cdef vector[floatData_t*] *dist_local_parts \
            = new vector[floatData_t*]()

        indices_cai = []
        dist_cai = []
        for n_rows in local_query_rows:
            i_cai = CumlArray.zeros(shape=(n_rows, n_neighbors),
                                    order="C", dtype='int64')
            d_cai = CumlArray.zeros(shape=(n_rows, n_neighbors),
                                    order="C", dtype='float32')
            indices_cai.append(i_cai)
            dist_cai.append(d_cai)
            indices_local_parts.push_back(new int64Data_t(
                <int64_t*><uintptr_t>i_cai.ptr, n_rows * n_neighbors))
            dist_local_parts.push_back(new floatData_t(
                <float*><uintptr_t>d_cai.ptr, n_rows * n_neighbors))

        return {
            'indices': <uintptr_t>indices_local_parts,
            'distances': <uintptr_t>dist_local_parts,
            'cais': {
                'indices': indices_cai,
                'distances': dist_cai
            }
        }

    @staticmethod
    def free_mem(input, result=None):
        # Free every heap structure built by gen_local_input() and,
        # when given, alloc_local_output(). Does not touch the device
        # buffers owned by the CumlArrays in the 'cais' entries.
        cdef floatData_t *f_ptr
        cdef vector[floatData_t*] *f_lp
        for input_type in ['index', 'query']:
            ilp = input[input_type]['local_parts']
            f_lp = <vector[floatData_t *]*><uintptr_t>ilp
            for i in range(f_lp.size()):
                f_ptr = f_lp.at(i)
                free(<void*>f_ptr)
            free(<void*><uintptr_t>f_lp)
            free(<void*><uintptr_t>input[input_type]['desc'])

        cdef int64Data_t *i64_ptr
        cdef vector[int64Data_t*] *i64_lp
        if result:
            f_lp = <vector[floatData_t *]*><uintptr_t>result['distances']
            for i in range(f_lp.size()):
                f_ptr = f_lp.at(i)
                free(<void*>f_ptr)
            free(<void*><uintptr_t>f_lp)

            i64_lp = <vector[int64Data_t *]*><uintptr_t>result['indices']
            for i in range(i64_lp.size()):
                i64_ptr = i64_lp.at(i)
                free(<void*>i64_ptr)
            free(<void*><uintptr_t>i64_lp)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/ann.pyx | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
from libcpp cimport bool
cdef check_algo_params(algo, params):
    """
    Validate that ``params`` exposes every attribute required by the
    selected approximate-NN algorithm.

    Parameters
    ----------
    algo : str
        Algorithm name ('ivfflat' or 'ivfpq'); other values pass through
        without validation.
    params : object
        User-supplied parameter object whose attributes are checked.

    Raises
    ------
    ValueError
        If a required parameter is missing for the selected algorithm.
    """
    def check_param_list(params, param_list):
        for param in param_list:
            if not hasattr(params, param):
                # Bug fix: the ValueError was previously constructed but
                # never raised, so misconfigured algo_params slipped
                # through validation silently.
                raise ValueError('algo_params misconfigured : {} '
                                 'parameter unset'.format(param))
    if algo == 'ivfflat':
        check_param_list(params, ['nlist', 'nprobe'])
    elif algo == "ivfpq":
        check_param_list(params, ['nlist', 'nprobe', 'M', 'n_bits',
                                  'usePrecomputedTables'])
cdef build_ivfflat_algo_params(params, automated):
    """
    Build a heap-allocated IVFFlatParam struct from ``params`` and return
    it as an integer pointer. The allocation is expected to be released
    later via destroy_algo_params() — confirm callers do so.
    """
    cdef IVFFlatParam* algo_params = new IVFFlatParam()
    if automated:
        # Defaults used when the caller passed None/'auto'.
        params = {
            'nlist': 8,
            'nprobe': 2
        }
    algo_params.nlist = <int> params['nlist']
    algo_params.nprobe = <int> params['nprobe']
    return <uintptr_t>algo_params
cdef build_ivfpq_algo_params(params, automated, additional_info):
    """
    Build a heap-allocated IVFPQParam struct and return it as an integer
    pointer. In automated mode, derive M / n_bits / usePrecomputedTables
    heuristically from the dataset shape in ``additional_info``
    ('n_samples', 'n_features').
    """
    cdef IVFPQParam* algo_params = new IVFPQParam()
    if automated:
        allowedSubquantizers = [1, 2, 3, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48]
        allowedSubDimSize = {1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32}
        N = additional_info['n_samples']
        D = additional_info['n_features']
        params = {
            'nlist': 8,
            'nprobe': 3
        }

        # Prefer the smallest subquantizer count that divides D with an
        # allowed sub-dimension size; no precomputed tables needed then.
        for n_subq in allowedSubquantizers:
            if D % n_subq == 0 and (D / n_subq) in allowedSubDimSize:
                params['usePrecomputedTables'] = False
                params['M'] = n_subq
                break

        # Fallback: any divisor of D works if precomputed tables are
        # enabled (n_subq == 1 always divides D, so M is always set).
        if 'M' not in params:
            for n_subq in allowedSubquantizers:
                if D % n_subq == 0:
                    params['usePrecomputedTables'] = True
                    params['M'] = n_subq
                    break

        # n_bits should be in set {4, 5, 6, 8} since FAISS 1.7
        # NOTE(review): iterating ascending means 4 is selected whenever
        # N >= 624; if the intent was the largest n_bits with enough
        # training points, the list should descend — confirm.
        params['n_bits'] = 4
        for n_bits in [4, 5, 6, 8]:
            min_train_points = (2 ** n_bits) * 39
            if N >= min_train_points:
                params['n_bits'] = n_bits
                break

    algo_params.nlist = <int> params['nlist']
    algo_params.nprobe = <int> params['nprobe']
    algo_params.M = <int> params['M']
    algo_params.n_bits = <int> params['n_bits']
    algo_params.usePrecomputedTables = \
        <bool> params['usePrecomputedTables']
    return <uintptr_t>algo_params
cdef build_algo_params(algo, params, additional_info):
    """
    Dispatch to the per-algorithm param builder and return the resulting
    knnIndexParam pointer as an integer (0 for algorithms without
    params). User-supplied params are validated first; None/'auto'
    triggers automated parameter selection.
    """
    automated = params is None or params == 'auto'
    if not automated:
        check_algo_params(algo, params)

    cdef knnIndexParam* algo_params = <knnIndexParam*> 0
    if algo == 'ivfflat':
        algo_params = <knnIndexParam*><uintptr_t> \
            build_ivfflat_algo_params(params, automated)
    if algo == 'ivfpq':
        algo_params = <knnIndexParam*><uintptr_t> \
            build_ivfpq_algo_params(params, automated, additional_info)

    return <uintptr_t>algo_params
cdef destroy_algo_params(ptr):
    """Free a knnIndexParam allocation previously returned (as an
    integer pointer) by build_algo_params()."""
    cdef knnIndexParam* algo_params = <knnIndexParam*> <uintptr_t> ptr
    del algo_params
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/neighbors/nearest_neighbors.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
cupyx = gpu_only_import('cupyx')
import math
import cuml.internals
from cuml.internals.base import UniversalBase
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.common.doc_utils import generate_docstring
from cuml.common.doc_utils import insert_into_docstring
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.internals.input_utils import input_to_cupy_array
from cuml.common import input_to_cuml_array
from cuml.common.sparse_utils import is_sparse
from cuml.common.sparse_utils import is_dense
from cuml.metrics.distance_type cimport DistanceType
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
from cuml.neighbors.ann cimport *
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
rmm = gpu_only_import('rmm')
cimport cuml.common.cuda
# Native declarations are only compiled into GPU builds.
IF GPUBUILD == 1:
    import warnings
    from cython.operator cimport dereference as deref
    from libcpp cimport bool
    from libc.stdint cimport uintptr_t, int64_t, uint32_t

    from libcpp.vector cimport vector

    from pylibraft.common.handle cimport handle_t

    # RAFT random-ball-cover index used by the 'rbc' algorithm.
    cdef extern from "raft/spatial/knn/ball_cover_types.hpp" \
            namespace "raft::spatial::knn":
        cdef cppclass BallCoverIndex[int64_t, float, uint32_t]:
            BallCoverIndex(const handle_t &handle,
                           float *X,
                           uint32_t n_rows,
                           uint32_t n_cols,
                           DistanceType metric) except +

    cdef extern from "cuml/neighbors/knn.hpp" namespace "ML":
        # Exact kNN over dense device arrays ('brute' algorithm).
        void brute_force_knn(
            const handle_t &handle,
            vector[float*] &inputs,
            vector[int] &sizes,
            int D,
            float *search_items,
            int n,
            int64_t *res_I,
            float *res_D,
            int k,
            bool rowMajorIndex,
            bool rowMajorQuery,
            DistanceType metric,
            float metric_arg
        ) except +

        # Build / query the random-ball-cover index ('rbc' algorithm).
        void rbc_build_index(
            const handle_t &handle,
            BallCoverIndex[int64_t, float, uint32_t] &index,
        ) except +

        void rbc_knn_query(
            const handle_t &handle,
            BallCoverIndex[int64_t, float, uint32_t] &index,
            uint32_t k,
            float *search_items,
            uint32_t n_search_items,
            int64_t *out_inds,
            float *out_dists
        ) except +

        # Approximate NN index ('ivfflat' / 'ivfpq' algorithms).
        void approx_knn_build_index(
            handle_t &handle,
            knnIndex* index,
            knnIndexParam* params,
            DistanceType metric,
            float metricArg,
            float *index_array,
            int n,
            int D
        ) except +

        void approx_knn_search(
            handle_t &handle,
            float *distances,
            int64_t* indices,
            knnIndex* index,
            int k,
            const float *query_array,
            int n
        ) except +

    # Exact kNN over CSR sparse inputs (sparse 'brute' path).
    cdef extern from "cuml/neighbors/knn_sparse.hpp" namespace "ML::Sparse":
        void brute_force_knn(handle_t &handle,
                             const int *idxIndptr,
                             const int *idxIndices,
                             const float *idxData,
                             size_t idxNNZ,
                             int n_idx_rows,
                             int n_idx_cols,
                             const int *queryIndptr,
                             const int *queryIndices,
                             const float *queryData,
                             size_t queryNNZ,
                             int n_query_rows,
                             int n_query_cols,
                             int *output_indices,
                             float *output_dists,
                             int k,
                             size_t batch_size_index,
                             size_t batch_size_query,
                             DistanceType metric,
                             float metricArg) except +
class NearestNeighbors(UniversalBase,
CMajorInputTagMixin):
"""
NearestNeighbors is an queries neighborhoods from a given set of
datapoints. Currently, cuML supports k-NN queries, which define
the neighborhood as the closest `k` neighbors to each query point.
Parameters
----------
n_neighbors : int (default=5)
Default number of neighbors to query
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
algorithm : string (default='auto')
The query algorithm to use. Valid options are:
- ``'auto'``: to automatically select brute-force or
random ball cover based on data shape and metric
- ``'rbc'``: for the random ball algorithm, which partitions
the data space and uses the triangle inequality to lower the
number of potential distances. Currently, this algorithm
supports Haversine (2d) and Euclidean in 2d and 3d.
- ``'brute'``: for brute-force, slow but produces exact results
- ``'ivfflat'``: for inverted file, divide the dataset in partitions
and perform search on relevant partitions only
- ``'ivfpq'``: for inverted file and product quantization,
same as inverted list, in addition the vectors are broken
in n_features/M sub-vectors that will be encoded thanks
to intermediary k-means clusterings. This encoding provide
partial information allowing faster distances calculations
metric : string (default='euclidean').
Distance metric to use. Supported distances are ['l1, 'cityblock',
'taxicab', 'manhattan', 'euclidean', 'l2', 'braycurtis', 'canberra',
'minkowski', 'chebyshev', 'jensenshannon', 'cosine', 'correlation']
p : float (default=2)
Parameter for the Minkowski metric. When p = 1, this is equivalent to
manhattan distance (l1), and euclidean distance (l2) for p = 2. For
arbitrary p, minkowski distance (lp) is used.
algo_params : dict, optional (default=None)
Used to configure the nearest neighbor algorithm to be used.
If set to None, parameters will be generated automatically.
Parameters for algorithm ``'brute'`` when inputs are sparse:
- batch_size_index : (int) number of rows in each batch of \
index array
- batch_size_query : (int) number of rows in each batch of \
query array
Parameters for algorithm ``'ivfflat'``:
- nlist: (int) number of cells to partition dataset into
- nprobe: (int) at query time, number of cells used for search
Parameters for algorithm ``'ivfpq'``:
- nlist: (int) number of cells to partition dataset into
- nprobe: (int) at query time, number of cells used for search
- M: (int) number of subquantizers
- n_bits: (int) bits allocated per subquantizer
- usePrecomputedTables : (bool) whether to use precomputed tables
metric_expanded : bool
Can increase performance in Minkowski-based (Lp) metrics (for p > 1)
by using the expanded form and not computing the n-th roots.
This is currently ignored.
metric_params : dict, optional (default = None)
This is currently ignored.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Examples
--------
.. code-block:: python
>>> import cudf
>>> from cuml.neighbors import NearestNeighbors
>>> from cuml.datasets import make_blobs
>>> X, _ = make_blobs(n_samples=5, centers=5,
... n_features=10, random_state=42)
>>> # build a cudf Dataframe
>>> X_cudf = cudf.DataFrame(X)
>>> # fit model
>>> model = NearestNeighbors(n_neighbors=3)
>>> model.fit(X)
NearestNeighbors()
>>> # get 3 nearest neighbors
>>> distances, indices = model.kneighbors(X_cudf)
>>> # print results
>>> print(indices)
0 1 2
0 0 1 3
1 1 0 2
2 2 4 0
3 3 0 2
4 4 2 3
>>> print(distances) # doctest: +SKIP
0 1 2
0 0.007812 24.786566 26.399996
1 0.000000 24.786566 30.045017
2 0.007812 5.458400 27.051241
3 0.000000 26.399996 27.543869
4 0.000000 5.458400 29.583437
Notes
-----
Warning: Approximate Nearest Neighbor methods might be unstable
in this version of cuML. This is due to a known issue in
the FAISS release that this cuML version is linked to.
(see cuML issue #4020)
Warning: For compatibility with libraries that rely on scikit-learn,
kwargs allows for passing of arguments that are not explicit in the
class constructor, such as 'n_jobs', but they have no effect on behavior.
For an additional example see `the NearestNeighbors notebook
<https://github.com/rapidsai/cuml/blob/main/notebooks/nearest_neighbors_demo.ipynb>`_.
For additional docs, see `scikit-learn's NearestNeighbors
<https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors>`_.
"""
_cpu_estimator_import_path = 'sklearn.neighbors.NearestNeighbors'
_fit_X = CumlArrayDescriptor(order='C')
@device_interop_preparation
def __init__(self, *,
n_neighbors=5,
verbose=False,
handle=None,
algorithm="auto",
metric="euclidean",
p=2,
algo_params=None,
metric_params=None,
output_type=None,
**kwargs):
super().__init__(handle=handle,
verbose=verbose,
output_type=output_type)
self.n_neighbors = n_neighbors
self.n_indices = 0
self.metric = metric
self.metric_params = metric_params
self.algo_params = algo_params
self.p = p
self.algorithm = algorithm
self._fit_method = self.algorithm
self.selected_algorithm_ = algorithm
self.algo_params = algo_params
self.knn_index = None
@generate_docstring(X='dense_sparse')
@enable_device_interop
def fit(self, X, convert_dtype=True) -> "NearestNeighbors":
"""
Fit GPU index for performing nearest neighbor queries.
"""
if len(X.shape) != 2:
raise ValueError("data should be two dimensional")
self.n_samples_fit_, self.n_features_in_ = X.shape
if self.algorithm == "auto":
if (self.n_features_in_ == 2 or self.n_features_in_ == 3) and \
not is_sparse(X) and self.effective_metric_ in \
cuml.neighbors.VALID_METRICS["rbc"] and \
math.sqrt(X.shape[0]) >= self.n_neighbors:
self._fit_method = "rbc"
else:
self._fit_method = "brute"
if self.algorithm == "rbc" and self.n_features_in_ > 3:
raise ValueError("The rbc algorithm is not supported for"
" >3 dimensions currently.")
if is_sparse(X):
valid_metrics = cuml.neighbors.VALID_METRICS_SPARSE
valid_metric_str = "_SPARSE"
self._fit_X = SparseCumlArray(X, convert_to_dtype=cp.float32,
convert_format=False)
else:
valid_metrics = cuml.neighbors.VALID_METRICS
valid_metric_str = ""
self._fit_X, _, _, _ = \
input_to_cuml_array(X, order='C', check_dtype=np.float32,
convert_to_dtype=(np.float32
if convert_dtype
else None))
self._output_index = self._fit_X.index
self.feature_names_in_ = self._fit_X.index
if self.effective_metric_ not in \
valid_metrics[self._fit_method]:
raise ValueError("Metric %s is not valid. "
"Use sorted(cuml.neighbors.VALID_METRICS%s[%s]) "
"to get valid options." %
(valid_metric_str,
self.effective_metric_,
self._fit_method))
IF GPUBUILD == 1:
cdef handle_t* handle_ = <handle_t*><uintptr_t> self.handle.getHandle()
cdef knnIndexParam* algo_params = <knnIndexParam*> 0
if self._fit_method in ['ivfflat', 'ivfpq']:
warnings.warn("\nWarning: Approximate Nearest Neighbor methods "
"might be unstable in this version of cuML. "
"This is due to a known issue in the FAISS "
"release that this cuML version is linked to. "
"(see cuML issue #4020)")
if not is_dense(X):
raise ValueError("Approximate Nearest Neighbors methods "
"require dense data")
additional_info = {'n_samples': self.n_samples_fit_,
'n_features': self.n_features_in_}
knn_index = new knnIndex()
self.knn_index = <uintptr_t> knn_index
algo_params = <knnIndexParam*><uintptr_t> \
build_algo_params(self._fit_method, self.algo_params,
additional_info)
metric = self._build_metric_type(self.effective_metric_)
approx_knn_build_index(handle_[0],
<knnIndex*>knn_index,
<knnIndexParam*>algo_params,
<DistanceType>metric,
<float>self.p,
<float*><uintptr_t>self._fit_X.ptr,
<int>self.n_samples_fit_,
<int>self.n_features_in_)
self.handle.sync()
destroy_algo_params(<uintptr_t>algo_params)
del self._fit_X
elif self._fit_method == "rbc":
metric = self._build_metric_type(self.effective_metric_)
rbc_index = new BallCoverIndex[int64_t, float, uint32_t](
handle_[0], <float*><uintptr_t>self._fit_X.ptr,
<uint32_t>self.n_samples_fit_, <uint32_t>self.n_features_in_,
<DistanceType>metric)
rbc_build_index(handle_[0],
deref(rbc_index))
self.knn_index = <uintptr_t>rbc_index
self.n_indices = 1
return self
def get_param_names(self):
    """Return the list of hyperparameter names accepted by this estimator."""
    extra = [
        "n_neighbors",
        "algorithm",
        "metric",
        "p",
        "metric_params",
        "algo_params",
        # accepted for sklearn compatibility; has no effect on behavior
        "n_jobs",
    ]
    return super().get_param_names() + extra
def get_attr_names(self):
    """Return the fitted-attribute names used for CPU/GPU interop transfer."""
    return [
        '_fit_X',
        'effective_metric_',
        'effective_metric_params_',
        'n_samples_fit_',
        'n_features_in_',
        'feature_names_in_',
        '_fit_method',
    ]
@staticmethod
def _build_metric_type(metric):
    """Map a user-facing metric name to its ``DistanceType`` enum value.

    Raises ``ValueError`` for names that have no mapping.
    """
    lookup = {
        "euclidean": DistanceType.L2SqrtExpanded,
        "l2": DistanceType.L2SqrtExpanded,
        "sqeuclidean": DistanceType.L2Expanded,
        "cityblock": DistanceType.L1,
        "l1": DistanceType.L1,
        "manhattan": DistanceType.L1,
        "taxicab": DistanceType.L1,
        "braycurtis": DistanceType.BrayCurtis,
        "canberra": DistanceType.Canberra,
        "minkowski": DistanceType.LpUnexpanded,
        "lp": DistanceType.LpUnexpanded,
        "chebyshev": DistanceType.Linf,
        "linf": DistanceType.Linf,
        "jensenshannon": DistanceType.JensenShannon,
        "cosine": DistanceType.CosineExpanded,
        "correlation": DistanceType.CorrelationExpanded,
        "inner_product": DistanceType.InnerProduct,
        "jaccard": DistanceType.JaccardExpanded,
        "hellinger": DistanceType.HellingerExpanded,
        "haversine": DistanceType.Haversine,
    }
    if metric not in lookup:
        raise ValueError("Metric %s is not supported" % metric)
    return lookup[metric]
@insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')],
                       return_values=[('dense', '(n_samples, n_features)'),
                                      ('dense',
                                       '(n_samples, n_features)')])
@enable_device_interop
def kneighbors(
    self,
    X=None,
    n_neighbors=None,
    return_distance=True,
    convert_dtype=True,
    two_pass_precision=False
) -> typing.Union[CumlArray, typing.Tuple[CumlArray, CumlArray]]:
    """
    Query the GPU index for the k nearest neighbors of column vectors in X.

    Parameters
    ----------
    X : {}
    n_neighbors : Integer
        Number of neighbors to search. If not provided, the n_neighbors
        from the model instance is used (default=10)
    return_distance: Boolean
        If False, distances will not be returned
    convert_dtype : bool, optional (default = True)
        When set to True, the kneighbors method will automatically
        convert the inputs to np.float32.
    two_pass_precision : bool, optional (default = False)
        When set to True, a slow second pass will be used to improve the
        precision of results returned for searches using L2-derived
        metrics. FAISS uses the Euclidean distance decomposition trick to
        compute distances in this case, which may result in numerical
        errors for certain data. In particular, when several samples
        are close to the query sample (relative to typical inter-sample
        distances), numerical instability may cause the computed distance
        between the query and itself to be larger than the computed
        distance between the query and another sample. As a result, the
        query is not returned as the nearest neighbor to itself.  If this
        flag is set to true, distances to the query vectors will be
        recomputed with high precision for all retrieved samples, and the
        results will be re-sorted accordingly. Note that for large values
        of k or large numbers of query vectors, this correction becomes
        impractical in terms of both runtime and memory. It should be used
        with care and only when strictly necessary (when precise results
        are critical and samples may be tightly clustered).

    Returns
    -------
    distances : {}
        The distances of the k-nearest neighbors for each column vector
        in X

    indices : {}
        The indices of the k-nearest neighbors for each column vector in X
    """
    # Thin public wrapper: all the work (validation, dense/sparse
    # dispatch, optional precision re-ranking) lives in the internal
    # helper so kneighbors_graph() can reuse it with a forced out type.
    return self._kneighbors_internal(X, n_neighbors, return_distance,
                                     convert_dtype,
                                     two_pass_precision=two_pass_precision)
def _kneighbors_internal(self, X=None, n_neighbors=None,
                         return_distance=True, convert_dtype=True,
                         _output_type=None, two_pass_precision=False):
    """
    Query the GPU index for the k nearest neighbors of column vectors in X.

    Parameters
    ----------
    X : array-like (device or host) shape = (n_samples, n_features)
        Dense matrix (floats or doubles) of shape (n_samples, n_features).
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    n_neighbors : Integer
        Number of neighbors to search. If not provided, the n_neighbors
        from the model instance is used (default=10)
    return_distance: Boolean
        If False, distances will not be returned
    convert_dtype : bool, optional (default = True)
        When set to True, the kneighbors method will automatically
        convert the inputs to np.float32.
    _output_type : str, optional (default = None)
        When set, overrides the instance's output type for the returned
        arrays (used internally, e.g. by kneighbors_graph).
    two_pass_precision : bool, optional (default = False)
        When set to True, a slow second pass will be used to improve the
        precision of results returned for searches using L2-derived
        metrics. See ``kneighbors`` for the full description.

    Returns
    -------
    distances: cupy ndarray
        The distances of the k-nearest neighbors for each column vector
        in X

    indices: cupy ndarray
        The indices of the k-nearest neighbors for each column vector in X
    """
    n_neighbors = self.n_neighbors if n_neighbors is None else n_neighbors

    use_training_data = X is None
    if X is None:
        X = self._fit_X
        # Query one extra neighbor: each training point is its own
        # nearest neighbor and that first column is dropped below.
        # NOTE(review): if both n_neighbors and self.n_neighbors are
        # None, this += raises TypeError before the validation below
        # has a chance to fire — confirm intended.
        n_neighbors += 1

    if (n_neighbors is None and self.n_neighbors is None) \
            or n_neighbors <= 0:
        raise ValueError("k or n_neighbors must be a positive integers")

    if n_neighbors > self.n_samples_fit_:
        raise ValueError("n_neighbors must be <= number of "
                         "samples in index")

    if X is None:
        raise ValueError("Model needs to be trained "
                         "before calling kneighbors()")

    if X.shape[1] != self.n_features_in_:
        raise ValueError("Dimensions of X need to match dimensions of "
                         "indices (%d)" % self.n_features_in_)

    # Dispatch on how the index was fit: sparse training data implies
    # the sparse brute-force path, anything else goes dense.
    if hasattr(self, '_fit_X') and isinstance(self._fit_X,
                                              SparseCumlArray):
        D_ndarr, I_ndarr = self._kneighbors_sparse(X, n_neighbors)
    else:
        D_ndarr, I_ndarr = self._kneighbors_dense(X, n_neighbors,
                                                  convert_dtype)

    self.handle.sync()

    out_type = _output_type \
        if _output_type is not None else self._get_output_type(X)

    if two_pass_precision:
        metric = self._build_metric_type(self.effective_metric_)
        metric_is_l2_based = (
            metric == DistanceType.L2SqrtExpanded or
            metric == DistanceType.L2Expanded or
            (metric == DistanceType.LpUnexpanded and self.p == 2)
        )

        # FAISS employs imprecise distance algorithm only for L2-based
        # expanded metrics. This code correct numerical instabilities
        # that could arise.
        if metric_is_l2_based:
            index = I_ndarr.index
            X = input_to_cupy_array(X).array
            I_cparr = I_ndarr.to_output('cupy')

            # Recompute exact squared L2 distances for the retrieved
            # neighbors and re-sort each row by them.
            self_diff = X[I_cparr] - X[:, cp.newaxis, :]
            precise_distances = cp.sum(
                self_diff * self_diff, axis=2
            )

            correct_order = cp.argsort(precise_distances, axis=1)

            D_cparr = cp.take_along_axis(precise_distances,
                                         correct_order,
                                         axis=1)
            I_cparr = cp.take_along_axis(I_cparr, correct_order, axis=1)

            D_ndarr = cuml.common.input_to_cuml_array(D_cparr).array
            D_ndarr.index = index
            I_ndarr = cuml.common.input_to_cuml_array(I_cparr).array
            I_ndarr.index = index

    I_ndarr = I_ndarr.to_output(out_type)
    D_ndarr = D_ndarr.to_output(out_type)

    # drop first column if using training data as X
    # this will need to be moved to the C++ layer (cuml issue #2562)
    if use_training_data:
        if out_type in {'cupy', 'numpy', 'numba'}:
            I_ndarr = I_ndarr[:, 1:]
            D_ndarr = D_ndarr[:, 1:]
        else:
            # NOTE(review): DataFrame.drop returns a new object by
            # default (inplace=False), so these return values appear to
            # be discarded — confirm whether the first column is really
            # removed for dataframe output types.
            I_ndarr.drop(I_ndarr.columns[0], axis=1)
            D_ndarr.drop(D_ndarr.columns[0], axis=1)

    return (D_ndarr, I_ndarr) if return_distance else I_ndarr
def _kneighbors_dense(self, X, n_neighbors, convert_dtype=None):
    """Dense-index query path.

    Dispatches to brute force, random ball cover (rbc), or an
    approximate (ivfflat/ivfpq) index depending on ``_fit_method``.
    Returns (distances, indices) as CumlArrays of shape (N, n_neighbors).
    """
    if not is_dense(X):
        raise ValueError("A NearestNeighbors model trained on dense "
                         "data requires dense input to kneighbors()")

    _metric = self._build_metric_type(self.effective_metric_)

    X_m, N, _, _ = \
        input_to_cuml_array(X, order='C', check_dtype=np.float32,
                            convert_to_dtype=(np.float32 if convert_dtype
                                              else False))

    # Need to establish result matrices for indices (Nxk)
    # and for distances (Nxk)
    I_ndarr = CumlArray.zeros((N, n_neighbors), dtype=np.int64, order="C",
                              index=X_m.index)
    D_ndarr = CumlArray.zeros((N, n_neighbors),
                              dtype=np.float32, order="C",
                              index=X_m.index)

    cdef uintptr_t _I_ptr = I_ndarr.ptr
    cdef uintptr_t _D_ptr = D_ndarr.ptr

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef vector[float*] *inputs = new vector[float*]()
        cdef vector[int] *sizes = new vector[int]()
        cdef knnIndex* knn_index = <knnIndex*> 0
        cdef BallCoverIndex[int64_t, float, uint32_t]* rbc_index = \
            <BallCoverIndex[int64_t, float, uint32_t]*> 0

        # rbc only guarantees correctness while n_neighbors stays below
        # sqrt(n_samples); otherwise fall back to brute force.
        fallback_to_brute = self._fit_method == "rbc" and \
            n_neighbors > math.sqrt(self.n_samples_fit_)

        if fallback_to_brute:
            warnings.warn("algorithm='rbc' requires sqrt(%s) be "
                          "> n_neighbors (%s). falling back to "
                          "brute force search" %
                          (self.n_samples_fit_, n_neighbors))

        if self._fit_method == 'brute' or fallback_to_brute:
            inputs.push_back(<float*><uintptr_t>self._fit_X.ptr)
            sizes.push_back(<int>self.n_samples_fit_)

            brute_force_knn(
                handle_[0],
                deref(inputs),
                deref(sizes),
                <int>self.n_features_in_,
                <float*><uintptr_t>X_m.ptr,
                <int>N,
                <int64_t*>_I_ptr,
                <float*>_D_ptr,
                <int>n_neighbors,
                True,
                True,
                <DistanceType>_metric,
                # minkowski order is currently the only metric argument.
                <float>self.p
            )
        elif self._fit_method == "rbc":
            rbc_index = <BallCoverIndex[int64_t, float, uint32_t]*>\
                <uintptr_t>self.knn_index
            rbc_knn_query(handle_[0],
                          deref(rbc_index),
                          <uint32_t> n_neighbors,
                          <float*><uintptr_t>X_m.ptr,
                          <uint32_t> N,
                          <int64_t*>_I_ptr,
                          <float*>_D_ptr)
        else:
            # ivfflat / ivfpq: query the prebuilt approximate index.
            knn_index = <knnIndex*><uintptr_t> self.knn_index
            approx_knn_search(
                handle_[0],
                <float*>_D_ptr,
                <int64_t*>_I_ptr,
                <knnIndex*>knn_index,
                <int>n_neighbors,
                <float*><uintptr_t>X_m.ptr,
                <int>N
            )

    return D_ndarr, I_ndarr
def _kneighbors_sparse(self, X, n_neighbors):
    """Sparse (CSR) brute-force query path.

    Both the fitted index and the query must be sparse. Batch sizes for
    the index and query sides can be tuned via ``algo_params``
    ('batch_size_index' / 'batch_size_query', default 10000 each).
    Returns (distances, indices) as CumlArrays of shape (N, n_neighbors).
    """
    if isinstance(self._fit_X, SparseCumlArray) and not is_sparse(X):
        raise ValueError("A NearestNeighbors model trained on sparse "
                         "data requires sparse input to kneighbors()")

    batch_size_index = 10000
    if self.algo_params is not None and \
            "batch_size_index" in self.algo_params:
        batch_size_index = self.algo_params['batch_size_index']

    batch_size_query = 10000
    if self.algo_params is not None and \
            "batch_size_query" in self.algo_params:
        batch_size_query = self.algo_params['batch_size_query']

    X_m = SparseCumlArray(X, convert_to_dtype=cp.float32,
                          convert_format=False)
    _metric = self._build_metric_type(self.effective_metric_)

    # Raw device pointers to the CSR components of index and query.
    cdef uintptr_t _idx_indptr = self._fit_X.indptr.ptr
    cdef uintptr_t _idx_indices = self._fit_X.indices.ptr
    cdef uintptr_t _idx_data = self._fit_X.data.ptr

    cdef uintptr_t _search_indptr = X_m.indptr.ptr
    cdef uintptr_t _search_indices = X_m.indices.ptr
    cdef uintptr_t _search_data = X_m.data.ptr

    # Need to establish result matrices for indices (Nxk)
    # and for distances (Nxk)
    I_ndarr = CumlArray.zeros((X_m.shape[0], n_neighbors),
                              dtype=np.int32, order="C")
    D_ndarr = CumlArray.zeros((X_m.shape[0], n_neighbors),
                              dtype=np.float32, order="C")

    cdef uintptr_t _I_ptr = I_ndarr.ptr
    cdef uintptr_t _D_ptr = D_ndarr.ptr

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        brute_force_knn(handle_[0],
                        <int*> _idx_indptr,
                        <int*> _idx_indices,
                        <float*> _idx_data,
                        self._fit_X.nnz,
                        self.n_samples_fit_,
                        self.n_features_in_,
                        <int*> _search_indptr,
                        <int*> _search_indices,
                        <float*> _search_data,
                        X_m.nnz,
                        X_m.shape[0],
                        X_m.shape[1],
                        <int*>_I_ptr,
                        <float*>_D_ptr,
                        n_neighbors,
                        <size_t>batch_size_index,
                        <size_t>batch_size_query,
                        <DistanceType> _metric,
                        <float>self.p)

    return D_ndarr, I_ndarr
@insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')])
@enable_device_interop
def kneighbors_graph(self,
                     X=None,
                     n_neighbors=None,
                     mode='connectivity') -> SparseCumlArray:
    """
    Find the k nearest neighbors of column vectors in X and return as
    a sparse matrix in CSR format.

    Parameters
    ----------
    X : {}
    n_neighbors : Integer
        Number of neighbors to search. If not provided, the n_neighbors
        from the model instance is used
    mode : string (default='connectivity')
        Values in connectivity matrix: 'connectivity' returns the
        connectivity matrix with ones and zeros, 'distance' returns the
        edges as the distances between points with the requested metric.

    Returns
    -------
    A : sparse graph in CSR format, shape = (n_samples, n_samples_fit)
        n_samples_fit is the number of samples in the fitted data where
        A[i, j] is assigned the weight of the edge that connects i to j.
        Values will either be ones/zeros or the selected distance metric.
        Return types are either cupy's CSR sparse graph (device) or
        numpy's CSR sparse graph (host)
    """
    # NOTE(review): this relies on the truthiness of self._fit_X to
    # detect an unfitted model — confirm CumlArray defines a boolean
    # interpretation (an unset attribute would raise AttributeError
    # instead).
    if not self._fit_X:
        raise ValueError('This NearestNeighbors instance has not been '
                         'fitted yet, call "fit" before using this '
                         'estimator')

    if n_neighbors is None:
        n_neighbors = self.n_neighbors

    if mode == 'connectivity':
        # Only indices are needed; all edge weights are 1.
        indices = self._kneighbors_internal(X, n_neighbors,
                                            return_distance=False,
                                            _output_type="cupy")

        n_samples = indices.shape[0]
        distances = cp.ones(n_samples * n_neighbors, dtype=np.float32)

    elif mode == 'distance':
        distances, indices = self._kneighbors_internal(X, n_neighbors,
                                                       _output_type="cupy")
        distances = cp.ravel(distances)

    else:
        raise ValueError('Unsupported mode, must be one of "connectivity"'
                         ' or "distance" but got "%s" instead' % mode)

    n_samples = indices.shape[0]
    indices = cp.ravel(indices)

    # Every row has exactly n_neighbors entries, so the CSR row pointer
    # is a simple arithmetic progression.
    n_nonzero = n_samples * n_neighbors
    rowptr = cp.arange(0, n_nonzero + 1, n_neighbors)

    sparse_csr = cupyx.scipy.sparse.csr_matrix((distances,
                                                cp.ravel(
                                                    cp.asarray(indices)),
                                                rowptr),
                                               shape=(n_samples,
                                                      self.n_samples_fit_))

    return sparse_csr
@property
def effective_metric_(self):
    # sklearn-compatible alias: mirrors the ``metric`` constructor arg.
    return self.metric

@effective_metric_.setter
def effective_metric_(self, val):
    self.metric = val

@property
def effective_metric_params_(self):
    # sklearn-compatible alias for ``metric_params``; never returns None.
    metric_params = self.metric_params
    return metric_params if metric_params else {}

@effective_metric_params_.setter
def effective_metric_params_(self, val):
    self.metric_params = val
def __del__(self):
    # Free the C++ index object owned through the raw pointer stored in
    # ``knn_index`` (set by fit() for the approximate and rbc methods).
    cdef knnIndex* knn_index = <knnIndex*>0
    cdef BallCoverIndex* rbc_index = <BallCoverIndex*>0

    # Access via __dict__ to avoid triggering descriptors during
    # interpreter shutdown when attributes may be partially torn down.
    kidx = self.__dict__['knn_index'] \
        if 'knn_index' in self.__dict__ else None
    if kidx is not None:
        # NOTE(review): assumes _fit_method is still set whenever
        # knn_index is non-None — confirm for partially-constructed
        # instances.
        if self._fit_method in ["ivfflat", "ivfpq"]:
            knn_index = <knnIndex*><uintptr_t>kidx
            del knn_index
        else:
            rbc_index = <BallCoverIndex*><uintptr_t>kidx
            del rbc_index
@cuml.internals.api_return_sparse_array()
def kneighbors_graph(X=None, n_neighbors=5, mode='connectivity', verbose=False,
                     handle=None, algorithm="brute", metric="euclidean", p=2,
                     include_self=False, metric_params=None):
    """
    Computes the (weighted) graph of k-Neighbors for points in X.

    Parameters
    ----------
    X : array-like (device or host) shape = (n_samples, n_features)
        Dense matrix (floats or doubles) of shape (n_samples, n_features).
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy

    n_neighbors : Integer
        Number of neighbors to search. If not provided, the n_neighbors
        from the model instance is used (default=5)

    mode : string (default='connectivity')
        Values in connectivity matrix: 'connectivity' returns the
        connectivity matrix with ones and zeros, 'distance' returns the
        edges as the distances between points with the requested metric.

    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    algorithm : string (default='brute')
        The query algorithm to use. Valid options are:

        - ``'auto'``: to automatically select brute-force or
          random ball cover based on data shape and metric
        - ``'rbc'``: for the random ball algorithm, which partitions
          the data space and uses the triangle inequality to lower the
          number of potential distances. Currently, this algorithm
          supports 2d Euclidean and Haversine.
        - ``'brute'``: for brute-force, slow but produces exact results
        - ``'ivfflat'``: for inverted file, divide the dataset in partitions
          and perform search on relevant partitions only
        - ``'ivfpq'``: for inverted file and product quantization,
          same as inverted list, in addition the vectors are broken
          in n_features/M sub-vectors that will be encoded thanks
          to intermediary k-means clusterings. This encoding provide
          partial information allowing faster distances calculations

    metric : string (default='euclidean').
        Distance metric to use. Supported distances are ['l1, 'cityblock',
        'taxicab', 'manhattan', 'euclidean', 'l2', 'braycurtis', 'canberra',
        'minkowski', 'chebyshev', 'jensenshannon', 'cosine', 'correlation']

    p : float (default=2) Parameter for the Minkowski metric. When p = 1, this
        is equivalent to manhattan distance (l1), and euclidean distance (l2)
        for p = 2. For arbitrary p, minkowski distance (lp) is used.

    include_self : bool or 'auto' (default=False)
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If 'auto', then True is used for mode='connectivity' and False
        for mode='distance'.

    metric_params : dict, optional (default = None) This is currently ignored.

    Returns
    -------
    A : sparse graph in CSR format, shape = (n_samples, n_samples_fit)
        n_samples_fit is the number of samples in the fitted data where
        A[i, j] is assigned the weight of the edge that connects i to j.
        Values will either be ones/zeros or the selected distance metric.
        Return types are either cupy's CSR sparse graph (device) or
        numpy's CSR sparse graph (host)
    """
    # Set the default output type to "cupy". This will be ignored if the user
    # has set `cuml.global_settings.output_type`. Only necessary for array
    # generation methods that do not take an array as input
    cuml.internals.set_api_output_type("cupy")

    # Note: X is rebound from the raw input to a fitted NearestNeighbors
    # estimator here (the original data lives on as X._fit_X).
    X = NearestNeighbors(
        n_neighbors=n_neighbors,
        verbose=verbose,
        handle=handle,
        algorithm=algorithm,
        metric=metric,
        p=p,
        metric_params=metric_params,
        output_type=cuml.global_settings.root_cm.output_type
    ).fit(X)

    if include_self == 'auto':
        include_self = mode == 'connectivity'

    with cuml.internals.exit_internal_api():
        if not include_self:
            # Passing query=None makes the estimator query its own
            # training data and drop each point's self-match.
            query = None
        else:
            query = X._fit_X

    return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/kernel_ridge/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Register the Cython sources for this module. add_module_gpu_default
# appends to ``cython_sources`` subject to the GPU-build configuration.
set(cython_sources "")
add_module_gpu_default("kernel_ridge.pyx")

# Build one extension module per source, linked against the
# single-GPU cuML libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX kernel_ridge_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/kernel_ridge/kernel_ridge.pyx | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
import warnings
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
from cupyx import lapack, geterr, seterr
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from cuml.common import input_to_cuml_array
from cuml.metrics import pairwise_kernels
cp = gpu_only_import('cupy')
linalg = gpu_only_import_from('cupy', 'linalg')
np = cpu_only_import('numpy')
# cholesky solve with fallback to least squares for singular problems
def _safe_solve(K, y):
    """Solve ``K x = y`` via Cholesky (``posv``), falling back to lstsq.

    Parameters
    ----------
    K : cupy ndarray
        Symmetric (regularized kernel) matrix.
    y : cupy ndarray
        Right-hand side(s).

    Returns
    -------
    dual_coef : cupy ndarray
        Solution of the linear system.
    """
    # we need to set the error mode of cupy to raise
    # otherwise we silently get an array of NaNs
    err_mode = geterr()["linalg"]
    seterr(linalg="raise")
    try:
        try:
            dual_coef = lapack.posv(K, y)

            # Perform following check as a workaround for cusolver issue to be
            # fixed in a future CUDA version
            if cp.all(cp.isnan(dual_coef)):
                raise np.linalg.LinAlgError
        except np.linalg.LinAlgError:
            warnings.warn(
                "Singular matrix in solving dual problem. Using "
                "least-squares solution instead."
            )
            dual_coef = linalg.lstsq(K, y, rcond=None)[0]
    finally:
        # BUGFIX: previously the error mode was only restored on the
        # success path, so a singular matrix left linalg="raise" set
        # globally for the rest of the process.
        seterr(linalg=err_mode)
    return dual_coef
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None):
    """Solve the kernel ridge dual problem.

    Computes dual_coef = inv(K + alpha*Id) y, handling a single shared
    alpha (one solve for all targets) or one alpha per target
    (one solve each). ``sample_weight``, if given, is folded into both
    K and y so precomputed kernels are supported.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    # Work in float64 on a copy so the in-place diagonal updates below
    # never mutate the caller's kernel matrix.
    K = cp.array(K, dtype=np.float64)

    alpha = cp.atleast_1d(alpha)
    one_alpha = alpha.size == 1
    has_sw = sample_weight is not None

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = cp.sqrt(cp.atleast_1d(sample_weight))
        y = y * sw[:, cp.newaxis]
        K *= cp.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        # K.flat[::n+1] addresses the diagonal in-place.
        K.flat[:: n_samples + 1] += alpha[0]

        dual_coef = _safe_solve(K, y)

        if has_sw:
            dual_coef *= sw[:, cp.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = cp.empty([n_targets, n_samples], K.dtype)

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            # Add this target's penalty to the diagonal, solve, then
            # subtract it again so K is clean for the next target.
            K.flat[:: n_samples + 1] += current_alpha

            dual_coef[:] = _safe_solve(K, target).ravel()

            K.flat[:: n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[cp.newaxis, :]

        return dual_coefs.T
class KernelRidge(Base, RegressorMixin):
"""
Kernel ridge regression (KRR) performs l2 regularised ridge regression
using the kernel trick. The kernel trick allows the estimator to learn a
linear function in the space induced by the kernel. This may be a
non-linear function in the original feature space (when a non-linear
kernel is used).
This estimator supports multi-output regression (when y is 2 dimensional).
See the sklearn user guide for more information.
Parameters
----------
alpha : float or array-like of shape (n_targets,), default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
If an array is passed, penalties are assumed to be specific
to the targets.
kernel : str or callable, default="linear"
Kernel mapping used internally. This parameter is directly passed to
:class:`~cuml.metrics.pairwise_kernel`.
If `kernel` is a string, it must be one of the metrics
in `cuml.metrics.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed".
If `kernel` is "precomputed", X is assumed to be a kernel matrix.
`kernel` may be a callable numba device function. If so, is called on
each pair of instances (rows) and the resulting value recorded.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of str to any, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the
CUDA stream that will be used for the model's computations, so
users can run different models concurrently in different streams
by creating handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
Attributes
----------
dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
Representation of weight vector(s) in kernel space
X_fit_ : ndarray of shape (n_samples, n_features)
Training data, which is also required for prediction. If
kernel == "precomputed" this is instead the precomputed
training matrix, of shape (n_samples, n_samples).
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> from cuml.kernel_ridge import KernelRidge
>>> from numba import cuda
>>> import math
>>> n_samples, n_features = 10, 5
>>> rng = cp.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> model = KernelRidge(kernel="poly").fit(X, y)
>>> pred = model.predict(X)
>>> @cuda.jit(device=True)
... def custom_rbf_kernel(x, y, gamma=None):
... if gamma is None:
... gamma = 1.0 / len(x)
... sum = 0.0
... for i in range(len(x)):
... sum += (x[i] - y[i]) ** 2
... return math.exp(-gamma * sum)
>>> model = KernelRidge(kernel=custom_rbf_kernel,
... kernel_params={"gamma": 2.0}).fit(X, y)
>>> pred = model.predict(X)
"""
dual_coef_ = CumlArrayDescriptor()
def __init__(
self,
*,
alpha=1,
kernel="linear",
gamma=None,
degree=3,
coef0=1,
kernel_params=None,
output_type=None,
handle=None,
verbose=False
):
super().__init__(handle=handle, verbose=verbose,
output_type=output_type)
self.alpha = cp.asarray(alpha)
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def get_param_names(self):
return super().get_param_names() + [
"alpha",
"kernel",
"gamma",
"degree",
"coef0",
"kernel_params",
]
def _get_kernel(self, X, Y=None):
if isinstance(self.kernel, str):
params = {"gamma": self.gamma,
"degree": self.degree, "coef0": self.coef0}
else:
params = self.kernel_params or {}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@generate_docstring()
def fit(self, X, y, sample_weight=None,
convert_dtype=True) -> "KernelRidge":
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
X_m, n_rows, self.n_cols, self.dtype = input_to_cuml_array(
X, check_dtype=[np.float32, np.float64]
)
y_m, _, _, _ = input_to_cuml_array(
y,
check_dtype=self.dtype,
convert_to_dtype=(self.dtype if convert_dtype else None),
check_rows=n_rows,
)
if self.n_cols < 1:
msg = "X matrix must have at least a column"
raise TypeError(msg)
K = self._get_kernel(X_m)
self.dual_coef_ = _solve_cholesky_kernel(
K, cp.asarray(y_m), self.alpha, sample_weight
)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X_m
return self
    def predict(self, X):
        """
        Predict using the kernel ridge model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples. If kernel == "precomputed" this is instead a
            precomputed kernel matrix, shape = [n_samples,
            n_samples_fitted], where n_samples_fitted is the number of
            samples used in the fitting for this estimator.
        Returns
        -------
        C : array of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.
        """
        # Validate input; only fp32/fp64 are supported.
        X_m, _, _, _ = input_to_cuml_array(
            X, check_dtype=[np.float32, np.float64])
        # Kernel between the query samples and the training samples
        # stored at fit time.
        K = self._get_kernel(X_m, self.X_fit_)
        # Prediction is K @ dual_coef_ (kernel trick; no explicit weights).
        return cp.dot(cp.asarray(K), cp.asarray(self.dual_coef_))
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/kernel_ridge/__init__.py | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.kernel_ridge.kernel_ridge import KernelRidge
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/comm/serialize.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml
import cudf.comm.serialize # noqa: F401
# Register Dask/CUDA (de)serialization hooks for cuML objects. All
# registration happens at import time; if `distributed` is missing the
# whole block is skipped and cuML works without Dask support.
try:
    from distributed.protocol import dask_deserialize, dask_serialize
    from distributed.protocol.serialize import pickle_dumps, pickle_loads
    from distributed.protocol.cuda import cuda_deserialize, cuda_serialize
    from distributed.protocol import register_generic
    from cuml.internals.array_sparse import SparseCumlArray
    from cuml.ensemble import RandomForestRegressor
    from cuml.ensemble import RandomForestClassifier
    from cuml.naive_bayes import MultinomialNB
    # Registering RF Regressor and Classifier to use pickling even when
    # Base is serialized with Dask or CUDA serializations
    @dask_serialize.register(RandomForestRegressor)
    @cuda_serialize.register(RandomForestRegressor)
    def rfr_serialize(rf):
        # Whole-object pickle round-trip into (header, frames).
        return pickle_dumps(rf)
    @dask_deserialize.register(RandomForestRegressor)
    @cuda_deserialize.register(RandomForestRegressor)
    def rfr_deserialize(header, frames):
        return pickle_loads(header, frames)
    @dask_serialize.register(RandomForestClassifier)
    @cuda_serialize.register(RandomForestClassifier)
    def rfc_serialize(rf):
        return pickle_dumps(rf)
    @dask_deserialize.register(RandomForestClassifier)
    @cuda_deserialize.register(RandomForestClassifier)
    def rfc_deserialize(header, frames):
        return pickle_loads(header, frames)
    # Remaining types use distributed's generic attribute-walking
    # serialization for both the "cuda" and "dask" protocols.
    register_generic(SparseCumlArray, "cuda", cuda_serialize, cuda_deserialize)
    register_generic(SparseCumlArray, "dask", dask_serialize, dask_deserialize)
    register_generic(cuml.Base, "cuda", cuda_serialize, cuda_deserialize)
    register_generic(cuml.Base, "dask", dask_serialize, dask_deserialize)
    register_generic(MultinomialNB, "cuda", cuda_serialize, cuda_deserialize)
    register_generic(MultinomialNB, "dask", dask_serialize, dask_deserialize)
except ImportError:
    # distributed is probably not installed on the system
    pass
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/thirdparty_adapters/adapters.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cupyx.scipy import sparse as gpu_sparse
from scipy import sparse as cpu_sparse
# FIX: the two *_coo_matrix aliases below previously bound csc_matrix, so
# a COO input accepted by check_array(accept_sparse=["coo", ...]) was
# silently rebuilt and returned as a CSC matrix. Bind the real COO
# constructors so the advertised format is preserved.
from scipy.sparse import coo_matrix as cpu_coo_matrix
from scipy.sparse import csc_matrix as cpu_csc_matrix
from cuml.internals.safe_imports import cpu_only_import_from
from cupyx.scipy.sparse import coo_matrix as gpu_coo_matrix
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.input_utils import input_to_cupy_array, input_to_host_array
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
gpu_csr_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csr_matrix")
gpu_csc_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csc_matrix")
cpu_csr_matrix = cpu_only_import_from("scipy.sparse", "csr_matrix")
pdDataFrame = cpu_only_import_from("pandas", "DataFrame")
cuDataFrame = gpu_only_import_from("cudf", "DataFrame")
# Dtypes accepted when check_array/check_dtype is given dtype="numeric".
# float16 is deliberately absent: check_dtype rejects it as unsupported.
numeric_types = [
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
    np.intp,
    np.uintp,
    np.float32,
    np.float64,
    np.complex64,
    np.complex128,
]
def check_sparse(array, accept_sparse=False, accept_large_sparse=True):
    """Validate a sparse input against the accepted sparse configurations.

    Parameters
    ----------
    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. True allows any format; False means a sparse input
        raises an error.
    accept_large_sparse : bool (default=True)
        When False, sparse inputs are accepted only if their indices are
        stored with a 32-bit dtype.

    Returns
    -------
    None, or raises ValueError for a rejected sparse input. Dense inputs
    always pass.
    """
    if accept_sparse is True:
        return
    # Dense inputs require no further checks.
    if not (cpu_sparse.issparse(array) or gpu_sparse.issparse(array)):
        return
    err_msg = (
        "This algorithm does not support the sparse "
        + "input in the current configuration."
    )
    if accept_sparse is False:
        raise ValueError(err_msg)
    # 64-bit index arrays are rejected when large-sparse is disallowed.
    if not accept_large_sparse and (
        array.indices.dtype != cp.int32 or array.indptr.dtype != cp.int32
    ):
        raise ValueError(err_msg)
    # Normalize the accepted formats to a collection and test membership.
    if isinstance(accept_sparse, (tuple, list)):
        allowed_formats = accept_sparse
    else:
        allowed_formats = (accept_sparse,)
    if array.format not in allowed_formats:
        raise ValueError(err_msg)
def check_dtype(array, dtypes="numeric"):
    """Checks that the input dtype is part of acceptable dtypes
    Parameters
    ----------
    array : object
        Input object to check / convert.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    Returns
    -------
    dtype or raise error
    """
    # dtypes=None: keep the input's own dtype untouched.
    if dtypes is None:
        if not isinstance(array, cuDataFrame):
            return array.dtype
        else:
            # DataFrames carry one dtype per column; the first is used.
            return array.dtypes.tolist()[0]
    if dtypes == "numeric":
        dtypes = numeric_types
    if isinstance(dtypes, (list, tuple)):
        # fp16 is not supported, so remove from the list of dtypes if present
        dtypes = [d for d in dtypes if d != np.float16]
        # If the input's dtype(s) fall outside the accepted list, convert
        # to the first accepted dtype.
        if not isinstance(array, (pdDataFrame, cuDataFrame)):
            if array.dtype not in dtypes:
                return dtypes[0]
        elif any([dt not in dtypes for dt in array.dtypes.tolist()]):
            return dtypes[0]
        # Otherwise the input's existing dtype is already acceptable.
        if not isinstance(array, (pdDataFrame, cuDataFrame)):
            return array.dtype
        else:
            return array.dtypes.tolist()[0]
    elif dtypes == np.float16:
        raise NotImplementedError("Float16 not supported by cuML")
    else:
        # Single dtype to convert to
        return dtypes
def check_finite(array, force_all_finite=True):
    """Raise if the array contains disallowed non-finite values.

    Parameters
    ----------
    array : object
        Input object to check / convert.
    force_all_finite : boolean or 'allow-nan', (default=True)
        - True: every value must be finite.
        - False: inf and NaN are both accepted (no check performed).
        - 'allow-nan': NaN is accepted but infinities are rejected.

    Returns
    -------
    None, or raises ValueError.
    """
    message = "Non-finite value encountered in array"
    if force_all_finite is True:
        if not cp.all(cp.isfinite(array)):
            raise ValueError(message)
    elif force_all_finite == "allow-nan":
        # NaNs are tolerated here; only infinities are rejected.
        if cp.any(cp.isinf(array)):
            raise ValueError(message)
def check_array(
    array,
    accept_sparse=False,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    ensure_min_samples=1,
    ensure_min_features=1,
    warn_on_dtype=None,
    estimator=None,
):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is checked to be a non-empty 2D array containing
    only finite values. If the dtype of the array is object, attempt
    converting to float, raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.
        ``force_all_finite`` accepts the string ``'allow-nan'``.
    ensure_2d : boolean (default=True)
        Whether to raise a value error if array is not 2D.
    allow_nd : boolean (default=False)
        Whether to allow array.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    estimator : unused parameter
    Returns
    -------
    array_converted : object
        The converted and validated array.
    """
    if dtype == "numeric":
        dtype = numeric_types
    # Resolve the output dtype before any conversion happens.
    correct_dtype = check_dtype(array, dtype)
    # When copying without an explicit order, try to keep the input's
    # current memory layout (DataFrames expose no .flags, hence skipped).
    if (
        not isinstance(array, (pdDataFrame, cuDataFrame))
        and copy
        and not order
        and hasattr(array, "flags")
    ):
        if array.flags["F_CONTIGUOUS"]:
            order = "F"
        elif array.flags["C_CONTIGUOUS"]:
            order = "C"
    # Default to Fortran order when nothing else was determined.
    if not order:
        order = "F"
    # Shape-based checks only apply to inputs that expose .shape.
    hasshape = hasattr(array, "shape")
    if ensure_2d and hasshape:
        if len(array.shape) != 2:
            raise ValueError("Not 2D")
    if not allow_nd and hasshape:
        if len(array.shape) > 2:
            raise ValueError("More than 2 dimensions detected")
    if ensure_min_samples > 0 and hasshape:
        if array.shape[0] < ensure_min_samples:
            raise ValueError("Not enough samples")
    if ensure_min_features > 0 and hasshape and len(array.shape) == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError(
                "Found array with %d feature(s) (shape=%s) while"
                " a minimum of %d is required."
                % (n_features, array.shape, ensure_min_features)
            )
    is_sparse = cpu_sparse.issparse(array) or gpu_sparse.issparse(array)
    if is_sparse:
        check_sparse(array, accept_sparse, accept_large_sparse)
        # Rebuild the sparse matrix on device or host depending on the
        # globally-configured memory type, preserving its format.
        if array.format == "csr":
            if GlobalSettings().memory_type.is_device_accessible:
                new_array = gpu_csr_matrix(array, copy=copy)
            else:
                new_array = cpu_csr_matrix(array, copy=copy)
        elif array.format == "csc":
            if GlobalSettings().memory_type.is_device_accessible:
                new_array = gpu_csc_matrix(array, copy=copy)
            else:
                new_array = cpu_csc_matrix(array, copy=copy)
        elif array.format == "coo":
            if GlobalSettings().memory_type.is_device_accessible:
                new_array = gpu_coo_matrix(array, copy=copy)
            else:
                new_array = cpu_coo_matrix(array, copy=copy)
        else:
            raise ValueError("Sparse matrix format not supported")
        # Only the explicitly-stored values are checked for finiteness.
        check_finite(new_array.data, force_all_finite)
        if correct_dtype != new_array.dtype:
            new_array = new_array.astype(correct_dtype)
        return new_array
    else:
        # Dense path: convert to CuPy (device) or NumPy (host) array.
        if GlobalSettings().memory_type.is_device_accessible:
            X, n_rows, n_cols, dtype = input_to_cupy_array(
                array, order=order, deepcopy=copy, fail_on_null=False
            )
        else:
            X, n_rows, n_cols, dtype = input_to_host_array(
                array, order=order, deepcopy=copy, fail_on_null=False
            )
        if correct_dtype != dtype:
            X = X.astype(correct_dtype)
        check_finite(X, force_all_finite)
        return X
def _get_mask(X, value_to_mask):
    """Return a boolean mask flagging entries of X equal to value_to_mask.

    When value_to_mask denotes NaN (either the string "NaN" or an actual
    NaN, which never compares equal to itself), the mask is computed with
    cp.isnan rather than ==.
    """
    masking_nan = value_to_mask == "NaN" or cp.isnan(value_to_mask)
    return cp.isnan(X) if masking_nan else X == value_to_mask
def _masked_column_median(arr, masked_value):
    """Compute the median of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    if arr.size == 0:
        # No data at all: every column's median is NaN.
        return cp.full(arr.shape[1], cp.nan)
    if not cp.isnan(masked_value):
        arr_sorted = arr.copy()
        # If nan is not the missing value, any column with nans should
        # have a median of nan
        nan_cols = cp.any(cp.isnan(arr), axis=0)
        # Re-use NaN as the masking sentinel so masked entries sort last.
        arr_sorted[mask] = cp.nan
        arr_sorted.sort(axis=0)
    else:
        nan_cols = cp.full(arr.shape[1], False)
        # nans are always sorted to end of array and the sort call
        # copies the data
        arr_sorted = cp.sort(arr, axis=0)
    count_missing_values = mask.sum(axis=0)
    # Ignore missing values in determining "halfway" index of sorted
    # array
    n_elems = arr.shape[0] - count_missing_values
    # If no elements remain after removing missing value, median for
    # that column is nan
    nan_cols = cp.logical_or(nan_cols, n_elems <= 0)
    col_index = cp.arange(arr_sorted.shape[1])
    # Average the two middle elements; for odd counts both indices
    # coincide, so this reduces to the single middle element.
    median = (
        arr_sorted[cp.floor_divide(n_elems - 1, 2), col_index]
        + arr_sorted[cp.floor_divide(n_elems, 2), col_index]
    ) / 2
    median[nan_cols] = cp.nan
    return median
def _masked_column_mean(arr, masked_value):
    """Compute the mean of each column in the 2D array arr, ignoring any
    instances of masked_value"""
    # NOTE(review): unlike _get_mask, cp.isnan below is evaluated
    # unconditionally, so passing the string "NaN" would raise — confirm
    # callers only pass numeric sentinels.
    mask = _get_mask(arr, masked_value)
    count_missing_values = mask.sum(axis=0)
    n_elems = arr.shape[0] - count_missing_values
    mean = cp.nansum(arr, axis=0)
    if not cp.isnan(masked_value):
        # Non-NaN sentinel values were included in the sum; subtract their
        # total contribution per column.
        mean -= count_missing_values * masked_value
    mean /= n_elems
    return mean
def _masked_column_mode(arr, masked_value):
    """Determine the most frequently appearing element in each column in the 2D
    array arr, ignoring any instances of masked_value"""
    mask = _get_mask(arr, masked_value)
    n_features = arr.shape[1]
    # Per-column host-side loop; results gathered in a NumPy buffer and
    # copied back to device at the end.
    most_frequent = np.empty(n_features, dtype=arr.dtype)
    for i in range(n_features):
        # Indices of the unmasked entries of column i.
        feature_mask_idxs = cp.where(~mask[:, i])[0]
        values, counts = cp.unique(
            arr[feature_mask_idxs, i], return_counts=True
        )
        # NOTE(review): counts.max() raises on a fully-masked column
        # (empty counts) — confirm inputs always have at least one
        # unmasked entry per column.
        count_max = counts.max()
        if count_max > 0:
            # Ties are broken by taking the smallest value.
            value = values[counts == count_max].min()
        else:
            value = cp.nan
        most_frequent[i] = value
    return cp.array(most_frequent)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/thirdparty_adapters/sparsefuncs_fast.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import ceil
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cpx = gpu_only_import("cupyx")
cuda = gpu_only_import_from("numba", "cuda")
def csr_mean_variance_axis0(X):
    """Mean and variance along axis 0 of a CSR matrix.

    Parameters
    ----------
    X : sparse CSR matrix
        Input array

    Returns
    -------
    mean and variance
    """
    # Column-wise reductions are natural in CSC layout: convert first,
    # then delegate to the shared CSC implementation and drop the NaN
    # counts it also produces.
    mean, variance, _nan_counts = _csc_mean_variance_axis0(X.tocsc())
    return mean, variance
def csc_mean_variance_axis0(X):
    """Mean and variance along axis 0 of a CSC matrix.

    Parameters
    ----------
    X : sparse CSC matrix
        Input array

    Returns
    -------
    mean and variance
    """
    # Delegate to the shared implementation; the per-column NaN counts it
    # returns are not part of this public API.
    mean, variance, _nan_counts = _csc_mean_variance_axis0(X)
    return mean, variance
def _csc_mean_variance_axis0(X):
    """Compute mean, variance and nans count on the axis 0 of a CSC matrix
    Parameters
    ----------
    X : sparse CSC matrix
        Input array
    Returns
    -------
    mean, variance, nans count
    """
    n_samples, n_features = X.shape
    means = cp.empty(n_features)
    variances = cp.empty(n_features)
    counts_nan = cp.empty(n_features)
    # Host-side loop over columns; each column's stored values are the
    # slice data[indptr[i]:indptr[i+1]] in CSC layout.
    start = X.indptr[0]
    for i, end in enumerate(X.indptr[1:]):
        col = X.data[start:end]
        # Implicit (non-stored) zeros still count toward the statistics.
        _count_zeros = n_samples - col.size
        # NaN != NaN, so this counts the NaNs among stored values.
        _count_nans = (col != col).sum()
        # Mean over all non-NaN entries, including implicit zeros.
        _mean = cp.nansum(col) / (n_samples - _count_nans)
        _variance = cp.nansum((col - _mean) ** 2)
        # Each implicit zero contributes (0 - mean)^2 = mean^2.
        _variance += _count_zeros * (_mean**2)
        _variance /= n_samples - _count_nans
        means[i] = _mean
        variances[i] = _variance
        counts_nan[i] = _count_nans
        start = end
    return means, variances, counts_nan
@cuda.jit
def norm_step2_k(indptr, data, norm):
    """Apply normalization
    Parameters
    ----------
    indptr : array
        indptr of sparse matrix
    data : array
        data of sparse matrix
    norm: array
        per-row norm by which to divide each row's stored values
    """
    # 2D launch grid: x indexes the CSR row, y indexes the position of a
    # nonzero within that row.
    row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    # Bounds checks: the grid is launched oversized.
    if row_i >= indptr.shape[0] - 1:
        return
    start = indptr[row_i]
    end = indptr[row_i + 1]
    if inrow_idx >= (end - start):
        return
    # Scale this row's nonzero by the row's precomputed norm.
    data[start + inrow_idx] /= norm[row_i]
@cuda.jit
def l1_step1_k(indptr, data, norm):
    """Compute norm for L1 normalization"""
    # One thread per (row, within-row nonzero) pair.
    row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    if row_i >= indptr.shape[0] - 1:
        return
    start = indptr[row_i]
    end = indptr[row_i + 1]
    if inrow_idx >= (end - start):
        return
    val = abs(data[start + inrow_idx])
    # Many threads accumulate into the same row's norm, hence atomic add.
    cuda.atomic.add(norm, row_i, val)
def inplace_csr_row_normalize_l1(X):
    """Normalize each row of CSR matrix X in place by its L1 norm.

    Parameters
    ----------
    X : sparse CSR matrix
        Input array; its ``data`` is modified in place. Nothing is
        returned.
    """
    # X.indptr has (number of rows + 1) entries; the launch grid is sized
    # from it and both kernels bounds-check, so the extra row is harmless.
    n_rows = X.indptr.shape[0]
    # Widest row determines the y extent of the launch grid.
    max_nnz = cp.diff(X.indptr).max()
    tpb = (32, 32)
    bpg_x = ceil(n_rows / tpb[0])
    bpg_y = ceil(max_nnz / tpb[1])
    bpg = (bpg_x, bpg_y)
    # One accumulator per actual row.
    norm = cp.zeros(n_rows - 1, dtype=X.dtype)
    # Pass 1: accumulate per-row sums of |x|; pass 2: divide in place.
    l1_step1_k[bpg, tpb](X.indptr, X.data, norm)
    norm_step2_k[bpg, tpb](X.indptr, X.data, norm)
@cuda.jit
def l2_step1_k(indptr, data, norm):
    """Compute norm for L2 normalization"""
    # One thread per (row, within-row nonzero) pair.
    row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    if row_i >= indptr.shape[0] - 1:
        return
    start = indptr[row_i]
    end = indptr[row_i + 1]
    if inrow_idx >= (end - start):
        return
    # Accumulate x^2; the square root is taken by the caller.
    val = data[start + inrow_idx]
    val *= val
    cuda.atomic.add(norm, row_i, val)
def inplace_csr_row_normalize_l2(X):
    """Normalize each row of CSR matrix X in place by its L2 norm.

    Parameters
    ----------
    X : sparse CSR matrix
        Input array; its ``data`` is modified in place. Nothing is
        returned.
    """
    # X.indptr has (number of rows + 1) entries; the kernels bounds-check,
    # so sizing the grid from it is safe.
    n_rows = X.indptr.shape[0]
    max_nnz = cp.diff(X.indptr).max()
    tpb = (32, 32)
    bpg_x = ceil(n_rows / tpb[0])
    bpg_y = ceil(max_nnz / tpb[1])
    bpg = (bpg_x, bpg_y)
    norm = cp.zeros(n_rows - 1, dtype=X.dtype)
    # Pass 1: accumulate sums of squares per row.
    l2_step1_k[bpg, tpb](X.indptr, X.data, norm)
    # Convert to L2 norms, then divide each row's values in place.
    norm = cp.sqrt(norm)
    norm_step2_k[bpg, tpb](X.indptr, X.data, norm)
@cuda.jit(device=True, inline=True)
def _deg2_column(d, i, j, interaction_only):
    """Compute the index of the column for a degree 2 expansion
    d is the dimensionality of the input data, i and j are the indices
    for the columns involved in the expansion.
    """
    # Closed-form output-column index of the feature pair (i, j); the
    # interaction_only variant excludes squared terms (i == j).
    if interaction_only:
        return int(d * i - (i**2 + 3 * i) / 2 - 1 + j)
    else:
        return int(d * i - (i**2 + i) / 2 + j)
@cuda.jit(device=True, inline=True)
def _deg3_column(d, i, j, k, interaction_only):
    """Compute the index of the column for a degree 3 expansion
    d is the dimensionality of the input data, i, j and k are the indices
    for the columns involved in the expansion.
    """
    # Closed-form output-column index of the feature triple (i, j, k);
    # the interaction_only variant excludes terms with repeated features.
    if interaction_only:
        return int(
            (
                3 * d**2 * i
                - 3 * d * i**2
                + i**3
                + 11 * i
                - 3 * j**2
                - 9 * j
            )
            / 6
            + i**2
            - 2 * d * i
            + d * j
            - d
            + k
        )
    else:
        return int(
            (3 * d**2 * i - 3 * d * i**2 + i**3 - i - 3 * j**2 - 3 * j)
            / 6
            + d * j
            + k
        )
@cuda.jit
def perform_expansion(
    indptr,
    indices,
    data,
    expanded_data,
    expanded_indices,
    d,
    interaction_only,
    degree,
    expanded_indptr,
):
    """Kernel applying polynomial expansion on CSR matrix"""
    # 2D launch: x indexes the input row, y indexes one output nonzero of
    # that row. Each thread computes exactly one expanded entry.
    row_i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    inrow_idx = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y
    if row_i >= indptr.shape[0] - 1:
        return
    # Position of this thread's output entry in the expanded arrays.
    expanded_index = expanded_indptr[row_i] + inrow_idx
    if expanded_index >= expanded_indptr[row_i + 1]:
        return
    row_starts = indptr[row_i]
    row_ends = indptr[row_i + 1]
    # Decode the flat within-row index inrow_idx into the pointer pair
    # (i_ptr, j_ptr) — and k_ptr for degree 3 — identifying which of the
    # row's nonzeros this thread combines.
    i_ptr = row_starts
    j_ptr = -1
    k_ptr = inrow_idx
    if degree == 2:
        j_ptr = inrow_idx
        for i in range(row_starts, row_ends):
            # Number of combinations having this value as first factor.
            diff = row_ends - i - interaction_only
            if j_ptr >= diff:
                j_ptr -= diff
            else:
                i_ptr = i
                break
        j_ptr += i_ptr + interaction_only
    else:
        # degree == 3
        diff = 0
        for i in range(row_starts, row_ends):
            for j in range(i + interaction_only, row_ends):
                diff = row_ends - j - interaction_only
                if k_ptr >= diff:
                    k_ptr -= diff
                else:
                    j_ptr = j
                    i_ptr = i
                    break
            if j_ptr != -1:
                break
        k_ptr += j_ptr + interaction_only
    # Map storage pointers to actual feature (column) indices.
    i = indices[i_ptr]
    j = indices[j_ptr]
    if degree == 2:
        # Output column from the closed-form index; value is the product
        # of the two combined entries.
        col = _deg2_column(d, i, j, interaction_only)
        expanded_indices[expanded_index] = col
        expanded_data[expanded_index] = data[i_ptr] * data[j_ptr]
    else:
        # degree == 3
        k = indices[k_ptr]
        col = _deg3_column(d, i, j, k, interaction_only)
        expanded_indices[expanded_index] = col
        expanded_data[expanded_index] = data[i_ptr] * data[j_ptr] * data[k_ptr]
def csr_polynomial_expansion(X, interaction_only, degree):
    """Apply polynomial expansion on CSR matrix
    Parameters
    ----------
    X : sparse CSR matrix
        Input array
    interaction_only : bool
        If true, exclude terms with repeated features (powers).
    degree : int
        Expansion degree; only 2 and 3 are supported.
    Returns
    -------
    New expanded CSR matrix, or None when the expansion has no columns.
    """
    assert degree in (2, 3)
    # Normalize to 0/1 so it can be used in integer arithmetic below and
    # inside the kernels.
    interaction_only = 1 if interaction_only else 0
    d = X.shape[1]
    # Number of output columns (combinatorial count of feature products).
    if degree == 2:
        expanded_dimensionality = int((d**2 + d) / 2 - interaction_only * d)
    else:
        expanded_dimensionality = int(
            (d**3 + 3 * d**2 + 2 * d) / 6 - interaction_only * d**2
        )
    if expanded_dimensionality == 0:
        return None
    assert expanded_dimensionality > 0
    # Per-row nonzero counts drive the output sizes: each row's expanded
    # nonzero count is the number of products of its own nonzeros.
    nnz = cp.diff(X.indptr)
    if degree == 2:
        total_nnz = (nnz**2 + nnz) / 2 - interaction_only * nnz
    else:
        total_nnz = (
            nnz**3 + 3 * nnz**2 + 2 * nnz
        ) / 6 - interaction_only * nnz**2
    del nnz
    # Running sum becomes the expanded matrix's indptr (rows 1..n).
    nnz_cumsum = total_nnz.cumsum(dtype=cp.int64)
    total_nnz_max = int(total_nnz.max())
    total_nnz = int(total_nnz.sum())
    num_rows = X.indptr.shape[0] - 1
    expanded_data = cp.empty(shape=total_nnz, dtype=X.data.dtype)
    expanded_indices = cp.empty(shape=total_nnz, dtype=X.indices.dtype)
    expanded_indptr = cp.empty(shape=num_rows + 1, dtype=X.indptr.dtype)
    expanded_indptr[0] = X.indptr[0]
    expanded_indptr[1:] = nnz_cumsum
    # 2D grid: rows along x, one thread per output nonzero along y, sized
    # by the densest row; the kernel bounds-checks per row.
    tpb = (32, 32)
    bpg_x = ceil(X.indptr.shape[0] / tpb[0])
    bpg_y = ceil(total_nnz_max / tpb[1])
    bpg = (bpg_x, bpg_y)
    perform_expansion[bpg, tpb](
        X.indptr,
        X.indices,
        X.data,
        expanded_data,
        expanded_indices,
        d,
        interaction_only,
        degree,
        expanded_indptr,
    )
    return cpx.scipy.sparse.csr_matrix(
        (expanded_data, expanded_indices, expanded_indptr),
        shape=(num_rows, expanded_dimensionality),
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/thirdparty_adapters/__init__.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .adapters import (
check_array,
_get_mask,
_masked_column_median,
_masked_column_mean,
_masked_column_mode,
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/kernel_utils.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
from uuid import uuid1
import functools
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Mapping of common PyData dtypes to their corresponding C-primitive
dtype_str_map = {
    np.dtype("float32"): "float",
    np.dtype("float64"): "double",
    np.dtype("int32"): "int",
    np.dtype("int64"): "long long int",
    # String keys mirror the np.dtype keys so callers may pass either form.
    "float32": "float",
    "float64": "double",
    "int32": "int",
    "int64": "long long int",
}
# Prefix attached to every generated kernel so NVRTC emits an unmangled,
# globally-visible CUDA kernel symbol.
extern_prefix = r'extern "C" __global__'
def get_dtype_str(dtype):
    """Translate a NumPy dtype (or dtype name) into its C primitive name.

    Raises
    ------
    ValueError
        If the dtype has no C mapping for these kernels.
    """
    if dtype in dtype_str_map:
        return dtype_str_map[dtype]
    raise ValueError(f"{dtype} is not a valid type for this kernel.")
def get_dtype_strs(dtypes):
    """Translate every dtype in ``dtypes`` via :func:`get_dtype_str`."""
    return [get_dtype_str(dt) for dt in dtypes]
@functools.lru_cache(maxsize=5000)
def cuda_kernel_factory(nvrtc_kernel_str, dtypes, kernel_name=None):
    """
    A factory wrapper function to perform some of the boiler-plate involved in
    making cuPy RawKernels type-agnostic.
    Until a better method is created, either by RAPIDS or cuPy, this function
    will perform a string search and replace of c-based datatype primitives
    in ``nvrtc_kernel_str`` using a numerical placeholder (eg. {0}, {1}) for
    the dtype in the corresponding index of tuple ``dtypes``.
    Note that the extern, function scope, and function name should not be
    included in the kernel string. These will be added by this function and
    the function name will be made unique, based on the given dtypes.
    Examples
    --------
    The following kernel string with dtypes = [float, double, int]
        ({0} *a, {1} *b, {2} *c) {}
    Will become
        (float *a, double *b, int *c) {}
    Parameters
    ----------
    nvrtc_kernel_str : string valid nvrtc kernel string without extern, scope,
                       or function name.
    dtypes : tuple of dtypes to search and replace.
    kernel_name : string prefix and function name to use. Note that when
                  this not set (or is set to None), a UUID will
                  be used, which will stop this function from
                  being memoized.
    Returns
    -------
    kernel_name : string unique function name created for kernel,
    raw_kernel : cupy.RawKernel object ready for use
    """
    dtype_strs = get_dtype_strs(dtypes)
    # Substitute each {i} placeholder with the C name of dtypes[i].
    for idx, dtype in enumerate(dtypes):
        nvrtc_kernel_str = nvrtc_kernel_str.replace(
            "{%d}" % idx, dtype_strs[idx]
        )
    # Suffix the name with the dtype names so each specialization gets a
    # distinct symbol.
    # NOTE(review): even when kernel_name is None, lru_cache still
    # memoizes on the (kernel_str, dtypes, None) key, so repeated calls
    # return the first uuid-named kernel; the docstring's memoization
    # caveat looks inaccurate — confirm.
    kernel_name = f"""{uuid1()
    if kernel_name is None
    else kernel_name}_{
        "".join(dtype_strs).replace(" ", "_")
    }"""
    # Prepend the extern "C" __global__ declaration and the symbol name.
    nvrtc_kernel_str = "%s\nvoid %s%s" % (
        extern_prefix,
        kernel_name,
        nvrtc_kernel_str,
    )
    if logger.should_log_for(logger.level_debug):
        logger.debug(str(nvrtc_kernel_str))
    return cp.RawKernel(nvrtc_kernel_str, kernel_name)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/sparse_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_scipy
from cuml.internals.safe_imports import gpu_only_import
cupyx = gpu_only_import("cupyx")
if has_scipy():
import scipy.sparse
def is_sparse(X):
    """
    Return true if X is sparse, false otherwise.
    Parameters
    ----------
    X : array-like, sparse-matrix
    Returns
    -------
    is_sparse : boolean
        is the input sparse?
    """
    # Accept both CuPy (device) and SciPy (host) sparse matrices; the
    # SciPy check is only attempted when scipy is installed.
    if cupyx.scipy.sparse.isspmatrix(X):
        return True
    return has_scipy() and scipy.sparse.isspmatrix(X)
def is_dense(X):
    # Convenience inverse of is_sparse: True for anything that is not a
    # SciPy or CuPy sparse matrix.
    return not is_sparse(X)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/pointer_utils.pyx | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
cdef extern from "ml_cuda_utils.h" namespace "ML":
cdef int get_device(void *ptr) except +
def device_of_gpu_matrix(g):
    """
    Return the CUDA device ordinal owning the memory of a device array.

    Parameters
    ----------
    g : device array
        Must expose ``device_ctypes_pointer`` (e.g. a numba.cuda array).

    Returns
    -------
    int
        Device id reported by ML::get_device for the underlying pointer.
    """
    # Extract the raw device pointer and hand it to the C++ helper.
    cdef uintptr_t cptr = g.device_ctypes_pointer.value
    return get_device(<void*> cptr)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources built unconditionally for the cuml.common package.
set(cython_sources "")
add_module_gpu_default("cuda.pyx")
add_module_gpu_default("handle.pyx")
add_module_gpu_default("pointer_utils.pyx")

# Multi-GPU helpers are only compiled when building with MG support.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
    opg_data_utils_mg.pyx
  )
endif()

# Compile the collected sources into extension modules prefixed "common_",
# linked against the (possibly multi-GPU) cuml libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX common_
  ASSOCIATED_TARGETS cuml
)

if(${CUML_UNIVERSAL})
  # todo: ml_cuda_utils.h should be in the include folder of cuML or the functionality
  # moved to another file, pointer_utils.pyx needs it
  # https://github.com/rapidsai/cuml/issues/4841
  target_include_directories(common_pointer_utils PRIVATE "../../../cpp/src/")
endif()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/handle.pyx | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from pylibraft.common.handle import Handle as raftHandle

# Backwards-compatible alias: cuml.common.handle.Handle is now provided by
# pylibraft's Handle implementation.
Handle = raftHandle
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/opg_data_utils_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.common.opg_data_utils_mg cimport *
from libc.stdlib cimport malloc, free
from libc.stdint cimport uintptr_t
from cuml.common import input_to_cuml_array
from cython.operator cimport dereference as deref
from cuml.internals.array import CumlArray
def build_data_t(arys):
    """
    Function to create a floatData_t** or doubleData_t** from a list of
    cumlArrays

    Parameters
    ----------
    arys: list of cumlArrays of the same dtype, np.float32 or np.float64

    Returns
    -------
    ptr: vector pointer of either a floatData_t* or doubleData_t*,
        depending on dtype of input
    """
    # Both vectors are heap-allocated up front; the one matching the dtype
    # is populated and returned as an integer pointer for later use.
    # NOTE(review): the vector for the *other* dtype is never freed here,
    # which looks like a small per-call leak — confirm intended.
    # Ownership of the returned vector passes to the caller, who must
    # release it with free_data_t().
    cdef vector[floatData_t *] *data_f32 = new vector[floatData_t *]()
    cdef vector[doubleData_t *] *data_f64 = new vector[doubleData_t *]()
    cdef uintptr_t ary_ptr
    cdef floatData_t *data_f
    cdef doubleData_t *data_d
    cdef uintptr_t data_ptr
    if arys[0].dtype == np.float32:
        for idx in range(len(arys)):
            # One malloc'd struct per array, pointing at the array's data.
            data_f = <floatData_t*> malloc(sizeof(floatData_t))
            ary_ptr = arys[idx].ptr
            data_f.ptr = <float*> ary_ptr
            data_f.totalSize = len(arys[idx])
            data_f32.push_back(data_f)
        data_ptr = <uintptr_t> data_f32
        return data_ptr
    elif arys[0].dtype == np.float64:
        for idx in range(len(arys)):
            data_d = <doubleData_t*> malloc(sizeof(doubleData_t))
            ary_ptr = arys[idx].ptr
            data_d.ptr = <double*> ary_ptr
            data_d.totalSize = len(arys[idx])
            data_f64.push_back(data_d)
        data_ptr = <uintptr_t> data_f64
        return data_ptr
    else:
        raise TypeError('build_data_t: Arrays passed must be np.float32 or \
np.float64')
def free_data_t(data_t, dtype):
    """
    Function to free a vector of floatData_t* or doubleData_t*

    Parameters
    ----------
    data_t: a vector of floatData_t* or doubleData_t*
    dtype: np.float32 or np.float64 indicating whether data_t is a
        floatData_t* or doubleData_t*
    """
    # Releases the structs created by build_data_t; the underlying array
    # memory is owned by the Python-side CumlArrays and is not touched.
    # NOTE(review): the vector itself was allocated with C++ ``new`` in
    # build_data_t but is released here with C ``free``, so its destructor
    # never runs — confirm this is intentional.
    cdef uintptr_t data_ptr = data_t
    cdef vector[floatData_t*] *d32
    cdef vector[doubleData_t*] *d64
    if dtype == np.float32:
        d32 = <vector[floatData_t*]*> data_ptr
        # Free each malloc'd struct before releasing the container.
        for x_i in range(d32.size()):
            free(d32.at(x_i))
        free(d32)
    else:
        d64 = <vector[doubleData_t*]*> data_ptr
        for x_i in range(d64.size()):
            free(d64.at(x_i))
        free(d64)
def build_rank_size_pair(parts_to_sizes, rank):
    """
    Function to build a vector<rankSizePair*> mapping the rank to the
    sizes of partitions

    Parameters
    ----------
    parts_to_sizes: array of tuples in the format: [(rank,size)]
    rank: rank to be mapped

    Returns
    --------
    ptr: vector pointer of the RankSizePair*
    """
    # Caller must release the returned vector via free_rank_size_pair().
    cdef vector[RankSizePair*] *rsp_vec = new vector[RankSizePair*]()
    # NOTE(review): the ``rank`` argument is immediately shadowed by the
    # tuple unpacking below, so only the ranks inside parts_to_sizes are
    # actually used — confirm the parameter is still needed.
    for idx, rankToSize in enumerate(parts_to_sizes):
        rank, size = rankToSize
        rsp = <RankSizePair*> malloc(sizeof(RankSizePair))
        rsp.rank = <int>rank
        rsp.size = <size_t>size
        rsp_vec.push_back(rsp)
    cdef uintptr_t rsp_ptr = <uintptr_t> rsp_vec
    return rsp_ptr
def free_rank_size_pair(rank_size_t):
    """
    Function to free a vector of rankSizePair*

    Parameters
    ----------
    rank_size_t: vector of rankSizePair* to be freed.
    """
    # Reinterpret the opaque integer as the vector built by
    # build_rank_size_pair, then free each element and the container.
    cdef uintptr_t rank_size_ptr = rank_size_t
    cdef vector[RankSizePair *] *rsp_vec \
        = <vector[RankSizePair *]*> rank_size_ptr
    for x_i in range(rsp_vec.size()):
        free(rsp_vec.at(x_i))
    free(rsp_vec)
def build_part_descriptor(m, n, rank_size_t, rank):
    """
    Function to build a shared PartDescriptor object

    Parameters
    ----------
    m: total number of rows across all workers
    n: number of cols
    rank_size_t: vector of rankSizePair * to be used for
        building the part descriptor
    rank: rank to be mapped

    Returns
    --------
    ptr: PartDescriptor object (as an opaque integer pointer; release it
        with free_part_descriptor)
    """
    cdef uintptr_t rank_size_ptr = rank_size_t
    cdef vector[RankSizePair *] *rsp_vec \
        = <vector[RankSizePair *]*> rank_size_ptr
    # The descriptor copies the partition layout; it is heap-allocated and
    # owned by the caller.
    cdef PartDescriptor *descriptor \
        = new PartDescriptor(<size_t>m,
                             <size_t>n,
                             <vector[RankSizePair*]>deref(rsp_vec),
                             <int>rank)
    cdef uintptr_t desc_ptr = <uintptr_t>descriptor
    return desc_ptr
def free_part_descriptor(descriptor_ptr):
    """
    Function to free a PartDescriptor*

    Parameters
    ----------
    descriptor_ptr: PartDescriptor* to be freed
    """
    # NOTE(review): build_part_descriptor allocates with C++ ``new`` but
    # this releases with C ``free``, skipping the destructor — confirm
    # ``del`` was not intended here.
    cdef PartDescriptor *desc_c \
        = <PartDescriptor*><size_t>descriptor_ptr
    free(desc_c)
def build_pred_or_trans_arys(arys, order, dtype):
    """
    Allocate one zero-filled CumlArray per input array, matching each
    input's shape and using the requested memory ``order`` and ``dtype``.
    """
    return [
        CumlArray.zeros(ary.shape, order=order, dtype=dtype)
        for ary in arys
    ]
def _build_part_inputs(cuda_arr_ifaces,
                       parts_to_ranks,
                       m, n, local_rank,
                       convert_dtype):
    """
    Convert local float32 partitions plus a rank->size mapping into the
    C++-side inputs (local data vector and PartDescriptor) used by OPG
    primitives.

    Returns a tuple of (list of CumlArrays kept alive on the Python side,
    pointer to vector[floatData_t*], pointer to PartDescriptor).
    """
    # Normalize every partition to an F-ordered float32 CumlArray; the
    # returned list must stay referenced so the device pointers below
    # remain valid.
    cuml_arr_ifaces = []
    for arr in cuda_arr_ifaces:
        X_m, _, _, _ = \
            input_to_cuml_array(arr, order="F",
                                convert_to_dtype=(np.float32
                                                  if convert_dtype
                                                  else None),
                                check_dtype=[np.float32])
        cuml_arr_ifaces.append(X_m)
    # Wrap each partition's pointer/size in a malloc'd floatData_t.
    cdef vector[floatData_t*] *local_parts = new vector[floatData_t*]()
    for arr in cuml_arr_ifaces:
        data = <floatData_t*>malloc(sizeof(floatData_t))
        data.ptr = <float*><uintptr_t>arr.ptr
        data.totalSize = <size_t>arr.shape[0]*arr.shape[1]*sizeof(float)
        local_parts.push_back(data)
    # Build the global partition layout for this rank.
    cdef vector[RankSizePair*] partsToRanks
    for idx, rankToSize in enumerate(parts_to_ranks):
        rank, size = rankToSize
        rsp = <RankSizePair*>malloc(sizeof(RankSizePair))
        rsp.rank = <int>rank
        rsp.size = <size_t>size
        partsToRanks.push_back(rsp)
    cdef PartDescriptor *descriptor = \
        new PartDescriptor(<size_t>m,
                           <size_t>n,
                           <vector[RankSizePair*]>partsToRanks,
                           <int>local_rank)
    return cuml_arr_ifaces, <uintptr_t>local_parts, <uintptr_t>descriptor
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/sparsefuncs.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import cuml
from cuml.internals.input_utils import input_to_cuml_array, input_to_cupy_array
from cuml.internals.memory_utils import with_cupy_rmm
from cuml.internals.import_utils import has_scipy
from cuml.common.kernel_utils import cuda_kernel_factory
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import gpu_only_import_from
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
cp_csr_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csr_matrix")
cp_coo_matrix = gpu_only_import_from("cupyx.scipy.sparse", "coo_matrix")
cp_csc_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csc_matrix")
if has_scipy():
from scipy.sparse import csr_matrix, coo_matrix, csc_matrix
else:
from cuml.common.import_utils import DummyClass
csr_matrix = DummyClass
coo_matrix = DummyClass
csc_matrix = DummyClass
def _map_l1_norm_kernel(dtype):
    """Creates cupy RawKernel for csr_raw_normalize_l1 function.

    ``dtype`` is a tuple of (data dtype, indices dtype, indptr dtype); the
    placeholders {0}/{1}/{2} in the kernel source are substituted by
    cuda_kernel_factory. One CUDA thread handles one CSR row: it sums the
    absolute values of the row and divides each entry by that sum,
    skipping all-zero rows.
    """
    map_kernel_str = r"""
    ({0} *data, {1} *indices, {2} *indptr, int n_samples) {
      int tid = blockDim.x * blockIdx.x + threadIdx.x;
      if(tid >= n_samples) return;
      {0} sum = 0.0;
      for(int i = indptr[tid]; i < indptr[tid+1]; i++) {
        sum += fabs(data[i]);
      }
      if(sum == 0) return;
      for(int i = indptr[tid]; i < indptr[tid+1]; i++) {
        data[i] /= sum;
      }
    }
    """
    return cuda_kernel_factory(map_kernel_str, dtype, "map_l1_norm_kernel")
def _map_l2_norm_kernel(dtype):
    """Creates cupy RawKernel for csr_raw_normalize_l2 function.

    Same per-row threading scheme as _map_l1_norm_kernel, but each row is
    divided by the square root of its sum of squares (Euclidean norm);
    all-zero rows are left untouched.
    """
    map_kernel_str = r"""
    ({0} *data, {1} *indices, {2} *indptr, int n_samples) {
      int tid = blockDim.x * blockIdx.x + threadIdx.x;
      if(tid >= n_samples) return;
      {0} sum = 0.0;
      for(int i = indptr[tid]; i < indptr[tid+1]; i++) {
        sum += (data[i] * data[i]);
      }
      if(sum == 0) return;
      sum = sqrt(sum);
      for(int i = indptr[tid]; i < indptr[tid+1]; i++) {
        data[i] /= sum;
      }
    }
    """
    return cuda_kernel_factory(map_kernel_str, dtype, "map_l2_norm_kernel")
@cuml.internals.api_return_any()
def csr_row_normalize_l1(X, inplace=True):
    """Row normalize for csr matrix using the l1 norm.

    Parameters
    ----------
    X : CSR matrix (device) whose rows are scaled so each row's absolute
        values sum to 1 (zero rows are left unchanged by the kernel).
    inplace : bool, default=True
        When False, X is copied first and the copy is modified/returned.
    """
    if not inplace:
        X = X.copy()
    # Launch one thread per row, 32 threads per block.
    kernel = _map_l1_norm_kernel((X.dtype, X.indices.dtype, X.indptr.dtype))
    kernel(
        (math.ceil(X.shape[0] / 32),),
        (32,),
        (X.data, X.indices, X.indptr, X.shape[0]),
    )
    return X
@cuml.internals.api_return_any()
def csr_row_normalize_l2(X, inplace=True):
    """Row normalize for csr matrix using the l2 norm.

    Parameters
    ----------
    X : CSR matrix (device) whose rows are scaled to unit Euclidean norm
        (zero rows are left unchanged by the kernel).
    inplace : bool, default=True
        When False, X is copied first and the copy is modified/returned.
    """
    if not inplace:
        X = X.copy()
    # Launch one thread per row, 32 threads per block.
    kernel = _map_l2_norm_kernel((X.dtype, X.indices.dtype, X.indptr.dtype))
    kernel(
        (math.ceil(X.shape[0] / 32),),
        (32,),
        (X.data, X.indices, X.indptr, X.shape[0]),
    )
    return X
@cuml.internals.api_return_any()
def csr_diag_mul(X, y, inplace=True):
    """Multiply a sparse X matrix with diagonal matrix y.

    Each stored value X[i, j] is multiplied by the diagonal entry y[j],
    i.e. this computes X @ diag(y) on the CSR data in place (or on a copy
    when ``inplace=False``).
    """
    if not inplace:
        X = X.copy()
    # grab underlying dense ar from y
    y = y.data[0]
    # Column index of each stored value selects its diagonal factor.
    X.data *= y[X.indices]
    return X
@cuml.internals.api_return_any()
def create_csr_matrix_from_count_df(
    count_df, empty_doc_ids, n_doc, n_features, dtype=np.float32
):
    """
    Create a sparse matrix from the count of tokens by document

    Parameters
    ----------
    count_df = cudf.DataFrame({'count':..., 'doc_id':.., 'token':.. })
        sorted by doc_id and token
    empty_doc_ids = cupy array containing doc_ids with no tokens
    n_doc: Total number of documents
    n_features: Number of features
    dtype: Output dtype
    """
    # count_df is pre-sorted, so its 'count' / 'token' columns are already
    # the CSR data / column-index arrays.
    data = count_df["count"].values
    indices = count_df["token"].values
    # Number of stored tokens per document, used to derive indptr.
    doc_token_counts = count_df["doc_id"].value_counts().reset_index()
    del count_df  # free the (potentially large) frame early
    doc_token_counts = doc_token_counts.rename(
        {"doc_id": "token_counts", "index": "doc_id"}, axis=1
    ).sort_values(by="doc_id")
    # Documents with no tokens get an explicit 0 so every doc has a row.
    token_counts = _insert_zeros(
        doc_token_counts["token_counts"], empty_doc_ids
    )
    # indptr = [0, c0, c0+c1, ...]: cumulative counts with a leading zero.
    indptr = token_counts.cumsum()
    indptr = cp.pad(indptr, (1, 0), "constant")
    return cupyx.scipy.sparse.csr_matrix(
        arg1=(data, indices, indptr), dtype=dtype, shape=(n_doc, n_features)
    )
def _insert_zeros(ary, zero_indices):
    """
    Build an array of length ``len(ary) + len(zero_indices)`` whose entries
    at ``zero_indices`` are 0 and whose remaining slots are filled, in
    order, with the values of ``ary``.

    Examples
    --------
    _insert_zeros([1, 2, 3], [1, 3]) => [1, 0, 2, 0, 3]
    """
    if len(zero_indices) == 0:
        return ary.values
    total = len(ary) + len(zero_indices)
    out = cp.zeros(total, dtype=cp.int32)
    # Mask of the slots that are NOT forced to zero; those receive `ary`.
    fill_mask = ~cp.in1d(
        cp.arange(0, total, dtype=cp.int32), zero_indices
    )
    out[fill_mask] = ary
    return out
@with_cupy_rmm
def extract_knn_graph(knn_graph):
    """
    Converts KNN graph from CSR, COO and CSC formats into separate
    distance and indice arrays. Input can be a cupy sparse graph (device)
    or a numpy sparse graph (host).

    Returns (indices, distances) flattened from the graph, or None when
    the (converted) graph is not in a supported format.
    """
    if isinstance(knn_graph, (csc_matrix, cp_csc_matrix)):
        # CSC -> CSR conversion does not keep neighbors sorted by
        # distance within each row, so re-sort each row's entries.
        knn_graph = cupyx.scipy.sparse.csr_matrix(knn_graph)
        n_samples = knn_graph.shape[0]
        # assumes every row stores the same number of neighbors, so the
        # data can be viewed as (n_samples, n_neighbors) — TODO confirm
        reordering = knn_graph.data.reshape((n_samples, -1))
        reordering = reordering.argsort()
        n_neighbors = reordering.shape[1]
        # Turn per-row argsort positions into flat indices into data.
        reordering += (cp.arange(n_samples) * n_neighbors)[:, np.newaxis]
        reordering = reordering.flatten()
        knn_graph.indices = knn_graph.indices[reordering]
        knn_graph.data = knn_graph.data[reordering]
    knn_indices = None
    if isinstance(knn_graph, (csr_matrix, cp_csr_matrix)):
        knn_indices = knn_graph.indices
    elif isinstance(knn_graph, (coo_matrix, cp_coo_matrix)):
        knn_indices = knn_graph.col
    if knn_indices is not None:
        knn_dists = knn_graph.data
        return knn_indices, knn_dists
    else:
        return None
@with_cupy_rmm
def extract_pairwise_dists(pw_dists, n_neighbors):
    """
    Extract the nearest neighbors distances and indices
    from a pairwise distance matrix.

    Parameters
    ----------
    pw_dists: paiwise distances matrix of shape (n_samples, n_samples)
    n_neighbors: number of nearest neighbors

    Returns
    -------
    (knn_indices, knn_dists): each of shape (n_samples, n_neighbors),
        sorted by increasing distance within each row.

    (inspired from Scikit-Learn code)
    """
    pw_dists, _, _, _ = input_to_cupy_array(pw_dists)
    n_rows = pw_dists.shape[0]
    sample_range = cp.arange(n_rows)[:, None]
    # argpartition gives the n_neighbors smallest per row (unordered)...
    knn_indices = cp.argpartition(pw_dists, n_neighbors - 1, axis=1)
    knn_indices = knn_indices[:, :n_neighbors]
    # ...then sort just those candidates by their actual distance.
    argdist = cp.argsort(pw_dists[sample_range, knn_indices])
    knn_indices = knn_indices[sample_range, argdist]
    knn_dists = pw_dists[sample_range, knn_indices]
    return knn_indices, knn_dists
@with_cupy_rmm
def extract_knn_infos(knn_info, n_neighbors):
    """
    Extract the nearest neighbors distances and indices
    from the knn_info parameter.

    Parameters
    ----------
    knn_info : array / sparse array / tuple, optional (device or host)
        Either one of :
            - Tuple (indices, distances) of arrays of
              shape (n_samples, n_neighbors)
            - Pairwise distances dense array of shape (n_samples, n_samples)
            - KNN graph sparse array (preferably CSR/COO)
    n_neighbors: number of nearest neighbors

    Returns
    -------
    (knn_indices, knn_dists) as flattened CumlArrays (int64 / float32),
    or None when knn_info is None or could not be interpreted.
    """
    if knn_info is None:
        # no KNN was provided
        return None
    deepcopy = False
    if isinstance(knn_info, tuple):
        # dists and indices provided as a tuple
        results = knn_info
    else:
        isaKNNGraph = isinstance(
            knn_info,
            (
                csr_matrix,
                coo_matrix,
                csc_matrix,
                cp_csr_matrix,
                cp_coo_matrix,
                cp_csc_matrix,
            ),
        )
        if isaKNNGraph:
            # extract dists and indices from a KNN graph; copy so the
            # caller's graph is not mutated downstream
            deepcopy = True
            results = extract_knn_graph(knn_info)
        else:
            # extract dists and indices from a pairwise distance matrix
            results = extract_pairwise_dists(knn_info, n_neighbors)
    if results is not None:
        knn_indices, knn_dists = results
        # Normalize both outputs to flat, C-ordered CumlArrays with the
        # dtypes expected by the C++ layer.
        knn_indices_m, _, _, _ = input_to_cuml_array(
            knn_indices.flatten(),
            order="C",
            deepcopy=deepcopy,
            check_dtype=np.int64,
            convert_to_dtype=np.int64,
        )
        knn_dists_m, _, _, _ = input_to_cuml_array(
            knn_dists.flatten(),
            order="C",
            deepcopy=deepcopy,
            check_dtype=np.float32,
            convert_to_dtype=np.float32,
        )
        return knn_indices_m, knn_dists_m
    else:
        return None
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/exceptions.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class NotFittedError(ValueError, AttributeError):
    """Raised when an estimator is used before it has been fitted.

    Inheriting from both ``ValueError`` and ``AttributeError`` keeps
    exception handlers written against either base class — including
    those from older releases — working unchanged.
    """
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/cuda.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
class CudaRuntimeError(RuntimeError):
    """RuntimeError that captures and formats the last CUDA error."""

    def __init__(self, extraMsg=None):
        # Query the most recent CUDA error plus its printable name/message.
        cdef _Error e = cudaGetLastError()
        cdef bytes errMsg = cudaGetErrorString(e)
        cdef bytes errName = cudaGetErrorName(e)
        msg = "Error! %s reason='%s'" % (errName.decode(), errMsg.decode())
        if extraMsg is not None:
            msg += " extraMsg='%s'" % extraMsg
        super(CudaRuntimeError, self).__init__(msg)
cdef class Stream:
    """
    Stream represents a thin-wrapper around cudaStream_t and its operations.

    Examples
    --------

    >>> import cuml
    >>> stream = cuml.cuda.Stream()
    >>> stream.sync()
    >>> del stream  # optional!
    """

    # NOTE:
    # If we store _Stream directly, this always leads to the following error:
    # "Cannot convert Python object to '_Stream'"
    # I was unable to find a good solution to this in reasonable time. Also,
    # since cudaStream_t is a pointer anyways, storing it as an integer should
    # be just fine (although, that certainly is ugly and hacky!).
    cdef size_t s

    def __cinit__(self):
        # Guard against double-creation if __cinit__ runs more than once.
        if self.s != 0:
            return
        cdef _Stream stream
        cdef _Error e = cudaStreamCreate(&stream)
        if e != 0:
            raise CudaRuntimeError("Stream create")
        self.s = <size_t>stream

    def __dealloc__(self):
        # Drain outstanding work before destroying the stream.
        self.sync()
        cdef _Stream stream = <_Stream>self.s
        cdef _Error e = cudaStreamDestroy(stream)
        if e != 0:
            raise CudaRuntimeError("Stream destroy")

    def sync(self):
        """
        Synchronize on the cudastream owned by this object. Note that this
        could raise exception due to issues with previous asynchronous
        launches
        """
        cdef _Stream stream = <_Stream>self.s
        cdef _Error e = cudaStreamSynchronize(stream)
        if e != 0:
            raise CudaRuntimeError("Stream sync")

    def getStream(self):
        """Return the raw cudaStream_t handle as an integer."""
        return self.s
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/rng_state.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
from libcpp cimport bool
from libc.stdint cimport uint64_t
# Declarations mirroring raft::random's RNG state so cuml Cython modules
# can construct and pass RngState objects to C++ APIs.
cdef extern from "raft/random/rng_state.hpp" namespace \
        "raft::random":
    enum GeneratorType:
        GenPhilox, GenPC

    cdef struct RngState:
        RngState(uint64_t seed) except +
        uint64_t seed,
        uint64_t base_subsequence,
        GeneratorType type
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/opg_data_utils_mg.pxd | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Util functions, will be moved to their own file as the other methods are
# refactored
# todo: use cuda_array_interface instead of arr_interfaces for building this
from libc.stdint cimport int64_t
from libcpp.vector cimport vector
from cuml.common.cython_utils import *
from cython.operator cimport dereference as deref
from libcpp.vector cimport vector
# Declarations mirroring cumlprims' OPG (one-process-per-GPU) data
# containers: per-partition Data wrappers and the partition-layout
# descriptor used by the *_mg Cython modules.
cdef extern from "cumlprims/opg/matrix/data.hpp" \
        namespace "MLCommon::Matrix":

    cdef cppclass Data[T]:
        Data(T *ptr, size_t totalSize)

    cdef cppclass floatData_t:
        floatData_t(float *ptr, size_t totalSize)
        float *ptr
        size_t totalSize

    cdef cppclass doubleData_t:
        doubleData_t(double *ptr, size_t totalSize)
        double *ptr
        size_t totalSize

ctypedef Data[int64_t] int64Data_t
ctypedef Data[int] intData_t
ctypedef vector[int*] int_ptr_vector
ctypedef vector[float*] float_ptr_vector

cdef extern from "cumlprims/opg/matrix/part_descriptor.hpp" \
        namespace "MLCommon::Matrix":

    # Maps one partition to its owning rank and row count.
    cdef cppclass RankSizePair:
        int rank
        size_t size

    cdef cppclass PartDescriptor:
        PartDescriptor(size_t M,
                       size_t N,
                       vector[RankSizePair *] &partsToRanks,
                       int myrank)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/array_descriptor.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass, field
from cuml.internals.array import CumlArray
import cuml
from cuml.internals.input_utils import (
input_to_cuml_array,
determine_array_type,
)
@dataclass
class CumlArrayDescriptorMeta:
    """
    Per-attribute state held by ``CumlArrayDescriptor``: the array type the
    value was originally assigned with, plus a cache of that value converted
    to other output types.
    """

    # The type for the input value. One of: _input_type_to_str
    input_type: str

    # Cache mapping type-name -> converted value. Always holds the original
    # input; cleared whenever the attribute is re-assigned.
    values: dict = field(default_factory=dict)

    def get_input_value(self):
        """Return the stored value in its original input type."""
        assert (
            self.input_type in self.values
        ), "Missing value for input_type {}".format(self.input_type)
        return self.values[self.input_type]

    def __getstate__(self):
        # Pickle only the original-format value; cached conversions are
        # derived data and would bloat the payload.
        return {
            "input_type": self.input_type,
            "input_value": self.get_input_value(),
        }

    def __setstate__(self, d):
        # Rebuild the cache with just the original value.
        self.input_type = d["input_type"]
        self.values = {self.input_type: d["input_value"]}
class CumlArrayDescriptor:
    """
    Python descriptor object to control getting/setting `CumlArray` attributes
    on `Base` objects. See the Estimator Guide for an in depth guide.

    Stores per-instance state in a CumlArrayDescriptorMeta under the
    attribute's own name in ``instance.__dict__``, caching conversions to
    each requested output type.
    """

    def __init__(self, order="K"):
        # order corresponds to the order that the CumlArray attribute
        # should be in to work with the C++ algorithms.
        self.order = order

    def __set_name__(self, owner, name):
        self.name = name
        # Expose the required memory order as "<attr>_order" on the class.
        setattr(owner, name + "_order", self.order)

    def _get_meta(
        self, instance, throw_on_missing=False
    ) -> CumlArrayDescriptorMeta:
        # Fetch (or lazily create) the metadata object for this attribute.
        if throw_on_missing:
            if self.name not in instance.__dict__:
                raise AttributeError()
        return instance.__dict__.setdefault(
            self.name, CumlArrayDescriptorMeta(input_type=None, values={})
        )

    def _to_output(self, instance, to_output_type, to_output_dtype=None):
        """Convert the stored value to ``to_output_type``, caching results."""
        existing = self._get_meta(instance, throw_on_missing=True)
        # Handle input_type==None which means we have a non-array object stored
        if existing.input_type is None:
            # Dont save in the cache. Just return the value
            return existing.values[existing.input_type]
        # Return a cached value if it exists
        if to_output_type in existing.values:
            return existing.values[to_output_type]
        # If the input type was anything but CumlArray, need to create one now
        if "cuml" not in existing.values:
            existing.values["cuml"] = input_to_cuml_array(
                existing.get_input_value(), order="K"
            ).array
        cuml_arr: CumlArray = existing.values["cuml"]
        # Do the conversion
        output = cuml_arr.to_output(
            output_type=to_output_type, output_dtype=to_output_dtype
        )
        # Cache the value
        existing.values[to_output_type] = output
        return output

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        existing = self._get_meta(instance, throw_on_missing=True)
        assert len(existing.values) > 0
        # Get the global output type
        output_type = cuml.global_settings.output_type
        # First, determine if we need to call to_output at all
        if output_type == "mirror":
            # We must be internal, just return the input type
            return existing.get_input_value()
        else:
            # We are external, determine the target output type
            if output_type is None:
                # Default to the owning base object output_type
                output_type = instance.output_type
            if output_type == "input":
                # Default to the owning base object, _input_type
                output_type = instance._input_type
            return self._to_output(instance, output_type)

    def __set__(self, instance, value):
        existing = self._get_meta(instance)
        # Determine the type
        existing.input_type = determine_array_type(value)
        # Clear any existing values (cached conversions are now stale)
        existing.values.clear()
        # Set the existing value
        existing.values[existing.input_type] = value

    def __delete__(self, instance):
        if instance is not None:
            del instance.__dict__[self.name]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/device_selection.py | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.device_type import DeviceType
def set_global_device_type(device_type):
    """Set the process-wide execution device ('cpu'/'gpu' style string)."""
    GlobalSettings().device_type = DeviceType.from_str(device_type)
def get_global_device_type():
    """Return the process-wide DeviceType currently in effect."""
    return GlobalSettings().device_type
class using_device_type:
    """Context manager that temporarily overrides the global device type,
    restoring the previous setting on exit (even on exception)."""

    def __init__(self, device_type):
        self.device_type = device_type
        self.prev_device_type = None

    def __enter__(self):
        # Remember the current setting so __exit__ can restore it.
        self.prev_device_type = GlobalSettings().device_type
        set_global_device_type(self.device_type)

    def __exit__(self, *_):
        set_global_device_type(self.prev_device_type)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/numba_utils.py | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DEPRECATED: to be removed once full migration to CumlArray is done
# remaining usages: blobs.pyx, regression.pyx
from numba.cuda.cudadrv.driver import driver
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from("numba", "cuda")
@cuda.jit
def gpu_zeros_1d(out):
    """CUDA kernel: zero a 1-D array, one element per thread."""
    i = cuda.grid(1)
    if i < out.shape[0]:
        out[i] = 0
@cuda.jit
def gpu_zeros_2d(out):
    """CUDA kernel: zero a 2-D array, one element per (i, j) thread."""
    i, j = cuda.grid(2)
    if i < out.shape[0] and j < out.shape[1]:
        out[i][j] = 0
def zeros(size, dtype, order="F"):
    """
    Return device array of zeros generated on device.

    Parameters
    ----------
    size : int or tuple
        1-D length, or 2-D shape as a tuple.
    dtype : numpy dtype for the output array.
    order : 'F' or 'C' memory layout, default 'F'.
    """
    out = cuda.device_array(size, dtype=dtype, order=order)
    if isinstance(size, tuple):
        # 2-D case: one block row per tpb rows.
        # NOTE(review): the launch config ``[bpg, tpb]`` is 1-D (scalars)
        # while the kernel indexes with cuda.grid(2); the second grid axis
        # therefore never advances past 0 — confirm all columns are
        # actually zeroed (file is marked DEPRECATED above).
        tpb = driver.get_device().MAX_THREADS_PER_BLOCK
        nrows = size[0]
        bpg = (nrows + tpb - 1) // tpb
        gpu_zeros_2d[bpg, tpb](out)
    elif size > 0:
        gpu_zeros_1d.forall(size)(out)
    return out
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/cuda.pxd | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Populate this with more typedef's (eg: events) as and when needed
# Minimal CUDA runtime typedefs used by cuda.pyx.
# Populate this with more typedef's (eg: events) as and when needed
cdef extern from * nogil:
    ctypedef void* _Stream "cudaStream_t"
    ctypedef int _Error "cudaError_t"

# Populate this with more runtime api method declarations as and when needed
cdef extern from "cuda_runtime_api.h" nogil:
    _Error cudaStreamCreate(_Stream* s)
    _Error cudaStreamDestroy(_Stream s)
    _Error cudaStreamSynchronize(_Stream s)
    _Error cudaGetLastError()
    const char* cudaGetErrorString(_Error e)
    const char* cudaGetErrorName(_Error e)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/__init__.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from cuml.internals.array import CumlArray
# from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.available_devices import is_cuda_available
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
# utils
from cuml.internals import logger
from cuml.internals.import_utils import has_cupy
from cuml.internals.import_utils import has_dask
from cuml.internals.import_utils import check_min_numba_version
from cuml.internals.import_utils import check_min_cupy_version, has_scipy
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.input_utils import input_to_host_array
from cuml.internals.input_utils import input_to_host_array_with_sparse_support
from cuml.internals.memory_utils import rmm_cupy_ary
from cuml.internals.memory_utils import set_global_output_type
from cuml.internals.memory_utils import using_memory_type
from cuml.internals.memory_utils import using_output_type
from cuml.internals.memory_utils import with_cupy_rmm
from cuml.common.device_selection import using_device_type
if is_cuda_available():
from cuml.common.pointer_utils import device_of_gpu_matrix
# legacy to be removed after complete CumlAray migration
from cuml.internals.input_utils import sparse_scipy_to_cp
from cuml.common.timing_utils import timed
# Public re-export surface of ``cuml.common``.
# NOTE(review): ``device_of_gpu_matrix`` is only imported above when
# ``is_cuda_available()`` is True, so ``from cuml.common import *`` would
# raise on CPU-only builds — confirm this is intentional.
__all__ = [
    "CumlArray",
    "SparseCumlArray",
    "device_of_gpu_matrix",
    "has_cupy",
    "has_dask",
    "check_min_numba_version",
    "check_min_cupy_version",
    "has_scipy",
    "input_to_cuml_array",
    "input_to_host_array",
    "input_to_host_array_with_sparse_support",
    "rmm_cupy_ary",
    "set_global_output_type",
    "using_device_type",
    "using_memory_type",
    "using_output_type",
    "with_cupy_rmm",
    "sparse_scipy_to_cp",
    "timed",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/timing_utils.py | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from contextlib import contextmanager
# Helper function for timing blocks of code.
@contextmanager
def timed(name):
    """
    Context manager that prints the wall time spent inside the block.

    Parameters
    ----------
    name : str
        Label printed next to the elapsed time.

    Examples
    --------
    >>> with timed("Print Call"):
    ...     print("Hello, World")  # doctest: +SKIP
    Hello, World
    ..Print Call             :   0.0005
    """
    # perf_counter() is monotonic, so the interval can never be skewed
    # (or go negative) by a wall-clock adjustment, unlike time.time().
    t0 = time.perf_counter()
    yield
    t1 = time.perf_counter()
    # Name left-justified to 24 chars, elapsed seconds with 4 decimals.
    print("..%-24s: %8.4f" % (name, t1 - t0))
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/common/doc_utils.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decorators to generate common docstrings in the codebase.
Dense datatypes are currently the default, if you're a developer that landed
here, the docstrings apply to every parameter to which the decorators
are applied. The docstrings are generated at import time.
There are 2 decorators:
- generate_docstring: Meant to be used by fit/predict/et.al methods that have
the typical signatures (i.e. fit(x,y) or predict(x)). It detects the
parameters and default values and generates the appropriate docstring,
with some configurability for shapes and formats.
- insert_into_docstring: More flexible but less automatic method, meant to be
used by functions that use our common dense or sparse datatypes, but have
many more custom parameters that are particular to the class(es) as opposed
to being common in the codebase. Allows to keep our documentation up to
date and correct with minimal changes by keeping our common datatypes
concentrated here. NearestNeigbors is a good example of this use case.
More data types can be added as we need them.
cuml.dask datatype version of the docstrings will come in a future update.
"""
from inspect import signature
import inspect
_parameters_docstrings = {
"dense": "{name} : array-like (device or host) shape = {shape}\n"
" Dense matrix containing floats or doubles.\n"
" Acceptable formats: CUDA array interface compliant objects like\n"
" CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas\n"
" DataFrame/Series.",
"dense_anydtype": "{name} : array-like (device or host) shape = {shape}\n"
" Dense matrix of any dtype.\n"
" Acceptable formats: CUDA array interface compliant objects like\n"
" CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas\n"
" DataFrame/Series.",
"dense_intdtype": "{name} : array-like (device or host) shape = {shape}\n"
" Dense matrix of type np.int32.\n"
" Acceptable formats: CUDA array interface compliant objects like\n"
" CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas\n"
" DataFrame/Series.",
"sparse": "{name} : sparse array-like (device) shape = {shape}\n"
" Dense matrix containing floats or doubles.\n"
" Acceptable formats: cupyx.scipy.sparse",
"dense_sparse": "{name} : array-like (device or host) shape = {shape}\n"
" Dense or sparse matrix containing floats or doubles.\n"
" Acceptable dense formats: CUDA array interface compliant objects like\n" # noqa
" CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas\n"
" DataFrame/Series.",
"convert_dtype_fit": "convert_dtype : bool, optional (default = {default})\n"
" When set to True, the train method will, when necessary, convert\n"
" y to be the same data type as X if they differ. This\n"
" will increase memory used for the method.",
"convert_dtype_other": "convert_dtype : bool, optional (default = {default})\n"
" When set to True, the {func_name} method will, when necessary,\n"
" convert the input to the data type which was used to train the\n"
" model. This will increase memory used for the method.",
"convert_dtype_single": "convert_dtype : bool, optional (default = {default})\n"
" When set to True, the method will automatically\n"
" convert the inputs to {dtype}.",
"sample_weight": "sample_weight : array-like (device or host) shape = (n_samples,), default={default}\n" # noqa
" The weights for each observation in X. If None, all observations\n"
" are assigned equal weight.\n"
" Acceptable dense formats: CUDA array interface compliant objects like\n" # noqa
" CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas\n"
" DataFrame/Series.", # noqa
"return_sparse": "return_sparse : bool, optional (default = {default})\n"
" Ignored when the model is not fit on a sparse matrix\n"
" If True, the method will convert the result to a\n"
" cupyx.scipy.sparse.csr_matrix object.\n"
" NOTE: Currently, there is a loss of information when converting\n"
" to csr matrix (cusolver bug). Default will be switched to True\n"
" once this is solved.",
"sparse_tol": "sparse_tol : float, optional (default = {default})\n"
" Ignored when return_sparse=False.\n"
" If True, values in the inverse transform below this parameter\n"
" are clipped to 0.",
None: "{name} : None\n"
" Ignored. This parameter exists for compatibility only.",
}
_parameter_possible_values = [
"name",
"type",
"shape",
"default",
"description",
"accepted",
]
_return_values_docstrings = {
"dense": "{name} : cuDF, CuPy or NumPy object depending on cuML's output type configuration, shape = {shape}\n" # noqa
" {description}\n\n For more information on how to configure cuML's output type,\n" # noqa
" refer to: `Output Data Type Configuration`_.", # noqa
"dense_sparse": "{name} : cuDF, CuPy or NumPy object depending on cuML's output type configuration, cupyx.scipy.sparse for sparse output, shape = {shape}\n" # noqa
" {description}\n\n For more information on how to configure cuML's dense output type,\n" # noqa
" refer to: `Output Data Type Configuration`_.", # noqa
"dense_datatype": "cuDF, CuPy or NumPy object depending on cuML's output type"
"configuration, shape ={shape}",
"dense_sparse_datatype": "cuDF, CuPy or NumPy object depending on cuML's output type"
"configuration, shape ={shape}",
"custom_type": "{name} : {type}\n" " {description}",
}
_return_values_possible_values = ["name", "type", "shape", "description"]
_simple_params = ["return_sparse", "sparse_tol", "sample_weight"]
def generate_docstring(
    X="dense",
    X_shape="(n_samples, n_features)",
    y="dense",
    y_shape="(n_samples, 1)",
    convert_dtype_cast=False,
    skip_parameters=None,
    skip_parameters_heading=False,
    prepend_parameters=True,
    parameters=False,
    return_values=False,
):
    """
    Decorator to generate docstrings of common functions in the codebase.
    It will auto detect what parameters and default values the function has.
    Unfortunately due to using cython, we cannot (cheaply) do detection of
    return values.

    Currently auto detected variables include:

    - X
    - y
    - convert_dtype
    - sample_weights
    - return_sparse
    - sparse_tol

    Typical usage scenarios:

    Examples
    --------

    # for a function that passes all dense parameters, no need to specify
    # anything, and the decorator auto detects the parameters and defaults

    @generate_docstring()
    def fit(self, X, y, convert_dtype=True):

    # for a function that takes X as dense or sparse

    @generate_docstring(X='dense_sparse')
    def fit(self, X, y, sample_weight=None):

    # to specify return values

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})

    Parameters
    -----------
    X : str (default = 'dense')
        Data type of variable X. Currently accepted types are: dense,
        dense_anydtype, dense_intdtype, sparse, dense_sparse
    X_shape : str (default = '(n_samples, n_features)')
        Shape of variable X
    y : str (default = 'dense')
        Data type of variable y. Currently accepted types are: dense,
        dense_anydtype, dense_intdtype, sparse, dense_sparse
    y_shape : str (default = '(n_samples, 1)')
        Shape of variable y
    convert_dtype_cast : Boolean or str (default = False)
        If not false, use it to specify when convert_dtype is used to convert
        to a single specific dtype (as opposed to converting the dtype of one
        variable to the dtype of another for example). Example of this is how
        NearestNeighbors and UMAP use convert_dtype to convert inputs to
        np.float32.
    skip_parameters : list of str or None (default = None)
        Use if you want the decorator to skip generating a docstring entry
        for a specific parameter. None is equivalent to an empty list.
    skip_parameters_heading : boolean (default = False)
        Set to True to not generate the Parameters section heading
    prepend_parameters : boolean (default = True)
        Use when setting skip_parameters_heading to True, so that the
        parameters inserted by the decorator are inserted before the
        parameters you already have in your docstring.
    return_values : dict or list of dicts (default = False)
        Use to generate docstrings of return values. One dictionary per
        return value, this is the format:
            {'name': 'name_of_variable',
             'type': 'data type of returned value',
             'description': 'Description of variable',
             'shape': 'shape of returned variable'}
        If type is one of dense or dense_sparse then the type is generated
        from the corresponding entry in _return_values_docstrings. Otherwise
        the type is used as specified.

    """
    # Normalize here rather than using a mutable ``[]`` default argument
    # (classic Python pitfall); behavior is identical since the list is
    # only ever read.
    if skip_parameters is None:
        skip_parameters = []

    def deco(func):
        params = signature(func).parameters
        if func.__doc__ is None:
            func.__doc__ = ""

        # Add parameter section header if needed, can be skipped
        if (
            "X" in params or "y" in params or parameters
        ) and not skip_parameters_heading:
            func.__doc__ += "\nParameters\n----------\n"

        # Check if we want to prepend the parameters
        if skip_parameters_heading and prepend_parameters:
            # +11 skips past "----------\n" so generated entries land just
            # after the existing section header.
            loc_pars = func.__doc__.find("----------") + 11
            current_params_in_docstring = func.__doc__[loc_pars:]
            func.__doc__ = func.__doc__[:loc_pars]

        # Process each parameter
        for par, value in params.items():
            if par == "self":
                pass
            # X and y are the most common
            elif par == "X" and par not in skip_parameters:
                func.__doc__ += _parameters_docstrings[X].format(
                    name=par, shape=X_shape
                )
            elif par == "y" and par not in skip_parameters:
                func.__doc__ += _parameters_docstrings[y].format(
                    name=par, shape=y_shape
                )
            # convert_dtype requires some magic to distinguish
            # whether we use the fit version or the version
            # for the other methods.
            elif par == "convert_dtype" and par not in skip_parameters:
                if not convert_dtype_cast:
                    if func.__name__ == "fit":
                        k = "convert_dtype_fit"
                    else:
                        k = "convert_dtype_other"
                    func.__doc__ += _parameters_docstrings[k].format(
                        default=params["convert_dtype"].default,
                        func_name=func.__name__,
                    )
                else:
                    func.__doc__ += _parameters_docstrings[
                        "convert_dtype_single"
                    ].format(
                        default=params["convert_dtype"].default,
                        dtype=convert_dtype_cast,
                    )
            # All other parameters only take a default (for now).
            else:
                if par in _simple_params:
                    func.__doc__ += _parameters_docstrings[par].format(
                        default=params[par].default
                    )
            func.__doc__ += "\n\n"

        if skip_parameters_heading and prepend_parameters:
            # indexing at 8 to match indentation of inserted parameters
            # this can be replaced with indentation detection
            # https://github.com/rapidsai/cuml/issues/2714
            func.__doc__ += current_params_in_docstring[8:]

        # Add return section header if needed, no option to skip currently.
        if return_values:
            func.__doc__ += "\nReturns\n-------\n"

            # convenience call to allow users to pass a single return
            # value as a dictionary instead of a list of dictionaries
            rets = (
                [return_values]
                if not isinstance(return_values, list)
                else return_values
            )

            # process each entry in the return_values
            # auto naming of predicted variable names will be a
            # future improvement
            for ret in rets:
                # BUGFIX: work on a copy. The previous code did
                # ``del ret["type"]`` in place, so reusing the same
                # return_values dict on a second decorated function raised
                # KeyError('type').
                ret = dict(ret)
                if ret["type"] in _return_values_docstrings:
                    key = ret["type"]
                    # non custom types don't take the type parameter
                    del ret["type"]
                else:
                    key = "custom_type"
                # ret is already a dictionary, we just use it for the named
                # parameters
                func.__doc__ += _return_values_docstrings[key].format(**ret)
                func.__doc__ += "\n\n"
        return func

    return deco
def insert_into_docstring(parameters=False, return_values=False):
    """
    Decorator that fills ``{}`` placeholders in an existing docstring with
    the standard datatype descriptions used across the codebase.

    Write plain ``{}`` placeholders in the docstring, then list one
    ``(type, shape)`` tuple per placeholder: parameter placeholders are
    filled first (in order), followed by return-value placeholders.

    Examples
    --------

    @insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')],
                           return_values=[('dense', '(n_samples, n_features)'),
                                          ('dense',
                                           '(n_samples, n_features)')])
    def kneighbors(self, X=None, n_neighbors=None, return_distance=True,
                   convert_dtype=True):
        \"""
        Query the GPU index for the k nearest neighbors of column vectors in X.

        Parameters
        ----------
        X : {}

        Returns
        -------
        distances : {}

        indices : {}
        \"""

    Parameters
    ----------
    parameters : list of tuples
        One ``(type, shape)`` tuple per parameter placeholder. Currently
        accepted types are `dense` and `dense_sparse`.
    return_values : list of tuples
        One ``(type, shape)`` tuple per return-value placeholder. Currently
        accepted types are `dense` and `dense_sparse`.

    """

    def deco(func):
        # Replacement strings for each ``{}`` placeholder, in order.
        fillers = []

        # Parameter datatypes first; [9:] strips the leading "{name} : "
        # prefix of the shared template.
        if parameters:
            fillers.extend(
                _parameters_docstrings[kind][9:].format(shape=shape)
                for kind, shape in parameters
            )

        # Then return-value datatypes.
        if return_values:
            fillers.extend(
                _return_values_docstrings[kind + "_datatype"].format(
                    shape=shape
                )
                for kind, shape in return_values
            )

        if fillers:
            func.__doc__ = str(inspect.getdoc(func)).format(*fillers)
            func.__doc__ += "\n\n"

        return func

    return deco
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Start from an empty list; the helper macro below appends each .pyx file
# that is enabled for the current build configuration.
set(cython_sources "")
add_module_gpu_default("agglomerative.pyx" ${agglomerative_algo} ${cluster_algo})
add_module_gpu_default("dbscan.pyx" ${dbscan_algo} ${cluster_algo})
add_module_gpu_default("kmeans.pyx" ${kmeans_algo} ${cluster_algo})

# Multi-GPU (MG) variants are built only when SINGLEGPU is off.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
       dbscan_mg.pyx
       kmeans_mg.pyx
  )
endif()

add_subdirectory(hdbscan)

# Compile the collected Cython sources into extension modules linked
# against the multi-GPU cuML libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX cluster_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/dbscan_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.cluster import DBSCAN
from cuml.common.doc_utils import generate_docstring
class DBSCANMG(DBSCAN):
    """
    A Multi-Node Multi-GPU implementation of DBSCAN

    NOTE: This implementation of DBSCAN is meant to be used with an
    initialized cumlCommunicator instance inside an existing distributed
    system. Refer to the Dask DBSCAN implementation in
    `cuml.dask.cluster.dbscan`.
    """
    def __init__(self, **kwargs):
        # No extra state: all configuration is handled by the single-GPU
        # DBSCAN base class.
        super().__init__(**kwargs)

    @generate_docstring(skip_parameters_heading=True)
    def fit(self, X, out_dtype="int32", sample_weight=None) -> "DBSCANMG":
        """
        Perform DBSCAN clustering in a multi-node multi-GPU setting.

        Parameters
        ----------
        out_dtype: dtype Determines the precision of the output labels array.
            default: "int32". Valid values are { "int32", np.int32,
            "int64", np.int64}.
        """
        # The third positional argument (True) selects the multi-GPU code
        # path in the base class's _fit — presumably an "opg" flag; confirm
        # against DBSCAN._fit's signature.
        return self._fit(X, out_dtype, True, sample_weight)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/agglomerative.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.common.doc_utils import generate_docstring
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.mixins import ClusterMixin
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.metrics.distance_type cimport DistanceType
cdef extern from "raft/sparse/hierarchy/common.h" namespace "raft::hierarchy":
    # C++ result container populated by the linkage solvers below.
    cdef cppclass linkage_output_int:
        int m                        # NOTE(review): presumably n_samples — confirm in RAFT header
        int n_clusters
        int n_leaves
        int n_connected_components
        int *labels                  # per-row flat cluster assignment (written by C++)
        int *children                # dendrogram merge pairs (written by C++)

cdef extern from "cuml/cluster/linkage.hpp" namespace "ML":
    # Single-linkage over the full pairwise distance graph; caller supplies
    # row-major float data of shape (m, n).
    cdef void single_linkage_pairwise(
        const handle_t &handle,
        const float *X,
        size_t m,
        size_t n,
        linkage_output_int *out,
        DistanceType metric,
        int n_clusters
    ) except +

    # Single-linkage over a knn-sparsified connectivity graph; ``c`` is the
    # neighbor count used to build the graph.
    cdef void single_linkage_neighbors(
        const handle_t &handle,
        const float *X,
        size_t m,
        size_t n,
        linkage_output_int *out,
        DistanceType metric,
        int c,
        int n_clusters
    ) except +
# Map user-facing affinity strings to RAFT DistanceType enum values.
# Several aliases intentionally resolve to the same metric.
_metrics_mapping = {
    'l1': DistanceType.L1,
    'cityblock': DistanceType.L1,
    'manhattan': DistanceType.L1,
    'l2': DistanceType.L2SqrtExpanded,
    'euclidean': DistanceType.L2SqrtExpanded,
    'cosine': DistanceType.CosineExpanded
}
class AgglomerativeClustering(Base, ClusterMixin, CMajorInputTagMixin):
    """
    Agglomerative Clustering

    Recursively merges the pair of clusters that minimally increases a
    given linkage distance.

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    n_clusters : int (default = 2)
        The number of clusters to find.
    affinity : str, default='euclidean'
        Metric used to compute the linkage. Can be "euclidean", "l1",
        "l2", "manhattan", or "cosine". If connectivity is "knn" only
        "euclidean" is accepted.
    linkage : {"single"}, default="single"
        Which linkage criterion to use. The linkage criterion determines
        which distance to use between sets of observations. The algorithm
        will merge the pairs of clusters that minimize this criterion.

        * 'single' uses the minimum of the distances between all
          observations of the two sets.
    n_neighbors : int (default = 15)
        The number of neighbors to compute when connectivity = "knn"
    connectivity : {"pairwise", "knn"}, (default = "knn")
        The type of connectivity matrix to compute.

        * 'pairwise' will compute the entire fully-connected graph of
          pairwise distances between each set of points. This is the
          fastest to compute and can be very fast for smaller datasets
          but requires O(n^2) space.
        * 'knn' will sparsify the fully-connected connectivity matrix to
          save memory and enable much larger inputs. "n_neighbors" will
          control the amount of memory used and the graph will be connected
          automatically in the event "n_neighbors" was not large enough
          to connect it.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    """

    labels_ = CumlArrayDescriptor()
    children_ = CumlArrayDescriptor()

    def __init__(self, *, n_clusters=2, affinity="euclidean", linkage="single",
                 handle=None, verbose=False, connectivity='knn',
                 n_neighbors=10, output_type=None):

        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        # BUGFIX: the original used ``linkage is not "single"``. Identity
        # comparison against a str literal depends on CPython interning and
        # emits SyntaxWarning on CPython >= 3.8; use equality instead.
        if linkage != "single":
            raise ValueError("Only single linkage clustering is "
                             "supported currently")

        if connectivity not in ["knn", "pairwise"]:
            raise ValueError("'connectivity' can only be one of "
                             "{'knn', 'pairwise'}")

        if n_clusters <= 0:
            raise ValueError("'n_clusters' must be >= 1")

        if n_neighbors > 1023 or n_neighbors < 2:
            raise ValueError("'n_neighbors' must be a positive number "
                             "between 2 and 1023")

        if affinity not in _metrics_mapping:
            raise ValueError("'affinity' %s is not supported." % affinity)

        self.n_clusters = n_clusters
        self.affinity = affinity
        self.linkage = linkage
        self.n_neighbors = n_neighbors
        self.connectivity = connectivity

        # Populated by fit().
        self.n_clusters_ = None
        self.n_leaves_ = None
        self.n_connected_components_ = None
        self.distances_ = None

    @generate_docstring(skip_parameters_heading=True)
    def fit(self, X, y=None, convert_dtype=True) -> "AgglomerativeClustering":
        """
        Fit the hierarchical clustering from features.
        """
        X_m, n_rows, n_cols, self.dtype = \
            input_to_cuml_array(X, order='C',
                                check_dtype=[np.float32],
                                convert_to_dtype=(np.float32
                                                  if convert_dtype
                                                  else None))

        if self.n_clusters > n_rows:
            raise ValueError("'n_clusters' must be <= n_samples")

        cdef uintptr_t input_ptr = X_m.ptr

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        # Hardcode n_components_ to 1 for single linkage. This will
        # not be the case for other linkage types.
        self.n_connected_components_ = 1
        self.n_leaves_ = n_rows
        self.n_clusters_ = self.n_clusters

        # Output buffers filled in by the C++ solver through raw pointers.
        self.labels_ = CumlArray.empty(n_rows, dtype="int32")
        self.children_ = CumlArray.empty((2, n_rows), dtype="int32")

        cdef uintptr_t labels_ptr = self.labels_.ptr
        cdef uintptr_t children_ptr = self.children_.ptr

        cdef linkage_output_int linkage_output
        linkage_output.children = <int*>children_ptr
        linkage_output.labels = <int*>labels_ptr

        cdef DistanceType metric
        if self.affinity in _metrics_mapping:
            metric = _metrics_mapping[self.affinity]
        else:
            raise ValueError("'affinity' %s not supported." % self.affinity)

        if self.connectivity == 'knn':
            single_linkage_neighbors(
                handle_[0], <float*>input_ptr, <int> n_rows,
                <int> n_cols, <linkage_output_int*> &linkage_output,
                <DistanceType> metric, <int>self.n_neighbors,
                <int> self.n_clusters)
        elif self.connectivity == 'pairwise':
            single_linkage_pairwise(
                handle_[0], <float*>input_ptr, <int> n_rows,
                <int> n_cols, <linkage_output_int*> &linkage_output,
                <DistanceType> metric, <int> self.n_clusters)
        else:
            # Unreachable for instances created via __init__, which already
            # validates connectivity; kept as a defensive check.
            raise ValueError("'connectivity' can only be one of "
                             "{'knn', 'pairwise'}")

        self.handle.sync()

        return self

    @generate_docstring(skip_parameters_heading=True,
                        return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Cluster indexes',
                                       'shape': '(n_samples, 1)'})
    def fit_predict(self, X, y=None) -> CumlArray:
        """
        Fit the hierarchical clustering from features and return
        cluster labels.
        """
        return self.fit(X).labels_

    def get_param_names(self):
        return super().get_param_names() + [
            "n_clusters",
            "affinity",
            "linkage",
            "connectivity",
            "n_neighbors"
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/kmeans_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t, int64_t
from libc.stdlib cimport free
from cuml.internals.array import CumlArray
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from cuml.cluster import KMeans
from cuml.cluster.kmeans_utils cimport *
cdef extern from "cuml/cluster/kmeans_mg.hpp" \
        namespace "ML::kmeans::opg" nogil:

    # Four C++ overloads of the multi-GPU fit: {float, double} data types
    # crossed with {int, int64_t} index types. Cython dispatches on the
    # argument types at the call site.
    cdef void fit(handle_t& handle,
                  KMeansParams& params,
                  const float *X,
                  int n_samples,
                  int n_features,
                  const float *sample_weight,
                  float *centroids,
                  float &inertia,
                  int &n_iter) except +

    cdef void fit(handle_t& handle,
                  KMeansParams& params,
                  const double *X,
                  int n_samples,
                  int n_features,
                  const double *sample_weight,
                  double *centroids,
                  double &inertia,
                  int &n_iter) except +

    cdef void fit(handle_t& handle,
                  KMeansParams& params,
                  const float *X,
                  int64_t n_samples,
                  int64_t n_features,
                  const float *sample_weight,
                  float *centroids,
                  float &inertia,
                  int64_t &n_iter) except +

    cdef void fit(handle_t& handle,
                  KMeansParams& params,
                  const double *X,
                  int64_t n_samples,
                  int64_t n_features,
                  const double *sample_weight,
                  double *centroids,
                  double &inertia,
                  int64_t &n_iter) except +
class KMeansMG(KMeans):
    """
    A Multi-Node Multi-GPU implementation of KMeans

    NOTE: This implementation of KMeans is meant to be used with an
    initialized cumlCommunicator instance inside an existing distributed
    system. Refer to the Dask KMeans implementation in
    `cuml.dask.cluster.kmeans`.
    """

    def __init__(self, **kwargs):
        super(KMeansMG, self).__init__(**kwargs)

    def fit(self, X, sample_weight=None) -> "KMeansMG":
        """
        Compute k-means clustering with X in a multi-node multi-GPU setting.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy
        sample_weight : array-like (device or host) shape = (n_samples,), default=None # noqa
            The weights for each observation in X. If None, all observations
            are assigned equal weight.
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy

        """
        X_m, self.n_rows, self.n_cols, self.dtype = \
            input_to_cuml_array(X, order='C')

        cdef uintptr_t input_ptr = X_m.ptr

        cdef int n_rows = self.n_rows
        cdef int n_cols = self.n_cols

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        # Equal weights by default, matching the single-GPU implementation.
        if sample_weight is None:
            sample_weight_m = CumlArray.ones(shape=n_rows, dtype=self.dtype)
        else:
            sample_weight_m, _, _, _ = \
                input_to_cuml_array(sample_weight, order='C',
                                    convert_to_dtype=self.dtype,
                                    check_rows=n_rows)

        cdef uintptr_t sample_weight_ptr = sample_weight_m.ptr

        # NOTE(review): cluster_centers_ is only allocated for these init
        # strategies; any other init value would leave it undefined below —
        # confirm callers cannot reach this path with a different init.
        if (self.init in ['scalable-k-means++', 'k-means||', 'random']):
            self.cluster_centers_ = CumlArray.zeros(shape=(self.n_clusters,
                                                           self.n_cols),
                                                    dtype=self.dtype,
                                                    order='C')

        cdef uintptr_t cluster_centers_ptr = self.cluster_centers_.ptr

        # Select the 64-bit C++ overload when the element count could
        # overflow a 32-bit signed index. The products are computed in
        # np.int64 to avoid overflowing during the check itself.
        # (BUGFIX: removed a leftover debug ``print`` of n_rows * n_cols.)
        int_dtype = np.int32 if np.int64(n_rows) * np.int64(n_cols) < 2**31-1 else np.int64

        cdef float inertiaf = 0
        cdef double inertiad = 0

        cdef KMeansParams* params = \
            <KMeansParams*><size_t>self._get_kmeans_params()

        cdef int n_iter = 0
        cdef int64_t n_iter64 = 0

        if self.dtype == np.float32:
            if int_dtype == np.int32:
                with nogil:
                    fit(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const float*> input_ptr,
                        <int> n_rows,
                        <int> n_cols,
                        <const float *>sample_weight_ptr,
                        <float*> cluster_centers_ptr,
                        inertiaf,
                        n_iter)
                self.n_iter_ = n_iter
            else:
                with nogil:
                    fit(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const float*> input_ptr,
                        <int64_t> n_rows,
                        <int64_t> n_cols,
                        <const float *>sample_weight_ptr,
                        <float*> cluster_centers_ptr,
                        inertiaf,
                        n_iter64)
                self.n_iter_ = n_iter64
            self.handle.sync()
            self.inertia_ = inertiaf
        elif self.dtype == np.float64:
            if int_dtype == np.int32:
                with nogil:
                    fit(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const double*> input_ptr,
                        <int> n_rows,
                        <int> n_cols,
                        <const double *>sample_weight_ptr,
                        <double*> cluster_centers_ptr,
                        inertiad,
                        n_iter)
                self.n_iter_ = n_iter
            else:
                with nogil:
                    fit(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const double*> input_ptr,
                        <int64_t> n_rows,
                        <int64_t> n_cols,
                        <const double *>sample_weight_ptr,
                        <double*> cluster_centers_ptr,
                        inertiad,
                        n_iter64)
                self.n_iter_ = n_iter64
            self.handle.sync()
            self.inertia_ = inertiad
        else:
            raise TypeError('KMeans supports only float32 and float64 input,'
                            'but input type ' + str(self.dtype) +
                            ' passed.')

        self.handle.sync()

        # Assign labels via the (multi-GPU aware) predict on the fitted
        # centroids.
        self.labels_, _, _, _ = input_to_cuml_array(self.predict(X,
                                                    sample_weight=sample_weight), order='C',
                                                    convert_to_dtype=self.dtype)

        del X_m
        # params was heap-allocated by _get_kmeans_params(); release it.
        free(params)

        return self
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/__init__.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.device_support import GPU_ENABLED
from cuml.cluster.dbscan import DBSCAN
from cuml.cluster.kmeans import KMeans
from cuml.cluster.hdbscan import HDBSCAN
# TODO: These need to be deprecated and moved to hdbscan namespace
from cuml.cluster.hdbscan.prediction import all_points_membership_vectors
from cuml.cluster.hdbscan.prediction import approximate_predict
if GPU_ENABLED:
from cuml.cluster.agglomerative import AgglomerativeClustering
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/dbscan.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.common.doc_utils import generate_docstring
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.mixins import ClusterMixin
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
    from libcpp cimport bool
    from libc.stdint cimport uintptr_t, int64_t
    from pylibraft.common.handle cimport handle_t
    from cuml.metrics.distance_type cimport DistanceType
    from cuml.common import input_to_cuml_array
    from cuml.common import using_output_type

    # C++ entry points for DBSCAN. Four overloads cover the cross product of
    # {float, double} input data and {int, int64_t} label/index types; the
    # int64_t variants are selected by the caller when 64-bit labels are
    # requested. All overloads share the same parameter layout:
    # input matrix, its dimensions, eps/min_pts hyperparameters, distance
    # metric, output label and core-sample-index buffers, optional sample
    # weights, batching limit, verbosity, and the single/multi-GPU flag.
    cdef extern from "cuml/cluster/dbscan.hpp" \
            namespace "ML::Dbscan":

        # float data, 32-bit labels
        cdef void fit(handle_t& handle,
                      float *input,
                      int n_rows,
                      int n_cols,
                      float eps,
                      int min_pts,
                      DistanceType metric,
                      int *labels,
                      int *core_sample_indices,
                      float* sample_weight,
                      size_t max_mbytes_per_batch,
                      int verbosity,
                      bool opg) except +

        # double data, 32-bit labels
        cdef void fit(handle_t& handle,
                      double *input,
                      int n_rows,
                      int n_cols,
                      double eps,
                      int min_pts,
                      DistanceType metric,
                      int *labels,
                      int *core_sample_indices,
                      double* sample_weight,
                      size_t max_mbytes_per_batch,
                      int verbosity,
                      bool opg) except +

        # float data, 64-bit labels (NOTE: eps is declared double here,
        # matching the C++ header; the float argument is implicitly widened)
        cdef void fit(handle_t& handle,
                      float *input,
                      int64_t n_rows,
                      int64_t n_cols,
                      double eps,
                      int min_pts,
                      DistanceType metric,
                      int64_t *labels,
                      int64_t *core_sample_indices,
                      float* sample_weight,
                      size_t max_mbytes_per_batch,
                      int verbosity,
                      bool opg) except +

        # double data, 64-bit labels
        cdef void fit(handle_t& handle,
                      double *input,
                      int64_t n_rows,
                      int64_t n_cols,
                      double eps,
                      int min_pts,
                      DistanceType metric,
                      int64_t *labels,
                      int64_t *core_sample_indices,
                      double* sample_weight,
                      size_t max_mbytes_per_batch,
                      int verbosity,
                      bool opg) except +
class DBSCAN(Base,
             ClusterMixin,
             CMajorInputTagMixin):
    """
    DBSCAN is a very powerful yet fast clustering technique that finds clusters
    where data is concentrated. This allows DBSCAN to generalize to many
    problems if the datapoints tend to congregate in larger groups.

    cuML's DBSCAN expects an array-like object or cuDF DataFrame, and
    constructs an adjacency graph to compute the distances between close
    neighbours.

    Examples
    --------

    .. code-block:: python

        >>> # Both import methods supported
        >>> from cuml import DBSCAN
        >>> from cuml.cluster import DBSCAN
        >>>
        >>> import cudf
        >>> import numpy as np
        >>>
        >>> gdf_float = cudf.DataFrame()
        >>> gdf_float['0'] = np.asarray([1.0,2.0,5.0], dtype = np.float32)
        >>> gdf_float['1'] = np.asarray([4.0,2.0,1.0], dtype = np.float32)
        >>> gdf_float['2'] = np.asarray([4.0,2.0,1.0], dtype = np.float32)
        >>>
        >>> dbscan_float = DBSCAN(eps = 1.0, min_samples = 1)
        >>> dbscan_float.fit(gdf_float)
        DBSCAN()
        >>> dbscan_float.labels_
        0    0
        1    1
        2    2
        dtype: int32

    Parameters
    ----------
    eps : float (default = 0.5)
        The maximum distance between 2 points such they reside in the same
        neighborhood.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    min_samples : int (default = 5)
        The number of samples in a neighborhood such that this group can be
        considered as an important core point (including the point itself).
    metric : {'euclidean', 'cosine', 'precomputed'}, default = 'euclidean'
        The metric to use when calculating distances between points.
        Metric names are matched case-insensitively.
        If metric is 'precomputed', X is assumed to be a distance matrix
        and must be square.
        The input will be modified temporarily when cosine distance is used
        and the restored input matrix might not match completely
        due to numerical rounding.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    max_mbytes_per_batch : (optional) int64
        Calculate batch size using no more than this number of megabytes for
        the pairwise distance computation. This enables the trade-off between
        runtime and memory usage for making the N^2 pairwise distance
        computations more tractable for large numbers of samples.
        If you are experiencing out of memory errors when running DBSCAN, you
        can set this value based on the memory size of your device.
        Note: this option does not set the maximum total memory used in the
        DBSCAN computation and so this value will not be able to be set to
        the total memory available on the device.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    calc_core_sample_indices : (optional) boolean (default = True)
        Indicates whether the indices of the core samples should be
        calculated. Setting this to False avoids unnecessary kernel launches,
        but the attribute `core_sample_indices_` will then not be populated.

    Attributes
    ----------
    labels_ : array-like or cuDF series
        Which cluster each datapoint belongs to. Noisy samples are labeled as
        -1. Format depends on cuml global output type and estimator
        output_type.
    core_sample_indices_ : array-like or cuDF series
        The indices of the core samples. Only calculated if
        calc_core_sample_indices==True

    Notes
    -----
    DBSCAN is very sensitive to the distance metric it is used with, and a
    large assumption is that datapoints need to be concentrated in groups for
    clusters to be constructed.

    **Applications of DBSCAN**

        DBSCAN's main benefit is that the number of clusters is not a
        hyperparameter, and that it can find non-linearly shaped clusters.
        This also allows DBSCAN to be robust to noise.
        DBSCAN has been applied to analyzing particle collisions in the
        Large Hadron Collider, customer segmentation in marketing analyses,
        and much more.

    For additional docs, see `scikitlearn's DBSCAN
    <http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_.
    """

    _cpu_estimator_import_path = 'sklearn.cluster.DBSCAN'

    labels_ = CumlArrayDescriptor()
    core_sample_indices_ = CumlArrayDescriptor()

    @device_interop_preparation
    def __init__(self, *,
                 eps=0.5,
                 handle=None,
                 min_samples=5,
                 metric='euclidean',
                 verbose=False,
                 max_mbytes_per_batch=None,
                 output_type=None,
                 calc_core_sample_indices=True):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self.eps = eps
        self.min_samples = min_samples
        self.max_mbytes_per_batch = max_mbytes_per_batch
        self.calc_core_sample_indices = calc_core_sample_indices
        self.metric = metric

        # internal array attributes
        self.labels_ = None

        # Only used when `self.calc_core_sample_indices == True`
        self.core_sample_indices_ = None

        # C++ API expects this to be numeric.
        if self.max_mbytes_per_batch is None:
            self.max_mbytes_per_batch = 0

    def _fit(self, X, out_dtype, opg, sample_weight) -> "DBSCAN":
        """
        Protected auxiliary function for `fit`. Takes an additional parameter
        opg that is set to `False` for SG, `True` for OPG (multi-GPU)
        """
        if out_dtype not in ["int32", np.int32, "int64", np.int64]:
            raise ValueError("Invalid value for out_dtype. "
                             "Valid values are {'int32', 'int64', "
                             "np.int32, np.int64}")

        IF GPUBUILD == 1:
            X_m, n_rows, n_cols, self.dtype = \
                input_to_cuml_array(X, order='C',
                                    check_dtype=[np.float32, np.float64])

            if n_rows == 0:
                raise ValueError("No rows in the input array. DBScan cannot be "
                                 "fitted!")

            cdef uintptr_t input_ptr = X_m.ptr

            cdef uintptr_t sample_weight_ptr = <uintptr_t> NULL
            if sample_weight is not None:
                sample_weight_m, _, _, _ = \
                    input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                        check_rows=n_rows, check_cols=1)
                sample_weight_ptr = sample_weight_m.ptr

            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

            self.labels_ = CumlArray.empty(n_rows, dtype=out_dtype,
                                           index=X_m.index)
            cdef uintptr_t labels_ptr = self.labels_.ptr

            cdef uintptr_t core_sample_indices_ptr = <uintptr_t> NULL

            # Map the user-facing metric name to the C++ DistanceType enum.
            # Keys are lower-case and the lookup key is lower-cased too, so
            # the match is case-insensitive. (Previously the membership test
            # used the raw name while the lookup used the lower-cased name
            # against a dict containing an upper-case "L2" key, so passing
            # metric="L2" raised a KeyError instead of working.)
            metric_parsing = {
                "l2": DistanceType.L2SqrtUnexpanded,
                "euclidean": DistanceType.L2SqrtUnexpanded,
                "cosine": DistanceType.CosineExpanded,
                "precomputed": DistanceType.Precomputed,
            }

            metric_name = self.metric.lower() \
                if isinstance(self.metric, str) else self.metric
            if metric_name in metric_parsing:
                metric = metric_parsing[metric_name]
            else:
                raise ValueError("Invalid value for metric: {}"
                                 .format(self.metric))

            # Create the output core_sample_indices only if needed
            if self.calc_core_sample_indices:
                self.core_sample_indices_ = \
                    CumlArray.empty(n_rows, dtype=out_dtype)
                core_sample_indices_ptr = self.core_sample_indices_.ptr

            # Dispatch to the C++ overload matching the input dtype and the
            # requested label dtype.
            if self.dtype == np.float32:
                if out_dtype == "int32" or out_dtype is np.int32:
                    fit(handle_[0],
                        <float*>input_ptr,
                        <int> n_rows,
                        <int> n_cols,
                        <float> self.eps,
                        <int> self.min_samples,
                        <DistanceType> metric,
                        <int*> labels_ptr,
                        <int*> core_sample_indices_ptr,
                        <float*> sample_weight_ptr,
                        <size_t> self.max_mbytes_per_batch,
                        <int> self.verbose,
                        <bool> opg)
                else:
                    fit(handle_[0],
                        <float*>input_ptr,
                        <int64_t> n_rows,
                        <int64_t> n_cols,
                        <float> self.eps,
                        <int> self.min_samples,
                        <DistanceType> metric,
                        <int64_t*> labels_ptr,
                        <int64_t*> core_sample_indices_ptr,
                        <float*> sample_weight_ptr,
                        <size_t> self.max_mbytes_per_batch,
                        <int> self.verbose,
                        <bool> opg)
            else:
                if out_dtype == "int32" or out_dtype is np.int32:
                    fit(handle_[0],
                        <double*>input_ptr,
                        <int> n_rows,
                        <int> n_cols,
                        <double> self.eps,
                        <int> self.min_samples,
                        <DistanceType> metric,
                        <int*> labels_ptr,
                        <int*> core_sample_indices_ptr,
                        <double*> sample_weight_ptr,
                        <size_t> self.max_mbytes_per_batch,
                        <int> self.verbose,
                        <bool> opg)
                else:
                    fit(handle_[0],
                        <double*>input_ptr,
                        <int64_t> n_rows,
                        <int64_t> n_cols,
                        <double> self.eps,
                        <int> self.min_samples,
                        <DistanceType> metric,
                        <int64_t*> labels_ptr,
                        <int64_t*> core_sample_indices_ptr,
                        <double*> sample_weight_ptr,
                        <size_t> self.max_mbytes_per_batch,
                        <int> self.verbose,
                        <bool> opg)

            # make sure that the `fit` is complete before the following
            # delete call happens
            self.handle.sync()
            del X_m

            # Finally, resize the core_sample_indices array if necessary
            if self.calc_core_sample_indices:

                # Temp convert to cupy array (better than using `cupy.asarray`)
                with using_output_type("cupy"):

                    # First get the min index. These have to be monotonically
                    # increasing, so the min index should be the first -1
                    # returned by the C++ layer (unused trailing slots).
                    min_index = cp.argmin(self.core_sample_indices_).item()

                    # Check for the case where there are no -1's
                    if ((min_index == 0 and
                         self.core_sample_indices_[min_index].item() != -1)):
                        # Nothing to delete. The array has no -1's
                        pass
                    else:
                        self.core_sample_indices_ = \
                            self.core_sample_indices_[:min_index]

        return self

    @generate_docstring(skip_parameters_heading=True)
    @enable_device_interop
    def fit(self, X, out_dtype="int32", sample_weight=None) -> "DBSCAN":
        """
        Perform DBSCAN clustering from features.

        Parameters
        ----------
        out_dtype: dtype Determines the precision of the output labels array.
            default: "int32". Valid values are { "int32", np.int32,
            "int64", np.int64}.
        sample_weight: array-like of shape (n_samples,), default=None
            Weight of each sample, such that a sample with a weight of at
            least min_samples is by itself a core sample; a sample with a
            negative weight may inhibit its eps-neighbor from being core.
            default: None (which is equivalent to weight 1 for all samples).
        """
        return self._fit(X, out_dtype, False, sample_weight)

    @generate_docstring(skip_parameters_heading=True,
                        return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Cluster labels',
                                       'shape': '(n_samples, 1)'})
    @enable_device_interop
    def fit_predict(self, X, out_dtype="int32", sample_weight=None) -> CumlArray:
        """
        Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        out_dtype: dtype Determines the precision of the output labels array.
            default: "int32". Valid values are { "int32", np.int32,
            "int64", np.int64}.
        sample_weight: array-like of shape (n_samples,), default=None
            Weight of each sample, such that a sample with a weight of at
            least min_samples is by itself a core sample; a sample with a
            negative weight may inhibit its eps-neighbor from being core.
            default: None (which is equivalent to weight 1 for all samples).
        """
        self.fit(X, out_dtype, sample_weight)
        return self.labels_

    def get_param_names(self):
        # Hyperparameters exposed for get_params/set_params/clone.
        return super().get_param_names() + [
            "eps",
            "min_samples",
            "max_mbytes_per_batch",
            "calc_core_sample_indices",
            "metric",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/kmeans_utils.pxd | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
from libcpp cimport bool
from cuml.metrics.distance_type cimport DistanceType
from cuml.common.rng_state cimport RngState
cdef extern from "cuml/cluster/kmeans.hpp" namespace \
        "ML::kmeans::KMeansParams":
    # Initialization strategy, mirroring the C++ enum:
    # KMeansPlusPlus = scalable k-means|| init, Random = random rows,
    # Array = user-supplied initial centroids.
    enum InitMethod:
        KMeansPlusPlus, Random, Array

    # Mirror of the C++ ML::kmeans::KMeansParams hyperparameter struct.
    # NOTE(review): field names, types, and order must stay in sync with
    # cuml/cluster/kmeans.hpp — verify against the header when upgrading.
    cdef struct KMeansParams:
        int n_clusters,
        InitMethod init
        int max_iter,
        double tol,
        int verbosity,
        RngState rng_state,
        DistanceType metric,
        int n_init,
        double oversampling_factor,
        int batch_samples,
        int batch_centroids,
        bool inertia_check
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/cluster/kmeans.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
import typing
IF GPUBUILD == 1:
from cython.operator cimport dereference as deref
from libcpp cimport bool
from libc.stdint cimport uintptr_t, int64_t
from libc.stdlib cimport calloc, free
from pylibraft.common.handle cimport handle_t
from cuml.cluster.cpp.kmeans cimport fit_predict as cpp_fit_predict
from cuml.cluster.cpp.kmeans cimport predict as cpp_predict
from cuml.cluster.cpp.kmeans cimport transform as cpp_transform
from cuml.cluster.cpp.kmeans cimport KMeansParams
from cuml.metrics.distance_type cimport DistanceType
from cuml.cluster.kmeans_utils cimport *
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import ClusterMixin
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.common import input_to_cuml_array
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
class KMeans(Base,
ClusterMixin,
CMajorInputTagMixin):
"""
KMeans is a basic but powerful clustering method which is optimized via
Expectation Maximization. It randomly selects K data points in X, and
computes which samples are close to these points.
For every cluster of points, a mean is computed (hence the name), and this
becomes the new centroid.
cuML's KMeans expects an array-like object or cuDF DataFrame, and supports
the scalable KMeans++ initialization method. This method is more stable
than randomly selecting K points.
Examples
--------
.. code-block:: python
>>> # Both import methods supported
>>> from cuml import KMeans
>>> from cuml.cluster import KMeans
>>> import cudf
>>> import numpy as np
>>> import pandas as pd
>>>
>>> a = np.asarray([[1.0, 1.0], [1.0, 2.0], [3.0, 2.0], [4.0, 3.0]],
... dtype=np.float32)
>>> b = cudf.DataFrame(a)
>>> # Input:
>>> b
0 1
0 1.0 1.0
1 1.0 2.0
2 3.0 2.0
3 4.0 3.0
>>>
>>> # Calling fit
>>> kmeans_float = KMeans(n_clusters=2)
>>> kmeans_float.fit(b)
KMeans()
>>>
>>> # Labels:
>>> kmeans_float.labels_
0 0
1 0
2 1
3 1
dtype: int32
>>> # cluster_centers:
>>> kmeans_float.cluster_centers_
0 1
0 1.0 1.5
1 3.5 2.5
Parameters
----------
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
n_clusters : int (default = 8)
The number of centroids or clusters you want.
max_iter : int (default = 300)
The more iterations of EM, the more accurate, but slower.
tol : float64 (default = 1e-4)
Stopping criterion when centroid means do not change much.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
random_state : int (default = 1)
If you want results to be the same when you restart Python, select a
state.
init : {'scalable-kmeans++', 'k-means||', 'random'} or an \
ndarray (default = 'scalable-k-means++')
- ``'scalable-k-means++'`` or ``'k-means||'``: Uses fast and stable
scalable kmeans++ initialization.
- ``'random'``: Choose `n_cluster` observations (rows) at random
from data for the initial centroids.
- If an ndarray is passed, it should be of
shape (`n_clusters`, `n_features`) and gives the initial centers.
n_init: int (default = 1)
Number of instances the k-means algorithm will be called with
different seeds. The final results will be from the instance
that produces lowest inertia out of n_init instances.
oversampling_factor : float64 (default = 2.0)
The amount of points to sample
in scalable k-means++ initialization for potential centroids.
Increasing this value can lead to better initial centroids at the
cost of memory. The total number of centroids sampled in scalable
k-means++ is oversampling_factor * n_clusters * 8.
max_samples_per_batch : int (default = 32768)
The number of data samples to use for batches of the pairwise distance
computation. This computation is done throughout both fit predict. The
default should suit most cases. The total number of elements in the
batched pairwise distance computation is :py:`max_samples_per_batch *
n_clusters`. It might become necessary to lower this number when
`n_clusters` becomes prohibitively large.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
cluster_centers_ : array
The coordinates of the final clusters. This represents of "mean" of
each data cluster.
labels_ : array
Which cluster each datapoint belongs to.
Notes
-----
KMeans requires `n_clusters` to be specified. This means one needs to
approximately guess or know how many clusters a dataset has. If one is not
sure, one can start with a small number of clusters, and visualize the
resulting clusters with PCA, UMAP or T-SNE, and verify that they look
appropriate.
**Applications of KMeans**
The biggest advantage of KMeans is its speed and simplicity. That is
why KMeans is many practitioner's first choice of a clustering
algorithm. KMeans has been extensively used when the number of clusters
is approximately known, such as in big data clustering tasks,
image segmentation and medical clustering.
For additional docs, see `scikitlearn's Kmeans
<http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
"""
_cpu_estimator_import_path = 'sklearn.cluster.KMeans'
labels_ = CumlArrayDescriptor()
cluster_centers_ = CumlArrayDescriptor()
    def _get_kmeans_params(self):
        """
        Build a heap-allocated C++ KMeansParams struct from this estimator's
        Python-level hyperparameters and return it as a size_t-encoded
        pointer. The caller owns the allocation and must free() it.
        Returns None in CPU-only builds.
        """
        IF GPUBUILD == 1:
            # calloc zero-initializes every field not explicitly set below.
            cdef KMeansParams* params = \
                <KMeansParams*>calloc(1, sizeof(KMeansParams))
            params.n_clusters = <int>self.n_clusters
            params.init = self._params_init
            params.max_iter = <int>self.max_iter
            params.tol = <double>self.tol
            params.verbosity = <int>self.verbose
            params.rng_state.seed = self.random_state
            params.metric = DistanceType.L2Expanded   # distance metric as squared L2: @todo - support other metrics # noqa: E501
            params.batch_samples = <int>self.max_samples_per_batch
            params.oversampling_factor = <double>self.oversampling_factor
            params.n_init = <int>self.n_init
            # Returned as size_t because a raw pointer cannot cross the
            # Python-level call boundary.
            return <size_t>params
        ELSE:
            # No C++ struct exists in CPU-only builds.
            return None
    @device_interop_preparation
    def __init__(self, *, handle=None, n_clusters=8, max_iter=300, tol=1e-4,
                 verbose=False, random_state=1,
                 init='scalable-k-means++', n_init=1, oversampling_factor=2.0,
                 max_samples_per_batch=1<<15, output_type=None):
        """
        Initialize the KMeans estimator; see the class docstring for the
        meaning of each hyperparameter. `init` may be a strategy name or an
        ndarray of initial centroids of shape (n_clusters, n_features).
        """
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self.n_clusters = n_clusters
        self.random_state = random_state
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.inertia_ = 0
        self.n_iter_ = 0
        self.oversampling_factor=oversampling_factor
        self.max_samples_per_batch=int(max_samples_per_batch)

        # internal array attributes
        self.labels_ = None
        self.cluster_centers_ = None

        # cuPy does not allow comparing with string. See issue #2372
        init_str = init if isinstance(init, str) else None

        # K-means++ is the constrained case of k-means||
        # w/ oversampling factor = 0
        if (init_str == 'k-means++'):
            init_str = 'k-means||'
            self.oversampling_factor = 0

        if (init_str in ['scalable-k-means++', 'k-means||']):
            self.init = init_str
            IF GPUBUILD == 1:
                self._params_init = KMeansPlusPlus
        elif (init_str == 'random'):
            self.init = init
            IF GPUBUILD == 1:
                self._params_init = Random
        else:
            # Any non-string `init` is treated as an array of user-supplied
            # initial centroids ('preset'); its dtype/column count constrain
            # later calls to fit().
            self.init = 'preset'
            IF GPUBUILD == 1:
                self._params_init = Array
            self.cluster_centers_, _n_rows, self.n_cols, self.dtype = \
                input_to_cuml_array(init, order='C',
                                    check_dtype=[np.float32, np.float64])
    @generate_docstring()
    @enable_device_interop
    def fit(self, X, sample_weight=None) -> "KMeans":
        """
        Compute k-means clustering with X.
        """
        # If the user supplied initial centroids ('preset'), X must match
        # their dtype and column count; otherwise accept any float32/64.
        if self.init == 'preset':
            check_cols = self.n_cols
            check_dtype = self.dtype
        else:
            check_cols = False
            check_dtype = [np.float32, np.float64]

        _X_m, _n_rows, self.n_cols, self.dtype = \
            input_to_cuml_array(X, order='C',
                                check_cols=check_cols,
                                check_dtype=check_dtype)

        IF GPUBUILD == 1:
            cdef uintptr_t input_ptr = _X_m.ptr

            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

            # Default to uniform weights when none are given.
            if sample_weight is None:
                sample_weight_m = CumlArray.ones(shape=_n_rows, dtype=self.dtype)
            else:
                sample_weight_m, _, _, _ = \
                    input_to_cuml_array(sample_weight, order='C',
                                        convert_to_dtype=self.dtype,
                                        check_rows=_n_rows)

            cdef uintptr_t sample_weight_ptr = sample_weight_m.ptr

            # Use 32-bit labels/indices when the total element count fits in
            # int32; otherwise dispatch to the 64-bit C++ overloads.
            int_dtype = np.int32 if np.int64(_n_rows) * np.int64(self.n_cols) < 2**31-1 else np.int64

            self.labels_ = CumlArray.zeros(shape=_n_rows, dtype=int_dtype)
            cdef uintptr_t labels_ptr = self.labels_.ptr

            # Centroids are computed by fit unless they were user-supplied.
            if (self.init in ['scalable-k-means++', 'k-means||', 'random']):
                self.cluster_centers_ = \
                    CumlArray.zeros(shape=(self.n_clusters, self.n_cols),
                                    dtype=self.dtype, order='C')

            cdef uintptr_t cluster_centers_ptr = self.cluster_centers_.ptr

            # Outputs written by the C++ layer (per-dtype variants).
            cdef float inertiaf = 0
            cdef double inertiad = 0

            cdef KMeansParams* params = \
                <KMeansParams*><size_t>self._get_kmeans_params()

            cdef int n_iter_int = 0
            cdef int64_t n_iter_int64 = 0

            # Dispatch over (data dtype) x (index width).
            if self.dtype == np.float32:
                if int_dtype == np.int32:
                    cpp_fit_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const float*> input_ptr,
                        <int> _n_rows,
                        <int> self.n_cols,
                        <const float *>sample_weight_ptr,
                        <float*> cluster_centers_ptr,
                        <int*> labels_ptr,
                        inertiaf,
                        n_iter_int)
                    self.n_iter_ = n_iter_int
                else:
                    cpp_fit_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const float*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <const float *>sample_weight_ptr,
                        <float*> cluster_centers_ptr,
                        <int64_t*> labels_ptr,
                        inertiaf,
                        n_iter_int64)
                    self.n_iter_ = n_iter_int64
                self.handle.sync()
                self.inertia_ = inertiaf
            elif self.dtype == np.float64:
                if int_dtype == np.int32:
                    cpp_fit_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const double*> input_ptr,
                        <int> _n_rows,
                        <int> self.n_cols,
                        <const double *>sample_weight_ptr,
                        <double*> cluster_centers_ptr,
                        <int*> labels_ptr,
                        inertiad,
                        n_iter_int)
                    self.n_iter_ = n_iter_int
                else:
                    cpp_fit_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <const double*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <const double *>sample_weight_ptr,
                        <double*> cluster_centers_ptr,
                        <int64_t*> labels_ptr,
                        inertiad,
                        n_iter_int64)
                    self.n_iter_ = n_iter_int64
                self.handle.sync()
                self.inertia_ = inertiad
            else:
                raise TypeError('KMeans supports only float32 and float64 input,'
                                'but input type ' + str(self.dtype) +
                                ' passed.')

            # Sync before releasing the input/weight device buffers and the
            # heap-allocated params struct.
            self.handle.sync()
            del _X_m
            del sample_weight_m
            free(params)

        return self
@generate_docstring(return_values={'name': 'preds',
'type': 'dense',
'description': 'Cluster indexes',
'shape': '(n_samples, 1)'})
@enable_device_interop
def fit_predict(self, X, sample_weight=None) -> CumlArray:
"""
Compute cluster centers and predict cluster index for each sample.
"""
return self.fit(X, sample_weight=sample_weight).labels_
    def _predict_labels_inertia(self, X, convert_dtype=False,
                                sample_weight=None,
                                normalize_weights=True
                                ) -> typing.Tuple[CumlArray, float]:
        """
        Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy

        convert_dtype : bool, optional (default = False)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.

        sample_weight : array-like (device or host) shape = (n_samples,), default=None # noqa
            The weights for each observation in X. If None, all observations
            are assigned equal weight.

        normalize_weights : bool, optional (default = True)
            Whether the C++ layer should normalize the sample weights.

        Returns
        -------
        labels : array
            Which cluster each datapoint belongs to.

        inertia : float/double
            Sum of squared distances of samples to their closest cluster center.
        """
        _X_m, _n_rows, _n_cols, _ = \
            input_to_cuml_array(X, order='C', check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)

        IF GPUBUILD == 1:
            cdef uintptr_t input_ptr = _X_m.ptr

            # Default to uniform weights when none are given.
            if sample_weight is None:
                sample_weight_m = CumlArray.ones(shape=_n_rows, dtype=self.dtype)
            else:
                sample_weight_m, _, _, _ = \
                    input_to_cuml_array(sample_weight, order='C',
                                        convert_to_dtype=self.dtype,
                                        check_rows=_n_rows)

            cdef uintptr_t sample_weight_ptr = sample_weight_m.ptr

            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            cdef uintptr_t cluster_centers_ptr = self.cluster_centers_.ptr

            # 32-bit labels when the element count fits in int32, else the
            # 64-bit C++ overloads are used (mirrors fit()).
            int_dtype = np.int32 if np.int64(_n_rows) * np.int64(_n_cols) < 2**31-1 else np.int64

            labels_ = CumlArray.zeros(shape=_n_rows, dtype=int_dtype,
                                      index=_X_m.index)

            cdef uintptr_t labels_ptr = labels_.ptr

            # Sum of squared distances of samples to their closest cluster center.
            cdef float inertiaf = 0
            cdef double inertiad = 0

            cdef KMeansParams* params = \
                <KMeansParams*><size_t>self._get_kmeans_params()

            # Dispatch over (data dtype) x (index width).
            if self.dtype == np.float32:
                if int_dtype == np.int32:
                    cpp_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <float*> cluster_centers_ptr,
                        <float*> input_ptr,
                        <size_t> _n_rows,
                        <size_t> self.n_cols,
                        <float *>sample_weight_ptr,
                        <bool> normalize_weights,
                        <int*> labels_ptr,
                        inertiaf)
                else:
                    cpp_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <float*> cluster_centers_ptr,
                        <float*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <float *>sample_weight_ptr,
                        <bool> normalize_weights,
                        <int64_t*> labels_ptr,
                        inertiaf)
                self.handle.sync()
                inertia = inertiaf
            elif self.dtype == np.float64:
                if int_dtype == np.int32:
                    cpp_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <double*> cluster_centers_ptr,
                        <double*> input_ptr,
                        <size_t> _n_rows,
                        <size_t> self.n_cols,
                        <double *>sample_weight_ptr,
                        <bool> normalize_weights,
                        <int*> labels_ptr,
                        inertiad)
                else:
                    cpp_predict(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <double*> cluster_centers_ptr,
                        <double*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <double *>sample_weight_ptr,
                        <bool> normalize_weights,
                        <int64_t*> labels_ptr,
                        inertiad)
                self.handle.sync()
                inertia = inertiad
            else:
                raise TypeError('KMeans supports only float32 and float64 input,'
                                'but input type ' + str(self.dtype) +
                                ' passed.')

            # Sync before releasing device buffers and the params struct.
            self.handle.sync()
            del _X_m
            del sample_weight_m
            free(params)
            return labels_, inertia
@generate_docstring(return_values={'name': 'preds',
'type': 'dense',
'description': 'Cluster indexes',
'shape': '(n_samples, 1)'})
@enable_device_interop
def predict(self, X, convert_dtype=False, sample_weight=None,
normalize_weights=True) -> CumlArray:
"""
Predict the closest cluster each sample in X belongs to.
"""
labels, _ = self._predict_labels_inertia(
X,
convert_dtype=convert_dtype,
sample_weight=sample_weight,
normalize_weights=normalize_weights)
return labels
    @generate_docstring(return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Transformed data',
                                       'shape': '(n_samples, n_clusters)'})
    @enable_device_interop
    def transform(self, X, convert_dtype=False) -> CumlArray:
        """
        Transform X to a cluster-distance space.

        Entry (i, j) of the returned matrix is the distance from sample i to
        cluster center j.
        """
        _X_m, _n_rows, _n_cols, _dtype = \
            input_to_cuml_array(X, order='C', check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)

        IF GPUBUILD == 1:
            cdef uintptr_t input_ptr = _X_m.ptr

            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            cdef uintptr_t cluster_centers_ptr = self.cluster_centers_.ptr

            preds = CumlArray.zeros(shape=(_n_rows, self.n_clusters),
                                    dtype=self.dtype,
                                    order='C')

            cdef uintptr_t preds_ptr = preds.ptr

            # distance metric as L2-norm/euclidean distance: @todo - support other metrics # noqa: E501
            cdef KMeansParams* params = \
                <KMeansParams*><size_t>self._get_kmeans_params()
            params.metric = DistanceType.L2SqrtExpanded

            # Reuse the index width chosen during fit() (labels_ dtype).
            int_dtype = np.int32 if self.labels_.dtype == np.int32 else np.int64

            # Dispatch over (data dtype) x (index width).
            if self.dtype == np.float32:
                if int_dtype == np.int32:
                    cpp_transform(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <float*> cluster_centers_ptr,
                        <float*> input_ptr,
                        <int> _n_rows,
                        <int> self.n_cols,
                        <float*> preds_ptr)
                else:
                    cpp_transform(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <float*> cluster_centers_ptr,
                        <float*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <float*> preds_ptr)
            elif self.dtype == np.float64:
                if int_dtype == np.int32:
                    cpp_transform(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <double*> cluster_centers_ptr,
                        <double*> input_ptr,
                        <int> _n_rows,
                        <int> self.n_cols,
                        <double*> preds_ptr)
                else:
                    cpp_transform(
                        handle_[0],
                        <KMeansParams> deref(params),
                        <double*> cluster_centers_ptr,
                        <double*> input_ptr,
                        <int64_t> _n_rows,
                        <int64_t> self.n_cols,
                        <double*> preds_ptr)
            else:
                raise TypeError('KMeans supports only float32 and float64 input,'
                                'but input type ' + str(self.dtype) +
                                ' passed.')

            # Sync before releasing the input buffer and the params struct.
            self.handle.sync()
            del _X_m
            free(params)
            return preds
@generate_docstring(return_values={'name': 'score',
'type': 'float',
'description': 'Opposite of the value \
of X on the K-means \
objective.'})
@enable_device_interop
def score(self, X, y=None, sample_weight=None, convert_dtype=True):
"""
Opposite of the value of X on the K-means objective.
"""
return -1 * self._predict_labels_inertia(
X, convert_dtype=convert_dtype,
sample_weight=sample_weight)[1]
@generate_docstring(return_values={'name': 'X_new',
'type': 'dense',
'description': 'Transformed data',
'shape': '(n_samples, n_clusters)'})
@enable_device_interop
def fit_transform(self, X, convert_dtype=False,
sample_weight=None) -> CumlArray:
"""
Compute clustering and transform X to cluster-distance space.
"""
self.fit(X, sample_weight=sample_weight)
return self.transform(X, convert_dtype=convert_dtype)
def get_param_names(self):
return super().get_param_names() + \
['n_init', 'oversampling_factor', 'max_samples_per_batch',
'init', 'max_iter', 'n_clusters', 'random_state',
'tol']
| 0 |
rapidsai_public_repos/cuml/python/cuml/cluster | rapidsai_public_repos/cuml/python/cuml/cluster/cpp/kmeans.pxd | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t, int64_t
from cuml.metrics.distance_type cimport DistanceType
from pylibraft.common.handle cimport handle_t
import ctypes
from libcpp cimport bool
from cuml.metrics.distance_type cimport DistanceType
from cuml.common.rng_state cimport RngState
# C++ KMeans parameter struct and its centroid-initialization enum, as
# declared in cuml/cluster/kmeans.hpp.
cdef extern from "cuml/cluster/kmeans.hpp" namespace \
        "ML::kmeans::KMeansParams":
    enum InitMethod:
        KMeansPlusPlus, Random, Array

    cdef struct KMeansParams:
        int n_clusters,                # number of centroids to fit
        InitMethod init                # how initial centroids are chosen
        int max_iter,                  # EM iteration cap
        double tol,                    # convergence tolerance
        int verbosity,
        RngState rng_state,            # RNG used for centroid seeding
        DistanceType metric,
        int n_init,                    # number of independent runs
        double oversampling_factor,    # kmeans|| oversampling factor
        int batch_samples,
        int batch_centroids,
        bool inertia_check
# C++ entry points for KMeans. Each of fit_predict/predict/transform is
# overloaded on value type (float/double) and on index type (int/int64_t);
# Cython resolves the overload from the cast applied at the call site.
cdef extern from "cuml/cluster/kmeans.hpp" namespace "ML::kmeans":

    # ---- 32-bit index overloads -------------------------------------

    cdef void fit_predict(handle_t& handle,
                          KMeansParams& params,
                          const float *X,
                          int n_samples,
                          int n_features,
                          const float *sample_weight,
                          float *centroids,
                          int *labels,
                          float &inertia,
                          int &n_iter) except +

    cdef void fit_predict(handle_t& handle,
                          KMeansParams& params,
                          const double *X,
                          int n_samples,
                          int n_features,
                          const double *sample_weight,
                          double *centroids,
                          int *labels,
                          double &inertia,
                          int &n_iter) except +

    cdef void predict(handle_t& handle,
                      KMeansParams& params,
                      const float *centroids,
                      const float *X,
                      int n_samples,
                      int n_features,
                      const float *sample_weight,
                      bool normalize_weights,
                      int *labels,
                      float &inertia) except +

    cdef void predict(handle_t& handle,
                      KMeansParams& params,
                      double *centroids,
                      const double *X,
                      int n_samples,
                      int n_features,
                      const double *sample_weight,
                      bool normalize_weights,
                      int *labels,
                      double &inertia) except +

    cdef void transform(handle_t& handle,
                        KMeansParams& params,
                        const float *centroids,
                        const float *X,
                        int n_samples,
                        int n_features,
                        float *X_new) except +

    cdef void transform(handle_t& handle,
                        KMeansParams& params,
                        const double *centroids,
                        const double *X,
                        int n_samples,
                        int n_features,
                        double *X_new) except +

    # ---- 64-bit index overloads -------------------------------------

    cdef void fit_predict(handle_t& handle,
                          KMeansParams& params,
                          const float *X,
                          int64_t n_samples,
                          int64_t n_features,
                          const float *sample_weight,
                          float *centroids,
                          int64_t *labels,
                          float &inertia,
                          int64_t &n_iter) except +

    cdef void fit_predict(handle_t& handle,
                          KMeansParams& params,
                          const double *X,
                          int64_t n_samples,
                          int64_t n_features,
                          const double *sample_weight,
                          double *centroids,
                          int64_t *labels,
                          double &inertia,
                          int64_t &n_iter) except +

    cdef void predict(handle_t& handle,
                      KMeansParams& params,
                      const float *centroids,
                      const float *X,
                      int64_t n_samples,
                      int64_t n_features,
                      const float *sample_weight,
                      bool normalize_weights,
                      int64_t *labels,
                      float &inertia) except +

    cdef void predict(handle_t& handle,
                      KMeansParams& params,
                      double *centroids,
                      const double *X,
                      int64_t n_samples,
                      int64_t n_features,
                      const double *sample_weight,
                      bool normalize_weights,
                      int64_t *labels,
                      double &inertia) except +

    cdef void transform(handle_t& handle,
                        KMeansParams& params,
                        const float *centroids,
                        const float *X,
                        int64_t n_samples,
                        int64_t n_features,
                        float *X_new) except +

    cdef void transform(handle_t& handle,
                        KMeansParams& params,
                        const double *centroids,
                        const double *X,
                        int64_t n_samples,
                        int64_t n_features,
                        double *X_new) except +
| 0 |
rapidsai_public_repos/cuml/python/cuml/cluster | rapidsai_public_repos/cuml/python/cuml/cluster/hdbscan/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Collect the Cython sources for this package; add_module_gpu_default
# appends each file to `cython_sources` (conditionally on the GPU/algo
# configuration flags passed through).
set(cython_sources "")
add_module_gpu_default("hdbscan.pyx" ${hdbscan_algo} ${cluster_algo})
add_module_gpu_default("prediction.pyx" ${hdbscan_algo} ${cluster_algo})

# Build each .pyx as its own C++ extension module, linked against the
# single-GPU cuml libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX cluster_hdbscan_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/cluster | rapidsai_public_repos/cuml/python/cuml/cluster/hdbscan/hdbscan.pyx | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from warnings import warn
from cuml.internals.array import CumlArray
from cuml.internals.base import UniversalBase
from cuml.common.doc_utils import generate_docstring
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
from cuml.internals.mixins import ClusterMixin
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.internals.import_utils import has_hdbscan
import cuml
IF GPUBUILD == 1:
from libcpp cimport bool
from libc.stdlib cimport free
from cython.operator cimport dereference as deref
from cuml.metrics.distance_type cimport DistanceType
from rmm._lib.device_uvector cimport device_uvector
from pylibraft.common.handle import Handle
from pylibraft.common.handle cimport handle_t
    # ---- C++ declarations for the GPU HDBSCAN implementation ----------

    cdef extern from "cuml/cluster/hdbscan.hpp" namespace "ML::HDBSCAN::Common":

        # How flat clusters are extracted from the condensed tree.
        ctypedef enum CLUSTER_SELECTION_METHOD:
            EOM "ML::HDBSCAN::Common::CLUSTER_SELECTION_METHOD::EOM"
            LEAF "ML::HDBSCAN::Common::CLUSTER_SELECTION_METHOD::LEAF"

        # Device-side condensed dendrogram; exposes raw pointers to its
        # per-edge columns (parents/children/lambdas/sizes).
        cdef cppclass CondensedHierarchy[value_idx, value_t]:
            CondensedHierarchy(
                const handle_t &handle, size_t n_leaves)

            CondensedHierarchy(const handle_t& handle_,
                               size_t n_leaves_,
                               int _n_edges_,
                               value_idx* parents_,
                               value_idx* children_,
                               value_t* lambdas_,
                               value_idx* sizes_)
            value_idx *get_parents()
            value_idx *get_children()
            value_t *get_lambdas()
            value_idx *get_sizes()
            value_idx get_n_edges()

        # Aggregates all fit() outputs; constructed over caller-provided
        # device buffers (labels, probabilities, tree columns, MST).
        cdef cppclass hdbscan_output[int, float]:
            hdbscan_output(const handle_t &handle,
                           int n_leaves,
                           int *labels,
                           float *probabilities,
                           int *children,
                           int *sizes,
                           float *deltas,
                           int *mst_src,
                           int *mst_dst,
                           float *mst_weights)
            int get_n_leaves()
            int get_n_clusters()
            float *get_stabilities()
            int *get_labels()
            int *get_inverse_label_map()
            float *get_core_dists()
            CondensedHierarchy[int, float] &get_condensed_tree()

        # Mirrors the Python-level hyperparameters passed to fit().
        cdef cppclass HDBSCANParams:
            int min_samples
            int min_cluster_size
            int max_cluster_size,

            float cluster_selection_epsilon,

            bool allow_single_cluster,
            CLUSTER_SELECTION_METHOD cluster_selection_method,

        # Cached state required by approximate_predict and friends.
        cdef cppclass PredictionData[int, float]:
            PredictionData(const handle_t &handle,
                           int m,
                           int n,
                           float *core_dists)

            size_t n_rows
            size_t n_cols

        void generate_prediction_data(const handle_t& handle,
                                      CondensedHierarchy[int, float]&
                                      condensed_tree,
                                      int* labels,
                                      int* inverse_label_map,
                                      int n_selected_clusters,
                                      PredictionData[int, float]& prediction_data)

    cdef extern from "cuml/cluster/hdbscan.hpp" namespace "ML":

        # End-to-end HDBSCAN fit on device data.
        void hdbscan(const handle_t & handle,
                     const float * X,
                     size_t m, size_t n,
                     DistanceType metric,
                     HDBSCANParams & params,
                     hdbscan_output & output,
                     float * core_dists)

        # Condense a raw single-linkage dendrogram in place into
        # `condensed_tree`.
        void build_condensed_hierarchy(
          const handle_t &handle,
          const int *children,
          const float *delta,
          const int *sizes,
          int min_cluster_size,
          int n_leaves,
          CondensedHierarchy[int, float] &condensed_tree)

        # Extract flat cluster labels/probabilities from an existing
        # condensed tree.
        void _extract_clusters(const handle_t &handle, size_t n_leaves,
                               int _n_edges, int *parents, int *children,
                               float *lambdas, int *sizes, int *labels,
                               float *probabilities,
                               CLUSTER_SELECTION_METHOD cluster_selection_method,
                               bool allow_single_cluster, int max_cluster_size,
                               float cluster_selection_epsilon)

    cdef extern from "cuml/cluster/hdbscan.hpp" namespace "ML::HDBSCAN::HELPER":

        # Core distances for mutual reachability (k = min_samples).
        void compute_core_dists(const handle_t& handle,
                                const float* X,
                                float* core_dists,
                                size_t m,
                                size_t n,
                                DistanceType metric,
                                int min_samples)

        # Rebuild the cluster-id -> label mapping from a condensed tree
        # (used when importing a CPU-trained model).
        void compute_inverse_label_map(const handle_t& handle,
                                       CondensedHierarchy[int, float]&
                                       condensed_tree,
                                       size_t n_leaves,
                                       CLUSTER_SELECTION_METHOD
                                       cluster_selection_method,
                                       device_uvector[int]& inverse_label_map,
                                       bool allow_single_cluster,
                                       int max_cluster_size,
                                       float cluster_selection_epsilon)

    # Metric-name -> DistanceType mapping; only euclidean/L2 is supported.
    _metrics_mapping = {
        'l2': DistanceType.L2SqrtExpanded,
        'euclidean': DistanceType.L2SqrtExpanded,
    }
def _cuml_array_from_ptr(ptr, buf_size, shape, dtype, owner):
    """Wrap a raw device pointer in a CumlArray without copying.

    `owner` is retained by the UnownedMemory wrapper so the underlying
    allocation stays alive as long as the returned array does.
    """
    unowned = cp.cuda.UnownedMemory(ptr=ptr, size=buf_size,
                                    owner=owner,
                                    device_id=-1)
    memptr = cp.cuda.memory.MemoryPointer(unowned, 0)
    device_view = cp.ndarray(shape=shape, dtype=dtype, memptr=memptr)
    return CumlArray(data=device_view)
def _construct_condensed_tree_attribute(ptr,
                                        n_condensed_tree_edges,
                                        dtype="int32",
                                        owner=None):
    """Expose one condensed-tree column (a device pointer) as a CumlArray.

    Both supported dtypes (int32 and float32) are 4 bytes wide, so
    sizeof(float) yields the correct buffer size for either.
    """
    n_bytes = n_condensed_tree_edges * sizeof(float)
    return _cuml_array_from_ptr(ptr,
                                n_bytes,
                                (n_condensed_tree_edges,),
                                dtype,
                                owner)
def _build_condensed_tree_plot_host(
        parent, child, lambdas, sizes,
        cluster_selection_method, allow_single_cluster):
    """Assemble a host-side hdbscan.plots.CondensedTree from column arrays.

    Requires the CPU `hdbscan` package; has_hdbscan raises if it is
    missing.
    """
    tree = np.recarray(shape=(parent.shape[0],),
                       formats=[np.intp, np.intp, float, np.intp],
                       names=('parent', 'child', 'lambda_val',
                              'child_size'))
    tree['parent'] = parent
    tree['child'] = child
    tree['lambda_val'] = lambdas
    tree['child_size'] = sizes

    if not has_hdbscan(raise_if_unavailable=True):
        return None
    from hdbscan.plots import CondensedTree
    return CondensedTree(tree,
                         cluster_selection_method,
                         allow_single_cluster)
def condense_hierarchy(dendrogram,
                       min_cluster_size,
                       allow_single_cluster=False,
                       cluster_selection_epsilon=0.0):
    """
    Accepts a dendrogram in the Scipy hierarchy format, condenses the
    dendrogram to collapse subtrees containing less than min_cluster_size
    leaves, and returns an hdbscan.plots.CondensedTree object with
    the result on host.

    Parameters
    ----------

    dendrogram : array-like (size n_samples, 4)
        Dendrogram in Scipy hierarchy format

    min_cluster_size : int minimum number of children for a cluster
        to persist

    allow_single_cluster : bool whether or not to allow a single
        cluster in the face of mostly noise.

    cluster_selection_epsilon : float minimum distance threshold used to
        determine when clusters should be merged.

    Returns
    -------

    condensed_tree : hdbscan.plots.CondensedTree object
    """

    # Split the scipy linkage matrix into device arrays: merge pairs
    # (columns 0-1), merge distances (column 2), subtree sizes (column 3).
    _children, _, _, _ = \
        input_to_cuml_array(dendrogram[:, 0:2].astype('int32'), order='C',
                            check_dtype=[np.int32],
                            convert_to_dtype=(np.int32))

    _lambdas, _, _, _ = \
        input_to_cuml_array(dendrogram[:, 2], order='C',
                            check_dtype=[np.float32],
                            convert_to_dtype=(np.float32))

    _sizes, _, _, _ = \
        input_to_cuml_array(dendrogram[:, 3], order='C',
                            check_dtype=[np.int32],
                            convert_to_dtype=(np.int32))

    IF GPUBUILD == 1:
        handle = Handle()
        cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

        # A linkage over n leaves encodes n-1 merges.
        n_leaves = dendrogram.shape[0]+1
        cdef CondensedHierarchy[int, float] *condensed_tree =\
            new CondensedHierarchy[int, float](
                handle_[0], <size_t>n_leaves)

        cdef uintptr_t _children_ptr = _children.ptr
        cdef uintptr_t _lambdas_ptr = _lambdas.ptr
        cdef uintptr_t _sizes_ptr = _sizes.ptr

        build_condensed_hierarchy(handle_[0],
                                  <int*> _children_ptr,
                                  <float*> _lambdas_ptr,
                                  <int*> _sizes_ptr,
                                  <int>min_cluster_size,
                                  n_leaves,
                                  deref(condensed_tree))

        # Wrap the C++ tree's columns as device arrays; the views share
        # memory with `condensed_tree` until copied to host below.
        n_condensed_tree_edges = \
            condensed_tree.get_n_edges()

        condensed_parent_ = _construct_condensed_tree_attribute(
            <size_t>condensed_tree.get_parents(), n_condensed_tree_edges)

        condensed_child_ = _construct_condensed_tree_attribute(
            <size_t>condensed_tree.get_children(), n_condensed_tree_edges)

        condensed_lambdas_ = \
            _construct_condensed_tree_attribute(
                <size_t>condensed_tree.get_lambdas(), n_condensed_tree_edges,
                "float32")

        condensed_sizes_ = _construct_condensed_tree_attribute(
            <size_t>condensed_tree.get_sizes(), n_condensed_tree_edges)

        # NOTE(review): _build_condensed_tree_plot_host expects
        # `cluster_selection_method` as the argument after `sizes`, but
        # `cluster_selection_epsilon` (a float) is passed here — confirm
        # whether this is intentional.
        condensed_tree_host = _build_condensed_tree_plot_host(
            condensed_parent_.to_output('numpy'),
            condensed_child_.to_output("numpy"),
            condensed_lambdas_.to_output("numpy"),
            condensed_sizes_.to_output("numpy"), cluster_selection_epsilon,
            allow_single_cluster)

        # Device tree no longer needed once the host copy exists.
        del condensed_tree

        return condensed_tree_host
def delete_hdbscan_output(obj):
    # Free the C++ hdbscan_output owned by `obj` (if any) and remove the
    # Python-side handle so a subsequent fit() can allocate a fresh one.
    IF GPUBUILD == 1:
        cdef hdbscan_output *output
        if hasattr(obj, "hdbscan_output_"):
            output = <hdbscan_output*>\
                <uintptr_t> obj.hdbscan_output_
            del output
            del obj.hdbscan_output_
class HDBSCAN(UniversalBase, ClusterMixin, CMajorInputTagMixin):
"""
HDBSCAN Clustering
Recursively merges the pair of clusters that minimally increases a
given linkage distance.
Note that while the algorithm is generally deterministic and should
provide matching results between RAPIDS and the Scikit-learn Contrib
versions, the construction of the k-nearest neighbors graph and
minimum spanning tree can introduce differences between the two
algorithms, especially when several nearest neighbors around a
point might have the same distance. While the differences in
the minimum spanning trees alone might be subtle, they can
(and often will) lead to some points being assigned different
cluster labels between the two implementations.
Parameters
----------
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
alpha : float, optional (default=1.0)
A distance scaling parameter as used in robust single linkage.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
min_cluster_size : int, optional (default = 5)
The minimum number of samples in a group for that group to be
considered a cluster; groupings smaller than this size will be left
as noise.
min_samples : int, optional (default=None)
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
If 'None', it defaults to the min_cluster_size.
cluster_selection_epsilon : float, optional (default=0.0)
A distance threshold. Clusters below this value will be merged.
Note that this should not be used
if we want to predict the cluster labels for new points in future
(e.g. using approximate_predict), as the approximate_predict function
is not aware of this argument.
max_cluster_size : int, optional (default=0)
A limit to the size of clusters returned by the eom algorithm.
Has no effect when using leaf clustering (where clusters are
usually small regardless) and can also be overridden in rare
cases by a high value for cluster_selection_epsilon. Note that
this should not be used if we want to predict the cluster labels
for new points in future (e.g. using approximate_predict), as
the approximate_predict function is not aware of this argument.
metric : string or callable, optional (default='euclidean')
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
p : int, optional (default=2)
p value to use if using the minkowski metric.
cluster_selection_method : string, optional (default='eom')
The method used to select clusters from the condensed tree. The
standard approach for HDBSCAN* is to use an Excess of Mass algorithm
to find the most persistent clusters. Alternatively you can instead
select the clusters at the leaves of the tree -- this provides the
most fine grained and homogeneous clusters. Options are:
* ``eom``
* ``leaf``
allow_single_cluster : bool, optional (default=False)
By default HDBSCAN* will not produce a single cluster, setting this
to True will override this and allow single cluster results in
the case that you feel this is a valid result for your dataset.
gen_min_span_tree : bool, optional (default=False)
Whether to populate the `minimum_spanning_tree_` member for
utilizing plotting tools. This requires the `hdbscan` CPU Python
package to be installed.
gen_condensed_tree : bool, optional (default=False)
Whether to populate the `condensed_tree_` member for
utilizing plotting tools. This requires the `hdbscan` CPU
Python package to be installed.
gen_single_linkage_tree_ : bool, optional (default=False)
Whether to populate the `single_linkage_tree_` member for
utilizing plotting tools. This requires the `hdbscan` CPU
        Python package to be installed.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
prediction_data : bool, optional (default=False)
Whether to generate extra cached data for predicting labels or
        membership vectors for new unseen points later. If you wish to
persist the clustering object for later re-use you probably want
to set this to True.
Attributes
----------
labels_ : ndarray, shape (n_samples, )
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
probabilities_ : ndarray, shape (n_samples, )
The strength with which each sample is a member of its assigned
cluster. Noise points have probability zero; points in clusters
have values assigned proportional to the degree that they
persist as part of the cluster.
cluster_persistence_ : ndarray, shape (n_clusters, )
A score of how persistent each cluster is. A score of 1.0 represents
a perfectly stable cluster that persists over all distance scales,
while a score of 0.0 represents a perfectly ephemeral cluster. These
scores can be used to gauge the relative coherence of the
clusters output by the algorithm.
condensed_tree_ : CondensedTree object
The condensed tree produced by HDBSCAN. The object has methods
for converting to pandas, networkx, and plotting.
single_linkage_tree_ : SingleLinkageTree object
The single linkage tree produced by HDBSCAN. The object has methods
for converting to pandas, networkx, and plotting.
minimum_spanning_tree_ : MinimumSpanningTree object
The minimum spanning tree of the mutual reachability graph generated
by HDBSCAN. Note that this is not generated by default and will only
be available if `gen_min_span_tree` was set to True on object creation.
Even then in some optimized cases a tree may not be generated.
"""
    # Dotted path of the equivalent CPU estimator, used by device interop.
    _cpu_estimator_import_path = 'hdbscan.HDBSCAN'

    # Fit results exposed through cuml's configurable output type.
    labels_ = CumlArrayDescriptor()
    probabilities_ = CumlArrayDescriptor()
    outlier_scores_ = CumlArrayDescriptor()
    cluster_persistence_ = CumlArrayDescriptor()

    # Single Linkage Tree
    children_ = CumlArrayDescriptor()
    lambdas_ = CumlArrayDescriptor()
    sizes_ = CumlArrayDescriptor()

    # Minimum Spanning Tree
    mst_src_ = CumlArrayDescriptor()
    mst_dst_ = CumlArrayDescriptor()
    mst_weights_ = CumlArrayDescriptor()
@device_interop_preparation
def __init__(self, *,
min_cluster_size=5,
min_samples=None,
cluster_selection_epsilon=0.0,
max_cluster_size=0,
metric='euclidean',
alpha=1.0,
p=2,
cluster_selection_method='eom',
allow_single_cluster=False,
gen_min_span_tree=False,
handle=None,
verbose=False,
connectivity='knn',
output_type=None,
prediction_data=False):
super().__init__(handle=handle,
verbose=verbose,
output_type=output_type)
if min_samples is None:
min_samples = min_cluster_size
if connectivity not in ["knn", "pairwise"]:
raise ValueError("'connectivity' can only be one of "
"{'knn', 'pairwise'}")
if 2 < min_samples and min_samples > 1023:
raise ValueError("'min_samples' must be a positive number "
"between 2 and 1023")
self.min_cluster_size = min_cluster_size
self.min_samples = min_samples
self.cluster_selection_epsilon = cluster_selection_epsilon
self.max_cluster_size = max_cluster_size
self.metric = metric
self.p = p
self.alpha = alpha
self.cluster_selection_method = cluster_selection_method
self.allow_single_cluster = allow_single_cluster
self.connectivity = connectivity
self.fit_called_ = False
self.prediction_data = prediction_data
self.n_clusters_ = None
self.n_leaves_ = None
self.condensed_tree_obj = None
self.single_linkage_tree_obj = None
self.minimum_spanning_tree_ = None
self.prediction_data_obj = None
self.gen_min_span_tree = gen_min_span_tree
self.core_dists = None
self.condensed_tree_ptr = None
self.prediction_data_ptr = None
self._cpu_to_gpu_interop_prepped = False
@property
def condensed_tree_(self):
if self.condensed_tree_obj is None:
self.condensed_tree_obj = _build_condensed_tree_plot_host(
self.condensed_parent_.to_output("numpy"),
self.condensed_child_.to_output("numpy"),
self.condensed_lambdas_.to_output("numpy"),
self.condensed_sizes_.to_output("numpy"),
self.cluster_selection_method, self.allow_single_cluster)
return self.condensed_tree_obj
@condensed_tree_.setter
def condensed_tree_(self, new_val):
self.condensed_tree_obj = new_val
@property
def single_linkage_tree_(self):
if self.single_linkage_tree_obj is None:
with cuml.using_output_type("numpy"):
raw_tree = np.column_stack(
(self.children_[0, :self.n_leaves_-1],
self.children_[1, :self.n_leaves_-1],
self.lambdas_[:self.n_leaves_-1],
self.sizes_[:self.n_leaves_-1]))
raw_tree = raw_tree.astype(np.float64)
if has_hdbscan(raise_if_unavailable=True):
from hdbscan.plots import SingleLinkageTree
self.single_linkage_tree_obj = SingleLinkageTree(raw_tree)
return self.single_linkage_tree_obj
@single_linkage_tree_.setter
def single_linkage_tree_(self, new_val):
self.single_linkage_tree_obj = new_val
    @property
    def prediction_data_(self):
        # Lazily build the host-side hdbscan PredictionData object used by
        # the CPU prediction utilities. Requires fit(prediction_data=True)
        # or generate_prediction_data() to have been called.
        if not self.prediction_data:
            raise ValueError(
                'Train model with fit(prediction_data=True). or call '
                'model.generate_prediction_data()')
        if self.prediction_data_obj is None:
            if has_hdbscan(raise_if_unavailable=True):
                from sklearn.neighbors import KDTree, BallTree
                from hdbscan.prediction import PredictionData

                FAST_METRICS = KDTree.valid_metrics + \
                    BallTree.valid_metrics + ["cosine", "arccos"]

                if self.metric in FAST_METRICS:
                    min_samples = self.min_samples or self.min_cluster_size
                    if self.metric in KDTree.valid_metrics:
                        tree_type = "kdtree"
                    elif self.metric in BallTree.valid_metrics:
                        tree_type = "balltree"
                    else:
                        # "cosine"/"arccos" are in FAST_METRICS but have no
                        # spatial tree; warn and bail out (nothing cached,
                        # property evaluates to None).
                        warn("Metric {} not supported"
                             "for prediction data!".format(self.metric))
                        return

                    # NOTE(review): metrics outside FAST_METRICS fall
                    # through silently and this property returns None —
                    # confirm that is the intended behavior.
                    self.prediction_data_obj = PredictionData(
                        self.X_m.to_output("numpy"),
                        self.condensed_tree_,
                        min_samples,
                        tree_type=tree_type,
                        metric=self.metric)
        return self.prediction_data_obj

    @prediction_data_.setter
    def prediction_data_(self, new_val):
        # Allows callers (e.g. CPU interop) to inject prebuilt data.
        self.prediction_data_obj = new_val
def build_minimum_spanning_tree(self, X):
if self.gen_min_span_tree and self.minimum_spanning_tree_ is None:
with cuml.using_output_type("numpy"):
raw_tree = np.column_stack((self.mst_src_,
self.mst_dst_,
self.mst_weights_))
raw_tree = raw_tree.astype(np.float64)
if has_hdbscan(raise_if_unavailable=True):
from hdbscan.plots import MinimumSpanningTree
self.minimum_spanning_tree_ = \
MinimumSpanningTree(raw_tree, X.to_output("numpy"))
return self.minimum_spanning_tree_
    @enable_device_interop
    def generate_prediction_data(self):
        """
        Create data that caches intermediate results used for predicting
        the label of new/unseen points. This data is only useful if you
        are intending to use functions from hdbscan.prediction.
        """

        if not self.fit_called_:
            raise ValueError(
                'The model is not trained yet (call fit() first).')

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            cdef uintptr_t core_dists = self.core_dists.ptr

            # Allocated with C++ `new`; the raw pointer is stored on self
            # and released in __dealloc__.
            cdef PredictionData[int, float] *prediction_data = new PredictionData(
                handle_[0], <int> self.n_rows, <int> self.n_cols,
                <float*> core_dists)

            self.prediction_data_ptr = <size_t>prediction_data

            cdef uintptr_t condensed_tree_ptr = self.condensed_tree_ptr

            cdef CondensedHierarchy[int, float] *condensed_tree = \
                <CondensedHierarchy[int, float] *> condensed_tree_ptr

            # The C++ API requires int32 labels regardless of the configured
            # output type.
            labels, _, _, _ = input_to_cuml_array(self.labels_,
                                                  order='C',
                                                  convert_to_dtype=np.int32)

            cdef uintptr_t _labels_ptr = labels.ptr
            cdef uintptr_t inverse_label_map_ptr = self.inverse_label_map.ptr

            generate_prediction_data(handle_[0],
                                     deref(condensed_tree),
                                     <int*> _labels_ptr,
                                     <int*> inverse_label_map_ptr,
                                     <int> self.n_clusters_,
                                     deref(prediction_data))

        self.handle.sync()

        self.prediction_data = True
    def __dealloc__(self):
        # Release all C++-side allocations owned by this estimator.
        delete_hdbscan_output(self)
        IF GPUBUILD == 1:
            # NOTE(review): these objects are allocated with C++ `new`
            # (see condense/generate_prediction_data) but released with
            # free(), which does not run destructors — confirm this does
            # not leak the buffers they own.
            cdef CondensedHierarchy[int, float]* condensed_tree_ptr
            if hasattr(self, "condensed_tree_ptr"):
                condensed_tree_ptr = <CondensedHierarchy[int, float]*> \
                    <uintptr_t> self.condensed_tree_ptr
                free(condensed_tree_ptr)
                del self.condensed_tree_ptr

            cdef PredictionData* prediction_data_ptr
            if hasattr(self, "prediction_data_ptr"):
                prediction_data_ptr = \
                    <PredictionData*> <uintptr_t> self.prediction_data_ptr
                free(prediction_data_ptr)
                del self.prediction_data_ptr

            # this is only constructed when trying to gpu predict
            # with a cpu model
            if hasattr(self, "inverse_label_map_ptr"):
                inverse_label_map_ptr = \
                    <device_uvector[int]*> <uintptr_t> self.inverse_label_map_ptr
                free(inverse_label_map_ptr)
                del self.inverse_label_map_ptr
    def _construct_output_attributes(self):
        # Populate Python-visible fit attributes from the C++ hdbscan_output
        # owned by self. The arrays created here are zero-copy views whose
        # memory remains owned by the output object (owner=self keeps it
        # alive).
        IF GPUBUILD == 1:
            cdef hdbscan_output *hdbscan_output_ = \
                <hdbscan_output*><size_t>self.hdbscan_output_

            self.n_clusters_ = hdbscan_output_.get_n_clusters()

            if self.n_clusters_ > 0:
                self.cluster_persistence_ = _cuml_array_from_ptr(
                    <size_t>hdbscan_output_.get_stabilities(),
                    hdbscan_output_.get_n_clusters() * sizeof(float),
                    (1, hdbscan_output_.get_n_clusters()), "float32", self)
            else:
                # No clusters found: expose empty arrays rather than views.
                self.cluster_persistence_ = CumlArray.empty((0,), dtype="float32")

            n_condensed_tree_edges = \
                hdbscan_output_.get_condensed_tree().get_n_edges()

            self.condensed_parent_ = _construct_condensed_tree_attribute(
                <size_t>hdbscan_output_.get_condensed_tree().get_parents(),
                n_condensed_tree_edges)

            self.condensed_child_ = _construct_condensed_tree_attribute(
                <size_t>hdbscan_output_.get_condensed_tree().get_children(),
                n_condensed_tree_edges)

            self.condensed_lambdas_ = \
                _construct_condensed_tree_attribute(
                    <size_t>hdbscan_output_.get_condensed_tree().get_lambdas(),
                    n_condensed_tree_edges, "float32")

            self.condensed_sizes_ = _construct_condensed_tree_attribute(
                <size_t>hdbscan_output_.get_condensed_tree().get_sizes(),
                n_condensed_tree_edges)

            # Maps internal cluster ids back to user-facing labels.
            if self.n_clusters_ > 0:
                self.inverse_label_map = _cuml_array_from_ptr(
                    <size_t>hdbscan_output_.get_inverse_label_map(),
                    self.n_clusters_ * sizeof(int),
                    (self.n_clusters_, ), "int32", self)
            else:
                self.inverse_label_map = CumlArray.empty((0,), dtype="int32")
@generate_docstring()
@enable_device_interop
def fit(self, X, y=None, convert_dtype=True) -> "HDBSCAN":
    """
    Fit HDBSCAN model from features.

    Parameters
    ----------
    X : array-like (n_samples, n_features)
        Training data; coerced to a C-contiguous float32 device array.
    y : ignored
        Present for estimator-API consistency only.
    convert_dtype : bool (default = True)
        When True, automatically convert the input to float32 instead of
        raising on dtype mismatch.

    Returns
    -------
    self : HDBSCAN
        The fitted estimator.
    """
    # Coerce input; also records the original dtype on self.
    X_m, n_rows, n_cols, self.dtype = \
        input_to_cuml_array(X, order='C',
                            check_dtype=[np.float32],
                            convert_to_dtype=(np.float32
                                              if convert_dtype
                                              else None))

    # Keep the device copy of X: it is needed later for prediction
    # (e.g. approximate_predict / membership vectors).
    self.X_m = X_m
    self.n_rows = n_rows
    self.n_cols = n_cols

    cdef uintptr_t _input_ptr = X_m.ptr

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        # Hardcode n_components_ to 1 for single linkage. This will
        # not be the case for other linkage types.
        self.n_connected_components_ = 1
        self.n_leaves_ = n_rows

        # Pre-allocate the device buffers that the C++ hdbscan_output
        # writes into; Python retains ownership of these arrays.
        self.labels_ = CumlArray.empty(n_rows, dtype="int32", index=X_m.index)
        self.children_ = CumlArray.empty((2, n_rows), dtype="int32")
        self.probabilities_ = CumlArray.empty(n_rows, dtype="float32")
        self.sizes_ = CumlArray.empty(n_rows, dtype="int32")
        self.lambdas_ = CumlArray.empty(n_rows, dtype="float32")
        # A spanning tree over n_rows vertices has n_rows - 1 edges.
        self.mst_src_ = CumlArray.empty(n_rows-1, dtype="int32")
        self.mst_dst_ = CumlArray.empty(n_rows-1, dtype="int32")
        self.mst_weights_ = CumlArray.empty(n_rows-1, dtype="float32")

        self.core_dists = CumlArray.empty(n_rows, dtype="float32")

        cdef uintptr_t _labels_ptr = self.labels_.ptr
        cdef uintptr_t _children_ptr = self.children_.ptr
        cdef uintptr_t _sizes_ptr = self.sizes_.ptr
        cdef uintptr_t _lambdas_ptr = self.lambdas_.ptr
        cdef uintptr_t _probabilities_ptr = self.probabilities_.ptr
        cdef uintptr_t mst_src_ptr = self.mst_src_.ptr
        cdef uintptr_t mst_dst_ptr = self.mst_dst_.ptr
        cdef uintptr_t mst_weights_ptr = self.mst_weights_.ptr

        # If calling fit a second time, release
        # any memory owned from previous trainings
        delete_hdbscan_output(self)

        # The C++ output object aliases the CumlArray buffers above;
        # it is freed by delete_hdbscan_output via hdbscan_output_.
        cdef hdbscan_output *linkage_output = new hdbscan_output(
            handle_[0], n_rows,
            <int*>_labels_ptr,
            <float*>_probabilities_ptr,
            <int*>_children_ptr,
            <int*>_sizes_ptr,
            <float*>_lambdas_ptr,
            <int*>mst_src_ptr,
            <int*>mst_dst_ptr,
            <float*>mst_weights_ptr)

        self.hdbscan_output_ = <size_t>linkage_output

        # Translate Python hyperparameters into the C++ params struct.
        cdef HDBSCANParams params
        params.min_samples = self.min_samples
        # params.alpha = self.alpha
        params.min_cluster_size = self.min_cluster_size
        params.max_cluster_size = self.max_cluster_size
        params.cluster_selection_epsilon = self.cluster_selection_epsilon
        params.allow_single_cluster = self.allow_single_cluster

        if self.cluster_selection_method == 'eom':
            params.cluster_selection_method = CLUSTER_SELECTION_METHOD.EOM
        elif self.cluster_selection_method == 'leaf':
            params.cluster_selection_method = CLUSTER_SELECTION_METHOD.LEAF
        else:
            raise ValueError("Cluster selection method not supported. "
                             "Must one of {'eom', 'leaf'}")

        cdef DistanceType metric
        if self.metric in _metrics_mapping:
            metric = _metrics_mapping[self.metric]
        else:
            raise ValueError(f"metric '{self.metric}' not supported, only "
                             "'l2' and 'euclidean' are currently "
                             "supported.")

        cdef uintptr_t core_dists_ptr = self.core_dists.ptr

        if self.connectivity == 'knn' or self.connectivity == 'pairwise':
            # Main C++ entry point: fills linkage_output and core_dists.
            hdbscan(handle_[0],
                    <float*>_input_ptr,
                    <int> n_rows,
                    <int> n_cols,
                    <DistanceType> metric,
                    params,
                    deref(linkage_output),
                    <float*> core_dists_ptr)
        else:
            raise ValueError("'connectivity' can only be one of "
                             "{'knn', 'pairwise'}")

        self.fit_called_ = True

        # Cache a pointer to the condensed tree owned by linkage_output.
        self.condensed_tree_ptr = \
            <size_t> &linkage_output[0].get_condensed_tree()

        self._construct_output_attributes()

        if self.prediction_data:
            self.generate_prediction_data()

    self.handle.sync()

    self.build_minimum_spanning_tree(X_m)

    return self
@generate_docstring(return_values={'name': 'preds',
                                   'type': 'dense',
                                   'description': 'Cluster indexes',
                                   'shape': '(n_samples, 1)'})
@enable_device_interop
def fit_predict(self, X, y=None) -> CumlArray:
    """
    Fit the HDBSCAN model on ``X`` and return the cluster label
    assigned to each sample.
    """
    fitted_model = self.fit(X)
    return fitted_model.labels_
def _extract_clusters(self, condensed_tree):
    """
    Run flat-cluster extraction on an already-built condensed tree,
    storing the resulting labels and probabilities in
    ``self.labels_test`` / ``self.probabilities_test``.

    Parameters
    ----------
    condensed_tree : object exposing ``to_numpy()`` with 'parent',
        'child', 'lambda_val' and 'child_size' fields (e.g. an hdbscan
        CondensedTree).
    """
    # Copy each condensed-tree column to a typed device array.
    # NOTE(review): to_numpy() is invoked once per column (and once more
    # below); caching the record array locally would avoid repeated
    # conversion.
    parents, _n_edges, _, _ = \
        input_to_cuml_array(condensed_tree.to_numpy()['parent'],
                            order='C',
                            convert_to_dtype=np.int32)
    children, _, _, _ = \
        input_to_cuml_array(condensed_tree.to_numpy()['child'],
                            order='C',
                            convert_to_dtype=np.int32)
    lambdas, _, _, _ = \
        input_to_cuml_array(condensed_tree.to_numpy()['lambda_val'],
                            order='C',
                            convert_to_dtype=np.float32)
    sizes, _, _, _ = \
        input_to_cuml_array(condensed_tree.to_numpy()['child_size'],
                            order='C',
                            convert_to_dtype=np.int32)

    # Smallest parent id is taken as the leaf count — assumes the
    # condensed-tree numbering where internal nodes start at n_leaves;
    # confirm against the tree layout.
    n_leaves = int(condensed_tree.to_numpy()['parent'].min())
    self.labels_test = CumlArray.empty(n_leaves, dtype="int32")
    self.probabilities_test = CumlArray.empty(n_leaves, dtype="float32")

    cdef uintptr_t _labels_ptr = self.labels_test.ptr
    cdef uintptr_t _parents_ptr = parents.ptr
    cdef uintptr_t _children_ptr = children.ptr
    cdef uintptr_t _sizes_ptr = sizes.ptr
    cdef uintptr_t _lambdas_ptr = lambdas.ptr
    cdef uintptr_t _probabilities_ptr = self.probabilities_test.ptr

    IF GPUBUILD == 1:
        # NOTE(review): an unrecognized cluster_selection_method leaves
        # the local unbound (NameError) instead of raising the ValueError
        # that fit() raises — confirm whether validation is needed here.
        if self.cluster_selection_method == 'eom':
            cluster_selection_method = CLUSTER_SELECTION_METHOD.EOM
        elif self.cluster_selection_method == 'leaf':
            cluster_selection_method = CLUSTER_SELECTION_METHOD.LEAF

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        _extract_clusters(handle_[0],
                          <size_t> n_leaves,
                          <int> _n_edges,
                          <int*> _parents_ptr,
                          <int*> _children_ptr,
                          <float*> _lambdas_ptr,
                          <int*> _sizes_ptr,
                          <int*> _labels_ptr,
                          <float*> _probabilities_ptr,
                          <CLUSTER_SELECTION_METHOD> cluster_selection_method,
                          <bool> self.allow_single_cluster,
                          <int> self.max_cluster_size,
                          <float> self.cluster_selection_epsilon)
def __getstate__(self):
    """
    Return a picklable copy of ``__dict__`` with every raw-pointer
    attribute (any key containing "ptr") stripped out, since the
    underlying C++ objects cannot be pickled.
    """
    return {
        key: value
        for key, value in self.__dict__.items()
        if "ptr" not in key
    }
def __setstate__(self, state):
    """
    Restore a pickled HDBSCAN estimator.

    Re-initializes the base estimator from the saved handle/verbosity,
    then — because the C++ CondensedHierarchy / PredictionData objects
    are not picklable (see ``__getstate__``) — rebuilds them on the GPU
    from the saved device arrays and regenerates the prediction data.
    """
    super(HDBSCAN, self).__init__(
        handle=state["handle"],
        verbose=state["verbose"]
    )

    # Unfitted model: no C++ state to reconstruct.
    # NOTE(review): this early return also skips the final
    # __dict__.update(state); confirm that only handle/verbose need to
    # survive for unfitted models.
    if not state["fit_called_"]:
        return

    # Restore the arrays the C++ reconstruction below reads from.
    self.condensed_parent_ = state["condensed_parent_"]
    self.condensed_child_ = state["condensed_child_"]
    self.condensed_lambdas_ = state["condensed_lambdas_"]
    self.condensed_sizes_ = state["condensed_sizes_"]
    self.X_m = state["X_m"]
    self.n_rows = state["n_rows"]
    self.n_cols = state["n_cols"]
    self.labels_ = state["labels_"]
    self.core_dists = state["core_dists"]
    self.inverse_label_map = state["inverse_label_map"]
    self.n_clusters_ = state["n_clusters_"]

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef uintptr_t _parent_ptr = self.condensed_parent_.ptr
        cdef uintptr_t _child_ptr = self.condensed_child_.ptr
        cdef uintptr_t _lambdas_ptr = self.condensed_lambdas_.ptr
        cdef uintptr_t _sizes_ptr = self.condensed_sizes_.ptr

        # Rebuild the condensed tree from the pickled columns; ownership
        # of the new C++ object is tracked via condensed_tree_ptr.
        cdef CondensedHierarchy[int, float] *condensed_tree = \
            new CondensedHierarchy[int, float](
                handle_[0], <size_t>self.n_rows,
                <int>self.condensed_parent_.shape[0],
                <int*> _parent_ptr, <int*> _child_ptr,
                <float*> _lambdas_ptr, <int*> _sizes_ptr)
        self.condensed_tree_ptr = <size_t> condensed_tree

        cdef uintptr_t core_dists_ptr = self.core_dists.ptr

        cdef PredictionData[int, float] *prediction_data = new PredictionData(
            handle_[0], <int> self.n_rows, <int> self.n_cols,
            <float*> core_dists_ptr)
        self.prediction_data_ptr = <size_t>prediction_data

        # NOTE(review): `self.labels_.values['cuml']` assumes labels_
        # unpickles as a mapping of outputs keyed by 'cuml' — confirm
        # against the array-descriptor pickling behavior.
        self.labels, _, _, _ = input_to_cuml_array(self.labels_.values['cuml'],
                                                   order='C',
                                                   convert_to_dtype=np.int32)
        cdef uintptr_t _labels_ptr = self.labels.ptr

        cdef uintptr_t inverse_label_map_ptr = self.inverse_label_map.ptr

        generate_prediction_data(handle_[0],
                                 deref(condensed_tree),
                                 <int*> _labels_ptr,
                                 <int*> inverse_label_map_ptr,
                                 <int> self.n_clusters_,
                                 deref(prediction_data))

        self.handle.sync()

    self.__dict__.update(state)
def _prep_cpu_to_gpu_prediction(self, convert_dtype=True):
    """
    This is an internal function, to be called when HDBSCAN
    is trained on CPU but GPU inference is desired.

    Copies the CPU model's raw data and condensed tree to the device,
    recomputes core distances and the inverse label map on the GPU, and
    generates GPU PredictionData. Idempotent once
    ``_cpu_to_gpu_interop_prepped`` has been set.

    Parameters
    ----------
    convert_dtype : bool (default = True)
        When True, convert the CPU model's raw data to float32.
    """
    if not self.prediction_data:
        raise ValueError("PredictionData not generated. "
                         "Please call clusterer.fit again with "
                         "prediction_data=True or call "
                         "clusterer.generate_prediction_data()")

    # Already prepared by an earlier call; nothing to do.
    if self._cpu_to_gpu_interop_prepped:
        return

    # Device copy of the training data the CPU model was fit on.
    self.X_m, self.n_rows, self.n_cols, _ = \
        input_to_cuml_array(self._cpu_model._raw_data, order='C',
                            check_dtype=[np.float32],
                            convert_to_dtype=(np.float32
                                              if convert_dtype
                                              else None))

    # Device copies of the four condensed-tree columns.
    self.condensed_parent_, _n_edges, _, _ = \
        input_to_cuml_array(self.condensed_tree_.to_numpy()['parent'],
                            order='C',
                            convert_to_dtype=np.int32)
    self.condensed_child_, _, _, _ = \
        input_to_cuml_array(self.condensed_tree_.to_numpy()['child'],
                            order='C',
                            convert_to_dtype=np.int32)
    self.condensed_lambdas_, _, _, _ = \
        input_to_cuml_array(self.condensed_tree_.to_numpy()['lambda_val'],
                            order='C',
                            convert_to_dtype=np.float32)
    self.condensed_sizes_, _, _, _ = \
        input_to_cuml_array(self.condensed_tree_.to_numpy()['child_size'],
                            order='C',
                            convert_to_dtype=np.int32)

    cdef uintptr_t _parent_ptr = self.condensed_parent_.ptr
    cdef uintptr_t _child_ptr = self.condensed_child_.ptr
    cdef uintptr_t _lambdas_ptr = self.condensed_lambdas_.ptr
    cdef uintptr_t _sizes_ptr = self.condensed_sizes_.ptr

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        # Rebuild the C++ condensed tree from the copied columns.
        cdef CondensedHierarchy[int, float] *condensed_tree = \
            new CondensedHierarchy[int, float](
                handle_[0], <size_t>self.n_rows, <int>_n_edges,
                <int*> _parent_ptr, <int*> _child_ptr,
                <float*> _lambdas_ptr, <int*> _sizes_ptr)
        self.condensed_tree_ptr = <size_t> condensed_tree

        # Core distances are not available from the CPU model;
        # recompute them on the GPU.
        self.core_dists = CumlArray.empty(self.n_rows, dtype="float32")
        metric = _metrics_mapping[self.metric]

        cdef uintptr_t X_ptr = self.X_m.ptr
        cdef uintptr_t core_dists_ptr = self.core_dists.ptr

        compute_core_dists(handle_[0],
                           <float*> X_ptr,
                           <float*> core_dists_ptr,
                           <size_t> self.n_rows,
                           <size_t> self.n_cols,
                           <DistanceType> metric,
                           <int> self.min_samples)

        # Device vector owned via inverse_label_map_ptr; freed in the
        # teardown path that handles cpu->gpu prediction state.
        cdef device_uvector[int] *inverse_label_map = \
            new device_uvector[int](0, handle_[0].get_stream())

        cdef CLUSTER_SELECTION_METHOD cluster_selection_method
        # NOTE(review): no else-branch — an unrecognized method leaves
        # cluster_selection_method uninitialized, unlike fit()'s
        # ValueError.
        if self.cluster_selection_method == 'eom':
            cluster_selection_method = CLUSTER_SELECTION_METHOD.EOM
        elif self.cluster_selection_method == 'leaf':
            cluster_selection_method = CLUSTER_SELECTION_METHOD.LEAF

        compute_inverse_label_map(handle_[0],
                                  deref(condensed_tree),
                                  <size_t> self.n_rows,
                                  <CLUSTER_SELECTION_METHOD>
                                  cluster_selection_method,
                                  deref(inverse_label_map),
                                  <bool> self.allow_single_cluster,
                                  <int> self.max_cluster_size,
                                  <float> self.cluster_selection_epsilon)

        self.n_clusters_ = <int> inverse_label_map[0].size()
        # Keep the raw device pointer for cleanup and wrap it zero-copy.
        self.inverse_label_map_ptr = <size_t> inverse_label_map[0].data()
        self.inverse_label_map = \
            _cuml_array_from_ptr(self.inverse_label_map_ptr,
                                 self.n_clusters_ * sizeof(int),
                                 (self.n_clusters_, ), "int32", self)

    self.fit_called_ = True
    self.generate_prediction_data()
    self.handle.sync()
    self._cpu_to_gpu_interop_prepped = True
def get_param_names(self):
    """
    Return the hyperparameter names accepted by this estimator: the
    base-class parameters followed by the HDBSCAN-specific ones.
    """
    hdbscan_params = [
        "metric",
        "min_cluster_size",
        "max_cluster_size",
        "min_samples",
        "cluster_selection_epsilon",
        "cluster_selection_method",
        "p",
        "allow_single_cluster",
        "connectivity",
        "alpha",
        "gen_min_span_tree",
        "prediction_data",
    ]
    return super().get_param_names() + hdbscan_params
def get_attr_names(self):
    """
    Return the names of the fitted attributes exposed by this model,
    extended with the optional minimum-spanning-tree and prediction-data
    attributes when the corresponding flags are enabled.
    """
    names = [
        'labels_',
        'probabilities_',
        'cluster_persistence_',
        'condensed_tree_',
        'single_linkage_tree_',
        'outlier_scores_',
    ]
    if self.gen_min_span_tree:
        names.append('minimum_spanning_tree_')
    if self.prediction_data:
        names.append('prediction_data_')
    return names
| 0 |
rapidsai_public_repos/cuml/python/cuml/cluster | rapidsai_public_repos/cuml/python/cuml/cluster/hdbscan/prediction.pyx | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libc.stdint cimport uintptr_t
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.array import CumlArray
from cuml.common import (
input_to_cuml_array,
input_to_host_array
)
from cuml.internals.device_type import DeviceType
from cuml.internals.import_utils import has_hdbscan
from cuml.internals import logger
import cuml
IF GPUBUILD == 1:
    from cython.operator cimport dereference as deref
    from pylibraft.common.handle cimport handle_t
    from cuml.metrics.distance_type cimport DistanceType
    # NOTE(review): handle_t is cimported twice in this block; one of the
    # two lines could be dropped.
    from pylibraft.common.handle cimport handle_t
    from pylibraft.common.handle import Handle

    # Minimal declarations mirroring the C++ HDBSCAN types in
    # cuml/cluster/hdbscan.hpp; only the members used from this module
    # are declared.
    cdef extern from "cuml/cluster/hdbscan.hpp" namespace "ML::HDBSCAN::Common":

        # Column-wise condensed cluster hierarchy (parents, children,
        # lambdas) over n_leaves points.
        cdef cppclass CondensedHierarchy[value_idx, value_t]:
            CondensedHierarchy(
                const handle_t &handle, size_t n_leaves)
            value_idx *get_parents()
            value_idx *get_children()
            value_t *get_lambdas()
            value_idx get_n_edges()

        # Aggregate of all fit() outputs; the pointer arguments alias
        # caller-owned device buffers.
        cdef cppclass hdbscan_output[int, float]:
            hdbscan_output(const handle_t &handle,
                           int n_leaves,
                           int *labels,
                           float *probabilities,
                           int *children,
                           int *sizes,
                           float *deltas,
                           int *mst_src,
                           int *mst_dst,
                           float *mst_weights)
            int get_n_leaves()
            int get_n_clusters()
            float *get_stabilities()
            int *get_labels()
            int *get_inverse_label_map()
            float *get_core_dists()
            CondensedHierarchy[int, float] &get_condensed_tree()

        # Per-model state needed for out-of-sample prediction; m x n with
        # caller-owned core distances.
        cdef cppclass PredictionData[int, float]:
            PredictionData(const handle_t &handle,
                           int m,
                           int n,
                           float *core_dists)

            size_t n_rows
            size_t n_cols

    cdef extern from "cuml/cluster/hdbscan.hpp" namespace "ML":
        # Soft membership for every training point, in batches.
        void compute_all_points_membership_vectors(
            const handle_t &handle,
            CondensedHierarchy[int, float] &condensed_tree,
            PredictionData[int, float] &prediction_data_,
            float* X,
            DistanceType metric,
            float* membership_vec,
            size_t batch_size)

        # Soft membership for new (out-of-sample) points.
        void compute_membership_vector(
            const handle_t& handle,
            CondensedHierarchy[int, float] &condensed_tree,
            PredictionData[int, float] &prediction_data,
            float* X,
            float* points_to_predict,
            size_t n_prediction_points,
            int min_samples,
            DistanceType metric,
            float* membership_vec,
            size_t batch_size)

        # Hard label + probability prediction for new points.
        void out_of_sample_predict(const handle_t &handle,
                                   CondensedHierarchy[int, float] &condensed_tree,
                                   PredictionData[int, float] &prediction_data,
                                   float* X,
                                   int* labels,
                                   float* points_to_predict,
                                   size_t n_prediction_points,
                                   DistanceType metric,
                                   int min_samples,
                                   int* out_labels,
                                   float* out_probabilities)

    # Maps user-facing metric names (and their aliases) onto raft
    # DistanceType enum values accepted by the C++ entry points above.
    _metrics_mapping = {
        'l1': DistanceType.L1,
        'cityblock': DistanceType.L1,
        'manhattan': DistanceType.L1,
        'l2': DistanceType.L2SqrtExpanded,
        'euclidean': DistanceType.L2SqrtExpanded,
        'cosine': DistanceType.CosineExpanded
    }
def all_points_membership_vectors(clusterer, batch_size=4096):
    """
    Predict soft cluster membership vectors for all points in the
    original dataset the clusterer was trained on. This function is more
    efficient by making use of the fact that all points are already in the
    condensed tree, and processing in bulk.

    Parameters
    ----------
    clusterer : HDBSCAN
        A clustering object that has been fit to the data and
        had ``prediction_data=True`` set.

    batch_size : int, optional, default=min(4096, n_rows)
        Lowers memory requirement by computing distance-based membership
        in smaller batches of points in the training data. For example, a batch
        size of 1,000 computes distance based memberships for 1,000 points at a
        time. The default batch size is 4,096.

    Returns
    -------
    membership_vectors : array (n_samples, n_clusters)
        The probability that point ``i`` of the original dataset is a member of
        cluster ``j`` is in ``membership_vectors[i, j]``.
    """
    if batch_size <= 0:
        raise ValueError("batch_size must be > 0")

    device_type = cuml.global_settings.device_type

    # cpu infer, cpu/gpu train
    if device_type == DeviceType.host:
        assert has_hdbscan(raise_if_unavailable=True)
        from hdbscan.prediction import all_points_membership_vectors \
            as cpu_all_points_membership_vectors

        # trained on gpu
        if not hasattr(clusterer, "_cpu_model"):
            # the reference HDBSCAN implementations uses @property
            # for attributes without setters available for them,
            # so they can't be transferred from the GPU model
            # to the CPU model
            raise ValueError("Inferring on CPU is not supported yet when the "
                             "model has been trained on GPU")

        # this took a long debugging session to figure out, but
        # this method on cpu does not work without this copy for some reason
        clusterer._cpu_model.prediction_data_.raw_data = \
            clusterer._cpu_model.prediction_data_.raw_data.copy()
        return cpu_all_points_membership_vectors(clusterer._cpu_model)

    elif device_type == DeviceType.device:
        # trained on cpu: lazily build the GPU-side prediction state.
        if hasattr(clusterer, "_cpu_model"):
            clusterer._prep_cpu_to_gpu_prediction()

        if not clusterer.fit_called_:
            raise ValueError("The clusterer is not fit on data. "
                             "Please call clusterer.fit first")

        if not clusterer.prediction_data:
            raise ValueError("PredictionData not generated. "
                             "Please call clusterer.fit again with "
                             "prediction_data=True or call "
                             "clusterer.generate_prediction_data()")

        # No clusters: nothing to compute membership against.
        # NOTE(review): this returns a 1-D zeros array of length n_rows,
        # not the documented (n_samples, n_clusters) shape — confirm
        # intended.
        if clusterer.n_clusters_ == 0:
            return np.zeros(clusterer.n_rows, dtype=np.float32)

        cdef uintptr_t _input_ptr = clusterer.X_m.ptr

        # Flat (n_rows * n_clusters) buffer, reshaped on return.
        membership_vec = CumlArray.empty(
            (clusterer.n_rows * clusterer.n_clusters_,),
            dtype="float32")

        cdef uintptr_t _membership_vec_ptr = membership_vec.ptr

        IF GPUBUILD == 1:
            cdef PredictionData *prediction_data_ = \
                <PredictionData*><size_t>clusterer.prediction_data_ptr

            cdef CondensedHierarchy[int, float] *condensed_tree = \
                <CondensedHierarchy[int, float]*><size_t> clusterer.condensed_tree_ptr

            cdef handle_t* handle_ = <handle_t*><size_t>clusterer.handle.getHandle()
            compute_all_points_membership_vectors(handle_[0],
                                                  deref(condensed_tree),
                                                  deref(prediction_data_),
                                                  <float*> _input_ptr,
                                                  _metrics_mapping[clusterer.metric],
                                                  <float*> _membership_vec_ptr,
                                                  batch_size)

        clusterer.handle.sync()
        return membership_vec.to_output(
            output_type="numpy",
            output_dtype="float32").reshape((clusterer.n_rows,
                                             clusterer.n_clusters_))
def membership_vector(clusterer, points_to_predict, batch_size=4096, convert_dtype=True):
    """
    Predict soft cluster membership. The result produces a vector
    for each point in ``points_to_predict`` that gives a probability that
    the given point is a member of a cluster for each of the selected clusters
    of the ``clusterer``.

    Parameters
    ----------
    clusterer : HDBSCAN
        A clustering object that has been fit to the data and
        either had ``prediction_data=True`` set, or called the
        ``generate_prediction_data`` method after the fact.

    points_to_predict : array, or array-like (n_samples, n_features)
        The new data points to predict cluster labels for. They should
        have the same dimensionality as the original dataset over which
        clusterer was fit.

    batch_size : int, optional, default=min(4096, n_points_to_predict)
        Lowers memory requirement by computing distance-based membership
        in smaller batches of points in the prediction data. For example, a
        batch size of 1,000 computes distance based memberships for 1,000
        points at a time. The default batch size is 4,096.

    convert_dtype : bool, optional (default = True)
        When True, automatically convert ``points_to_predict`` to float32
        instead of raising on a dtype mismatch.

    Returns
    -------
    membership_vectors : array (n_samples, n_clusters)
        The probability that point ``i`` is a member of cluster ``j`` is
        in ``membership_vectors[i, j]``.
    """
    device_type = cuml.global_settings.device_type

    # cpu infer, cpu/gpu train
    if device_type == DeviceType.host:
        assert has_hdbscan(raise_if_unavailable=True)
        from hdbscan.prediction import membership_vector \
            as cpu_membership_vector

        # trained on gpu
        if not hasattr(clusterer, "_cpu_model"):
            # the reference HDBSCAN implementations uses @property
            # for attributes without setters available for them,
            # so they can't be transferred from the GPU model
            # to the CPU model
            raise ValueError("Inferring on CPU is not supported yet when the "
                             "model has been trained on GPU")

        host_points_to_predict = input_to_host_array(points_to_predict).array
        return cpu_membership_vector(clusterer._cpu_model,
                                     host_points_to_predict)

    elif device_type == DeviceType.device:
        # trained on cpu: lazily build the GPU-side prediction state.
        if hasattr(clusterer, "_cpu_model"):
            clusterer._prep_cpu_to_gpu_prediction()

        if not clusterer.fit_called_:
            raise ValueError("The clusterer is not fit on data. "
                             "Please call clusterer.fit first")

        if not clusterer.prediction_data:
            raise ValueError("PredictionData not generated. "
                             "Please call clusterer.fit again with "
                             "prediction_data=True")

        if batch_size <= 0:
            raise ValueError("batch_size must be > 0")

        _points_to_predict_m, n_prediction_points, n_cols, _ = \
            input_to_cuml_array(points_to_predict, order='C',
                                check_dtype=[np.float32],
                                convert_to_dtype=(np.float32
                                                  if convert_dtype
                                                  else None))

        # No clusters: membership against zero clusters is all zeros.
        # NOTE(review): 1-D shape here vs the documented 2-D — confirm.
        if clusterer.n_clusters_ == 0:
            return np.zeros(n_prediction_points, dtype=np.float32)

        if n_cols != clusterer.n_cols:
            raise ValueError('New points dimension does not match fit data!')

        IF GPUBUILD == 1:
            cdef uintptr_t _prediction_ptr = _points_to_predict_m.ptr
            cdef uintptr_t _input_ptr = clusterer.X_m.ptr

            # Flat (n_prediction_points * n_clusters) buffer, reshaped on
            # return.
            membership_vec = CumlArray.empty(
                (n_prediction_points * clusterer.n_clusters_,),
                dtype="float32")

            cdef uintptr_t _membership_vec_ptr = membership_vec.ptr

            cdef CondensedHierarchy[int, float] *condensed_tree = \
                <CondensedHierarchy[int, float]*><size_t> clusterer.condensed_tree_ptr

            cdef PredictionData *prediction_data_ = \
                <PredictionData*><size_t>clusterer.prediction_data_ptr

            cdef handle_t* handle_ = <handle_t*><size_t>clusterer.handle.getHandle()
            compute_membership_vector(handle_[0],
                                      deref(condensed_tree),
                                      deref(prediction_data_),
                                      <float*> _input_ptr,
                                      <float*> _prediction_ptr,
                                      n_prediction_points,
                                      clusterer.min_samples,
                                      _metrics_mapping[clusterer.metric],
                                      <float*> _membership_vec_ptr,
                                      batch_size)

        clusterer.handle.sync()
        return membership_vec.to_output(
            output_type="numpy",
            output_dtype="float32").reshape((n_prediction_points,
                                             clusterer.n_clusters_))
def approximate_predict(clusterer, points_to_predict, convert_dtype=True):
    """Predict the cluster label of new points. The returned labels
    will be those of the original clustering found by ``clusterer``,
    and therefore are not (necessarily) the cluster labels that would
    be found by clustering the original data combined with
    ``points_to_predict``, hence the 'approximate' label.

    If you simply wish to assign new points to an existing clustering
    in the 'best' way possible, this is the function to use. If you
    want to predict how ``points_to_predict`` would cluster with
    the original data under HDBSCAN the most efficient existing approach
    is to simply recluster with the new point(s) added to the original dataset.

    Parameters
    ----------
    clusterer : HDBSCAN
        A clustering object that has been fit to the data and
        had ``prediction_data=True`` set.

    points_to_predict : array, or array-like (n_samples, n_features)
        The new data points to predict cluster labels for. They should
        have the same dimensionality as the original dataset over which
        clusterer was fit.

    convert_dtype : bool, optional (default = True)
        When True, automatically convert ``points_to_predict`` to float32
        instead of raising on a dtype mismatch.

    Returns
    -------
    labels : array (n_samples,)
        The predicted labels of the ``points_to_predict``

    probabilities : array (n_samples,)
        The soft cluster scores for each of the ``points_to_predict``
    """
    device_type = cuml.global_settings.device_type

    # cpu infer, cpu/gpu train
    if device_type == DeviceType.host:
        assert has_hdbscan(raise_if_unavailable=True)
        from hdbscan.prediction import approximate_predict \
            as cpu_approximate_predict

        # trained on gpu
        if not hasattr(clusterer, "_cpu_model"):
            # the reference HDBSCAN implementations uses @property
            # for attributes without setters available for them,
            # so they can't be transferred from the GPU model
            # to the CPU model
            raise ValueError("Inferring on CPU is not supported yet when the "
                             "model has been trained on GPU")

        host_points_to_predict = input_to_host_array(points_to_predict).array
        return cpu_approximate_predict(clusterer._cpu_model,
                                       host_points_to_predict)

    elif device_type == DeviceType.device:
        # trained on cpu: lazily build the GPU-side prediction state.
        if hasattr(clusterer, "_cpu_model"):
            clusterer._prep_cpu_to_gpu_prediction()

        if not clusterer.fit_called_:
            raise ValueError("The clusterer is not fit on data. "
                             "Please call clusterer.fit first")

        if not clusterer.prediction_data:
            raise ValueError("PredictionData not generated. "
                             "Please call clusterer.fit again with "
                             "prediction_data=True")

        # With zero clusters prediction still runs; every new point can
        # only come out as noise/outlier.
        if clusterer.n_clusters_ == 0:
            logger.warn(
                'Clusterer does not have any defined clusters, new data '
                'will be automatically predicted as outliers.'
            )

        _points_to_predict_m, n_prediction_points, n_cols, _ = \
            input_to_cuml_array(points_to_predict, order='C',
                                check_dtype=[np.float32],
                                convert_to_dtype=(np.float32
                                                  if convert_dtype
                                                  else None))

        if n_cols != clusterer.n_cols:
            raise ValueError('New points dimension does not match fit data!')

        cdef uintptr_t _prediction_ptr = _points_to_predict_m.ptr
        cdef uintptr_t _input_ptr = clusterer.X_m.ptr

        # Output buffers written by the C++ call below.
        prediction_labels = CumlArray.empty(
            (n_prediction_points,),
            dtype="int32")

        cdef uintptr_t _prediction_labels_ptr = prediction_labels.ptr

        prediction_probs = CumlArray.empty(
            (n_prediction_points,),
            dtype="float32")

        cdef uintptr_t _prediction_probs_ptr = prediction_probs.ptr

        # Training labels are an input to out-of-sample prediction.
        labels, _, _, _ = input_to_cuml_array(clusterer.labels_,
                                              order="C",
                                              convert_to_dtype=np.int32)
        cdef uintptr_t _labels_ptr = labels.ptr

        IF GPUBUILD == 1:
            cdef CondensedHierarchy[int, float] *condensed_tree = \
                <CondensedHierarchy[int, float]*><size_t> clusterer.condensed_tree_ptr

            cdef PredictionData *prediction_data_ = \
                <PredictionData*><size_t>clusterer.prediction_data_ptr

            cdef handle_t* handle_ = <handle_t*><size_t>clusterer.handle.getHandle()

            out_of_sample_predict(handle_[0],
                                  deref(condensed_tree),
                                  deref(prediction_data_),
                                  <float*> _input_ptr,
                                  <int*> _labels_ptr,
                                  <float*> _prediction_ptr,
                                  n_prediction_points,
                                  _metrics_mapping[clusterer.metric],
                                  clusterer.min_samples,
                                  <int*> _prediction_labels_ptr,
                                  <float*> _prediction_probs_ptr)

        clusterer.handle.sync()
        return prediction_labels.to_output(output_type="numpy"), \
            prediction_probs.to_output(output_type="numpy", output_dtype="float32")
| 0 |
rapidsai_public_repos/cuml/python/cuml/cluster | rapidsai_public_repos/cuml/python/cuml/cluster/hdbscan/__init__.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.cluster.hdbscan.hdbscan import HDBSCAN
from cuml.cluster.hdbscan.hdbscan import condense_hierarchy
from cuml.cluster.hdbscan.prediction import all_points_membership_vectors
from cuml.cluster.hdbscan.prediction import membership_vector
from cuml.cluster.hdbscan.prediction import approximate_predict
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/umap_utils.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from rmm._lib.memory_resource cimport DeviceMemoryResource
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from libcpp.memory cimport unique_ptr
from libc.stdint cimport uint64_t, uintptr_t, int64_t
from libcpp cimport bool
from libcpp.memory cimport shared_ptr
from cuml.metrics.distance_type cimport DistanceType
cdef extern from "cuml/manifold/umapparams.h" namespace "ML::UMAPParams":
    # Metric used for the supervised (target) part of UMAP; see the
    # target_metric field of UMAPParams below.
    enum MetricType:
        EUCLIDEAN = 0,
        CATEGORICAL = 1

cdef extern from "cuml/common/callback.hpp" namespace "ML::Internals":
    # Opaque forward declaration; only pointers to it are passed around.
    cdef cppclass GraphBasedDimRedCallback

cdef extern from "cuml/manifold/umapparams.h" namespace "ML":
    # Mirror of the C++ UMAPParams struct; fields correspond to the
    # hyperparameters of cuml.manifold.UMAP.
    cdef cppclass UMAPParams:
        int n_neighbors,
        int n_components,
        int n_epochs,
        float learning_rate,
        float min_dist,
        float spread,
        float set_op_mix_ratio,
        float local_connectivity,
        float repulsion_strength,
        int negative_sample_rate,
        float transform_queue_size,
        int verbosity,
        float a,
        float b,
        float initial_alpha,
        int init,
        int target_n_neighbors,
        MetricType target_metric,
        float target_weight,
        uint64_t random_state,
        bool deterministic,
        DistanceType metric,
        float p,
        GraphBasedDimRedCallback * callback

cdef extern from "raft/sparse/coo.hpp":
    # RAFT device COO matrix specialized for float values / int indices.
    cdef cppclass COO "raft::sparse::COO<float, int>":
        COO(cuda_stream_view stream)
        void allocate(int nnz, int size, bool init, cuda_stream_view stream)
        int nnz
        float* vals()
        int* rows()
        int* cols()

# Owning wrapper around a raft COO graph; implementation lives in
# umap_utils.pyx.
cdef class GraphHolder:
    cdef unique_ptr[COO] c_graph
    # Memory resource captured at construction so deallocation happens
    # through the same resource the graph was allocated with.
    cdef DeviceMemoryResource mr

    @staticmethod
    cdef GraphHolder new_graph(cuda_stream_view stream)

    @staticmethod
    cdef GraphHolder from_ptr(unique_ptr[COO]& ptr)

    @staticmethod
    cdef GraphHolder from_coo_array(graph, handle, coo_array)

    cdef COO* get(GraphHolder self)
    cdef uintptr_t vals(GraphHolder self)
    cdef uintptr_t rows(GraphHolder self)
    cdef uintptr_t cols(GraphHolder self)
    cdef uint64_t get_nnz(GraphHolder self)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/umap_utils.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from rmm._lib.memory_resource cimport get_current_device_resource
from pylibraft.common.handle cimport handle_t
from cuml.manifold.umap_utils cimport *
from cuml.metrics.distance_type cimport DistanceType
from libcpp.utility cimport move
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
cdef class GraphHolder:
    """
    Owns a raft COO graph (float values / int row-col indices) allocated
    on device and exposes zero-copy access to its buffers from Python.
    """

    @staticmethod
    cdef GraphHolder new_graph(cuda_stream_view stream):
        # Allocate an empty COO on `stream`; capture the current memory
        # resource so it outlives the graph for deallocation.
        cdef GraphHolder graph = GraphHolder.__new__(GraphHolder)
        graph.c_graph.reset(new COO(stream))
        graph.mr = get_current_device_resource()
        return graph

    @staticmethod
    cdef GraphHolder from_ptr(unique_ptr[COO]& ptr):
        # Take ownership of an existing COO via move.
        # NOTE(review): unlike the other factories, `mr` is not set here —
        # confirm whether callers guarantee the resource stays alive.
        cdef GraphHolder graph = GraphHolder.__new__(GraphHolder)
        graph.c_graph = move(ptr)
        return graph

    @staticmethod
    cdef GraphHolder from_coo_array(GraphHolder graph, handle, coo_array):
        # Copy a host-API COO object (cupy-style: .data/.row/.col/.nnz)
        # into a freshly allocated device COO owned by `graph`.
        def copy_from_array(dst_raft_coo_ptr, src_cp_coo):
            # Raw device-to-device copy of `size` elements. Both ends are
            # wrapped as unowned memory so no lifetime is transferred.
            size = src_cp_coo.size
            itemsize = np.dtype(src_cp_coo.dtype).itemsize
            dest_buff = cp.cuda.UnownedMemory(ptr=dst_raft_coo_ptr,
                                              size=size * itemsize,
                                              owner=None,
                                              device_id=-1)
            dest_mptr = cp.cuda.memory.MemoryPointer(dest_buff, 0)
            src_buff = cp.cuda.UnownedMemory(ptr=src_cp_coo.data.ptr,
                                             size=size * itemsize,
                                             owner=None,
                                             device_id=-1)
            src_mptr = cp.cuda.memory.MemoryPointer(src_buff, 0)
            dest_mptr.copy_from_device(src_mptr, size * itemsize)

        cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
        graph.c_graph.reset(new COO(handle_.get_stream()))
        graph.get().allocate(coo_array.nnz,
                             coo_array.shape[0],
                             False,
                             handle_.get_stream())
        # Ensure the allocation is complete before issuing the copies.
        handle_.sync_stream()

        copy_from_array(graph.vals(), coo_array.data.astype('float32'))
        copy_from_array(graph.rows(), coo_array.row.astype('int32'))
        copy_from_array(graph.cols(), coo_array.col.astype('int32'))

        graph.mr = get_current_device_resource()
        return graph

    cdef inline COO* get(self):
        # Non-owning pointer to the underlying COO.
        return self.c_graph.get()

    cdef uintptr_t vals(self):
        return <uintptr_t>self.get().vals()

    cdef uintptr_t rows(self):
        return <uintptr_t>self.get().rows()

    cdef uintptr_t cols(self):
        return <uintptr_t>self.get().cols()

    cdef uint64_t get_nnz(self):
        return self.get().nnz

    def get_cupy_coo(self):
        """Return a cupy COO matrix viewing this graph's device buffers
        (zero-copy; ``self`` is kept alive as the memory owner)."""
        def create_nonowning_cp_array(ptr, dtype):
            mem = cp.cuda.UnownedMemory(ptr=ptr,
                                        size=(self.get_nnz() *
                                              np.dtype(dtype).itemsize),
                                        owner=self,
                                        device_id=-1)
            memptr = cp.cuda.memory.MemoryPointer(mem, 0)
            return cp.ndarray(self.get_nnz(), dtype=dtype, memptr=memptr)

        vals = create_nonowning_cp_array(self.vals(), np.float32)
        rows = create_nonowning_cp_array(self.rows(), np.int32)
        cols = create_nonowning_cp_array(self.cols(), np.int32)

        return cp.sparse.coo_matrix(((vals, (rows, cols))))

    def __dealloc__(self):
        # Destroy the held COO; unique_ptr.reset(NULL) deletes the object.
        self.c_graph.reset(NULL)
def find_ab_params(spread, min_dist):
    """Fit the ``a``/``b`` coefficients of UMAP's embedding curve.

    Function taken from UMAP-learn : https://github.com/lmcinnes/umap

    Fit a, b params for the differentiable curve used in lower
    dimensional fuzzy simplicial complex construction. We want the
    smooth curve (from a pre-defined family with simple gradient) that
    best matches an offset exponential decay.

    Parameters
    ----------
    spread : float
        The effective scale of embedded points.
    min_dist : float
        The effective minimum distance between embedded points.

    Returns
    -------
    (a, b) : tuple of float
        Coefficients fitted by non-linear least squares.

    Raises
    ------
    RuntimeError
        If SciPy is not available.
    """
    def curve(x, a, b):
        # Family of curves: 1 / (1 + a * x^(2b)).
        return 1.0 / (1.0 + a * x ** (2 * b))

    # EAFP: import SciPy directly instead of probing via the has_scipy()
    # helper and importing afterwards (one code path, same RuntimeError).
    try:
        from scipy.optimize import curve_fit
    except ImportError:
        raise RuntimeError('Scipy is needed to run find_ab_params') from None

    # Target shape: 1.0 below min_dist, exponential decay beyond it.
    xv = np.linspace(0, spread * 3, 300)
    yv = np.zeros(xv.shape)
    yv[xv < min_dist] = 1.0
    yv[xv >= min_dist] = np.exp(-(xv[xv >= min_dist] - min_dist) / spread)
    params, _ = curve_fit(curve, xv, yv)
    return params[0], params[1]
# Maps user-facing metric names (lower-cased before lookup) to raft
# DistanceType enum values. Several names are aliases for the same
# distance (e.g. "l2"/"euclidean", "cityblock"/"l1"/"manhattan"/"taxicab").
metric_parsing = {
    "l2": DistanceType.L2SqrtUnexpanded,
    "euclidean": DistanceType.L2SqrtUnexpanded,
    "sqeuclidean": DistanceType.L2Unexpanded,
    "cityblock": DistanceType.L1,
    "l1": DistanceType.L1,
    "manhattan": DistanceType.L1,
    "taxicab": DistanceType.L1,
    "minkowski": DistanceType.LpUnexpanded,
    "chebyshev": DistanceType.Linf,
    "linf": DistanceType.Linf,
    "cosine": DistanceType.CosineExpanded,
    "correlation": DistanceType.CorrelationExpanded,
    "hellinger": DistanceType.HellingerExpanded,
    "hamming": DistanceType.HammingUnexpanded,
    "jaccard": DistanceType.JaccardExpanded,
    "canberra": DistanceType.Canberra
}
# Distance types accepted for dense inputs. Jaccard is deliberately
# excluded (see inline comment): it is only implemented for sparse data.
DENSE_SUPPORTED_METRICS = [
    DistanceType.Canberra,
    DistanceType.CorrelationExpanded,
    DistanceType.CosineExpanded,
    DistanceType.HammingUnexpanded,
    DistanceType.HellingerExpanded,
    # DistanceType.JaccardExpanded, # not supported
    DistanceType.L1,
    DistanceType.L2SqrtUnexpanded,
    DistanceType.L2Unexpanded,
    DistanceType.Linf,
    DistanceType.LpUnexpanded,
]
# Distance types accepted for sparse inputs: the dense set plus Jaccard.
SPARSE_SUPPORTED_METRICS = [
    DistanceType.Canberra,
    DistanceType.CorrelationExpanded,
    DistanceType.CosineExpanded,
    DistanceType.HammingUnexpanded,
    DistanceType.HellingerExpanded,
    DistanceType.JaccardExpanded,
    DistanceType.L1,
    DistanceType.L2SqrtUnexpanded,
    DistanceType.L2Unexpanded,
    DistanceType.Linf,
    DistanceType.LpUnexpanded,
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources for the manifold package. The list starts empty and is
# presumably appended to by add_module_gpu_default() depending on which
# algorithm options are enabled — helper defined elsewhere; confirm there.
set(cython_sources "")
add_module_gpu_default("simpl_set.pyx" ${simpl_set_algo} ${manifold_algo})
add_module_gpu_default("t_sne.pyx" ${t_sne_algo} ${manifold_algo})
add_module_gpu_default("umap.pyx" ${umap_algo} ${manifold_algo})
add_module_gpu_default("umap_utils.pyx")

# Build one extension module per collected source, linked against the
# single-GPU cuML libraries, named with the "manifold_" prefix.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX manifold_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/umap.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
pd = cpu_only_import('pandas')
import joblib
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import('cupy')
cupyx = gpu_only_import('cupyx')
from cuml.common.sparsefuncs import extract_knn_infos
from cuml.internals.safe_imports import gpu_only_import_from
cp_csr_matrix = gpu_only_import_from('cupyx.scipy.sparse', 'csr_matrix')
cp_coo_matrix = gpu_only_import_from('cupyx.scipy.sparse', 'coo_matrix')
cp_csc_matrix = gpu_only_import_from('cupyx.scipy.sparse', 'csc_matrix')
import cuml.internals
from cuml.internals.base import UniversalBase
from cuml.common.doc_utils import generate_docstring
from cuml.internals import logger
from cuml.internals.available_devices import is_cuda_available
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.common.sparse_utils import is_sparse
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t
if is_cuda_available():
from cuml.manifold.simpl_set import fuzzy_simplicial_set # no-cython-lint
from cuml.manifold.simpl_set import simplicial_set_embedding # no-cython-lint
# TODO: These two symbols are considered part of the public API of this module
# which is why imports should not be removed. The no-cython-lint markers can be
# replaced with an explicit __all__ specifications once
# https://github.com/MarcoGorelli/cython-lint/issues/80 is resolved.
else:
# if no GPU is present, we import the UMAP equivalents
from umap.umap_ import fuzzy_simplicial_set # no-cython-lint
from umap.umap_ import simplicial_set_embedding # no-cython-lint
IF GPUBUILD == 1:
from libc.stdlib cimport free
from cuml.manifold.umap_utils cimport *
from pylibraft.common.handle cimport handle_t
from cuml.manifold.umap_utils import GraphHolder, find_ab_params, \
metric_parsing, DENSE_SUPPORTED_METRICS, SPARSE_SUPPORTED_METRICS
from cuml.manifold.simpl_set import fuzzy_simplicial_set, \
simplicial_set_embedding
cdef extern from "cuml/manifold/umap.hpp" namespace "ML::UMAP":
void fit(handle_t & handle,
float * X,
float * y,
int n,
int d,
int64_t * knn_indices,
float * knn_dists,
UMAPParams * params,
float * embeddings,
COO * graph) except +
void fit_sparse(handle_t &handle,
int *indptr,
int *indices,
float *data,
size_t nnz,
float *y,
int n,
int d,
int * knn_indices,
float * knn_dists,
UMAPParams *params,
float *embeddings,
COO * graph) except +
void transform(handle_t & handle,
float * X,
int n,
int d,
float * orig_X,
int orig_n,
float * embedding,
int embedding_n,
UMAPParams * params,
float * out) except +
void transform_sparse(handle_t &handle,
int *indptr,
int *indices,
float *data,
size_t nnz,
int n,
int d,
int *orig_x_indptr,
int *orig_x_indices,
float *orig_x_data,
size_t orig_nnz,
int orig_n,
float *embedding,
int embedding_n,
UMAPParams *params,
float *transformed) except +
class UMAP(UniversalBase,
CMajorInputTagMixin):
"""
Uniform Manifold Approximation and Projection
Finds a low dimensional embedding of the data that approximates
an underlying manifold.
Adapted from https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
The UMAP algorithm is outlined in [1]. This implementation follows the
GPU-accelerated version as described in [2].
Parameters
----------
n_neighbors: float (optional, default 15)
The size of local neighborhood (in terms of number of neighboring
sample points) used for manifold approximation. Larger values
result in more global views of the manifold, while smaller
values result in more local data being preserved. In general
values should be in the range 2 to 100.
n_components: int (optional, default 2)
The dimension of the space to embed into. This defaults to 2 to
        provide easy visualization, but can reasonably be set to any
        integer value in the range 2 to 100.
metric: string (default='euclidean').
Distance metric to use. Supported distances are ['l1, 'cityblock',
'taxicab', 'manhattan', 'euclidean', 'l2', 'sqeuclidean', 'canberra',
'minkowski', 'chebyshev', 'linf', 'cosine', 'correlation', 'hellinger',
'hamming', 'jaccard']
Metrics that take arguments (such as minkowski) can have arguments
passed via the metric_kwds dictionary.
Note: The 'jaccard' distance metric is only supported for sparse
inputs.
metric_kwds: dict (optional, default=None)
Metric argument
n_epochs: int (optional, default None)
The number of training epochs to be used in optimizing the
low dimensional embedding. Larger values result in more accurate
embeddings. If None is specified a value will be selected based on
the size of the input dataset (200 for large datasets, 500 for small).
learning_rate: float (optional, default 1.0)
The initial learning rate for the embedding optimization.
init: string (optional, default 'spectral')
How to initialize the low dimensional embedding. Options are:
* 'spectral': use a spectral embedding of the fuzzy 1-skeleton
* 'random': assign initial embedding positions at random.
min_dist: float (optional, default 0.1)
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points
on the manifold are drawn closer together, while larger values will
result on a more even dispersal of points. The value should be set
relative to the ``spread`` value, which determines the scale at which
embedded points will be spread out.
spread: float (optional, default 1.0)
The effective scale of embedded points. In combination with
``min_dist`` this determines how clustered/clumped the embedded
points are.
set_op_mix_ratio: float (optional, default 1.0)
Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy
simplicial sets. Both fuzzy set operations use the product t-norm.
The value of this parameter should be between 0.0 and 1.0; a value of
1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
intersection.
local_connectivity: int (optional, default 1)
The local connectivity required -- i.e. the number of nearest
neighbors that should be assumed to be connected at a local level.
The higher this value the more connected the manifold becomes
locally. In practice this should be not more than the local intrinsic
dimension of the manifold.
repulsion_strength: float (optional, default 1.0)
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate: int (optional, default 5)
The number of negative samples to select per positive sample
in the optimization process. Increasing this value will result
in greater repulsive force being applied, greater optimization
cost, but slightly more accuracy.
transform_queue_size: float (optional, default 4.0)
For transform operations (embedding new points using a trained model
this will control how aggressively to search for nearest neighbors.
Larger values will result in slower performance but more accurate
nearest neighbor evaluation.
a: float (optional, default None)
More specific parameters controlling the embedding. If None these
values are set automatically as determined by ``min_dist`` and
``spread``.
b: float (optional, default None)
More specific parameters controlling the embedding. If None these
values are set automatically as determined by ``min_dist`` and
``spread``.
hash_input: bool, optional (default = False)
UMAP can hash the training input so that exact embeddings
are returned when transform is called on the same data upon
which the model was trained. This enables consistent
behavior between calling ``model.fit_transform(X)`` and
        calling ``model.fit(X).transform(X)``. Note that the CPU-based
UMAP reference implementation does this by default. This
feature is made optional in the GPU version due to the
significant overhead in copying memory to the host for
computing the hash.
precomputed_knn : array / sparse array / tuple, optional (device or host)
Either one of a tuple (indices, distances) of
arrays of shape (n_samples, n_neighbors), a pairwise distances
dense array of shape (n_samples, n_samples) or a KNN graph
sparse array (preferably CSR/COO). This feature allows
the precomputation of the KNN outside of UMAP
and also allows the use of a custom distance function. This function
        should match the metric used to train the UMAP embeddings.
random_state : int, RandomState instance or None, optional (default=None)
random_state is the seed used by the random number generator during
embedding initialization and during sampling used by the optimizer.
Note: Unfortunately, achieving a high amount of parallelism during
the optimization stage often comes at the expense of determinism,
since many floating-point additions are being made in parallel
without a deterministic ordering. This causes slightly different
results across training sessions, even when the same seed is used
for random number generation. Setting a random_state will enable
consistency of trained embeddings, allowing for reproducible results
to 3 digits of precision, but will do so at the expense of potentially
slower training and increased memory usage.
callback: An instance of GraphBasedDimRedCallback class
Used to intercept the internal state of embeddings while they are being
trained. Example of callback usage:
.. code-block:: python
from cuml.internals import GraphBasedDimRedCallback
class CustomCallback(GraphBasedDimRedCallback):
def on_preprocess_end(self, embeddings):
print(embeddings.copy_to_host())
def on_epoch_end(self, embeddings):
print(embeddings.copy_to_host())
def on_train_end(self, embeddings):
print(embeddings.copy_to_host())
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Notes
-----
This module is heavily based on Leland McInnes' reference UMAP package.
However, there are a number of differences and features that are not yet
implemented in `cuml.umap`:
* Using a pre-computed pairwise distance matrix (under consideration
for future releases)
* Manual initialization of initial embedding positions
In addition to these missing features, you should expect to see
the final embeddings differing between cuml.umap and the reference
UMAP. In particular, the reference UMAP uses an approximate kNN
algorithm for large data sizes while cuml.umap always uses exact
kNN.
References
----------
.. [1] `Leland McInnes, John Healy, James Melville
UMAP: Uniform Manifold Approximation and Projection for Dimension
Reduction <https://arxiv.org/abs/1802.03426>`_
.. [2] `Corey Nolet, Victor Lafargue, Edward Raff, Thejaswi Nanditale,
Tim Oates, John Zedlewski, Joshua Patterson
Bringing UMAP Closer to the Speed of Light with GPU Acceleration
<https://arxiv.org/abs/2008.00325>`_
"""
_cpu_estimator_import_path = 'umap.UMAP'
embedding_ = CumlArrayDescriptor(order='C')
@device_interop_preparation
def __init__(self, *,
n_neighbors=15,
n_components=2,
metric="euclidean",
metric_kwds=None,
n_epochs=None,
learning_rate=1.0,
min_dist=0.1,
spread=1.0,
set_op_mix_ratio=1.0,
local_connectivity=1.0,
repulsion_strength=1.0,
negative_sample_rate=5,
transform_queue_size=4.0,
init="spectral",
a=None,
b=None,
target_n_neighbors=-1,
target_weight=0.5,
target_metric="categorical",
hash_input=False,
random_state=None,
precomputed_knn=None,
callback=None,
handle=None,
verbose=False,
output_type=None):
super().__init__(handle=handle,
verbose=verbose,
output_type=output_type)
self.hash_input = hash_input
self.n_neighbors = n_neighbors
self.n_components = n_components
self.metric = metric
self.metric_kwds = metric_kwds
self.n_epochs = n_epochs
if init == "spectral" or init == "random":
self.init = init
else:
raise Exception("Initialization strategy not supported: %d" % init)
if a is None or b is None:
a, b = type(self).find_ab_params(spread, min_dist)
self.a = a
self.b = b
self.learning_rate = learning_rate
self.min_dist = min_dist
self.spread = spread
self.set_op_mix_ratio = set_op_mix_ratio
self.local_connectivity = local_connectivity
self.repulsion_strength = repulsion_strength
self.negative_sample_rate = negative_sample_rate
self.transform_queue_size = transform_queue_size
self.target_n_neighbors = target_n_neighbors
self.target_weight = target_weight
self.deterministic = random_state is not None
# Check to see if we are already a random_state (type==np.uint64).
# Reuse this if already passed (can happen from get_params() of another
# instance)
if isinstance(random_state, np.uint64):
self.random_state = random_state
else:
# Otherwise create a RandomState instance to generate a new
# np.uint64
if isinstance(random_state, np.random.RandomState):
rs = random_state
else:
rs = np.random.RandomState(random_state)
self.random_state = rs.randint(low=0,
high=np.iinfo(np.uint32).max,
dtype=np.uint32)
if target_metric == "euclidean" or target_metric == "categorical":
self.target_metric = target_metric
else:
raise Exception("Invalid target metric: {}" % target_metric)
self.callback = callback # prevent callback destruction
self.embedding_ = None
self.validate_hyperparams()
self.sparse_fit = False
self._input_hash = None
self._small_data = False
self.precomputed_knn = extract_knn_infos(precomputed_knn,
n_neighbors)
def validate_hyperparams(self):
if self.min_dist > self.spread:
raise ValueError("min_dist should be <= spread")
    @staticmethod
    def _build_umap_params(cls, sparse):
        # Translate the estimator's hyperparameters into a heap-allocated
        # C++ UMAPParams struct and return the raw pointer as a Python
        # int. The caller owns the pointer and must release it via
        # _destroy_umap_params().
        IF GPUBUILD == 1:
            cdef UMAPParams* umap_params = new UMAPParams()
            umap_params.n_neighbors = <int> cls.n_neighbors
            umap_params.n_components = <int> cls.n_components
            # 0 lets the C++ layer choose the epoch count automatically.
            umap_params.n_epochs = <int> cls.n_epochs if cls.n_epochs else 0
            umap_params.learning_rate = <float> cls.learning_rate
            umap_params.min_dist = <float> cls.min_dist
            umap_params.spread = <float> cls.spread
            umap_params.set_op_mix_ratio = <float> cls.set_op_mix_ratio
            umap_params.local_connectivity = <float> cls.local_connectivity
            umap_params.repulsion_strength = <float> cls.repulsion_strength
            umap_params.negative_sample_rate = <int> cls.negative_sample_rate
            umap_params.transform_queue_size = <int> cls.transform_queue_size
            umap_params.verbosity = <int> cls.verbose
            umap_params.a = <float> cls.a
            umap_params.b = <float> cls.b
            # init: 1 = spectral, 0 = random (validated in __init__).
            if cls.init == "spectral":
                umap_params.init = <int> 1
            else:  # self.init == "random"
                umap_params.init = <int> 0
            umap_params.target_n_neighbors = <int> cls.target_n_neighbors
            if cls.target_metric == "euclidean":
                umap_params.target_metric = MetricType.EUCLIDEAN
            else:  # self.target_metric == "categorical"
                umap_params.target_metric = MetricType.CATEGORICAL
            umap_params.target_weight = <float> cls.target_weight
            umap_params.random_state = <uint64_t> cls.random_state
            umap_params.deterministic = <bool> cls.deterministic
            try:
                # Metric support differs between dense and sparse layouts.
                umap_params.metric = metric_parsing[cls.metric.lower()]
                if sparse:
                    if umap_params.metric not in SPARSE_SUPPORTED_METRICS:
                        raise NotImplementedError(f"Metric '{cls.metric}' not supported for sparse inputs.")
                elif umap_params.metric not in DENSE_SUPPORTED_METRICS:
                    raise NotImplementedError(f"Metric '{cls.metric}' not supported for dense inputs.")
            except KeyError:
                raise ValueError(f"Invalid value for metric: {cls.metric}")

            # Minkowski exponent; defaults to 2 when no kwds are given.
            if cls.metric_kwds is None:
                umap_params.p = <float> 2.0
            else:
                # NOTE(review): assumes metric_kwds contains a 'p' entry —
                # .get('p') returning None would break the <float> cast;
                # confirm callers always provide 'p' when passing kwds.
                umap_params.p = <float>cls.metric_kwds.get('p')

            cdef uintptr_t callback_ptr = 0
            if cls.callback:
                callback_ptr = cls.callback.get_native_callback()
                umap_params.callback = <GraphBasedDimRedCallback*>callback_ptr

            return <size_t>umap_params
@staticmethod
def _destroy_umap_params(ptr):
IF GPUBUILD == 1:
cdef UMAPParams* umap_params = <UMAPParams*> <size_t> ptr
free(umap_params)
    @staticmethod
    def find_ab_params(spread, min_dist):
        # Delegate to the curve-fitting helper from umap_utils.
        # NOTE(review): on non-GPU builds (GPUBUILD != 1) this falls
        # through and returns None — confirm that is intended.
        IF GPUBUILD == 1:
            return find_ab_params(spread, min_dist)
    @generate_docstring(convert_dtype_cast='np.float32',
                        X='dense_sparse',
                        skip_parameters_heading=True)
    @enable_device_interop
    def fit(self, X, y=None, convert_dtype=True,
            knn_graph=None) -> "UMAP":
        """
        Fit X into an embedded space.

        Parameters
        ----------
        knn_graph : array / sparse array / tuple, optional (device or host)
            Either one of a tuple (indices, distances) of
            arrays of shape (n_samples, n_neighbors), a pairwise distances
            dense array of shape (n_samples, n_samples) or a KNN graph
            sparse array (preferably CSR/COO). This feature allows
            the precomputation of the KNN outside of UMAP
            and also allows the use of a custom distance function. This function
            should match the metric used to train the UMAP embeddings.
            Takes precedence over the precomputed_knn parameter.
        """
        if len(X.shape) != 2:
            raise ValueError("data should be two dimensional")

        if y is not None and knn_graph is not None\
                and self.target_metric != "categorical":
            raise ValueError("Cannot provide a KNN graph when in \
                semi-supervised mode with categorical target_metric for now.")

        # Handle sparse inputs: keep raw data as a SparseCumlArray.
        if is_sparse(X):
            self._raw_data = SparseCumlArray(X, convert_to_dtype=cupy.float32,
                                             convert_format=False)
            self.n_rows, self.n_dims = self._raw_data.shape
            self.sparse_fit = True

        # Handle dense inputs: coerce to C-order float32.
        else:
            self._raw_data, self.n_rows, self.n_dims, _ = \
                input_to_cuml_array(X, order='C', check_dtype=np.float32,
                                    convert_to_dtype=(np.float32
                                                      if convert_dtype
                                                      else None))

        if self.n_rows <= 1:
            raise ValueError("There needs to be more than 1 sample to "
                             "build nearest the neighbors graph")

        # Resolve a precomputed KNN, if any; knn_graph takes precedence
        # over the precomputed_knn constructor argument.
        cdef uintptr_t _knn_dists_ptr = 0
        cdef uintptr_t _knn_indices_ptr = 0
        if knn_graph is not None or self.precomputed_knn is not None:
            if knn_graph is not None:
                knn_indices, knn_dists = extract_knn_infos(knn_graph,
                                                           self.n_neighbors)
            elif self.precomputed_knn is not None:
                knn_indices, knn_dists = self.precomputed_knn

            # The sparse code path expects int32 indices (dense uses int64).
            if self.sparse_fit:
                knn_indices, _, _, _ = \
                    input_to_cuml_array(knn_indices, convert_to_dtype=np.int32)

            _knn_dists_ptr = knn_dists.ptr
            _knn_indices_ptr = knn_indices.ptr

        # Can't ask for more neighbors than there are samples.
        self.n_neighbors = min(self.n_rows, self.n_neighbors)

        # Output embedding, filled in by the C++ fit below.
        self.embedding_ = CumlArray.zeros((self.n_rows,
                                           self.n_components),
                                          order="C", dtype=np.float32,
                                          index=self._raw_data.index)

        # Optional hash of the training data so transform() can detect
        # "same data" and return the training embedding directly.
        if self.hash_input:
            self._input_hash = joblib.hash(self._raw_data.to_output('numpy'))

        cdef uintptr_t _embed_raw_ptr = self.embedding_.ptr

        cdef uintptr_t _y_raw_ptr = 0

        # Optional supervision target.
        if y is not None:
            y_m, _, _, _ = \
                input_to_cuml_array(y, check_dtype=np.float32,
                                    convert_to_dtype=(np.float32
                                                      if convert_dtype
                                                      else None))
            _y_raw_ptr = y_m.ptr

        IF GPUBUILD == 1:
            cdef handle_t * handle_ = \
                <handle_t*> <size_t> self.handle.getHandle()
            # Holds the fuzzy simplicial set produced by the C++ layer.
            fss_graph = GraphHolder.new_graph(handle_.get_stream())
            cdef UMAPParams* umap_params = \
                <UMAPParams*> <size_t> UMAP._build_umap_params(self,
                                                               self.sparse_fit)
            if self.sparse_fit:
                fit_sparse(handle_[0],
                           <int*><uintptr_t> self._raw_data.indptr.ptr,
                           <int*><uintptr_t> self._raw_data.indices.ptr,
                           <float*><uintptr_t> self._raw_data.data.ptr,
                           <size_t> self._raw_data.nnz,
                           <float*> _y_raw_ptr,
                           <int> self.n_rows,
                           <int> self.n_dims,
                           <int*> _knn_indices_ptr,
                           <float*> _knn_dists_ptr,
                           <UMAPParams*> umap_params,
                           <float*> _embed_raw_ptr,
                           <COO*> fss_graph.get())

            else:
                fit(handle_[0],
                    <float*><uintptr_t> self._raw_data.ptr,
                    <float*> _y_raw_ptr,
                    <int> self.n_rows,
                    <int> self.n_dims,
                    <int64_t*> _knn_indices_ptr,
                    <float*> _knn_dists_ptr,
                    <UMAPParams*>umap_params,
                    <float*>_embed_raw_ptr,
                    <COO*> fss_graph.get())

            # Expose the fuzzy simplicial set as a CuPy COO matrix.
            self.graph_ = fss_graph.get_cupy_coo()

            self.handle.sync()

            UMAP._destroy_umap_params(<size_t>umap_params)

        return self
    @generate_docstring(convert_dtype_cast='np.float32',
                        skip_parameters_heading=True,
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Embedding of the \
                                                       data in \
                                                       low-dimensional space.',
                                       'shape': '(n_samples, n_components)'})
    @cuml.internals.api_base_fit_transform()
    @enable_device_interop
    def fit_transform(self, X, y=None, convert_dtype=True,
                      knn_graph=None) -> CumlArray:
        """
        Fit X into an embedded space and return that transformed
        output.

        There is a subtle difference between calling fit_transform(X)
        and calling fit().transform(). Calling fit_transform(X) will
        train the embeddings on X and return the embeddings. Calling
        fit(X).transform(X) will train the embeddings on X and then
        run a second optimization.

        Parameters
        ----------
        knn_graph : sparse array-like (device or host)
            shape=(n_samples, n_samples)
            A sparse array containing the k-nearest neighbors of X,
            where the columns are the nearest neighbor indices
            for each row and the values are their distances.
            It's important that `k>=n_neighbors`,
            so that UMAP can model the neighbors from this graph,
            instead of building its own internally.
            Users using the knn_graph parameter provide UMAP
            with their own run of the KNN algorithm. This allows the user
            to pick a custom distance function (sometimes useful
            on certain datasets) whereas UMAP uses euclidean by default.
            The custom distance function should match the metric used
            to train UMAP embeddings. Storing and reusing a knn_graph
            will also provide a speedup to the UMAP algorithm
            when performing a grid search.
            Acceptable formats: sparse SciPy ndarray, CuPy device ndarray,
            CSR/COO preferred other formats will go through conversion to CSR
        """
        # fit() stores the embedding on self; simply return it.
        self.fit(X, y, convert_dtype=convert_dtype, knn_graph=knn_graph)

        return self.embedding_
    @generate_docstring(convert_dtype_cast='np.float32',
                        skip_parameters_heading=True,
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Embedding of the \
                                                       data in \
                                                       low-dimensional space.',
                                       'shape': '(n_samples, n_components)'})
    @enable_device_interop
    def transform(self, X, convert_dtype=True) -> CumlArray:
        """
        Transform X into the existing embedded space and return that
        transformed output.

        Please refer to the reference UMAP implementation for information
        on the differences between fit_transform() and running fit()
        transform().

        Specifically, the transform() function is stochastic:
        https://github.com/lmcinnes/umap/issues/158
        """
        if len(X.shape) != 2:
            raise ValueError("X should be two dimensional")

        # Coerce the input's sparsity to match the training data's,
        # warning on the (expensive) conversion either way.
        if is_sparse(X) and not self.sparse_fit:
            logger.warn("Model was trained on dense data but sparse "
                        "data was provided to transform(). Converting "
                        "to dense.")
            X = X.todense()
        elif not is_sparse(X) and self.sparse_fit:
            logger.warn("Model was trained on sparse data but dense "
                        "data was provided to transform(). Converting "
                        "to sparse.")
            X = cupyx.scipy.sparse.csr_matrix(X)

        if is_sparse(X):
            X_m = SparseCumlArray(X, convert_to_dtype=cupy.float32,
                                  convert_format=False)
            index = None
        else:
            X_m, n_rows, n_cols, _ = \
                input_to_cuml_array(X, order='C', check_dtype=np.float32,
                                    convert_to_dtype=(np.float32
                                                      if convert_dtype
                                                      else None))
            index = X_m.index
        n_rows = X_m.shape[0]
        n_cols = X_m.shape[1]

        if n_cols != self._raw_data.shape[1]:
            raise ValueError("n_features of X must match n_features of "
                             "training data")

        # Short-circuit: identical input to the training data returns the
        # training embedding directly (only when hashing was enabled).
        if self.hash_input:
            if joblib.hash(X_m.to_output('numpy')) == self._input_hash:
                del X_m
                return self.embedding_

        # Output buffer, filled in by the C++ transform below.
        embedding = CumlArray.zeros((n_rows, self.n_components),
                                    order="C", dtype=np.float32,
                                    index=index)
        cdef uintptr_t _xformed_ptr = embedding.ptr

        cdef uintptr_t _embed_ptr = self.embedding_.ptr

        IF GPUBUILD == 1:
            cdef UMAPParams* umap_params = \
                <UMAPParams*> <size_t> UMAP._build_umap_params(self,
                                                               self.sparse_fit)
            cdef handle_t * handle_ = \
                <handle_t*> <size_t> self.handle.getHandle()
            if self.sparse_fit:
                transform_sparse(handle_[0],
                                 <int*><uintptr_t> X_m.indptr.ptr,
                                 <int*><uintptr_t> X_m.indices.ptr,
                                 <float*><uintptr_t> X_m.data.ptr,
                                 <size_t> X_m.nnz,
                                 <int> X_m.shape[0],
                                 <int> X_m.shape[1],
                                 <int*><uintptr_t> self._raw_data.indptr.ptr,
                                 <int*><uintptr_t> self._raw_data.indices.ptr,
                                 <float*><uintptr_t> self._raw_data.data.ptr,
                                 <size_t> self._raw_data.nnz,
                                 <int> self._raw_data.shape[0],
                                 <float*> _embed_ptr,
                                 <int> self._raw_data.shape[0],
                                 <UMAPParams*> umap_params,
                                 <float*> _xformed_ptr)
            else:
                transform(handle_[0],
                          <float*><uintptr_t> X_m.ptr,
                          <int> n_rows,
                          <int> n_cols,
                          <float*><uintptr_t>self._raw_data.ptr,
                          <int> self._raw_data.shape[0],
                          <float*> _embed_ptr,
                          <int> n_rows,
                          <UMAPParams*> umap_params,
                          <float*> _xformed_ptr)
            self.handle.sync()

            UMAP._destroy_umap_params(<size_t>umap_params)

        del X_m
        return embedding
def get_param_names(self):
return super().get_param_names() + [
"n_neighbors",
"n_components",
"n_epochs",
"learning_rate",
"min_dist",
"spread",
"set_op_mix_ratio",
"local_connectivity",
"repulsion_strength",
"negative_sample_rate",
"transform_queue_size",
"init",
"a",
"b",
"target_n_neighbors",
"target_weight",
"target_metric",
"hash_input",
"random_state",
"callback",
"metric",
"metric_kwds",
"precomputed_knn"
]
def get_attr_names(self):
return ['_raw_data', 'embedding_', '_input_hash', '_small_data']
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.available_devices import is_cuda_available
from cuml.manifold.umap import UMAP
if is_cuda_available():
from cuml.manifold.t_sne import TSNE
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/t_sne.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
# distutils: extra_compile_args = -Ofast
# cython: boundscheck = False
# cython: wraparound = False
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
pd = cpu_only_import('pandas')
import warnings
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import('cupy')
import cuml.internals
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from pylibraft.common.handle cimport handle_t
import cuml.internals.logger as logger
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.common.sparse_utils import is_sparse
from cuml.common.doc_utils import generate_docstring
from cuml.common import input_to_cuml_array
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.common.sparsefuncs import extract_knn_infos
from cuml.metrics.distance_type cimport DistanceType
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from libc.stdint cimport int64_t
from libc.stdlib cimport free
from cython.operator cimport dereference as deref
cimport cuml.common.cuda
# C++ declarations mirrored from cuml/cpp/include/cuml/manifold/tsne.h.
cdef extern from "cuml/manifold/tsne.h" namespace "ML":
    # Solver selection; values must stay in sync with the C++ enum.
    enum TSNE_ALGORITHM:
        EXACT = 0,
        BARNES_HUT = 1,
        FFT = 2
    # Parameter struct passed to the C++ fit routines. Every field is
    # populated one-to-one from the estimator's attributes in
    # TSNE._build_tsne_params() below.
    cdef cppclass TSNEParams:
        int dim,
        int n_neighbors,
        float theta,
        float epssq,
        float perplexity,
        int perplexity_max_iter,
        float perplexity_tol,
        float early_exaggeration,
        float late_exaggeration,
        int exaggeration_iter,
        float min_gain,
        float pre_learning_rate,
        float post_learning_rate,
        int max_iter,
        float min_grad_norm,
        float pre_momentum,
        float post_momentum,
        long long random_state,
        int verbosity,
        bool initialize_embeddings,
        bool square_distances,
        DistanceType metric,
        float p,
        TSNE_ALGORITHM algorithm
# libcuml++ fit entry points. Both write the embedding into Y (device
# pointer, shape (n, params.dim)) and the final KL divergence into kl_div.
# The knn_indices/knn_dists pointers may be 0 (NULL), in which case the
# C++ side computes the kNN graph itself (see TSNE.fit below).
cdef extern from "cuml/manifold/tsne.h" namespace "ML":
    # Dense input: X is a device pointer to an (n, p) float32 matrix.
    cdef void TSNE_fit(
        handle_t &handle,
        float *X,
        float *Y,
        int n,
        int p,
        int64_t* knn_indices,
        float* knn_dists,
        TSNEParams &params,
        float* kl_div) except +
    # Sparse (CSR) input: indptr/indices/data describe an (n, p) matrix
    # with nnz non-zeros. Note knn_indices is int32 here vs int64 above.
    cdef void TSNE_fit_sparse(
        const handle_t &handle,
        int *indptr,
        int *indices,
        float *data,
        float *Y,
        int nnz,
        int n,
        int p,
        int* knn_indices,
        float* knn_dists,
        TSNEParams &params,
        float* kl_div) except +
class TSNE(Base,
           CMajorInputTagMixin):
    """
    t-SNE (T-Distributed Stochastic Neighbor Embedding) is an extremely
    powerful dimensionality reduction technique that aims to maintain
    local distances between data points. It is extremely robust to whatever
    dataset you give it, and is used in many areas including cancer research,
    music analysis and neural network weight visualizations.
    cuML's t-SNE supports three algorithms: the original exact algorithm, the
    Barnes-Hut approximation and the fast Fourier transform interpolation
    approximation. The latter two are derived from CannyLabs' open-source CUDA
    code and produce extremely fast embeddings when n_components = 2. The exact
    algorithm is more accurate, but too slow to use on large datasets.
    Parameters
    ----------
    n_components : int (default 2)
        The output dimensionality size. Currently only 2 is supported.
    perplexity : float (default 30.0)
        Larger datasets require a larger value. Consider choosing different
        perplexity values from 5 to 50 and see the output differences.
    early_exaggeration : float (default 12.0)
        Controls the space between clusters. Not critical to tune this.
    late_exaggeration : float (default 1.0)
        Controls the space between clusters. It may be beneficial to increase
        this slightly to improve cluster separation. This will be applied
        after `exaggeration_iter` iterations (FFT only).
    learning_rate : float (default 200.0)
        The learning rate usually between (10, 1000). If this is too high,
        t-SNE could look like a cloud / ball of points.
    n_iter : int (default 1000)
        The more epochs, the more stable/accurate the final embedding.
    n_iter_without_progress : int (default 300)
        Currently unused. When the KL Divergence becomes too small after some
        iterations, terminate t-SNE early.
    min_grad_norm : float (default 1e-07)
        The minimum gradient norm for when t-SNE will terminate early.
        Used in the 'exact' and 'fft' algorithms. Consider reducing if
        the embeddings are unsatisfactory. It's recommended to use a
        smaller value for smaller datasets.
    metric : str (default='euclidean').
        Distance metric to use. Supported distances are ['l1, 'cityblock',
        'manhattan', 'euclidean', 'l2', 'sqeuclidean', 'minkowski',
        'chebyshev', 'cosine', 'correlation']
    init : str 'random' (default 'random')
        Currently supports random initialization.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    random_state : int (default None)
        Setting this can make repeated runs look more similar. Note, however,
        that this highly parallelized t-SNE implementation is not completely
        deterministic between runs, even with the same `random_state`.
    method : str 'fft', 'barnes_hut' or 'exact' (default 'fft')
        'barnes_hut' and 'fft' are fast approximations. 'exact' is more
        accurate but slower.
    angle : float (default 0.5)
        Valid values are between 0.0 and 1.0, which trade off speed and
        accuracy, respectively. Generally, these values are set between 0.2 and
        0.8. (Barnes-Hut only.)
    learning_rate_method : str 'adaptive', 'none' or None (default 'adaptive')
        Either adaptive or None. 'adaptive' tunes the learning rate, early
        exaggeration, perplexity and n_neighbors automatically based on
        input size.
    n_neighbors : int (default 90)
        The number of datapoints you want to use in the
        attractive forces. Smaller values are better for preserving
        local structure, whilst larger values can improve global structure
        preservation. Default is 3 * 30 (perplexity)
    perplexity_max_iter : int (default 100)
        The number of epochs the best gaussian bands are found for.
    exaggeration_iter : int (default 250)
        To promote the growth of clusters, set this higher.
    pre_momentum : float (default 0.5)
        During the exaggeration iteration, more forcefully apply gradients.
    post_momentum : float (default 0.8)
        During the late phases, less forcefully apply gradients.
    square_distances : boolean, default=True
        Whether TSNE should square the distance values.
        Internally, this will be used to compute a kNN graph using the provided
        metric and then squaring it when True. If a `knn_graph` is passed
        to `fit` or `fit_transform` methods, all the distances will be
        squared when True. For example, if a `knn_graph` was obtained using
        'sqeuclidean' metric, the distances will still be squared when True.
        Note: This argument should likely be set to False for distance metrics
        other than 'euclidean' and 'l2'.
    precomputed_knn : array / sparse array / tuple, optional (device or host)
        Either one of a tuple (indices, distances) of
        arrays of shape (n_samples, n_neighbors), a pairwise distances
        dense array of shape (n_samples, n_samples) or a KNN graph
        sparse array (preferably CSR/COO). This feature allows
        the precomputation of the KNN outside of TSNE
        and also allows the use of a custom distance function. This function
        should match the metric used to train the TSNE embeddings.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    Attributes
    ----------
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization. An experimental
        feature at this time.
    References
    ----------
    .. [1] `van der Maaten, L.J.P.
       t-Distributed Stochastic Neighbor Embedding
       <https://lvdmaaten.github.io/tsne/>`_
    .. [2] van der Maaten, L.J.P.; Hinton, G.E.
       Visualizing High-Dimensional Data
       Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    .. [3] George C. Linderman, Manas Rachh, Jeremy G. Hoskins,
        Stefan Steinerberger, Yuval Kluger Efficient Algorithms for
        t-distributed Stochastic Neighborhood Embedding
    .. tip::
        Maaten and Linderman showcased how t-SNE can be very sensitive to both
        the starting conditions (i.e. random initialization), and how parallel
        versions of t-SNE can generate vastly different results between runs.
        You can run t-SNE multiple times to settle on the best configuration.
        Note that using the same random_state across runs does not guarantee
        similar results each time.
    .. note::
        The CUDA implementation is derived from the excellent CannyLabs open
        source implementation here: https://github.com/CannyLab/tsne-cuda/. The
        CannyLabs code is licensed according to the conditions in
        cuml/cpp/src/tsne/cannylabs_tsne_license.txt. A full description of
        their approach is available in their article t-SNE-CUDA:
        GPU-Accelerated t-SNE and its Applications to Modern Data
        (https://arxiv.org/abs/1807.11824).
    """
    # Descriptors handle device/host conversion of the stored input data
    # and the fitted embedding according to the configured output_type.
    X_m = CumlArrayDescriptor()
    embedding_ = CumlArrayDescriptor()
    def __init__(self, *,
                 n_components=2,
                 perplexity=30.0,
                 early_exaggeration=12.0,
                 late_exaggeration=1.0,
                 learning_rate=200.0,
                 n_iter=1000,
                 n_iter_without_progress=300,
                 min_grad_norm=1e-07,
                 metric='euclidean',
                 metric_params=None,
                 init='random',
                 verbose=False,
                 random_state=None,
                 method='fft',
                 angle=0.5,
                 learning_rate_method='adaptive',
                 n_neighbors=90,
                 perplexity_max_iter=100,
                 exaggeration_iter=250,
                 pre_momentum=0.5,
                 post_momentum=0.8,
                 square_distances=True,
                 precomputed_knn=None,
                 handle=None,
                 output_type=None):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        # Eagerly validate every hyperparameter so errors surface at
        # construction time rather than deep inside the CUDA solver.
        if n_components < 0:
            raise ValueError("n_components = {} should be more "
                             "than 0.".format(n_components))
        if n_components != 2:
            raise ValueError("Currently TSNE supports n_components = 2; "
                             "but got n_components = {}".format(n_components))
        if perplexity < 0:
            raise ValueError("perplexity = {} should be more than 0.".format(
                             perplexity))
        if early_exaggeration < 0:
            raise ValueError("early_exaggeration = {} should be more "
                             "than 0.".format(early_exaggeration))
        if late_exaggeration < 0:
            raise ValueError("late_exaggeration = {} should be more "
                             "than 0.".format(late_exaggeration))
        if learning_rate < 0:
            raise ValueError("learning_rate = {} should be more "
                             "than 0.".format(learning_rate))
        if n_iter < 0:
            raise ValueError("n_iter = {} should be more than 0.".format(
                             n_iter))
        if n_iter <= 100:
            warnings.warn("n_iter = {} might cause TSNE to output wrong "
                          "results. Set it higher.".format(n_iter))
        if init.lower() != 'random':
            # TODO https://github.com/rapidsai/cuml/issues/3458
            warnings.warn("TSNE does not support {} but only random "
                          "initialization.".format(init))
            init = 'random'
        if angle < 0 or angle > 1:
            raise ValueError("angle = {} should be ≥ 0 and ≤ 1".format(angle))
        if n_neighbors < 0:
            raise ValueError("n_neighbors = {} should be more "
                             "than 0.".format(n_neighbors))
        if n_neighbors > 1023:
            # Fix: the warning previously lacked .format(), so the "{}"
            # placeholder was emitted literally instead of the value.
            warnings.warn("n_neighbors = {} should be less than "
                          "1024".format(n_neighbors))
            n_neighbors = 1023
        if perplexity_max_iter < 0:
            raise ValueError("perplexity_max_iter = {} should be more "
                             "than 0.".format(perplexity_max_iter))
        if exaggeration_iter < 0:
            raise ValueError("exaggeration_iter = {} should be more "
                             "than 0.".format(exaggeration_iter))
        if exaggeration_iter > n_iter:
            raise ValueError("exaggeration_iter = {} should be less "
                             "than n_iter = {}.".format(exaggeration_iter,
                                                        n_iter))
        if pre_momentum < 0 or pre_momentum > 1:
            raise ValueError("pre_momentum = {} should be more than 0 "
                             "and less than 1.".format(pre_momentum))
        if post_momentum < 0 or post_momentum > 1:
            raise ValueError("post_momentum = {} should be more than 0 "
                             "and less than 1.".format(post_momentum))
        if pre_momentum > post_momentum:
            raise ValueError("post_momentum = {} should be more than "
                             "pre_momentum = {}".format(post_momentum,
                                                        pre_momentum))
        if method == "fft":
            warnings.warn("Starting from version 22.04, the default method "
                          "of TSNE is 'fft'.")
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.late_exaggeration = late_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.metric_params = metric_params
        self.init = init
        self.random_state = random_state
        self.method = method
        self.angle = angle
        self.n_neighbors = n_neighbors
        self.perplexity_max_iter = perplexity_max_iter
        self.exaggeration_iter = exaggeration_iter
        self.pre_momentum = pre_momentum
        self.post_momentum = post_momentum
        if learning_rate_method is None:
            self.learning_rate_method = 'none'
        else:
            # To support `sklearn.base.clone()`, we must minimize altering
            # argument references unless absolutely necessary. Check to see if
            # lowering the string results in the same value, and if so, keep
            # the same reference that was passed in. This may seem redundant,
            # but it allows `clone()` to function without raising an error
            if (learning_rate_method.lower() != learning_rate_method):
                learning_rate_method = learning_rate_method.lower()
            self.learning_rate_method = learning_rate_method
        # Solver internals not exposed as constructor arguments.
        self.epssq = 0.0025
        self.perplexity_tol = 1e-5
        self.min_gain = 0.01
        self.pre_learning_rate = learning_rate
        self.post_learning_rate = learning_rate * 2
        self.square_distances = square_distances
        self.X_m = None
        self.embedding_ = None
        self.sparse_fit = False
        self.precomputed_knn = extract_knn_infos(precomputed_knn,
                                                 n_neighbors)
    @generate_docstring(skip_parameters_heading=True,
                        X='dense_sparse',
                        convert_dtype_cast='np.float32')
    def fit(self, X, convert_dtype=True, knn_graph=None) -> "TSNE":
        """
        Fit X into an embedded space.
        Parameters
        ----------
        knn_graph : array / sparse array / tuple, optional (device or host)
        Either one of a tuple (indices, distances) of
        arrays of shape (n_samples, n_neighbors), a pairwise distances
        dense array of shape (n_samples, n_samples) or a KNN graph
        sparse array (preferably CSR/COO). This feature allows
        the precomputation of the KNN outside of TSNE
        and also allows the use of a custom distance function. This function
        should match the metric used to train the TSNE embeddings.
        Takes precedence over the precomputed_knn parameter.
        """
        cdef int n, p
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if handle_ == NULL:
            raise ValueError("cuML Handle is Null! Terminating TSNE.")
        if len(X.shape) != 2:
            raise ValueError("data should be two dimensional")
        # Sparse inputs keep their CSR structure and go through
        # TSNE_fit_sparse; dense inputs are converted to C-order float32.
        if is_sparse(X):
            self.X_m = SparseCumlArray(X, convert_to_dtype=cupy.float32,
                                       convert_format=False)
            n, p = self.X_m.shape
            self.sparse_fit = True
        # Handle dense inputs
        else:
            self.X_m, n, p, _ = \
                input_to_cuml_array(X, order='C', check_dtype=np.float32,
                                    convert_to_dtype=(np.float32
                                                      if convert_dtype
                                                      else None))
        if n <= 1:
            raise ValueError("There needs to be more than 1 sample to build "
                             "the nearest neighbors graph")
        self.n_neighbors = min(n, self.n_neighbors)
        if self.perplexity > n:
            warnings.warn("Perplexity = {} should be less than the "
                          "# of datapoints = {}.".format(self.perplexity, n))
            self.perplexity = n
        # Resolve a user-provided kNN graph (argument takes precedence over
        # the precomputed_knn constructor parameter). Null pointers signal
        # the C++ side to compute the kNN graph itself.
        cdef uintptr_t knn_dists_ptr = 0
        cdef uintptr_t knn_indices_ptr = 0
        if knn_graph is not None or self.precomputed_knn is not None:
            if knn_graph is not None:
                knn_indices, knn_dists = extract_knn_infos(knn_graph,
                                                           self.n_neighbors)
            elif self.precomputed_knn is not None:
                knn_indices, knn_dists = self.precomputed_knn
            if self.sparse_fit:
                # TSNE_fit_sparse expects int32 indices (vs int64 for dense).
                knn_indices, _, _, _ = \
                    input_to_cuml_array(knn_indices, convert_to_dtype=np.int32)
            knn_dists_ptr = knn_dists.ptr
            knn_indices_ptr = knn_indices.ptr
        # Prepare output embeddings
        self.embedding_ = CumlArray.zeros(
            (n, self.n_components),
            order="F",
            dtype=np.float32,
            index=self.X_m.index)
        cdef uintptr_t embed_ptr = self.embedding_.ptr
        # Find best params if learning rate method is adaptive
        if self.learning_rate_method=='adaptive' and (self.method=="barnes_hut"
                                                      or self.method=='fft'):
            logger.debug("Learning rate is adaptive. In TSNE paper, "
                         "it has been shown that as n->inf, "
                         "Barnes Hut works well if n_neighbors->30, "
                         "learning_rate->20000, early_exaggeration->24.")
            logger.debug("cuML uses an adaptive method. "
                         "n_neighbors decreases to 30 as n->inf. "
                         "Likewise for the other params.")
            if n <= 2000:
                self.n_neighbors = min(max(self.n_neighbors, 90), n)
            else:
                # A linear trend from (n=2000, neigh=100) to (n=60000,neigh=30)
                self.n_neighbors = max(int(102 - 0.0012 * n), 30)
            self.pre_learning_rate = max(n / 3.0, 1)
            self.post_learning_rate = self.pre_learning_rate
            self.early_exaggeration = 24.0 if n > 10000 else 12.0
            if logger.should_log_for(logger.level_debug):
                logger.debug("New n_neighbors = {}, learning_rate = {}, "
                             "exaggeration = {}"
                             .format(self.n_neighbors, self.pre_learning_rate,
                                     self.early_exaggeration))
        if self.method == 'barnes_hut':
            algo = TSNE_ALGORITHM.BARNES_HUT
        elif self.method == 'fft':
            algo = TSNE_ALGORITHM.FFT
        elif self.method == 'exact':
            algo = TSNE_ALGORITHM.EXACT
        else:
            raise ValueError("Allowed methods are 'exact', 'barnes_hut' and "
                             "'fft'.")
        cdef TSNEParams* params = <TSNEParams*> <size_t> \
            self._build_tsne_params(algo)
        cdef float kl_divergence = 0
        if self.sparse_fit:
            TSNE_fit_sparse(handle_[0],
                            <int*><uintptr_t>
                            self.X_m.indptr.ptr,
                            <int*><uintptr_t>
                            self.X_m.indices.ptr,
                            <float*><uintptr_t>
                            self.X_m.data.ptr,
                            <float*> embed_ptr,
                            <int> self.X_m.nnz,
                            <int> n,
                            <int> p,
                            <int*> knn_indices_ptr,
                            <float*> knn_dists_ptr,
                            <TSNEParams&> deref(params),
                            &kl_divergence)
        else:
            TSNE_fit(handle_[0],
                     <float*><uintptr_t> self.X_m.ptr,
                     <float*> embed_ptr,
                     <int> n,
                     <int> p,
                     <int64_t*> knn_indices_ptr,
                     <float*> knn_dists_ptr,
                     <TSNEParams&> deref(params),
                     &kl_divergence)
        # Block until the solver finishes before freeing params and
        # reporting the KL divergence.
        self.handle.sync()
        free(params)
        self._kl_divergence_ = kl_divergence
        logger.debug("[t-SNE] KL divergence: {}".format(kl_divergence))
        return self
    @generate_docstring(convert_dtype_cast='np.float32',
                        skip_parameters_heading=True,
                        return_values={'name': 'X_new',
                                       'type': 'dense',
                                       'description': 'Embedding of the \
                                                       data in \
                                                       low-dimensional space.',
                                       'shape': '(n_samples, n_components)'})
    @cuml.internals.api_base_fit_transform()
    def fit_transform(self, X, convert_dtype=True,
                      knn_graph=None) -> CumlArray:
        """
        Fit X into an embedded space and return that transformed output.
        """
        return self.fit(X, convert_dtype=convert_dtype,
                        knn_graph=knn_graph)._transform(X)
    def _transform(self, X) -> CumlArray:
        """
        Internal transform function to allow base wrappers default
        functionality to work
        """
        return self.embedding_
    def _build_tsne_params(self, algo):
        """
        Allocate a TSNEParams struct on the heap, populate it from the
        estimator's attributes and return its address as a Python int.
        The caller owns the pointer and must free it (see `fit`).
        """
        cdef long long seed = -1
        if self.random_state is not None:
            seed = self.random_state
        cdef TSNEParams* params = new TSNEParams()
        params.dim = <int> self.n_components
        params.n_neighbors = <int> self.n_neighbors
        params.theta = <float> self.angle
        params.epssq = <float> self.epssq
        params.perplexity = <float> self.perplexity
        params.perplexity_max_iter = <int> self.perplexity_max_iter
        params.perplexity_tol = <float> self.perplexity_tol
        params.early_exaggeration = <float> self.early_exaggeration
        params.late_exaggeration = <float> self.late_exaggeration
        params.exaggeration_iter = <int> self.exaggeration_iter
        params.min_gain = <float> self.min_gain
        params.pre_learning_rate = <float> self.pre_learning_rate
        params.post_learning_rate = <float> self.post_learning_rate
        params.max_iter = <int> self.n_iter
        params.min_grad_norm = <float> self.min_grad_norm
        params.pre_momentum = <float> self.pre_momentum
        params.post_momentum = <float> self.post_momentum
        params.random_state = <long long> seed
        params.verbosity = <int> self.verbose
        params.initialize_embeddings = <bool> True
        params.square_distances = <bool> self.square_distances
        params.algorithm = algo
        # metric
        metric_parsing = {
            "l2": DistanceType.L2SqrtExpanded,
            "euclidean": DistanceType.L2SqrtExpanded,
            "sqeuclidean": DistanceType.L2Expanded,
            "cityblock": DistanceType.L1,
            "l1": DistanceType.L1,
            "manhattan": DistanceType.L1,
            "minkowski": DistanceType.LpUnexpanded,
            "chebyshev": DistanceType.Linf,
            "cosine": DistanceType.CosineExpanded,
            "correlation": DistanceType.CorrelationExpanded
        }
        if self.metric.lower() in metric_parsing:
            params.metric = metric_parsing[self.metric.lower()]
        else:
            raise ValueError("Invalid value for metric: {}"
                             .format(self.metric))
        # Minkowski order `p`; only meaningful for the minkowski metric.
        if self.metric_params is None:
            params.p = <float> 2.0
        else:
            params.p = <float>self.metric_params.get('p')
        return <size_t> params
    @property
    def kl_divergence_(self):
        if self.method == 'barnes_hut':
            warnings.warn("The calculation of the Kullback-Leibler "
                          "divergence is still an experimental feature "
                          "while using the Barnes Hut algorithm.")
        return self._kl_divergence_
    @kl_divergence_.setter
    def kl_divergence_(self, value):
        self._kl_divergence_ = value
    def __del__(self):
        # Release the (potentially large) device embedding eagerly.
        if hasattr(self, "embedding_"):
            del self.embedding_
    def __getstate__(self):
        # The handle wraps CUDA state and cannot be pickled; it is
        # recreated in __setstate__.
        state = self.__dict__.copy()
        if "handle" in state:
            del state["handle"]
        return state
    def __setstate__(self, state):
        super(TSNE, self).__init__(handle=None,
                                   verbose=state['verbose'])
        self.__dict__.update(state)
        return state
    def get_param_names(self):
        """Names of hyperparameters exposed to get_params/set_params."""
        return super().get_param_names() + [
            "n_components",
            "perplexity",
            "early_exaggeration",
            "late_exaggeration",
            "learning_rate",
            "n_iter",
            "n_iter_without_progress",
            "min_grad_norm",
            "metric",
            "metric_params",
            "init",
            "random_state",
            "method",
            "angle",
            "learning_rate_method",
            "n_neighbors",
            "perplexity_max_iter",
            "exaggeration_iter",
            "pre_momentum",
            "post_momentum",
            "square_distances",
            "precomputed_knn"
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/manifold/simpl_set.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.manifold.umap_utils cimport *
from cuml.manifold.umap_utils import GraphHolder, find_ab_params, \
metric_parsing
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.array import CumlArray
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from libc.stdint cimport uintptr_t
from libc.stdlib cimport free
from libcpp.memory cimport unique_ptr
# libcuml++ UMAP entry points used by the simplicial-set helpers below.
cdef extern from "cuml/manifold/umap.hpp" namespace "ML::UMAP":
    # Builds the fuzzy simplicial set (kNN-based COO graph) for X, an
    # (n, d) float32 matrix. knn_indices/knn_dists may be NULL, in which
    # case the kNN graph is computed internally.
    unique_ptr[COO] get_graph(handle_t &handle,
                              float* X,
                              float* y,
                              int n,
                              int d,
                              int64_t* knn_indices,
                              float* knn_dists,
                              UMAPParams* params)
    # Optimizes `embeddings` in place given an existing COO graph.
    void refine(handle_t &handle,
                float* X,
                int n,
                int d,
                COO* cgraph_coo,
                UMAPParams* params,
                float* embeddings)
def fuzzy_simplicial_set(X,
                         n_neighbors,
                         random_state=None,
                         metric="euclidean",
                         metric_kwds=None,
                         knn_indices=None,
                         knn_dists=None,
                         set_op_mix_ratio=1.0,
                         local_connectivity=1.0,
                         verbose=False):
    """Given a set of data X, a neighborhood size, and a measure of distance
    compute the fuzzy simplicial set (here represented as a fuzzy graph in
    the form of a sparse matrix) associated to the data. This is done by
    locally approximating geodesic distance at each point, creating a fuzzy
    simplicial set for each such point, and then combining all the local
    fuzzy simplicial sets into a global one via a fuzzy union.
    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        The data to be modelled as a fuzzy simplicial set.
    n_neighbors: int
        The number of neighbors to use to approximate geodesic distance.
        Larger numbers induce more global estimates of the manifold that can
        miss finer detail, while smaller values will focus on fine manifold
        structure to the detriment of the larger picture.
    random_state: numpy RandomState or equivalent
        A state capable being used as a numpy random state.
    metric: string (default='euclidean').
        Distance metric to use. Supported distances are ['l1, 'cityblock',
        'taxicab', 'manhattan', 'euclidean', 'l2', 'sqeuclidean', 'canberra',
        'minkowski', 'chebyshev', 'linf', 'cosine', 'correlation', 'hellinger',
        'hamming', 'jaccard']
        Metrics that take arguments (such as minkowski) can have arguments
        passed via the metric_kwds dictionary.
        Note: The 'jaccard' distance metric is only supported for sparse
        inputs.
    metric_kwds: dict (optional, default=None)
        Metric argument
    knn_indices: array of shape (n_samples, n_neighbors) (optional)
        If the k-nearest neighbors of each point has already been calculated
        you can pass them in here to save computation time. This should be
        an array with the indices of the k-nearest neighbors as a row for
        each data point.
    knn_dists: array of shape (n_samples, n_neighbors) (optional)
        If the k-nearest neighbors of each point has already been calculated
        you can pass them in here to save computation time. This should be
        an array with the distances of the k-nearest neighbors as a row for
        each data point.
    set_op_mix_ratio: float (optional, default 1.0)
        Interpolate between (fuzzy) union and intersection as the set operation
        used to combine local fuzzy simplicial sets to obtain a global fuzzy
        simplicial sets. Both fuzzy set operations use the product t-norm.
        The value of this parameter should be between 0.0 and 1.0; a value of
        1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy
        intersection.
    local_connectivity: int (optional, default 1)
        The local connectivity required -- i.e. the number of nearest
        neighbors that should be assumed to be connected at a local level.
        The higher this value the more connected the manifold becomes
        locally. In practice this should be not more than the local intrinsic
        dimension of the manifold.
    verbose: bool (optional, default False)
        Whether to report information on the current progress of the algorithm.
    Returns
    -------
    fuzzy_simplicial_set: coo_matrix
        A fuzzy simplicial set represented as a sparse matrix. The (i,
        j) entry of the matrix represents the membership strength of the
        1-simplex between the ith and jth sample points.
    """
    if metric_kwds is None:
        metric_kwds = {}
    # Requesting a specific seed implies a deterministic run.
    deterministic = random_state is not None
    # Reduce any RandomState-like input to a 32-bit integer seed.
    if not isinstance(random_state, int):
        if isinstance(random_state, np.random.RandomState):
            rs = random_state
        else:
            rs = np.random.RandomState(random_state)
        random_state = rs.randint(low=np.iinfo(np.int32).min,
                                  high=np.iinfo(np.int32).max,
                                  dtype=np.int32)
    cdef UMAPParams* umap_params = new UMAPParams()
    umap_params.n_neighbors = <int> n_neighbors
    umap_params.random_state = <uint64_t> random_state
    umap_params.deterministic = <bool> deterministic
    umap_params.set_op_mix_ratio = <float> set_op_mix_ratio
    umap_params.local_connectivity = <float> local_connectivity
    try:
        umap_params.metric = metric_parsing[metric.lower()]
    except KeyError:
        raise ValueError(f"Invalid value for metric: {metric}")
    # metric_kwds was defaulted to {} above, so it is never None here;
    # the previous `if metric_kwds is None` branch was dead code.
    umap_params.p = <float> metric_kwds.get("p", 2.0)
    umap_params.verbosity = <int> verbose
    # Convert inputs once. When a precomputed kNN graph is supplied, X's
    # data pointer is not needed by get_graph (it is passed as NULL), so
    # we skip the device conversion of X entirely — previously X was
    # converted unconditionally and then a second time in the else branch.
    if knn_indices is not None and knn_dists is not None:
        knn_indices_m, _, _, _ = \
            input_to_cuml_array(knn_indices,
                                order='C',
                                check_dtype=np.int64,
                                convert_to_dtype=np.int64)
        knn_dists_m, _, _, _ = \
            input_to_cuml_array(knn_dists,
                                order='C',
                                check_dtype=np.float32,
                                convert_to_dtype=np.float32)
        X_ptr = 0
        knn_indices_ptr = knn_indices_m.ptr
        knn_dists_ptr = knn_dists_m.ptr
    else:
        X_m, _, _, _ = \
            input_to_cuml_array(X,
                                order='C',
                                check_dtype=np.float32,
                                convert_to_dtype=np.float32)
        X_ptr = X_m.ptr
        knn_indices_ptr = 0
        knn_dists_ptr = 0
    handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    cdef unique_ptr[COO] fss_graph_ptr = get_graph(
        handle_[0],
        <float*><uintptr_t> X_ptr,
        <float*><uintptr_t> NULL,
        <int> X.shape[0],
        <int> X.shape[1],
        <int64_t*><uintptr_t> knn_indices_ptr,
        <float*><uintptr_t> knn_dists_ptr,
        <UMAPParams*> umap_params)
    fss_graph = GraphHolder.from_ptr(fss_graph_ptr)
    # NOTE(review): umap_params was allocated with C++ `new` but is
    # released with C `free` — matches the existing pattern in this repo
    # (see t_sne.pyx); confirm UMAPParams is trivially destructible.
    free(umap_params)
    return fss_graph.get_cupy_coo()
def simplicial_set_embedding(
    data,
    graph,
    n_components=2,
    initial_alpha=1.0,
    a=None,
    b=None,
    repulsion_strength=1.0,
    negative_sample_rate=5,
    n_epochs=None,
    init="spectral",
    random_state=None,
    metric="euclidean",
    metric_kwds=None,
    output_metric="euclidean",
    output_metric_kwds=None,
    verbose=False,
):
    """Perform a fuzzy simplicial set embedding, using a specified
    initialisation method and then minimizing the fuzzy set cross entropy
    between the 1-skeletons of the high and low dimensional fuzzy simplicial
    sets.

    Parameters
    ----------
    data: array of shape (n_samples, n_features)
        The source data to be embedded by UMAP.
    graph: sparse matrix
        The 1-skeleton of the high dimensional fuzzy simplicial set as
        represented by a graph for which we require a sparse matrix for the
        (weighted) adjacency matrix.
    n_components: int
        The dimensionality of the euclidean space into which to embed the data.
    initial_alpha: float
        Initial learning rate for the SGD.
    a: float
        Parameter of differentiable approximation of right adjoint functor
    b: float
        Parameter of differentiable approximation of right adjoint functor
    repulsion_strength: float
        Weight to apply to negative samples.
    negative_sample_rate: int (optional, default 5)
        The number of negative samples to select per positive sample
        in the optimization process. Increasing this value will result
        in greater repulsive force being applied, greater optimization
        cost, but slightly more accuracy.
    n_epochs: int (optional, default 0)
        The number of training epochs to be used in optimizing the
        low dimensional embedding. Larger values result in more accurate
        embeddings. If 0 is specified a value will be selected based on
        the size of the input dataset (200 for large datasets, 500 for small).
    init: string
        How to initialize the low dimensional embedding. Options are:
            * 'spectral': use a spectral embedding of the fuzzy 1-skeleton
            * 'random': assign initial embedding positions at random.
        Note: unlike umap-learn, a numpy array of initial positions is not
        supported; anything other than the two strings above raises.
    random_state: numpy RandomState or equivalent
        A state capable being used as a numpy random state.
    metric: string (default='euclidean').
        Distance metric to use. Supported distances are ['l1, 'cityblock',
        'taxicab', 'manhattan', 'euclidean', 'l2', 'sqeuclidean', 'canberra',
        'minkowski', 'chebyshev', 'linf', 'cosine', 'correlation', 'hellinger',
        'hamming', 'jaccard']
        Metrics that take arguments (such as minkowski) can have arguments
        passed via the metric_kwds dictionary.
        Note: The 'jaccard' distance metric is only supported for sparse
        inputs.
    metric_kwds: dict (optional, default=None)
        Metric argument
    output_metric: function
        Function returning the distance between two points in embedding space
        and the gradient of the distance wrt the first argument.
    output_metric_kwds: dict
        Key word arguments to be passed to the output_metric function.
    verbose: bool (optional, default False)
        Whether to report information on the current progress of the algorithm.

    Returns
    -------
    embedding: array of shape (n_samples, n_components)
        The optimized embedding of ``graph`` into an ``n_components``
        dimensional euclidean space.
    """
    # Normalize the keyword dictionaries once so the rest of the function can
    # read them unconditionally.
    if metric_kwds is None:
        metric_kwds = {}
    if output_metric_kwds is None:
        output_metric_kwds = {}

    if init not in ['spectral', 'random']:
        # init is a string here, so format with %s (the previous %d raised a
        # TypeError instead of reporting the unsupported strategy).
        raise Exception("Initialization strategy not supported: %s" % init)

    if output_metric not in ['euclidean', 'categorical']:
        # The previous message mixed a str.format-style '{}' placeholder with
        # the '%' operator, which raised a TypeError instead of this report.
        # The exception type is kept unchanged for existing callers.
        raise Exception(f"Invalid output metric: {output_metric}")

    # 0 tells the C++ layer to pick an epoch count based on dataset size.
    n_epochs = n_epochs if n_epochs else 0

    if a is None or b is None:
        # Derive the embedding curve parameters from the default
        # spread/min_dist, mirroring umap-learn's behaviour.
        spread = 1.0
        min_dist = 0.1
        a, b = find_ab_params(spread, min_dist)

    # Results are only reproducible when the caller supplied a seed.
    deterministic = random_state is not None
    if not isinstance(random_state, int):
        if isinstance(random_state, np.random.RandomState):
            rs = random_state
        else:
            rs = np.random.RandomState(random_state)
        random_state = rs.randint(low=np.iinfo(np.int32).min,
                                  high=np.iinfo(np.int32).max,
                                  dtype=np.int32)

    cdef UMAPParams* umap_params = new UMAPParams()
    umap_params.n_components = <int> n_components
    # initial_alpha, a and b are floating point parameters; the previous
    # <int> casts silently truncated them (e.g. a ~= 1.58 became 1),
    # distorting the optimization.
    umap_params.initial_alpha = <float> initial_alpha
    umap_params.a = <float> a
    umap_params.b = <float> b
    umap_params.repulsion_strength = <float> repulsion_strength
    umap_params.negative_sample_rate = <int> negative_sample_rate
    umap_params.n_epochs = <int> n_epochs
    if init == 'spectral':
        umap_params.init = <int> 1
    else:  # init == 'random'
        umap_params.init = <int> 0
    # Store the seed as uint64_t, consistent with fuzzy_simplicial_set above.
    umap_params.random_state = <uint64_t> random_state
    umap_params.deterministic = <bool> deterministic
    try:
        umap_params.metric = metric_parsing[metric.lower()]
    except KeyError:
        raise ValueError(f"Invalid value for metric: {metric}")
    # metric_kwds was normalized to a dict above, so read 'p' directly (the
    # old `if metric_kwds is None` fallback was unreachable).
    umap_params.p = <float> metric_kwds.get("p", 2.0)
    if output_metric == 'euclidean':
        umap_params.target_metric = MetricType.EUCLIDEAN
    else:  # output_metric == 'categorical'
        umap_params.target_metric = MetricType.CATEGORICAL
    if 'p' in output_metric_kwds:
        umap_params.target_weight = <float> output_metric_kwds['p']
    else:
        umap_params.target_weight = <float> 0
    umap_params.verbosity = <int> verbose

    X_m, _, _, _ = \
        input_to_cuml_array(data, order='C', check_dtype=np.float32)

    # The C++ refine() expects a canonical (duplicate-free) COO matrix on
    # device, so coalesce and move to cupy if needed.
    graph = graph.tocoo()
    graph.sum_duplicates()
    if not isinstance(graph, cp.sparse.coo_matrix):
        graph = cp.sparse.coo_matrix(graph)

    handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    cdef GraphHolder fss_graph = GraphHolder.from_coo_array(GraphHolder(),
                                                            handle,
                                                            graph)

    # Output buffer the C++ layer writes the embedding into.
    embedding = CumlArray.zeros((X_m.shape[0], n_components),
                                order="C", dtype=np.float32,
                                index=X_m.index)

    refine(handle_[0],
           <float*><uintptr_t> X_m.ptr,
           <int> X_m.shape[0],
           <int> X_m.shape[1],
           <COO*> fss_graph.get(),
           <UMAPParams*> umap_params,
           <float*><uintptr_t> embedding.ptr)

    free(umap_params)

    return embedding
| 0 |
rapidsai_public_repos/cuml/python | rapidsai_public_repos/cuml/python/cmake/ConfigureCythonAlgorithms.cmake | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Conditionally append FILENAME to the caller's ``cython_sources`` list.
# The file is added when either extra arguments were passed after FILENAME
# (presumably a per-algorithm "build this module" flag set by callers —
# confirm against call sites) or when ``CUML_UNIVERSAL`` evaluates to true.
# CMake functions have their own variable scope, so the updated list must be
# written back to the caller with PARENT_SCOPE.
function(add_module_gpu_default FILENAME)
# Capture the optional arguments that followed FILENAME.
set (extra_args ${ARGN})
list(LENGTH extra_args extra_count)
if (${extra_count} GREATER 0 OR
${CUML_UNIVERSAL})
list(APPEND cython_sources
${FILENAME})
# Export the modified list to the calling scope.
set (cython_sources ${cython_sources} PARENT_SCOPE)
endif()
endfunction()
| 0 |
rapidsai_public_repos/cuml/conda | rapidsai_public_repos/cuml/conda/environments/cpp_all_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- gcc_linux-64=11.*
- gmock>=1.13.0
- gtest>=1.13.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcufft-dev=10.9.0.58
- libcufft=10.9.0.58
- libcumlprims==23.12.*
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- libraft-headers==23.12.*
- libraft==23.12.*
- librmm==23.12.*
- ninja
- nvcc_linux-64=11.8
- sysroot_linux-64==2.17
name: cpp_all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cuml/conda | rapidsai_public_repos/cuml/conda/environments/cpp_all_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-profiler-api
- cuda-version=12.0
- cxx-compiler
- gcc_linux-64=11.*
- gmock>=1.13.0
- gtest>=1.13.0
- libcublas-dev
- libcufft-dev
- libcumlprims==23.12.*
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- libraft-headers==23.12.*
- libraft==23.12.*
- librmm==23.12.*
- ninja
- sysroot_linux-64==2.17
name: cpp_all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/cuml/conda | rapidsai_public_repos/cuml/conda/environments/all_cuda-120_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-cudart-dev
- cuda-nvcc
- cuda-profiler-api
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cudf==23.12.*
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==23.12.*
- dask-cudf==23.12.*
- dask-ml
- doxygen=1.9.1
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- hdbscan<=0.8.30
- hypothesis>=6.0,<7
- ipykernel
- ipython
- joblib>=0.11
- libcublas-dev
- libcufft-dev
- libcumlprims==23.12.*
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
- libraft-headers==23.12.*
- libraft==23.12.*
- librmm==23.12.*
- nbsphinx
- ninja
- nltk
- numba>=0.57
- numpydoc
- pip
- pydata-sphinx-theme!=0.14.2
- pylibraft==23.12.*
- pynndescent==0.5.8
- pytest
- pytest-benchmark
- pytest-cases
- pytest-cov
- pytest-xdist
- python>=3.9,<3.11
- raft-dask==23.12.*
- rapids-dask-dependency==23.12.*
- recommonmark
- rmm==23.12.*
- scikit-build>=0.13.1
- scikit-learn==1.2
- scipy>=1.8.0
- seaborn
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx<6
- statsmodels
- sysroot_linux-64==2.17
- treelite==3.9.1
- umap-learn==0.5.3
- pip:
- dask-glm==0.3.0
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/cuml/conda | rapidsai_public_repos/cuml/conda/environments/clang_tidy_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- c-compiler
- clang-tools==15.0.7
- clang==15.0.7
- cmake>=3.26.4
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- gcc_linux-64=11.*
- gmock>=1.13.0
- gtest>=1.13.0
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcufft-dev=10.9.0.58
- libcufft=10.9.0.58
- libcumlprims==23.12.*
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- libraft-headers==23.12.*
- libraft==23.12.*
- librmm==23.12.*
- ninja
- nvcc_linux-64=11.8
- sysroot_linux-64==2.17
- tomli
name: clang_tidy_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cuml/conda | rapidsai_public_repos/cuml/conda/environments/all_cuda-118_arch-x86_64.yaml | # This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- conda-forge
- nvidia
dependencies:
- c-compiler
- cmake>=3.26.4
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cudf==23.12.*
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.0
- dask-cuda==23.12.*
- dask-cudf==23.12.*
- dask-ml
- doxygen=1.9.1
- gcc_linux-64=11.*
- gmock>=1.13.0
- graphviz
- gtest>=1.13.0
- hdbscan<=0.8.30
- hypothesis>=6.0,<7
- ipykernel
- ipython
- joblib>=0.11
- libcublas-dev=11.11.3.6
- libcublas=11.11.3.6
- libcufft-dev=10.9.0.58
- libcufft=10.9.0.58
- libcumlprims==23.12.*
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libcusolver-dev=11.4.1.48
- libcusolver=11.4.1.48
- libcusparse-dev=11.7.5.86
- libcusparse=11.7.5.86
- libraft-headers==23.12.*
- libraft==23.12.*
- librmm==23.12.*
- nbsphinx
- ninja
- nltk
- numba>=0.57
- numpydoc
- nvcc_linux-64=11.8
- pip
- pydata-sphinx-theme!=0.14.2
- pylibraft==23.12.*
- pynndescent==0.5.8
- pytest
- pytest-benchmark
- pytest-cases
- pytest-cov
- pytest-xdist
- python>=3.9,<3.11
- raft-dask==23.12.*
- rapids-dask-dependency==23.12.*
- recommonmark
- rmm==23.12.*
- scikit-build>=0.13.1
- scikit-learn==1.2
- scipy>=1.8.0
- seaborn
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx<6
- statsmodels
- sysroot_linux-64==2.17
- treelite==3.9.1
- umap-learn==0.5.3
- pip:
- dask-glm==0.3.0
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
cmake_version:
- ">=3.26.4"
sysroot_version:
- "=2.17"
treelite_version:
- "=3.9.1"
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml/build.sh | #!/usr/bin/env bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
# This assumes the script is executed from the root of the repo directory
./build.sh cuml -v
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml/meta.yaml | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cuml
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=cuml-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=cuml-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cuda-python ==11.7.1
- cudatoolkit
{% else %}
- cuda-python ==12.0.0
{% endif %}
- cudf ={{ minor_version }}
- cython >=3.0.0
- libcuml ={{ version }}
- libcumlprims ={{ minor_version }}
- pylibraft ={{ minor_version }}
- python x.x
- raft-dask ={{ minor_version }}
- scikit-build >=0.13.1
- setuptools
- treelite {{ treelite_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- cudf ={{ minor_version }}
- cupy >=12.0.0
- dask-cudf ={{ minor_version }}
- joblib >=0.11
- libcuml ={{ version }}
- libcumlprims ={{ minor_version }}
- pylibraft ={{ minor_version }}
- python x.x
- raft-dask ={{ minor_version }}
- rapids-dask-dependency ={{ minor_version }}
- treelite {{ treelite_version }}
test:
  requires:
    - cuda-version ={{ cuda_version }}
  imports:
    - cuml
about:
home: https://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: cuML library
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/libcuml/install_libcuml_tests.sh | #!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
cmake --install cpp/build --component testing
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/libcuml/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "=2.17"
cmake_version:
- ">=3.26.4"
treelite_version:
- "=3.9.1"
gtest_version:
- ">=1.13.0"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcublas_host_version:
- "=11.11.3.6"
cuda11_libcublas_run_version:
- ">=11.5.2.43,<=11.11.3.6"
cuda11_libcusolver_host_version:
- "=11.4.1.48"
cuda11_libcusolver_run_version:
- ">=11.2.0.43,<=11.4.1.48"
cuda11_libcusparse_host_version:
- "=11.7.5.86"
cuda11_libcusparse_run_version:
- ">=11.6.0.43,<=11.7.5.86"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<=10.3.0.86"
cuda11_libcufft_host_version:
- "=10.9.0.58"
cuda11_libcufft_run_version:
- ">=10.5.0.43,<=10.9.0.58"
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/libcuml/install_libcuml.sh | #!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
cmake --install cpp/build
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/libcuml/build.sh | #!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
./build.sh -n libcuml prims -v --allgpuarch
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/libcuml/meta.yaml | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libcuml-split
source:
path: ../../..
build:
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libcuml-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libcuml-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcublas {{ cuda11_libcublas_host_version }}
- libcublas-dev {{ cuda11_libcublas_host_version }}
- libcufft {{ cuda11_libcufft_host_version }}
- libcufft-dev {{ cuda11_libcufft_host_version }}
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- libcusolver {{ cuda11_libcusolver_host_version }}
- libcusolver-dev {{ cuda11_libcusolver_host_version }}
- libcusparse {{ cuda11_libcusparse_host_version }}
- libcusparse-dev {{ cuda11_libcusparse_host_version }}
{% else %}
- cuda-cudart-dev
- libcublas-dev
- libcufft-dev
- libcurand-dev
- libcusolver-dev
- libcusparse-dev
{% endif %}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
- libcumlprims ={{ minor_version }}
- libraft ={{ minor_version }}
- libraft-headers ={{ minor_version }}
- librmm ={{ minor_version }}
- treelite {{ treelite_version }}
outputs:
- name: libcuml
version: {{ version }}
script: install_libcuml.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcublas {{ cuda11_libcublas_run_version }}
- libcufft {{ cuda11_libcufft_run_version }}
- libcurand {{ cuda11_libcurand_run_version }}
- libcusolver {{ cuda11_libcusolver_run_version }}
- libcusparse {{ cuda11_libcusparse_run_version }}
{% else %}
- libcublas
- libcufft
- libcurand
- libcusolver
- libcusparse
{% endif %}
- libcumlprims ={{ minor_version }}
- libraft ={{ minor_version }}
- librmm ={{ minor_version }}
- treelite {{ treelite_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libcuml library
- name: libcuml-tests
version: {{ version }}
script: install_libcuml_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_subpackage('libcuml', exact=True) }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: libcuml test & benchmark executables
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml-cpu/conda_build_config.yaml | c_compiler_version:
- 11
cxx_compiler_version:
- 11
cmake_version:
- ">=3.26.4"
sysroot_version:
- "=2.17"
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml-cpu/build.sh | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION.
# This assumes the script is executed from the root of the repo directory
./build.sh cuml-cpu -v
| 0 |
rapidsai_public_repos/cuml/conda/recipes | rapidsai_public_repos/cuml/conda/recipes/cuml-cpu/meta.yaml | # Copyright (c) 2023, NVIDIA CORPORATION.
# Usage:
# conda build . -c conda-forge -c numba -c rapidsai -c pytorch
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cuml-cpu
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- VERSION_SUFFIX
requirements:
build:
- cmake {{ cmake_version }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
- sysroot_{{ target_platform }} {{ sysroot_version }}
- ninja
host:
- python x.x
- setuptools
- scikit-build>=0.13.1
- cython>=3.0.0
run:
- python x.x
- numpy
- pandas
- scikit-learn=1.2
- hdbscan<=0.8.30
- umap-learn=0.5.3
- nvtx
test: # [linux64]
  imports: # [linux64]
    - cuml # [linux64]
about:
home: http://rapids.ai/
license: Apache-2.0
# license_file: LICENSE
summary: cuML-CPU library
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/CMakeLists.txt | #=============================================================================
# Copyright (c) 2018-2023 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
rapids_cuda_init_architectures(CUML)
project(CUML VERSION 23.12.00 LANGUAGES CXX CUDA)
# Write the version header
rapids_cmake_write_version_file(include/cuml/version_config.hpp)
##############################################################################
# - build type ---------------------------------------------------------------
# Set a default build type if none was specified
rapids_cmake_build_type(Release)
# this is needed for clang-tidy runs
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
##############################################################################
# - User Options ------------------------------------------------------------
option(CUML_ENABLE_GPU "Enable building GPU-accelerated algorithms" ON)
option(BUILD_SHARED_LIBS "Build cuML shared libraries" ON)
option(BUILD_CUML_C_LIBRARY "Build libcuml_c shared library. Contains the cuML C API" ON)
option(BUILD_CUML_CPP_LIBRARY "Build libcuml shared library" ON)
option(BUILD_CUML_TESTS "Build cuML algorithm tests" ON)
option(BUILD_CUML_MG_TESTS "Build cuML multigpu algorithm tests" OFF)
option(BUILD_PRIMS_TESTS "Build ml-prim tests" ON)
option(BUILD_CUML_EXAMPLES "Build C++ API usage examples" ON)
option(BUILD_CUML_BENCH "Build cuML C++ benchmark tests" ON)
option(BUILD_CUML_MPI_COMMS "Build the MPI+NCCL Communicator (used for testing)" OFF)
option(CUDA_ENABLE_KERNEL_INFO "Enable kernel resource usage info" OFF)
option(CUDA_ENABLE_LINE_INFO "Enable lineinfo in nvcc" OFF)
option(DETECT_CONDA_ENV "Enable detection of conda environment for dependencies" ON)
option(DISABLE_DEPRECATION_WARNINGS "Disable deprecation warnings " OFF)
option(DISABLE_OPENMP "Disable OpenMP" OFF)
option(ENABLE_CUMLPRIMS_MG "Enable algorithms that use libcumlprims_mg" ON)
option(NVTX "Enable nvtx markers" OFF)
option(SINGLEGPU "Disable all mnmg components and comms libraries" OFF)
option(USE_CCACHE "Cache build artifacts with ccache" OFF)
option(CUDA_STATIC_RUNTIME "Statically link the CUDA toolkit runtime and libraries" OFF)
option(CUML_USE_RAFT_STATIC "Build and statically link the RAFT libraries" OFF)
option(CUML_RAFT_COMPILED "Use libraft shared library" ON)
option(CUML_USE_TREELITE_STATIC "Build and statically link the treelite library" OFF)
option(CUML_EXPORT_TREELITE_LINKAGE "Whether to publicly or privately link treelite to libcuml++" OFF)
option(CUML_USE_CUMLPRIMS_MG_STATIC "Build and statically link the cumlprims_mg library" OFF)
# The options below allow incorporating libcuml into another build process
# without installing all its components. This is useful if total file size is
# at a premium and we do not expect other consumers to use any APIs of the
# dependency except those that are directly linked to by the dependent library.
option(CUML_EXCLUDE_RAFT_FROM_ALL "Exclude RAFT targets from cuML's 'all' target" OFF)
option(CUML_EXCLUDE_TREELITE_FROM_ALL "Exclude Treelite targets from cuML's 'all' target" OFF)
option(CUML_EXCLUDE_CUMLPRIMS_MG_FROM_ALL "Exclude cumlprims_mg targets from cuML's 'all' target" OFF)
option(CUML_RAFT_CLONE_ON_PIN "Explicitly clone RAFT branch when pinned to non-feature branch" ON)
message(VERBOSE "CUML_CPP: Building libcuml_c shared library. Contains the cuML C API: ${BUILD_CUML_C_LIBRARY}")
message(VERBOSE "CUML_CPP: Building libcuml shared library: ${BUILD_CUML_CPP_LIBRARY}")
message(VERBOSE "CUML_CPP: Building cuML algorithm tests: ${BUILD_CUML_TESTS}")
message(VERBOSE "CUML_CPP: Building cuML multigpu algorithm tests: ${BUILD_CUML_MG_TESTS}")
message(VERBOSE "CUML_CPP: Building ml-prims tests: ${BUILD_PRIMS_TESTS}")
message(VERBOSE "CUML_CPP: Building C++ API usage examples: ${BUILD_CUML_EXAMPLES}")
message(VERBOSE "CUML_CPP: Building cuML C++ benchmark tests: ${BUILD_CUML_BENCH}")
message(VERBOSE "CUML_CPP: Building the MPI+NCCL Communicator (used for testing): ${BUILD_CUML_MPI_COMMS}")
message(VERBOSE "CUML_CPP: Enabling detection of conda environment for dependencies: ${DETECT_CONDA_ENV}")
message(VERBOSE "CUML_CPP: Disabling OpenMP: ${DISABLE_OPENMP}")
message(VERBOSE "CUML_CPP: Enabling algorithms that use libcumlprims_mg: ${ENABLE_CUMLPRIMS_MG}")
message(VERBOSE "CUML_CPP: Enabling kernel resource usage info: ${KERNEL_INFO}")
message(VERBOSE "CUML_CPP: Enabling kernelinfo in nvcc: ${CUDA_ENABLE_KERNEL_INFO}")
message(VERBOSE "CUML_CPP: Enabling lineinfo in nvcc: ${CUDA_ENABLE_LINE_INFO}")
message(VERBOSE "CUML_CPP: Enabling nvtx markers: ${NVTX}")
message(VERBOSE "CUML_CPP: Disabling all mnmg components and comms libraries: ${SINGLEGPU}")
message(VERBOSE "CUML_CPP: Cache build artifacts with ccache: ${USE_CCACHE}")
message(VERBOSE "CUML_CPP: Statically link the CUDA toolkit runtime and libraries: ${CUDA_STATIC_RUNTIME}")
message(VERBOSE "CUML_CPP: Build and statically link RAFT libraries: ${CUML_USE_RAFT_STATIC}")
message(VERBOSE "CUML_CPP: Build and statically link Treelite library: ${CUML_USE_TREELITE_STATIC}")
set(CUML_ALGORITHMS "ALL" CACHE STRING "Experimental: Choose which algorithms are built into libcuml++.so. Can specify individual algorithms or groups in a semicolon-separated list.")
message(VERBOSE "CUML_CPP: Building libcuml++ with algorithms: '${CUML_ALGORITHMS}'.")
# Set RMM logging level
set(RMM_LOGGING_LEVEL "INFO" CACHE STRING "Choose the logging level.")
set_property(CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF")
message(VERBOSE "CUML_CPP: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")
if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
# `Threads::Threads` is the same value in first run and subsequent runs.
set(THREADS_PREFER_PTHREAD_FLAG ON)
endif()
##############################################################################
# - Target names -------------------------------------------------------------
set(CUML_CPP_TARGET "cuml++")
set(CUML_CPP_BENCH_TARGET "sg_benchmark")
if(${BUILD_CUML_C_LIBRARY})
set(CUML_C_TARGET "cuml")
endif()
set(CUML_C_TEST_TARGET "${CUML_C_TARGET}_test")
set(CUML_MG_TEST_TARGET "ml_mg")
set(PRIMS_BENCH_TARGET "prims_benchmark")
##############################################################################
# - Conda environment detection ----------------------------------------------
if(DETECT_CONDA_ENV)
rapids_cmake_support_conda_env( conda_env MODIFY_PREFIX_PATH )
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT AND DEFINED ENV{CONDA_PREFIX})
message(STATUS "CUML_CPP: No CMAKE_INSTALL_PREFIX argument detected, setting to: $ENV{CONDA_PREFIX}")
set(CMAKE_INSTALL_PREFIX "$ENV{CONDA_PREFIX}")
endif()
endif()
##############################################################################
# - compiler options ---------------------------------------------------------
set(_ctk_static_suffix "")
if(CUDA_STATIC_RUNTIME)
set(_ctk_static_suffix "_static_nocallback")
endif()
if (NOT DISABLE_OPENMP)
find_package(OpenMP)
if(OpenMP_FOUND)
message(STATUS "CUML_CPP: OpenMP found in ${OPENMP_INCLUDE_DIRS}")
list(APPEND CUML_CXX_FLAGS ${OpenMP_CXX_FLAGS})
endif()
endif()
# CUDA runtime
rapids_cuda_init_runtime(USE_STATIC ${CUDA_STATIC_RUNTIME})
# * find CUDAToolkit package
# * determine GPU architectures
# * enable the CMake CUDA language
# * set other CUDA compilation flags
rapids_find_package(CUDAToolkit REQUIRED
BUILD_EXPORT_SET cuml-exports
INSTALL_EXPORT_SET cuml-exports
)
include(cmake/modules/ConfigureCUDA.cmake)
##############################################################################
# - Set options based on user-defined options --------------------------------
# Algorithm-dependent link requirements default OFF; ConfigureAlgorithms.cmake
# re-enables the ones needed by the selected algorithms.
set(CUML_USE_RAFT_NN OFF)
set(CUML_RAFT_COMPILED OFF)
set(LINK_TREELITE OFF)
set(LINK_CUFFT OFF)
include(cmake/modules/ConfigureAlgorithms.cmake)
# Enabling libcuml enables building libcuml++
if(BUILD_CUML_C_LIBRARY)
set(BUILD_CUML_CPP_LIBRARY ON)
endif()
# Disabling libcuml++ disables building algorithm tests and examples
if(NOT BUILD_CUML_CPP_LIBRARY)
set(BUILD_CUML_C_LIBRARY OFF)
set(BUILD_CUML_TESTS OFF)
set(BUILD_CUML_MG_TESTS OFF)
set(BUILD_CUML_EXAMPLES OFF)
endif()
# SingleGPU build disables cumlprims_mg and comms components
if(SINGLEGPU)
message(STATUS "CUML_CPP: Detected SINGLEGPU build option")
message(STATUS "CUML_CPP: Disabling Multi-GPU components and comms libraries")
set(BUILD_CUML_MG_TESTS OFF)
set(BUILD_CUML_MPI_COMMS OFF)
set(ENABLE_CUMLPRIMS_MG OFF)
set(WITH_UCX OFF)
endif()
# Multi-GPU tests imply the MPI comms layer.
if(BUILD_CUML_MG_TESTS AND NOT SINGLEGPU)
message(STATUS "CUML_CPP: Detected BUILD_CUML_MG_TESTS set to ON. Enabling BUILD_CUML_MPI_COMMS")
set(BUILD_CUML_MPI_COMMS ON)
endif()
# Route all C/C++/CUDA compiler invocations through ccache when requested.
if(USE_CCACHE)
set(CMAKE_C_COMPILER_LAUNCHER ccache)
set(CMAKE_CXX_COMPILER_LAUNCHER ccache)
set(CMAKE_CUDA_COMPILER_LAUNCHER ccache)
endif()
##############################################################################
# - Requirements -------------------------------------------------------------
# add third party dependencies using CPM
rapids_cpm_init()
rapids_cmake_install_lib_dir(lib_dir)
if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
find_package(Threads)
endif()
# raft is always required; the remaining third-party packages are pulled in
# only when the selected algorithms / build options need them.
include(cmake/thirdparty/get_raft.cmake)
if(LINK_TREELITE)
include(cmake/thirdparty/get_treelite.cmake)
endif()
if(all_algo OR treeshap_algo)
include(cmake/thirdparty/get_gputreeshap.cmake)
# Workaround until https://github.com/rapidsai/rapids-cmake/issues/176 is resolved
if(NOT BUILD_SHARED_LIBS)
rapids_export_package(BUILD GPUTreeShap cuml-exports)
rapids_export_package(INSTALL GPUTreeShap cuml-exports)
endif()
endif()
if(ENABLE_CUMLPRIMS_MG)
include(cmake/thirdparty/get_cumlprims_mg.cmake)
endif()
if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
include(cmake/thirdparty/get_gtest.cmake)
endif()
if(BUILD_CUML_BENCH)
include(${rapids-cmake-dir}/cpm/gbench.cmake)
rapids_cpm_gbench()
endif()
##############################################################################
# - build libcuml++ shared library -------------------------------------------
# Generate a linker script that groups the CUDA fat-binary sections
# (.nvFatBinSegment / .nv_fatbin) into their own output sections. It is
# passed on the link line of each library so CUDA symbols aren't relocated
# into the middle of large (debug) binaries.
if(BUILD_CUML_C_LIBRARY OR BUILD_CUML_CPP_LIBRARY)
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld"
[=[
SECTIONS
{
.nvFatBinSegment : { *(.nvFatBinSegment) }
.nv_fatbin : { *(.nv_fatbin) }
}
]=])
endif()
# Copy the interface include directories from INCLUDED_TARGET to TARGET.
# This is necessary when INCLUDED_TARGET was compiled statically but includes
# public APIs that may still require consumers to have the same interface
# headers available.
#
# Keyword arguments:
#   TARGET          - target that receives the include directories (PUBLIC)
#   INCLUDED_TARGET - target whose INTERFACE_INCLUDE_DIRECTORIES are read
#
# NOTE(review): the name is almost certainly a typo for
# "copy_interface_includes" — it propagates include directories, not
# excludes. Not renamed here because the call sites below depend on it.
function(copy_interface_excludes)
set(_options "")
set(_one_value TARGET INCLUDED_TARGET)
set(_multi_value "")
cmake_parse_arguments(_CUML_INCLUDES "${_options}" "${_one_value}"
"${_multi_value}" ${ARGN})
get_target_property(_includes ${_CUML_INCLUDES_INCLUDED_TARGET} INTERFACE_INCLUDE_DIRECTORIES)
target_include_directories(${_CUML_INCLUDES_TARGET} PUBLIC ${_includes})
endfunction()
if(BUILD_CUML_CPP_LIBRARY)
# single GPU components
# common components
add_library(${CUML_CPP_TARGET}
src/common/logger.cpp)
if (CUML_ENABLE_GPU)
target_compile_definitions(${CUML_CPP_TARGET} PUBLIC CUML_ENABLE_GPU)
endif()
# Per-algorithm source lists: each <name>_algo flag is produced by
# cmake/modules/ConfigureAlgorithms.cmake; all_algo selects every algorithm.
if(all_algo OR arima_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/arima/batched_arima.cu
src/arima/batched_kalman.cu)
endif()
if(all_algo OR datasets_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/datasets/make_arima.cu
src/datasets/make_blobs.cu
src/datasets/make_regression.cu)
endif()
if(all_algo OR dbscan_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/dbscan/dbscan.cu)
endif()
if(all_algo OR decisiontree_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/decisiontree/batched-levelalgo/kernels/entropy-double.cu
src/decisiontree/batched-levelalgo/kernels/entropy-float.cu
src/decisiontree/batched-levelalgo/kernels/gamma-double.cu
src/decisiontree/batched-levelalgo/kernels/gamma-float.cu
src/decisiontree/batched-levelalgo/kernels/gini-double.cu
src/decisiontree/batched-levelalgo/kernels/gini-float.cu
src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-double.cu
src/decisiontree/batched-levelalgo/kernels/inverse_gaussian-float.cu
src/decisiontree/batched-levelalgo/kernels/mse-double.cu
src/decisiontree/batched-levelalgo/kernels/mse-float.cu
src/decisiontree/batched-levelalgo/kernels/poisson-double.cu
src/decisiontree/batched-levelalgo/kernels/poisson-float.cu
src/decisiontree/batched-levelalgo/kernels/quantiles.cu
src/decisiontree/decisiontree.cu)
endif()
if(all_algo OR explainer_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/explainer/kernel_shap.cu
src/explainer/permutation_shap.cu)
endif()
if(all_algo OR treeshap_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/explainer/tree_shap.cu)
endif()
# FIL components
# The .cu translation units are compiled only for GPU-enabled builds; the
# experimental FIL .cpp counterparts below are compiled unconditionally.
if(all_algo OR fil_algo)
if(CUML_ENABLE_GPU)
target_sources(${CUML_CPP_TARGET}
PRIVATE
# Current FIL
src/fil/fil.cu
src/fil/infer.cu
src/fil/treelite_import.cu
# Experimental FIL
src/experimental/fil/infer0.cu
src/experimental/fil/infer1.cu
src/experimental/fil/infer2.cu
src/experimental/fil/infer3.cu
src/experimental/fil/infer4.cu
src/experimental/fil/infer5.cu
src/experimental/fil/infer6.cu
src/experimental/fil/infer7.cu)
endif()
target_sources(${CUML_CPP_TARGET}
PRIVATE
# Experimental FIL
src/experimental/fil/infer0.cpp
src/experimental/fil/infer1.cpp
src/experimental/fil/infer2.cpp
src/experimental/fil/infer3.cpp
src/experimental/fil/infer4.cpp
src/experimental/fil/infer5.cpp
src/experimental/fil/infer6.cpp
src/experimental/fil/infer7.cpp)
endif()
# todo: organize linear models better
if(all_algo OR linearregression_algo OR ridge_algo OR lasso_algo OR logisticregression_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/glm/glm.cu)
endif()
if(all_algo OR genetic_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/genetic/genetic.cu
src/genetic/node.cu)
endif()
# NOTE(review): this block also lists genetic sources (program.cu, node.cu);
# node.cu is already added under genetic_algo above, so with all_algo it
# appears twice. Confirm CMake de-duplicates the identical source path, and
# consider moving program.cu under genetic_algo.
if(all_algo OR hdbscan_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/genetic/program.cu
src/genetic/node.cu
src/hdbscan/hdbscan.cu
src/hdbscan/condensed_hierarchy.cu
src/hdbscan/prediction_data.cu)
endif()
if(all_algo OR holtwinters_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/holtwinters/holtwinters.cu)
endif()
if(all_algo OR kmeans_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/kmeans/kmeans_transform.cu
src/kmeans/kmeans_fit_predict.cu
src/kmeans/kmeans_predict.cu
)
endif()
if(all_algo OR knn_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/knn/knn.cu
src/knn/knn_sparse.cu)
endif()
if(all_algo OR hierarchicalclustering_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/hierarchy/linkage.cu)
endif()
if(all_algo OR metrics_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/metrics/accuracy_score.cu
src/metrics/adjusted_rand_index.cu
src/metrics/completeness_score.cu
src/metrics/entropy.cu
src/metrics/homogeneity_score.cu
src/metrics/kl_divergence.cu
src/metrics/mutual_info_score.cu
src/metrics/pairwise_distance.cu
src/metrics/pairwise_distance_canberra.cu
src/metrics/pairwise_distance_chebyshev.cu
src/metrics/pairwise_distance_correlation.cu
src/metrics/pairwise_distance_cosine.cu
src/metrics/pairwise_distance_euclidean.cu
src/metrics/pairwise_distance_hamming.cu
src/metrics/pairwise_distance_hellinger.cu
src/metrics/pairwise_distance_jensen_shannon.cu
src/metrics/pairwise_distance_kl_divergence.cu
src/metrics/pairwise_distance_l1.cu
src/metrics/pairwise_distance_minkowski.cu
src/metrics/pairwise_distance_russell_rao.cu
src/metrics/r2_score.cu
src/metrics/rand_index.cu
src/metrics/silhouette_score.cu
src/metrics/silhouette_score_batched_double.cu
src/metrics/silhouette_score_batched_float.cu
src/metrics/trustworthiness.cu
src/metrics/v_measure.cu)
endif()
if(all_algo OR pca_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/pca/pca.cu)
endif()
if(all_algo OR randomforest_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/randomforest/randomforest.cu)
endif()
if(all_algo OR randomprojection_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/random_projection/rproj.cu)
endif()
# todo: separate solvers better
if(all_algo OR solvers_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/solver/lars.cu
src/solver/solver.cu)
endif()
if(all_algo OR spectralclustering_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/spectral/spectral.cu)
endif()
if(all_algo OR svm_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/svm/svc.cu
src/svm/svr.cu
src/svm/linear.cu
src/svm/ws_util.cu)
endif()
if(all_algo OR autoarima_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/tsa/auto_arima.cu
src/tsa/stationarity.cu)
endif()
if(all_algo OR tsne_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/tsne/tsne.cu)
endif()
if(all_algo OR tsvd_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/tsvd/tsvd.cu)
endif()
if(all_algo OR umap_algo)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/umap/umap.cu)
endif()
# multi GPU components
# todo: separate mnmg that require cumlprims from those that don't
if(NOT SINGLEGPU)
target_sources(${CUML_CPP_TARGET}
PRIVATE
src/glm/ols_mg.cu
src/glm/preprocess_mg.cu
src/glm/ridge_mg.cu
src/glm/qn_mg.cu
src/kmeans/kmeans_mg.cu
src/knn/knn_mg.cu
src/knn/knn_classify_mg.cu
src/knn/knn_regress_mg.cu
src/pca/pca_mg.cu
src/pca/sign_flip_mg.cu
src/solver/cd_mg.cu
src/tsvd/tsvd_mg.cu
)
endif()
add_library(cuml::${CUML_CPP_TARGET} ALIAS ${CUML_CPP_TARGET})
# C++17 for both host and device code; PIC so the objects can be placed into
# a shared library.
set_target_properties(${CUML_CPP_TARGET}
PROPERTIES BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_definitions(${CUML_CPP_TARGET}
PUBLIC
DISABLE_CUSPARSE_DEPRECATED
PRIVATE
CUML_CPP_API
)
target_compile_options(${CUML_CPP_TARGET}
PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUML_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${CUML_CUDA_FLAGS}>"
)
target_include_directories(${CUML_CPP_TARGET}
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
PRIVATE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src/metrics>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src_prims>
$<$<BOOL:${BUILD_CUML_MPI_COMMS}>:${MPI_CXX_INCLUDE_PATH}>
INTERFACE
$<INSTALL_INTERFACE:include>
)
# Link-library lists are accumulated below; whether an entry ends up PUBLIC or
# PRIVATE depends on the static/shared decisions that follow.
set(_cuml_cpp_public_libs)
set(_cuml_cpp_private_libs)
# For statically-linked dependencies, re-expose their interface include
# directories on libcuml++ directly (see copy_interface_excludes above).
if(CUML_USE_RAFT_STATIC AND (TARGET raft::raft))
copy_interface_excludes(INCLUDED_TARGET raft::raft TARGET ${CUML_CPP_TARGET})
if(CUML_USE_RAFT_DIST AND (TARGET cuco::cuco))
list(APPEND _cuml_cpp_private_libs cuco::cuco)
endif()
endif()
if(CUML_USE_TREELITE_STATIC AND (TARGET treelite::treelite_static))
# By default, TREELITE_LIBS will contain both treelite::treelite_static and
# treelite::treelite_runtime_static if we are linking statically, but these
# two targets have duplicate symbols so we can only link to one of them.
set(TREELITE_LIBS treelite::treelite_static)
copy_interface_excludes(INCLUDED_TARGET treelite::treelite_static TARGET ${CUML_CPP_TARGET})
elseif(CUML_EXPORT_TREELITE_LINKAGE)
list(APPEND _cuml_cpp_public_libs ${TREELITE_LIBS})
endif()
if(CUML_USE_CUMLPRIMS_MG_STATIC AND (TARGET cumlprims_mg::cumlprims_mg))
copy_interface_excludes(INCLUDED_TARGET cumlprims_mg::cumlprims_mg TARGET ${CUML_CPP_TARGET})
endif()
# These are always private:
list(APPEND _cuml_cpp_private_libs
raft::raft
$<TARGET_NAME_IF_EXISTS:GPUTreeShap::GPUTreeShap>
$<$<BOOL:${LINK_CUFFT}>:CUDA::cufft${_ctk_static_suffix}>
${TREELITE_LIBS}
${OpenMP_CXX_LIB_NAMES}
$<$<OR:$<BOOL:${BUILD_CUML_STD_COMMS}>,$<BOOL:${BUILD_CUML_MPI_COMMS}>>:NCCL::NCCL>
$<$<BOOL:${BUILD_CUML_MPI_COMMS}>:${MPI_CXX_LIBRARIES}>
)
set(_cuml_cpp_libs_var_name "_cuml_cpp_public_libs")
if(CUDA_STATIC_RUNTIME)
set(_cuml_cpp_libs_var_name "_cuml_cpp_private_libs")
# Add CTK include paths because we're going to make our CTK library links private below
target_include_directories(${CUML_CPP_TARGET} SYSTEM PUBLIC ${CUDAToolkit_INCLUDE_DIRS})
endif()
# The visibility of these depend on whether we're linking the CTK statically,
# because cumlprims_mg and cuML inherit their CUDA libs from the raft::raft
# INTERFACE target.
list(APPEND ${_cuml_cpp_libs_var_name}
$<$<BOOL:${CUML_RAFT_COMPILED}>:${RAFT_COMPILED_LIB}>
$<TARGET_NAME_IF_EXISTS:cumlprims_mg::cumlprims_mg>
)
target_link_libraries(${CUML_CPP_TARGET}
PUBLIC rmm::rmm
${_cuml_cpp_public_libs}
PRIVATE ${_cuml_cpp_private_libs}
)
# If we export the libdmlc symbols, they can lead to weird crashes with other
# libraries that use libdmlc. This just hides the symbols internally.
target_link_options(${CUML_CPP_TARGET} PRIVATE "-Wl,--exclude-libs,libdmlc.a")
# same as above, but for protobuf library
target_link_options(${CUML_CPP_TARGET} PRIVATE "-Wl,--exclude-libs,libprotobuf.a")
# ensure CUDA symbols aren't relocated to the middle of the debug build binaries
target_link_options(${CUML_CPP_TARGET} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld")
endif()
#############################################################################
# - build libcuml C shared library -------------------------------------------
# Thin C API wrapper over libcuml++. It links the C++ target PUBLIC so its
# consumers inherit libcuml++'s usage requirements.
if(BUILD_CUML_C_LIBRARY)
add_library(${CUML_C_TARGET}
src/common/cumlHandle.cpp
src/common/cuml_api.cpp
src/dbscan/dbscan_api.cpp
src/glm/glm_api.cpp
src/holtwinters/holtwinters_api.cpp
src/knn/knn_api.cpp
src/svm/svm_api.cpp
)
add_library(cuml::${CUML_C_TARGET} ALIAS ${CUML_C_TARGET})
target_compile_definitions(${CUML_C_TARGET}
PRIVATE
CUML_C_API)
target_include_directories(${CUML_C_TARGET}
PRIVATE
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
)
target_link_libraries(${CUML_C_TARGET}
PUBLIC
${CUML_CPP_TARGET}
)
# ensure CUDA symbols aren't relocated to the middle of the debug build binaries
target_link_options(${CUML_C_TARGET} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/fatbin.ld")
endif()
##############################################################################
# - build test executables ---------------------------------------------------
if(BUILD_CUML_TESTS)
include(CTest)
add_subdirectory(test)
endif()
##############################################################################
# - build examples -----------------------------------------------------------
if(BUILD_CUML_EXAMPLES)
add_subdirectory(examples)
endif()
# ###################################################################################################
# # - install targets -------------------------------------------------------------------------------
include(CPack)
# Install libcuml++ (and libcuml when built), the public headers, and the
# generated version header.
set(CUML_TARGETS ${CUML_CPP_TARGET})
if(BUILD_CUML_C_LIBRARY)
list(APPEND CUML_TARGETS
${CUML_C_TARGET})
endif()
install(TARGETS
${CUML_TARGETS}
DESTINATION
${lib_dir}
EXPORT
cuml-exports)
install(DIRECTORY include/cuml/
DESTINATION include/cuml)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/cuml/version_config.hpp
DESTINATION include/cuml)
# ################################################################################################
# # - install export -------------------------------------------------------------------------------
set(doc_string
[=[
Provide targets for cuML.
cuML is a suite of libraries that implement machine learning algorithms and mathematical primitives
functions that share compatible APIs with other RAPIDS projects.
]=])
# Extra code appended to the generated cuml-config file: exposes the treelite
# targets under non-namespaced alias names for downstream consumers, matching
# whichever flavor (shared vs. static) of treelite this build used.
set(code_string )
if (TARGET treelite::treelite)
string(APPEND code_string
[=[
if (TARGET treelite::treelite AND (NOT TARGET treelite))
add_library(treelite ALIAS treelite::treelite)
endif()
if (TARGET treelite::treelite_runtime AND (NOT TARGET treelite_runtime))
add_library(treelite_runtime ALIAS treelite::treelite_runtime)
endif()
]=])
else()
string(APPEND code_string
[=[
if (TARGET treelite::treelite_static AND (NOT TARGET treelite_static))
add_library(treelite_static ALIAS treelite::treelite_static)
endif()
if (TARGET treelite::treelite_runtime_static AND (NOT TARGET treelite_runtime_static))
add_library(treelite_runtime_static ALIAS treelite::treelite_runtime_static)
endif()
]=])
endif()
rapids_export(INSTALL cuml
EXPORT_SET cuml-exports
GLOBAL_TARGETS ${CUML_C_TARGET} ${CUML_CPP_TARGET}
NAMESPACE cuml::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK code_string
)
################################################################################################
# - build export -------------------------------------------------------------------------------
rapids_export(BUILD cuml
EXPORT_SET cuml-exports
GLOBAL_TARGETS ${CUML_C_TARGET} ${CUML_CPP_TARGET}
NAMESPACE cuml::
DOCUMENTATION doc_string
FINAL_CODE_BLOCK code_string
)
##############################################################################
# - build benchmark executable -----------------------------------------------
if(BUILD_CUML_BENCH)
add_subdirectory(bench)
endif()
##############################################################################
# - doxygen targets ----------------------------------------------------------
# Registers a "docs"-style target that renders Doxyfile.in and runs doxygen
# from the source directory.
include(cmake/doxygen.cmake)
add_doxygen_target(IN_DOXYFILE Doxyfile.in
OUT_DOXYFILE ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile
CWD ${CMAKE_CURRENT_SOURCE_DIR})
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/Doxyfile.in | # Doxyfile 1.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "cuML C++ API"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = "23.12"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise cause
# performance problems for the file system.
# The default value is: NO.
CREATE_SUBDIRS = NO
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
# Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = NO
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# By default Python docstrings are displayed as preformatted text and doxygen's
# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
# doxygen's special commands can be used and the contents of the docstring
# documentation blocks is shown as doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:\n"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
ALIASES =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = NO
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
# as Fortran files (default is PHP), and .f files as C (default is Fortran),
# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen. When specifying no_extension you should add
# * to the FILE_PATTERNS.
#
# Note see also the list of default file extension mappings.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use
# during processing. When set to 0 doxygen will base this on the number of
# cores available in the system. You can set it explicitly to a value larger
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
NUM_PROC_THREADS = 1
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = NO
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If this flag is set to YES, the name of an unnamed parameter in a declaration
# will be determined by the corresponding definition. By default unnamed
# parameters remain unnamed in the output.
# The default value is: YES.
RESOLVE_UNNAMED_PARAMS = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
# able to match the capabilities of the underlying filesystem. In case the
# filesystem is case sensitive (i.e. it supports files in the same directory
# whose names only differ in casing), the option must be set to YES to properly
# deal with such files in case they appear in the input. For filesystems that
# are not case sensitive the option should be set to NO to properly deal with
# output files written for symbols that only differ in casing, such as for two
# classes, one named CLASS and the other named Class, and to also support
# references to files without having to specify the exact matching casing. On
# Windows (including Cygwin) and MacOS, users should typically set this option
# to NO, whereas on Linux or other Unix flavors it should typically be set to
# YES.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command command input-file, where command is the value of the
# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
# in a documented function, or documenting parameters that don't exist or using
# markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
# at the end of the doxygen process doxygen will return with a non-zero status.
# Possible values are: NO, YES and FAIL_ON_WARNINGS.
# The default value is: NO.
WARN_AS_ERROR = YES
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = include \
src \
src_prims
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see:
# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# Note the list of default checked file patterns might differ from the list of
# default file extension mappings.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
# *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.cpp \
*.h \
*.hpp \
*.hxx \
*.cu \
*.cuh
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS = columnWiseSort.cuh \
smoblocksolve.h
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
# <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, use a header file that includes any scripts and style
# sheets that doxygen needs, which depends on the configuration options used
# (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER = header.html
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries 1 will produce a full collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see:
# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
# create a documentation set, doxygen will generate a Makefile in the HTML
# output directory. Running make will produce the docset in that directory and
# running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see:
# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location (absolute path
# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
# the HTML output. These images will generally look nicer at scaled resolutions.
# Possible values are: png (the default) and svg (looks nicer but requires the
# pdf2svg or inkscape tool).
# The default value is: png.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
# Note that when changing this option you need to delete any form_*.png files in
# the HTML output directory before the changes have effect.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want your formulas to look prettier in the HTML output.
# When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = YES
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
# key> to jump into the search results window, the results can be navigated
# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
# the search. The filter options can be selected when the cursor is inside the
# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
# to select a filter and <Enter> or <escape> to activate or cancel the filter
# option.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
# and searching needs to be provided by external tools. See the section
# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
# which needs to be processed by an external indexer. Doxygen will invoke an
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see:
# https://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see:
# https://xapian.org/). See the section "External Indexing and Searching" for
# details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
# SEARCHDATA_FILE tag the name of this file can be specified.
# The default file is: searchdata.xml.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHDATA_FILE = searchdata.xml
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
# all added to the same external search index. Each project needs to have a
# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
# to a relative location where the documentation can be found. The format is:
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex when enabling
# USE_PDFLATEX the default is pdflatex and when in the latter case latex is
# chosen this is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used by the
# printer.
# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
# 14 inches) and executive (7.25 x 10.5 inches).
# The default value is: a4.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
# chapter. If it is left blank doxygen will generate a standard header. See
# section "Doxygen usage" for information on how to let doxygen write the
# default header to a separate file.
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
# string, for the replacement values of the other commands the user is referred
# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
# chapter. If it is left blank doxygen will generate a standard footer. See
# LATEX_HEADER for more information on how to generate a default footer and what
# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# LaTeX style sheets that are included after the standard style sheets created
# by doxygen. Using this option one can overrule certain style aspects. Doxygen
# will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_STYLESHEET =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
# contain links (just like the HTML output) instead of page references. This
# makes the output suitable for online browsing using a PDF viewer.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
PDF_HYPERLINKS = YES
# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
# files. Set this option to YES, to get a higher quality PDF documentation.
#
# See also section LATEX_CMD_NAME for selecting the engine.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep running
# if errors occur, instead of asking the user for help. This option is also used
# when generating formulas in HTML.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BATCHMODE = NO
# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
# index chapters (such as File Index, Compound Index, etc.) in the output.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HIDE_INDICES = NO
# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
# code with syntax highlighting in the LaTeX output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
GENERATE_RTF = NO
# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: rtf.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_OUTPUT = rtf
# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
COMPACT_RTF = NO
# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
# contain hyperlink fields. The RTF file will contain links (just like the HTML
# output) instead of page references. This makes the output suitable for online
# browsing using Word or some other Word compatible readers that support those
# fields.
#
# Note: WordPad (write) and others do not support links.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
# with syntax highlighting in the RTF output.
#
# Note that which sources are shown also depends on other settings such as
# SOURCE_BROWSER.
# The default value is: NO.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it. A directory man3 will be created inside the directory specified by
# MAN_OUTPUT.
# The default directory is: man.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_OUTPUT = man
# The MAN_EXTENSION tag determines the extension that is added to the generated
# man pages. In case the manual section does not start with a number, the number
# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
# optional.
# The default value is: .3.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_EXTENSION = .3
# The MAN_SUBDIR tag determines the name of the directory created within
# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
# MAN_EXTENSION with the initial . removed.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_SUBDIR =
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
# them the man command would be unable to find the correct page.
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
GENERATE_XML = NO
# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: xml.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_OUTPUT = xml
# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
# The default value is: YES.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_PROGRAMLISTING = YES
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
GENERATE_DOCBOOK = NO
# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
# front of it.
# The default directory is: docbook.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_OUTPUT = docbook
# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
# program listings (including syntax highlighting and cross-referencing
# information) to the DOCBOOK output. Note that enabling this will significantly
# increase the size of the DOCBOOK output.
# The default value is: NO.
# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_PERLMOD = NO
# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_LATEX = NO
# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_PRETTY = YES
# The names of the make variables in the generated doxyrules.make file are
# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
# so different doxyrules.make files included by the same Makefile don't
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
# EXPAND_AS_DEFINED tags.
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by the
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will be
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
# gcc). The argument of the tag is a list of macros of the form: name or
# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
# is assumed. To prevent a macro definition from being undefined via #undef or
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
# macro definition that is found in the sources will be used. Use the PREDEFINED
# tag if you want to use a different macro definition that overrules the
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all references to function-like macros that are alone on a line, have
# an all uppercase name, and do not end with a semicolon. Such function macros
# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
# The TAGFILES tag can be used to specify one or more tag files. For each tag
# file the location of the external documentation should be added. The format of
# a tag file without this location is as follows:
# TAGFILES = file1 file2 ...
# Adding location for the tag files is done as follows:
# TAGFILES = file1=loc1 "file2 = loc2" ...
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
# the class index. If set to NO, only the inherited external classes will be
# listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
# powerful graphs.
# The default value is: YES.
CLASS_DIAGRAMS = YES
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
HAVE_DOT = YES
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
# processors available in the system. You can set it explicitly to a value
# larger than 0 to get control over the balance between CPU load and processing
# speed.
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
# setting DOT_FONTPATH to the directory containing the font.
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
# class with other documented classes.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
# groups, showing the direct groups dependencies.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GROUP_GRAPHS = YES
# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = YES
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
# number of items for each type to make the size more manageable. Set this to 0
# for no limit. Note that the threshold may be exceeded by 50% before the limit
# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
# This tag requires that the tag UML_LOOK is set to YES.
UML_LIMIT_NUM_FIELDS = 10
# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
# tag is set to YES, doxygen will add type and arguments for attributes and
# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
# will not generate fields with class member information in the UML graphs. The
# class diagrams will look similar to the default class diagrams but using UML
# notation for the relationships.
# Possible values are: NO, YES and NONE.
# The default value is: NO.
# This tag requires that the tag UML_LOOK is set to YES.
DOT_UML_DETAILS = NO
# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
# to display on a single line. If the actual line length exceeds this threshold
# significantly it will be wrapped across multiple lines. Some heuristics are
# applied to avoid ugly line breaks.
# Minimum value: 0, maximum value: 1000, default value: 17.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_WRAP_THRESHOLD = 17
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
# files.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
INCLUDED_BY_GRAPH = YES
# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALL_GRAPH = NO
# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
# dependency graph for every global function or class method.
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
# graphical hierarchy of all classes instead of a textual one.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
# files in the directories.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
#
# Note that this requires a modern browser other than Internet Explorer. Tested
# and working are Firefox, Chrome, Safari, and Opera.
# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
# the SVG files visible. Older versions of IE do not have SVG support.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
# path where java can find the plantuml.jar file. If left blank, it is assumed
# PlantUML is not used or called during a preprocessing step. Doxygen will
# generate a warning when it encounters a \startuml command in this case and
# will not generate output for the diagram.
PLANTUML_JAR_PATH =
# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for plantuml.
PLANTUML_CFG_FILE =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.
PLANTUML_INCLUDE_PATH =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
# by representing a node as a red box. Note that if the number of direct
# children of the root node in a graph is already larger than
# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note that
# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
# Minimum value: 0, maximum value: 10000, default value: 50.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_GRAPH_MAX_NODES = 50
# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
# generated by dot. A depth value of 3 means that only nodes reachable from the
# root by following a path via at most 3 edges will be shown. Nodes that lay
# further from the root node will be omitted. Note that setting this option to 1
# or 2 may greatly reduce the computation time needed for large code bases. Also
# note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
# to support this out of the box.
#
# Warning: Depending on the platform used, enabling this option may lead to
# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
# read).
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
# graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
GENERATE_LEGEND = YES
# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
# files that are used to generate the various graphs.
#
# Note: This setting is not only used for dot files but also for msc and
# plantuml temporary files.
# The default value is: YES.
DOT_CLEANUP = YES
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/README.md | # cuML C++
This folder contains the C++ and CUDA code of the algorithms and ML primitives of cuML. The build system uses CMake for build configuration, and an out-of-source build is recommended.
## Source Code Folders
The source code of cuML is divided in three main directories: `src`, `src_prims`, and `comms`.
- `src` contains the source code of the Machine Learning algorithms, and the main cuML C++ API. The main consumable is the shared library `libcuml++`, that can be used stand alone by C++ consumers or is consumed by our Python package `cuml` to provide a Python API.
- `src_prims` contains most of the common components and computational primitives that form part of the machine learning algorithms in cuML, and can be used individually as well in the form of a header only library.
- `comms` contains the source code of the communications implementations that enable multi-node multi-GPU algorithms. There are currently two communications implementations. The implementation in the `mpi` directory is for MPI environments. It can also be used for automated testing. The implementation in the `std` directory is required for running cuML in multi-node multi-GPU Dask environments.
The `test` directory has subdirectories that reflect this distinction between the `src` and `prims` components of cuML.
## Setup
### Dependencies
1. cmake (>= 3.26.4)
2. CUDA (>= 11.0)
3. gcc (>=9.3.0)
4. clang-format (= 16.0.6) - enforces uniform C++ coding style; required to build cuML from source. The packages `clang=16` and `clang-tools=16` from the conda-forge channel should be sufficient, if you are on conda. If not using conda, install the right version using your OS package manager.
### Building cuML:
The main artifact produced by the build system is the shared library libcuml++. Additionally, executables to run tests for the algorithms can be built. To see detailed steps see the [BUILD](../BUILD.md) document of the repository.
Current cmake offers the following configuration options:
- Build Configuration Options:
| Flag | Possible Values | Default Value | Behavior |
| --- | --- | --- | --- |
| BUILD_CUML_CPP_LIBRARY | [ON, OFF] | ON | Enable/disable building libcuml++ shared library. Setting this variable to `OFF` sets the variables BUILD_CUML_TESTS, BUILD_CUML_MG_TESTS and BUILD_CUML_EXAMPLES to `OFF` |
| BUILD_CUML_C_LIBRARY | [ON, OFF] | ON | Enable/disable building the cuML C API shared library. Setting this variable to `OFF` sets the variables BUILD_CUML_TESTS, BUILD_CUML_MG_TESTS and BUILD_CUML_EXAMPLES to `OFF` |
| BUILD_CUML_TESTS | [ON, OFF] | ON | Enable/disable building cuML algorithm test executable `ml_test`. |
| BUILD_CUML_MG_TESTS | [ON, OFF] | ON | Enable/disable building cuML algorithm test executable `ml_mg_test`. Requires MPI to be installed. When enabled, BUILD_CUML_MPI_COMMS will be automatically set to ON. See section about additional requirements.|
| BUILD_PRIMS_TESTS | [ON, OFF] | ON | Enable/disable building cuML algorithm test executable `prims_test`. |
| BUILD_CUML_EXAMPLES | [ON, OFF] | ON | Enable/disable building cuML C++ API usage examples. |
| BUILD_CUML_BENCH | [ON, OFF] | ON | Enable/disable building of cuML C++ benchmark. |
| BUILD_CUML_PRIMS_BENCH | [ON, OFF] | ON | Enable/disable building of ml-prims C++ benchmark. |
| BUILD_CUML_STD_COMMS | [ON, OFF] | ON | Enable/disable building cuML NCCL+UCX communicator for running multi-node multi-GPU algorithms. Note that UCX support can also be enabled/disabled (see below). The standard communicator and MPI communicator are not mutually exclusive and can both be installed at the same time. |
| WITH_UCX | [ON, OFF] | OFF | Enable/disable UCX support in the standard cuML communicator. Algorithms requiring point-to-point messaging will not work when this is disabled. This flag is ignored if BUILD_CUML_STD_COMMS is set to OFF. |
| BUILD_CUML_MPI_COMMS | [ON, OFF] | OFF | Enable/disable building cuML MPI+NCCL communicator for running multi-node multi-GPU C++ tests. MPI communicator and STD communicator may both be installed at the same time. If OFF, it overrides BUILD_CUML_MG_TESTS to be OFF as well. |
| SINGLEGPU | [ON, OFF] | OFF | Disable all mnmg components. Disables building of all multi-GPU algorithms and all comms library components. Removes libcumlprims, UCX-py and NCCL dependencies. Overrides values of BUILD_CUML_MG_TESTS, BUILD_CUML_STD_COMMS, WITH_UCX and BUILD_CUML_MPI_COMMS. |
| DISABLE_OPENMP | [ON, OFF] | OFF | Set to `ON` to disable OpenMP |
| CMAKE_CUDA_ARCHITECTURES | List of GPU architectures, semicolon-separated | Empty | List the GPU architectures to compile the GPU targets for. Set to "NATIVE" to auto detect GPU architecture of the system, set to "ALL" to compile for all RAPIDS supported archs: ["60" "62" "70" "72" "75" "80" "86"]. |
| USE_CCACHE | [ON, OFF] | ON | Cache build artifacts with ccache. |
- Debug configuration options:
| Flag | Possible Values | Default Value | Behavior |
| --- | --- | --- | --- |
| KERNEL_INFO | [ON, OFF] | OFF | Enable/disable kernel resource usage info in nvcc. |
| LINE_INFO | [ON, OFF] | OFF | Enable/disable lineinfo in nvcc. |
| NVTX | [ON, OFF] | OFF | Enable/disable nvtx markers in libcuml++. |
After running CMake in a `build` directory, if the `BUILD_*` options were not turned `OFF`, the following targets can be built:
```bash
$ cmake --build . -j # Build libcuml++ and all tests
$ cmake --build . -j --target sg_benchmark # Build c++ cuml single gpu benchmark
$ cmake --build . -j --target cuml++ # Build libcuml++
$ cmake --build . -j --target ml # Build ml_test algorithm tests binary
$ cmake --build . -j --target ml_mg # Build ml_mg_test multi GPU algorithms tests binary
$ cmake --build . -j --target prims # Build prims_test ML primitive unit tests binary
```
### MultiGPU Tests Requirements Note:
To build the MultiGPU tests (CMake option `BUILD_CUML_MG_TESTS`), the following dependencies are required:
- MPI (OpenMPI recommended)
- NCCL, version corresponding to [RAFT's requirement](https://github.com/rapidsai/raft/blob/branch-23.02/conda/recipes/raft-dask/meta.yaml#L49).
### Third Party Modules
The external folder contains submodules that cuML depends on.
Current external submodules are:
1. [CUB](https://github.com/NVlabs/cub)
2. [Faiss](https://github.com/facebookresearch/faiss)
3. [Google Test](https://github.com/google/googletest)
## Using cuML libraries
After building cuML, you can use its functionality in other C/C++ applications
by linking against the generated libraries. The following trivial example shows
how to make external use of cuML's logger:
```cpp
// main.cpp
#include <cuml/common/logger.hpp>
int main(int argc, char *argv[]) {
CUML_LOG_WARN("This is a warning from the cuML logger!");
return 0;
}
```
To compile this example, we must point the compiler to where cuML was
installed. Assuming you did not provide a custom `$CMAKE_INSTALL_PREFIX`, this
will default to the `$CONDA_PREFIX` environment variable.
```bash
$ export LD_LIBRARY_PATH="${CONDA_PREFIX}/lib"
$ nvcc \
main.cpp \
-o cuml_logger_example \
"-L${CONDA_PREFIX}/lib" \
"-I${CONDA_PREFIX}/include" \
"-I${CONDA_PREFIX}/include/cuml/raft" \
-lcuml++
$ ./cuml_logger_example
[W] [13:26:43.503068] This is a warning from the cuML logger!
```
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/.clang-tidy | ---
Checks: 'clang-diagnostic-*,clang-analyzer-*,-modernize-*,-clang-diagnostic-#pragma-messages,-readability-identifier-naming,-clang-diagnostic-switch'
WarningsAsErrors: '*'
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
CheckOptions:
- key: cert-dcl16-c.NewSuffixes
value: 'L;LL;LU;LLU'
- key: cppcoreguidelines-non-private-member-variables-in-classes.IgnoreClassesWithAllMemberVariablesBeingPublic
value: '1'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-pass-by-value.ValuesOnly
value: '0'
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-replace-random-shuffle.IncludeStyle
value: llvm
- key: modernize-use-auto.MinTypeNameLength
value: '5'
- key: modernize-use-auto.RemoveStars
value: '0'
- key: modernize-use-default-member-init.IgnoreMacros
value: '1'
- key: modernize-use-default-member-init.UseAssignment
value: '0'
- key: modernize-use-emplace.ContainersWithPushBack
value: '::std::vector;::std::list;::std::deque'
- key: modernize-use-emplace.SmartPointers
value: '::std::shared_ptr;::std::unique_ptr;::std::auto_ptr;::std::weak_ptr'
- key: modernize-use-emplace.TupleMakeFunctions
value: '::std::make_pair;::std::make_tuple'
- key: modernize-use-emplace.TupleTypes
value: '::std::pair;::std::tuple'
- key: modernize-use-equals-default.IgnoreMacros
value: '1'
- key: modernize-use-equals-delete.IgnoreMacros
value: '1'
- key: modernize-use-nodiscard.ReplacementString
value: '[[nodiscard]]'
- key: modernize-use-noexcept.ReplacementString
value: ''
- key: modernize-use-noexcept.UseNoexceptFalse
value: '1'
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
- key: modernize-use-transparent-functors.SafeMode
value: '0'
- key: modernize-use-using.IgnoreMacros
value: '1'
- key: readability-identifier-naming.ClassCase
value: CamelCase
- key: readability-identifier-naming.ClassPrefix
value: ''
- key: readability-identifier-naming.ClassSuffix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: CamelCase
- key: readability-identifier-naming.ConstexprVariablePrefix
value: k
- key: readability-identifier-naming.ConstexprVariableSuffix
value: ''
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.EnumConstantPrefix
value: k
- key: readability-identifier-naming.EnumConstantSuffix
value: ''
- key: readability-identifier-naming.EnumPrefix
value: ''
- key: readability-identifier-naming.EnumSuffix
value: ''
- key: readability-identifier-naming.FunctionCase
value: CamelCase
- key: readability-identifier-naming.FunctionPrefix
value: ''
- key: readability-identifier-naming.FunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalConstantCase
value: CamelCase
- key: readability-identifier-naming.GlobalConstantPrefix
value: k
- key: readability-identifier-naming.GlobalConstantSuffix
value: ''
- key: readability-identifier-naming.IgnoreFailedSplit
value: '0'
- key: readability-identifier-naming.MemberCase
value: lower_case
- key: readability-identifier-naming.MemberPrefix
value: ''
- key: readability-identifier-naming.MemberSuffix
value: ''
- key: readability-identifier-naming.NamespaceCase
value: lower_case
- key: readability-identifier-naming.NamespacePrefix
value: ''
- key: readability-identifier-naming.NamespaceSuffix
value: ''
- key: readability-identifier-naming.PrivateMemberPrefix
value: ''
- key: readability-identifier-naming.PrivateMemberSuffix
value: _
- key: readability-identifier-naming.ProtectedMemberPrefix
value: ''
- key: readability-identifier-naming.ProtectedMemberSuffix
value: _
- key: readability-identifier-naming.StaticConstantCase
value: CamelCase
- key: readability-identifier-naming.StaticConstantPrefix
value: k
- key: readability-identifier-naming.StaticConstantSuffix
value: ''
- key: readability-identifier-naming.StructCase
value: CamelCase
- key: readability-identifier-naming.StructPrefix
value: ''
- key: readability-identifier-naming.StructSuffix
value: ''
- key: readability-identifier-naming.TypeAliasCase
value: CamelCase
- key: readability-identifier-naming.TypeAliasPrefix
value: ''
- key: readability-identifier-naming.TypeAliasSuffix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterCase
value: CamelCase
- key: readability-identifier-naming.TypeTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypedefCase
value: CamelCase
- key: readability-identifier-naming.TypedefPrefix
value: ''
- key: readability-identifier-naming.TypedefSuffix
value: ''
...
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/.clang-format | ---
# Refer to the following link for the explanation of each params:
# http://releases.llvm.org/8.0.0/tools/clang/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveBitFields: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: true
AllowShortLambdasOnASingleLine: true
AllowShortLoopsOnASingleLine: false
# This is deprecated
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
# disabling the below splits, else, they'll just add to the vertical length of source files!
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
BreakAfterJavaFieldAnnotations: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: WebKit
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
# Kept the below 2 to be the same as `IndentWidth` to keep everything uniform
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: google
# Enabling comment reflow causes doxygen comments to be messed up in their formats!
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
# Be consistent with indent-width, even for people who use tab for indentation!
TabWidth: 2
UseTab: Never
| 0 |
rapidsai_public_repos/cuml | rapidsai_public_repos/cuml/cpp/header.html | <!-- HTML header for doxygen 1.8.20-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
<!-- RAPIDS CUSTOM JS & CSS: START, Please add these two lines back after every version upgrade -->
<script defer src="https://docs.rapids.ai/assets/js/custom.js"></script>
<link rel="stylesheet" href="https://docs.rapids.ai/assets/css/custom.css">
<!-- RAPIDS CUSTOM JS & CSS: END -->
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER--> <span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<td>$searchbox</td>
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->
| 0 |
rapidsai_public_repos/cuml/cpp/include | rapidsai_public_repos/cuml/cpp/include/cuml/cuml_api.h | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <cuda_runtime_api.h>
// Block inclusion of this header when compiling libcuml++.so. If this error is
// shown during compilation, there is an issue with how the `#include` have
// been set up. To debug the issue, run `./build.sh cppdocs` and open the page
// 'cpp/build/html/cuml__api_8h.html' in a browser. This will show which files
// directly and indirectly include this file. Only files ending in '*_api' or
// 'cumlHandle' should include this header.
#ifdef CUML_CPP_API
#error "This header is only for the C-API and should not be included from the C++ API."
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef int cumlHandle_t;
typedef enum cumlError_t { CUML_SUCCESS, CUML_ERROR_UNKNOWN, CUML_INVALID_HANDLE } cumlError_t;
typedef cudaError_t (*cuml_allocate)(void** p, size_t n, cudaStream_t stream);
typedef cudaError_t (*cuml_deallocate)(void* p, size_t n, cudaStream_t stream);
/**
* @brief Get a human readable error string for the passed in error code.
*
* @param[in] error the error code to decipher.
* @return a string with a human readable error message.
*/
const char* cumlGetErrorString(cumlError_t error);
/**
* @brief Creates a cumlHandle_t
*
* @param[inout] handle pointer to the handle to create.
* @param[in] stream the stream to which cuML work should be ordered.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlCreate(cumlHandle_t* handle, cudaStream_t stream);
/**
 * @brief gets the stream to which all cuML work issued via the passed handle is ordered.
 *
 * @param[in] handle handle to query for its stream.
 * @param[out] stream on success, set to the stream associated with the handle.
 * @return CUML_SUCCESS on success, @todo: add more error codes
 */
cumlError_t cumlGetStream(cumlHandle_t handle, cudaStream_t* stream);
/**
* @brief sets the allocator to use for all device allocations done in cuML.
*
* Example use:
* @code{.c}
* cudaError_t device_allocate(void** p,size_t n, cudaStream_t)
* {
* return cudaMalloc(p,n);
* }
*
* cudaError_t device_deallocate(void* p, size_t, cudaStream_t)
* {
* return cudaFree(p);
* }
*
* void foo()
* {
* cumlHandle_t cumlHandle;
* cumlCreate( &cumlHandle );
* cumlSetDeviceAllocator( cumlHandle, device_allocate, device_deallocate );
* cumlDestroy( cumlHandle );
* }
* @endcode
* @param[inout] handle the cumlHandle_t to set the device allocator for.
* @param[in] allocate_fn function pointer to the allocate function to use for device
allocations.
* @param[in] deallocate_fn function pointer to the deallocate function to use for device
allocations.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlSetDeviceAllocator(cumlHandle_t handle,
cuml_allocate allocate_fn,
cuml_deallocate deallocate_fn);
/**
* @brief sets the allocator to use for substantial host allocations done in cuML.
*
* Example use:
* @code{.c}
* cudaError_t host_allocate(void** p,size_t n, cudaStream_t)
* {
* *p = malloc(n);
* return NULL != *p ? cudaSuccess : cudaErrorUnknown;
* }
*
* cudaError_t host_deallocate(void* p, size_t, cudaStream_t stream)
* {
* free(p);
* return cudaSuccess;
* }
*
* void foo()
* {
* cumlHandle_t cumlHandle;
* cumlCreate( &cumlHandle );
* cumlSetHostAllocator( cumlHandle, host_allocate, host_deallocate );
* cumlDestroy( cumlHandle );
* }
* @endcode
* @param[inout] handle the cumlHandle_t to set the host allocator for.
* @param[in] allocate_fn function pointer to the allocate function to use for host allocations.
* @param[in] deallocate_fn function pointer to the deallocate function to use for host
allocations.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlSetHostAllocator(cumlHandle_t handle,
cuml_allocate allocate_fn,
cuml_deallocate deallocate_fn);
/**
* @brief Release all resource internally managed by cumlHandle_t
*
* @param[inout] handle the cumlHandle_t to destroy.
* @return CUML_SUCCESS on success, @todo: add more error codes
*/
cumlError_t cumlDestroy(cumlHandle_t handle);
#ifdef __cplusplus
}
#endif
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/pca_mg.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cumlprims/opg/matrix/data.hpp>
#include <cumlprims/opg/matrix/part_descriptor.hpp>
#include "pca.hpp"
namespace ML {
namespace PCA {
namespace opg {
/**
* @brief performs MNMG fit operation for the pca
* @param[in] handle: the internal cuml handle object
* @param[in] input_data: input data
* @param[in] input_desc: descriptor for input data
* @param[out] components: principal components of the input data
* @param[out] explained_var: explained var
* @param[out] explained_var_ratio: the explained var ratio
* @param[out] singular_vals: singular values of the data
* @param[out] mu: mean of every column in input
* @param[out] noise_vars: variance of the noise
* @param[in] prms: data structure that includes all the parameters from input size to algorithm
* @param[in] verbose
*/
void fit(raft::handle_t& handle,
std::vector<MLCommon::Matrix::Data<float>*>& input_data,
MLCommon::Matrix::PartDescriptor& input_desc,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose = false);
void fit(raft::handle_t& handle,
std::vector<MLCommon::Matrix::Data<double>*>& input_data,
MLCommon::Matrix::PartDescriptor& input_desc,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose = false);
/**
* @brief performs MNMG fit and transform operation for the pca
* @param[in] handle: the internal cuml handle object
* @param[in] rank_sizes: includes all the partition size information for the rank
* @param[in] n_parts: number of partitions
* @param[in] input: input data
* @param[out] trans_input: transformed input data
* @param[out] components: principal components of the input data
* @param[out] explained_var: explained var
* @param[out] explained_var_ratio: the explained var ratio
* @param[out] singular_vals: singular values of the data
* @param[out] mu: mean of every column in input
* @param[out] noise_vars: variance of the noise
* @param[in] prms: data structure that includes all the parameters from input size to algorithm
* @param[in] verbose
*/
void fit_transform(raft::handle_t& handle,
MLCommon::Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
MLCommon::Matrix::floatData_t** input,
MLCommon::Matrix::floatData_t** trans_input,
float* components,
float* explained_var,
float* explained_var_ratio,
float* singular_vals,
float* mu,
float* noise_vars,
paramsPCAMG prms,
bool verbose);
void fit_transform(raft::handle_t& handle,
MLCommon::Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
MLCommon::Matrix::doubleData_t** input,
MLCommon::Matrix::doubleData_t** trans_input,
double* components,
double* explained_var,
double* explained_var_ratio,
double* singular_vals,
double* mu,
double* noise_vars,
paramsPCAMG prms,
bool verbose);
/**
* @brief performs MNMG transform operation for the pca
* @param[in] handle: the internal cuml handle object
* @param[in] rank_sizes: includes all the partition size information for the rank
* @param[in] n_parts: number of partitions
* @param[in] input: input data
* @param[in] components: principal components of the input data
* @param[out] trans_input: transformed input data
* @param[in] singular_vals: singular values of the data
* @param[in] mu: mean of every column in input
* @param[in] prms: data structure that includes all the parameters from input size to algorithm
* @param[in] verbose
*/
void transform(raft::handle_t& handle,
MLCommon::Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
MLCommon::Matrix::Data<float>** input,
float* components,
MLCommon::Matrix::Data<float>** trans_input,
float* singular_vals,
float* mu,
paramsPCAMG prms,
bool verbose);
void transform(raft::handle_t& handle,
MLCommon::Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
MLCommon::Matrix::Data<double>** input,
double* components,
MLCommon::Matrix::Data<double>** trans_input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose);
/**
 * @brief performs MNMG inverse transform operation for the pca
 * @param[in] handle: the internal cuml handle object
 * @param[in] rank_sizes: includes all the partition size information for the rank
 * @param[in] n_parts: number of partitions
 * @param[in] trans_input: transformed (reduced-dimension) input data
 * @param[in] components: principal components of the input data
 * @param[out] input: reconstruction of the data in the original feature space
 * @param[in] singular_vals: singular values of the data
 * @param[in] mu: mean of every column in input
 * @param[in] prms: data structure that includes all the parameters from input size to algorithm
 * @param[in] verbose
 */
void inverse_transform(raft::handle_t& handle,
                       MLCommon::Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       MLCommon::Matrix::Data<float>** trans_input,
                       float* components,
                       MLCommon::Matrix::Data<float>** input,
                       float* singular_vals,
                       float* mu,
                       paramsPCAMG prms,
                       bool verbose);
void inverse_transform(raft::handle_t& handle,
MLCommon::Matrix::RankSizePair** rank_sizes,
std::uint32_t n_parts,
MLCommon::Matrix::Data<double>** trans_input,
double* components,
MLCommon::Matrix::Data<double>** input,
double* singular_vals,
double* mu,
paramsPCAMG prms,
bool verbose);
}; // end namespace opg
}; // end namespace PCA
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/params.hpp | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
namespace ML {
/**
 * @brief Eigen-decomposition methods available to the single-GPU solvers.
 *
 * COV_EIG_DQ: the covariance of the input is eigen-decomposed using the
 * divide-and-conquer method for symmetric matrices.
 * COV_EIG_JACOBI: the covariance of the input is eigen-decomposed using the
 * Jacobi method for symmetric matrices.
 */
enum class solver : int {
  COV_EIG_DQ,
  COV_EIG_JACOBI,
};
/**
 * @brief Base parameters shared by the decomposition algorithms.
 */
class params {
 public:
  std::size_t n_rows;  // number of rows (samples) in the input matrix
  std::size_t n_cols;  // number of columns (features) in the input matrix
  int gpu_id = 0;      // device ordinal the algorithm should run on
};
/**
 * @brief Parameters common to the iterative eigen/SVD solvers.
 */
class paramsSolver : public params {
 public:
  //  math_t tol = 0.0;
  float tol = 0.0;                  // convergence tolerance for iterative solvers
  std::uint32_t n_iterations = 15;  // maximum number of solver iterations
  int verbose = 0;                  // 0: no error message printing, 1: print error messages
};
/**
 * @brief TSVD parameters, templated on the solver enum so the same structure
 * serves both the single-GPU (solver) and multi-GPU (mg_solver) variants.
 */
template <typename enum_solver = solver>
class paramsTSVDTemplate : public paramsSolver {
 public:
  std::size_t n_components = 1;                     // number of components to keep
  enum_solver algorithm = enum_solver::COV_EIG_DQ;  // eigen decomposition method to use
};
/**
 * @brief structure for pca parameters. Ref:
 * http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
 * @param n_components: Number of components to keep. if n_components is not set all components are
 * kept:
 * @param copy: If False, data passed to fit are overwritten and running fit(X).transform(X) will
 * not yield the expected results, use fit_transform(X) instead.
 * @param whiten: When True (False by default) the components_ vectors are multiplied by the square
 * root of n_samples and then divided by the singular values to ensure uncorrelated outputs with
 * unit component-wise variances.
 * @param algorithm: the solver to be used in PCA.
 * @param tol: Tolerance for singular values computed by svd_solver == ‘arpack’ or svd_solver ==
 * ‘COV_EIG_JACOBI’
 * @param n_iterations: Number of iterations for the power method computed by jacobi method
 * (svd_solver == 'COV_EIG_JACOBI').
 * @param verbose: 0: no error message printing, 1: print error messages
 */
template <typename enum_solver = solver>
class paramsPCATemplate : public paramsTSVDTemplate<enum_solver> {
 public:
  bool copy = true;  // TODO unused, see #2830 and #2833
  bool whiten = false;  // when true, scale components to produce uncorrelated, unit-variance outputs
};
// Single-GPU parameter aliases, using the single-GPU solver enum.
typedef paramsTSVDTemplate<> paramsTSVD;
typedef paramsPCATemplate<> paramsPCA;

// Solver choices for the multi-node multi-GPU path; unlike the single-GPU
// `solver` enum it additionally offers QR.
enum class mg_solver { COV_EIG_DQ, COV_EIG_JACOBI, QR };

// Multi-node multi-GPU parameter aliases.
typedef paramsPCATemplate<mg_solver> paramsPCAMG;
typedef paramsTSVDTemplate<mg_solver> paramsTSVDMG;
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/tsvd_mg.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cumlprims/opg/matrix/data.hpp>
#include <cumlprims/opg/matrix/part_descriptor.hpp>
#include "tsvd.hpp"
namespace ML {
namespace TSVD {
namespace opg {
/**
 * @brief performs MNMG fit operation for the tsvd
 * @param[in] handle: the internal cuml handle object
 * @param[in] rank_sizes: includes all the partition size information for the rank
 * @param[in] n_parts: number of partitions
 * @param[in] input: input data
 * @param[out] components: principal components of the input data
 * @param[out] singular_vals: singular values of the data
 * @param[in] prms: data structure that includes all the parameters from input size to algorithm
 * @param[in] verbose: print verbose info if true
 * @{
 */
void fit(raft::handle_t& handle,
         MLCommon::Matrix::RankSizePair** rank_sizes,
         std::uint32_t n_parts,
         MLCommon::Matrix::floatData_t** input,
         float* components,
         float* singular_vals,
         paramsTSVDMG& prms,
         bool verbose = false);

/** double-precision overload of the above */
void fit(raft::handle_t& handle,
         MLCommon::Matrix::RankSizePair** rank_sizes,
         std::uint32_t n_parts,
         MLCommon::Matrix::doubleData_t** input,
         double* components,
         double* singular_vals,
         paramsTSVDMG& prms,
         bool verbose = false);
/** @} */

/**
 * @brief performs MNMG fit and transform operation for the tsvd.
 * @param[in] handle: the internal cuml handle object
 * @param[in] input_data: input data
 * @param[in] input_desc: input descriptor for data
 * @param[out] trans_data: transformed input data
 * @param[out] trans_desc: transformed input data descriptor
 * @param[out] components: principal components of the input data
 * @param[out] explained_var: explained var
 * @param[out] explained_var_ratio: the explained var ratio
 * @param[out] singular_vals: singular values of the data
 * @param[in] prms: data structure that includes all the parameters from input size to algorithm
 * @param[in] verbose: print verbose info if true
 * @{
 */
void fit_transform(raft::handle_t& handle,
                   std::vector<MLCommon::Matrix::Data<float>*>& input_data,
                   MLCommon::Matrix::PartDescriptor& input_desc,
                   std::vector<MLCommon::Matrix::Data<float>*>& trans_data,
                   MLCommon::Matrix::PartDescriptor& trans_desc,
                   float* components,
                   float* explained_var,
                   float* explained_var_ratio,
                   float* singular_vals,
                   paramsTSVDMG& prms,
                   bool verbose);

/** double-precision overload of the above */
void fit_transform(raft::handle_t& handle,
                   std::vector<MLCommon::Matrix::Data<double>*>& input_data,
                   MLCommon::Matrix::PartDescriptor& input_desc,
                   std::vector<MLCommon::Matrix::Data<double>*>& trans_data,
                   MLCommon::Matrix::PartDescriptor& trans_desc,
                   double* components,
                   double* explained_var,
                   double* explained_var_ratio,
                   double* singular_vals,
                   paramsTSVDMG& prms,
                   bool verbose);
/** @} */
/**
 * @brief performs MNMG transform operation for the tsvd.
 * @param[in] handle: the internal cuml handle object
 * @param[in] rank_sizes: includes all the partition size information for the rank
 * @param[in] n_parts: number of partitions
 * @param[in] input: input data
 * @param[in] components: principal components of the input data
 * @param[out] trans_input: transformed input data
 * @param[in] prms: data structure that includes all the parameters from input size to algorithm
 * @param[in] verbose: print verbose info if true
 * @{
 */
void transform(raft::handle_t& handle,
               MLCommon::Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               MLCommon::Matrix::Data<float>** input,
               float* components,
               MLCommon::Matrix::Data<float>** trans_input,
               paramsTSVDMG& prms,
               bool verbose);

/** double-precision overload of the above */
void transform(raft::handle_t& handle,
               MLCommon::Matrix::RankSizePair** rank_sizes,
               std::uint32_t n_parts,
               MLCommon::Matrix::Data<double>** input,
               double* components,
               MLCommon::Matrix::Data<double>** trans_input,
               paramsTSVDMG& prms,
               bool verbose);
/** @} */

/**
 * @brief performs MNMG inverse transform operation for the output.
 * @param[in] handle: the internal cuml handle object
 * @param[in] rank_sizes: includes all the partition size information for the rank
 * @param[in] n_parts: number of partitions
 * @param[in] trans_input: transformed input data
 * @param[in] components: principal components of the input data
 * @param[out] input: reconstructed input data
 * @param[in] prms: data structure that includes all the parameters from input size to algorithm
 * @param[in] verbose: print verbose info if true
 * @{
 */
void inverse_transform(raft::handle_t& handle,
                       MLCommon::Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       MLCommon::Matrix::Data<float>** trans_input,
                       float* components,
                       MLCommon::Matrix::Data<float>** input,
                       paramsTSVDMG& prms,
                       bool verbose);

/** double-precision overload of the above */
void inverse_transform(raft::handle_t& handle,
                       MLCommon::Matrix::RankSizePair** rank_sizes,
                       std::uint32_t n_parts,
                       MLCommon::Matrix::Data<double>** trans_input,
                       double* components,
                       MLCommon::Matrix::Data<double>** input,
                       paramsTSVDMG& prms,
                       bool verbose);
/** @} */
}; // end namespace opg
}; // namespace TSVD
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/tsvd.hpp | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "params.hpp"
namespace raft {
class handle_t;
}
namespace ML {
/**
 * @brief fits a truncated SVD model on the input data (single-GPU)
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[out] components principal components of the input data
 * @param[out] singular_vals singular values of the data
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void tsvdFit(raft::handle_t& handle,
             float* input,
             float* components,
             float* singular_vals,
             const paramsTSVD& prms);
void tsvdFit(raft::handle_t& handle,
             double* input,
             double* components,
             double* singular_vals,
             const paramsTSVD& prms);
/** @} */

/**
 * @brief projects transformed data back to the original feature space
 * @param[in] handle the internal cuml handle object
 * @param[in] trans_input transformed input data
 * @param[in] components principal components of the input data
 * @param[out] input reconstructed input data
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void tsvdInverseTransform(raft::handle_t& handle,
                          float* trans_input,
                          float* components,
                          float* input,
                          const paramsTSVD& prms);
void tsvdInverseTransform(raft::handle_t& handle,
                          double* trans_input,
                          double* components,
                          double* input,
                          const paramsTSVD& prms);
/** @} */

/**
 * @brief projects the input data onto the fitted components
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[in] components principal components of the input data
 * @param[out] trans_input transformed input data
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void tsvdTransform(raft::handle_t& handle,
                   float* input,
                   float* components,
                   float* trans_input,
                   const paramsTSVD& prms);
void tsvdTransform(raft::handle_t& handle,
                   double* input,
                   double* components,
                   double* trans_input,
                   const paramsTSVD& prms);
/** @} */

/**
 * @brief fits a truncated SVD model and transforms the input in one call
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[out] trans_input transformed input data
 * @param[out] components principal components of the input data
 * @param[out] explained_var variance explained by each component
 * @param[out] explained_var_ratio ratio of the total variance explained by each component
 * @param[out] singular_vals singular values of the data
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void tsvdFitTransform(raft::handle_t& handle,
                      float* input,
                      float* trans_input,
                      float* components,
                      float* explained_var,
                      float* explained_var_ratio,
                      float* singular_vals,
                      const paramsTSVD& prms);
void tsvdFitTransform(raft::handle_t& handle,
                      double* input,
                      double* trans_input,
                      double* components,
                      double* explained_var,
                      double* explained_var_ratio,
                      double* singular_vals,
                      const paramsTSVD& prms);
/** @} */
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/pca.hpp | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "params.hpp"
namespace raft {
class handle_t;
}
namespace ML {
/**
 * @brief fits a PCA model on the input data (single-GPU)
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[out] components principal components of the input data
 * @param[out] explained_var variance explained by each component
 * @param[out] explained_var_ratio ratio of the total variance explained by each component
 * @param[out] singular_vals singular values of the data
 * @param[out] mu mean of the input (NOTE(review): presumably per-feature means used for
 * centering — confirm against the implementation)
 * @param[out] noise_vars noise variance (NOTE(review): presumably sklearn's noise_variance_ —
 * confirm)
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void pcaFit(raft::handle_t& handle,
            float* input,
            float* components,
            float* explained_var,
            float* explained_var_ratio,
            float* singular_vals,
            float* mu,
            float* noise_vars,
            const paramsPCA& prms);
void pcaFit(raft::handle_t& handle,
            double* input,
            double* components,
            double* explained_var,
            double* explained_var_ratio,
            double* singular_vals,
            double* mu,
            double* noise_vars,
            const paramsPCA& prms);
/** @} */

/**
 * @brief fits a PCA model and transforms the input in one call
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[out] trans_input transformed input data
 * @param[out] components principal components of the input data
 * @param[out] explained_var variance explained by each component
 * @param[out] explained_var_ratio ratio of the total variance explained by each component
 * @param[out] singular_vals singular values of the data
 * @param[out] mu mean of the input (see pcaFit)
 * @param[out] noise_vars noise variance (see pcaFit)
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void pcaFitTransform(raft::handle_t& handle,
                     float* input,
                     float* trans_input,
                     float* components,
                     float* explained_var,
                     float* explained_var_ratio,
                     float* singular_vals,
                     float* mu,
                     float* noise_vars,
                     const paramsPCA& prms);
void pcaFitTransform(raft::handle_t& handle,
                     double* input,
                     double* trans_input,
                     double* components,
                     double* explained_var,
                     double* explained_var_ratio,
                     double* singular_vals,
                     double* mu,
                     double* noise_vars,
                     const paramsPCA& prms);
/** @} */

/**
 * @brief projects transformed data back to the original feature space
 * @param[in] handle the internal cuml handle object
 * @param[in] trans_input transformed input data
 * @param[in] components principal components of the input data
 * @param[in] singular_vals singular values of the data
 * @param[in] mu mean of the input (see pcaFit)
 * @param[out] input reconstructed input data
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void pcaInverseTransform(raft::handle_t& handle,
                         float* trans_input,
                         float* components,
                         float* singular_vals,
                         float* mu,
                         float* input,
                         const paramsPCA& prms);
void pcaInverseTransform(raft::handle_t& handle,
                         double* trans_input,
                         double* components,
                         double* singular_vals,
                         double* mu,
                         double* input,
                         const paramsPCA& prms);
/** @} */

/**
 * @brief projects the input data onto the fitted components
 * @param[in] handle the internal cuml handle object
 * @param[in] input input data
 * @param[in] components principal components of the input data
 * @param[out] trans_input transformed input data
 * @param[in] singular_vals singular values of the data
 * @param[in] mu mean of the input (see pcaFit)
 * @param[in] prms data structure that includes all the parameters from input size to algorithm
 * @{
 */
void pcaTransform(raft::handle_t& handle,
                  float* input,
                  float* components,
                  float* trans_input,
                  float* singular_vals,
                  float* mu,
                  const paramsPCA& prms);
void pcaTransform(raft::handle_t& handle,
                  double* input,
                  double* components,
                  double* trans_input,
                  double* singular_vals,
                  double* mu,
                  const paramsPCA& prms);
/** @} */
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/decomposition/sign_flip_mg.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cumlprims/opg/matrix/data.hpp>
#include <cumlprims/opg/matrix/part_descriptor.hpp>
#include <raft/core/handle.hpp>
namespace ML {
namespace PCA {
namespace opg {
/**
 * @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen
 * vectors
 * @param[in] handle: the internal cuml handle object
 * @param[in] input_data: input matrix that will be used to determine the sign.
 * @param[in] input_desc: MNMG description of the input
 * @param[out] components: components matrix.
 * @param[in] n_components: number of columns of components matrix
 * @param[in] streams: cuda streams
 * @param[in] n_stream: number of streams
 * @{
 */
void sign_flip(raft::handle_t& handle,
               std::vector<MLCommon::Matrix::Data<float>*>& input_data,
               MLCommon::Matrix::PartDescriptor& input_desc,
               float* components,
               std::size_t n_components,
               cudaStream_t* streams,
               std::uint32_t n_stream);
void sign_flip(raft::handle_t& handle,
               std::vector<MLCommon::Matrix::Data<double>*>& input_data,
               MLCommon::Matrix::PartDescriptor& input_desc,
               double* components,
               std::size_t n_components,
               cudaStream_t* streams,
               std::uint32_t n_stream);
/** @} */
}; // end namespace opg
}; // end namespace PCA
}; // end namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/fil/fil.h | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file fil.h Interface to the forest inference library. */
#pragma once
#include <stddef.h>
#include <variant> // for std::get<>, std::variant<>
#include <cuml/ensemble/treelite_defs.hpp>
namespace raft {
class handle_t;
}
namespace ML {
namespace fil {
/** @note FIL supports inference with both single and double precision. However,
    the floating-point type used in the data and model must be the same. */

/** Inference algorithm to use. */
enum algo_t {
  /** choose the algorithm automatically; currently chooses NAIVE for sparse forests
      and BATCH_TREE_REORG for dense ones */
  ALGO_AUTO,
  /** naive algorithm: 1 thread block predicts 1 row; the row is cached in
      shared memory, and the trees are distributed cyclically between threads */
  NAIVE,
  /** tree reorg algorithm: same as naive, but the tree nodes are rearranged
      into a more coalescing-friendly layout: for every node position,
      nodes of all trees at that position are stored next to each other */
  TREE_REORG,
  /** batch tree reorg algorithm: same as tree reorg, but predicts multiple rows (up to 4)
      in a single thread block */
  BATCH_TREE_REORG
};
/** storage_type_t defines whether to import the forests as dense or sparse */
/** storage_type_t defines whether to import the forests as dense or sparse */
enum storage_type_t {
  /** decide automatically; currently always builds dense forests */
  AUTO,
  /** import the forest as dense (8- or 16-byte nodes, depending on model precision) */
  DENSE,
  /** import the forest as sparse (currently always with 16-byte nodes) */
  SPARSE,
  /** (experimental) import the forest as sparse with 8-byte nodes; can fail if
      8-byte nodes are not enough to store the forest, e.g. there are too many
      nodes in a tree or too many features or the thresholds are double precision;
      note that the number of bits used to store the child or feature index can
      change in the future; this can affect whether a particular forest can be
      imported as SPARSE8 */
  SPARSE8,
};

/** printable names for storage_type_t values, indexed by the enum value */
static const char* storage_type_repr[] = {"AUTO", "DENSE", "SPARSE", "SPARSE8"};
/** precision_t defines the precision of the FIL model imported from a treelite model */
enum precision_t {
/** use the native precision of the treelite model, i.e. float64 if it has weights or
thresholds of type float64, otherwise float32 */
PRECISION_NATIVE,
/** always create a float32 FIL model; this may lead to loss of precision if the
treelite model contains float64 parameters */
PRECISION_FLOAT32,
/** always create a float64 FIL model */
PRECISION_FLOAT64
};
template <typename real_t>
struct forest;
/** forest_t is the predictor handle */
template <typename real_t>
using forest_t = forest<real_t>*;
/** forest32_t and forest64_t are definitions required in Cython */
using forest32_t = forest<float>*;
using forest64_t = forest<double>*;
/** forest_variant is used to get a forest represented with either float or double. */
using forest_variant = std::variant<forest_t<float>, forest_t<double>>;
/** MAX_N_ITEMS determines the maximum allowed value for tl_params::n_items */
constexpr int MAX_N_ITEMS = 4;
/** treelite_params_t are parameters for importing treelite models */
/** treelite_params_t are parameters for importing treelite models */
struct treelite_params_t {
  // algo is the inference algorithm to use (see algo_t)
  algo_t algo;
  // output_class indicates whether thresholding will be applied
  // to the model output
  bool output_class;
  // threshold may be used for thresholding if output_class == true,
  // and is ignored otherwise. threshold is ignored if leaves store
  // vectorized class labels. in that case, a class with most votes
  // is returned regardless of the absolute vote count
  float threshold;
  // storage_type indicates whether the forest should be imported as dense or sparse
  storage_type_t storage_type;
  // blocks_per_sm, if nonzero, works as a limit to improve cache hit rate for larger forests
  // suggested values (if nonzero) are from 2 to 7
  // if zero, launches ceildiv(num_rows, NITEMS) blocks
  int blocks_per_sm;
  // threads_per_tree determines how many threads work on a single tree at once inside a block
  // can only be a power of 2
  int threads_per_tree;
  // n_items is how many input samples (items) any thread processes. If 0 is given,
  // choose most (up to MAX_N_ITEMS) that fit into shared memory.
  int n_items;
  // if non-nullptr, *pforest_shape_str will be set to a caller-owned string that
  // contains the forest shape
  char** pforest_shape_str;
  // precision in which to load the treelite model (see precision_t)
  precision_t precision;
};
/** from_treelite uses a treelite model to initialize the forest
 * @param handle cuML handle used by this function
 * @param pforest pointer to where to store the newly created forest
 * @param model treelite model used to initialize the forest
 * @param tl_params additional parameters for the forest
 */
void from_treelite(const raft::handle_t& handle,
                   forest_variant* pforest,
                   ModelHandle model,
                   const treelite_params_t* tl_params);

/** free deletes forest and all resources held by it; after this, forest is no longer usable
 * @param h cuML handle used by this function
 * @param f the forest to free; not usable after the call to this function
 */
template <typename real_t>
void free(const raft::handle_t& h, forest_t<real_t> f);

/** predict predicts on data (with n rows) using forest and writes results into preds;
 * the number of columns is stored in forest, and both preds and data point to GPU memory
 * @param h cuML handle used by this function
 * @param f forest used for predictions
 * @param preds array in GPU memory to store predictions into
 *        size = predict_proba ? (2*num_rows) : num_rows
 * @param data array of size n * cols (cols is the number of columns
 *        for the forest f) from which to predict
 * @param num_rows number of data rows
 * @param predict_proba for classifier models, forces output of class probabilities
 *        instead of a binary class prediction. format matches the scikit-learn API
 */
template <typename real_t>
void predict(const raft::handle_t& h,
             forest_t<real_t> f,
             real_t* preds,
             const real_t* data,
             size_t num_rows,
             bool predict_proba = false);
} // namespace fil
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/fil/multi_sum.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file multi_sum.cuh */
#pragma once
#include <raft/util/cuda_utils.cuh>
/**
template parameters: data [T]ype, reduction [R]adix
function parameters:
@data[] holds one value per thread in shared memory
@n_groups is the number of independent reductions
@n_values is the size of each individual reduction,
that is the number of values to be reduced to a single value
function returns: one sum per thread, for @n_groups first threads.
important: @data[] is "spoiled" during the process: at the end,
it will contain neither the initial nor the final values. the only valid
result is the one returned by the function. That makes it faster.
other assumptions:
data[n_groups * n_values - 1] is within range
T::operator+= is defined, and the implied addition is associative.
@data[] layout assumption:
@data[] values are ordered such that the stride is 1 for values belonging
to the same group and @n_groups for values that are to be added together
*/
template <int R = 5, typename T>
__device__ T multi_sum(T* data, int n_groups, int n_values)
{
  // Each participating thread loads its own value; surplus threads start from
  // a default-constructed (identity) T so they contribute nothing.
  T acc = threadIdx.x < n_groups * n_values ? data[threadIdx.x] : T();
  while (n_values > 1) {
    // n_targets is the number of values per group after the end of this iteration
    int n_targets = raft::ceildiv(n_values, R);
    if (threadIdx.x < n_targets * n_groups) {
#pragma unroll
      for (int i = 1; i < R; ++i) {
        // fold up to R-1 additional values, spaced n_targets * n_groups apart
        // (per the layout assumption: stride n_groups between summands)
        int idx = threadIdx.x + i * n_targets * n_groups;
        if (idx < n_values * n_groups) acc += data[idx];
      }
      // publish the partial sum only if another reduction round will read it
      if (n_targets > 1) data[threadIdx.x] = acc;
    }
    n_values = n_targets;
    // make partial sums visible before the next round; the final round returns
    // acc directly, so no barrier is needed then
    if (n_values > 1) __syncthreads();
  }
  return acc;
}
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/fil/fnv_hash.h | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits.h>
#include <cstdint>
#include <numeric>
// Implements https://tools.ietf.org/html/draft-eastlake-fnv-17.html
// Algorithm is public domain, non-cryptographic strength and no patents or rights to patent.
// If input elements are not 8-bit, such a computation does not match
// the FNV spec.
/** Computes the 64-bit Fowler-Noll-Vo (FNV-1) fingerprint of the byte range
 *  [begin, end): multiply the running value by the FNV prime, then xor in the
 *  next octet. Returns the FNV-1 64-bit offset basis for an empty range. */
template <typename It>
unsigned long long fowler_noll_vo_fingerprint64(It begin, It end)
{
  static_assert(sizeof(*begin) == 1, "FNV deals with byte-sized (octet) input arrays only");
  unsigned long long fp = 14695981039346656037ull;  // FNV-1 64-bit offset basis
  for (It it = begin; it != end; ++it) {
    fp = (fp * 0x100000001b3ull) ^ *it;  // multiply by the FNV prime, xor the octet
  }
  return fp;
}
// Folds the 64-bit FNV fingerprint to 32 bits by xor-ing its two halves, so
// every input bit can affect the result. Per the original note, this should
// give ~1% collision probability within a 10'000-element hash set.
template <typename It>
uint32_t fowler_noll_vo_fingerprint64_32(It begin, It end)
{
  unsigned long long fingerprint = fowler_noll_vo_fingerprint64(begin, end);
  uint32_t low  = static_cast<uint32_t>(fingerprint & UINT_MAX);
  uint32_t high = static_cast<uint32_t>(fingerprint >> 32);
  return low ^ high;
}
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/metrics/metrics.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/distance_types.hpp>
#include <cstdint>
namespace raft {
class handle_t;
}
namespace ML {
namespace Metrics {
/**
 * Calculates the "Coefficient of Determination" (R-Squared) score
 * normalizing the sum of squared errors by the total sum of squares
 * with single precision.
 *
 * This score indicates the proportionate amount of variation in an
 * expected response variable is explained by the independent variables
 * in a linear regression model. The larger the R-squared value, the
 * more variability is explained by the linear regression model.
 *
 * @param handle: raft::handle_t
 * @param y: Array of ground-truth response variables
 * @param y_hat: Array of predicted response variables
 * @param n: Number of elements in y and y_hat
 * @return: The R-squared value.
 */
float r2_score_py(const raft::handle_t& handle, float* y, float* y_hat, int n);

/**
 * Calculates the "Coefficient of Determination" (R-Squared) score
 * normalizing the sum of squared errors by the total sum of squares
 * with double precision.
 *
 * This score indicates the proportionate amount of variation in an
 * expected response variable is explained by the independent variables
 * in a linear regression model. The larger the R-squared value, the
 * more variability is explained by the linear regression model.
 *
 * @param handle: raft::handle_t
 * @param y: Array of ground-truth response variables
 * @param y_hat: Array of predicted response variables
 * @param n: Number of elements in y and y_hat
 * @return: The R-squared value.
 */
double r2_score_py(const raft::handle_t& handle, double* y, double* y_hat, int n);

/**
 * Calculates the "rand index"
 *
 * This metric is a measure of similarity between two data clusterings.
 *
 * @param handle: raft::handle_t
 * @param y: Array of response variables of the first clustering classifications
 * @param y_hat: Array of response variables of the second clustering classifications
 * @param n: Number of elements in y and y_hat
 * @return: The rand index value
 */
double rand_index(const raft::handle_t& handle, double* y, double* y_hat, int n);

/**
 * Calculates the "Silhouette Score"
 *
 * The Silhouette Coefficient is calculated using the mean intra-cluster distance (a)
 * and the mean nearest-cluster distance (b) for each sample. The Silhouette Coefficient
 * for a sample is (b - a) / max(a, b). To clarify, b is the distance between a sample
 * and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient
 * is only defined if number of labels is 2 <= n_labels <= n_samples - 1.
 *
 * @param handle: raft::handle_t
 * @param y: Array of data samples with dimensions (nRows x nCols)
 * @param nRows: number of data samples
 * @param nCols: number of features
 * @param labels: Array containing labels for every data sample (1 x nRows)
 * @param nLabels: number of Labels
 * @param silScores: Array that is optionally taken in as input if required to be populated with
 * the silhouette score for every sample (1 x nRows), else nullptr is passed
 * @param metric: the numerical value that maps to the type of distance metric to be used in the
 * calculations
 * @return: the overall silhouette score
 */
double silhouette_score(const raft::handle_t& handle,
                        double* y,
                        int nRows,
                        int nCols,
                        int* labels,
                        int nLabels,
                        double* silScores,
                        raft::distance::DistanceType metric);
namespace Batched {
/**
 * Calculates Batched "Silhouette Score" by tiling the pairwise distance matrix to remove use of
 * quadratic memory
 *
 * The Silhouette Coefficient is calculated using the mean intra-cluster distance (a)
 * and the mean nearest-cluster distance (b) for each sample. The Silhouette Coefficient
 * for a sample is (b - a) / max(a, b). To clarify, b is the distance between a sample
 * and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient
 * is only defined if number of labels is 2 <= n_labels <= n_samples - 1.
 *
 * @param[in] handle: raft::handle_t
 * @param[in] X: Array of data samples with dimensions (n_rows x n_cols)
 * @param[in] n_rows: number of data samples
 * @param[in] n_cols: number of features
 * @param[in] y: Array containing labels for every data sample (1 x n_rows)
 * @param[in] n_labels: number of Labels
 * @param[out] scores: Array that is optionally taken in as input if required to be populated with
 * the silhouette score for every sample (1 x n_rows), else nullptr is passed
 * @param[in] chunk: the row-wise chunk size on which the pairwise distance matrix is tiled
 * @param[in] metric: the numerical value that maps to the type of distance metric to be used in
 * the calculations
 * @return the overall silhouette score
 * @{
 */
float silhouette_score(const raft::handle_t& handle,
                       float* X,
                       int n_rows,
                       int n_cols,
                       int* y,
                       int n_labels,
                       float* scores,
                       int chunk,
                       raft::distance::DistanceType metric);

/** double-precision overload of the above */
double silhouette_score(const raft::handle_t& handle,
                        double* X,
                        int n_rows,
                        int n_cols,
                        int* y,
                        int n_labels,
                        double* scores,
                        int chunk,
                        raft::distance::DistanceType metric);
/** @} */
}  // namespace Batched
/**
 * Calculates the "adjusted rand index"
 *
 * This metric is the corrected-for-chance version of the rand index
 *
 * @param handle: raft::handle_t
 * @param y: Array of response variables of the first clustering classifications
 * @param y_hat: Array of response variables of the second clustering classifications
 * @param n: Number of elements in y and y_hat
 * @return: The adjusted rand index value
 * @{
 */
double adjusted_rand_index(const raft::handle_t& handle,
                           const int64_t* y,
                           const int64_t* y_hat,
                           const int64_t n);
double adjusted_rand_index(const raft::handle_t& handle,
                           const int* y,
                           const int* y_hat,
                           const int n);
/** @} */

/**
 * Calculates the "Kullback-Leibler Divergence"
 *
 * The KL divergence tells us how well the probability distribution Q
 * approximates the probability distribution P
 * It is often also used as a 'distance metric' between two probability distributions (not
 * symmetric)
 *
 * @param handle: raft::handle_t
 * @param y: Array of probabilities corresponding to distribution P
 * @param y_hat: Array of probabilities corresponding to distribution Q
 * @param n: Number of elements in y and y_hat
 * @return: The KL Divergence value
 */
double kl_divergence(const raft::handle_t& handle, const double* y, const double* y_hat, int n);

/**
 * Calculates the "Kullback-Leibler Divergence"
 *
 * The KL divergence tells us how well the probability distribution Q
 * approximates the probability distribution P
 * It is often also used as a 'distance metric' between two probability distributions (not
 * symmetric)
 *
 * @param handle: raft::handle_t
 * @param y: Array of probabilities corresponding to distribution P
 * @param y_hat: Array of probabilities corresponding to distribution Q
 * @param n: Number of elements in y and y_hat
 * @return: The KL Divergence value
 */
float kl_divergence(const raft::handle_t& handle, const float* y, const float* y_hat, int n);
/**
* Calculates the "entropy" of a labelling
*
* This metric is a measure of the purity/polarity of the clustering
*
* @param handle: raft::handle_t
* @param y: Array of response variables of the clustering
* @param n: Number of elements in y
* @param lower_class_range: the lowest value in the range of classes
* @param upper_class_range: the highest value in the range of classes
* @return: The entropy value of the clustering
*/
double entropy(const raft::handle_t& handle,
const int* y,
const int n,
const int lower_class_range,
const int upper_class_range);
/**
 * Calculates the "Mutual Information score" between two clusters
 *
 * Mutual Information is a measure of the similarity between two labels of
 * the same data.
 *
 * @note class labels in y and y_hat are expected to lie within
 * [lower_class_range, upper_class_range]
 *
 * @param handle: raft::handle_t
 * @param y: Array of response variables of the first clustering classifications
 * @param y_hat: Array of response variables of the second clustering classifications
 * @param n: Number of elements in y and y_hat
 * @param lower_class_range: the lowest value in the range of classes
 * @param upper_class_range: the highest value in the range of classes
 * @return: The mutual information score
 */
double mutual_info_score(const raft::handle_t& handle,
                         const int* y,
                         const int* y_hat,
                         const int n,
                         const int lower_class_range,
                         const int upper_class_range);
/**
 * Calculates the "homogeneity score" between two clusters
 *
 * A clustering result satisfies homogeneity if all of its clusters
 * contain only data points which are members of a single class.
 *
 * @note class labels in y and y_hat are expected to lie within
 * [lower_class_range, upper_class_range]
 *
 * @param handle: raft::handle_t
 * @param y: truth labels
 * @param y_hat: predicted labels
 * @param n: Number of elements in y and y_hat
 * @param lower_class_range: the lowest value in the range of classes
 * @param upper_class_range: the highest value in the range of classes
 * @return: The homogeneity score
 */
double homogeneity_score(const raft::handle_t& handle,
                         const int* y,
                         const int* y_hat,
                         const int n,
                         const int lower_class_range,
                         const int upper_class_range);
/**
 * Calculates the "completeness score" between two clusters
 *
 * A clustering result satisfies completeness if all the data points
 * that are members of a given class are elements of the same cluster.
 *
 * @note class labels in y and y_hat are expected to lie within
 * [lower_class_range, upper_class_range]
 *
 * @param handle: raft::handle_t
 * @param y: truth labels
 * @param y_hat: predicted labels
 * @param n: Number of elements in y and y_hat
 * @param lower_class_range: the lowest value in the range of classes
 * @param upper_class_range: the highest value in the range of classes
 * @return: The completeness score
 */
double completeness_score(const raft::handle_t& handle,
                          const int* y,
                          const int* y_hat,
                          const int n,
                          const int lower_class_range,
                          const int upper_class_range);
/**
 * Calculates the "v-measure" between two clusters
 *
 * v-measure is the harmonic mean between the homogeneity
 * and completeness scores of 2 cluster classifications
 *
 * @note class labels in y and y_hat are expected to lie within
 * [lower_class_range, upper_class_range]
 *
 * @param handle: raft::handle_t
 * @param y: truth labels
 * @param y_hat: predicted labels
 * @param n: Number of elements in y and y_hat
 * @param lower_class_range: the lowest value in the range of classes
 * @param upper_class_range: the highest value in the range of classes
 * @param beta: Ratio of weight attributed to homogeneity vs completeness
 * @return: The v-measure
 */
double v_measure(const raft::handle_t& handle,
                 const int* y,
                 const int* y_hat,
                 const int n,
                 const int lower_class_range,
                 const int upper_class_range,
                 double beta);
/**
 * Calculates the "accuracy" between two input arrays of labels
 * (exposed to Python, where the inputs come from numpy arrays / cuDF series)
 *
 * The accuracy metric measures the fraction of predicted labels that
 * exactly match the reference labels
 *
 * @param handle: raft::handle_t
 * @param predictions: predicted labels
 * @param ref_predictions: truth labels
 * @param n: Number of elements in predictions and ref_predictions
 * @return: The accuracy
 */
float accuracy_score_py(const raft::handle_t& handle,
                        const int* predictions,
                        const int* ref_predictions,
                        int n);
/**
 * @brief Calculates the ij pairwise distances between two input arrays of
 * double type
 *
 * @note `dist` must be a caller-allocated buffer large enough to hold
 * m x n results
 *
 * @param handle raft::handle_t
 * @param x pointer to the input data samples array (mRows x kCols)
 * @param y pointer to the second input data samples array. Can use the same
 * pointer as x (nRows x kCols)
 * @param dist output pointer where the results will be stored (mRows x nCols)
 * @param m number of rows in x
 * @param n number of rows in y
 * @param k number of cols in x and y (must be the same)
 * @param metric the distance metric to use for the calculation
 * @param isRowMajor specifies whether the x and y data pointers are row (C
 * type array) or col (F type array) major
 * @param metric_arg the value of `p` for Minkowski (l-p) distances.
 */
void pairwise_distance(const raft::handle_t& handle,
                       const double* x,
                       const double* y,
                       double* dist,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor   = true,
                       double metric_arg = 2.0);
/**
 * @brief Calculates the ij pairwise distances between two input arrays of float type
 *
 * @note `dist` must be a caller-allocated buffer large enough to hold
 * m x n results
 *
 * @param handle raft::handle_t
 * @param x pointer to the input data samples array (mRows x kCols)
 * @param y pointer to the second input data samples array. Can use the same
 * pointer as x (nRows x kCols)
 * @param dist output pointer where the results will be stored (mRows x nCols)
 * @param m number of rows in x
 * @param n number of rows in y
 * @param k number of cols in x and y (must be the same)
 * @param metric the distance metric to use for the calculation
 * @param isRowMajor specifies whether the x and y data pointers are row (C
 * type array) or col (F type array) major
 * @param metric_arg the value of `p` for Minkowski (l-p) distances.
 */
void pairwise_distance(const raft::handle_t& handle,
                       const float* x,
                       const float* y,
                       float* dist,
                       int m,
                       int n,
                       int k,
                       raft::distance::DistanceType metric,
                       bool isRowMajor  = true,
                       float metric_arg = 2.0f);
/**
 * @brief Calculates the ij pairwise distances between two sparse input
 * arrays of double type
 *
 * NOTE(review): the parameter names (indptr / indices / nnz) suggest a
 * compressed-sparse (CSR-style) layout, and the output presumably has
 * x_nrows x y_nrows entries mirroring the dense overloads — confirm both
 * against the implementation.
 *
 * @param handle raft::handle_t
 * @param x pointer to the nonzero values of the first input
 * @param y pointer to the nonzero values of the second input
 * @param dist output pointer where the results will be stored
 * @param x_nrows number of rows in x
 * @param y_nrows number of rows in y
 * @param n_cols number of cols in x and y
 * @param x_nnz number of nonzero elements in x
 * @param y_nnz number of nonzero elements in y
 * @param x_indptr compressed offsets array of x
 * @param y_indptr compressed offsets array of y
 * @param x_indices column indices of the nonzero values of x
 * @param y_indices column indices of the nonzero values of y
 * @param metric the distance metric to use for the calculation
 * @param metric_arg the value of `p` for Minkowski (l-p) distances.
 */
void pairwiseDistance_sparse(const raft::handle_t& handle,
                             double* x,
                             double* y,
                             double* dist,
                             int x_nrows,
                             int y_nrows,
                             int n_cols,
                             int x_nnz,
                             int y_nnz,
                             int* x_indptr,
                             int* y_indptr,
                             int* x_indices,
                             int* y_indices,
                             raft::distance::DistanceType metric,
                             float metric_arg);
/**
 * @brief Calculates the ij pairwise distances between two sparse input
 * arrays of float type (parameters as in the double overload above)
 */
void pairwiseDistance_sparse(const raft::handle_t& handle,
                             float* x,
                             float* y,
                             float* dist,
                             int x_nrows,
                             int y_nrows,
                             int n_cols,
                             int x_nnz,
                             int y_nnz,
                             int* x_indptr,
                             int* y_indptr,
                             int* x_indices,
                             int* y_indices,
                             raft::distance::DistanceType metric,
                             float metric_arg);
/**
 * @brief Compute the trustworthiness score
 *
 * Trustworthiness quantifies how well the local neighborhoods of the
 * original data are preserved in the embedding.
 *
 * @param h Raft handle
 * @param X Data in original dimension
 * @param X_embedded Data in target dimension (embedding)
 * @param n Number of samples
 * @param m Number of features in high/original dimension
 * @param d Number of features in low/embedded dimension
 * @param n_neighbors Number of neighbors considered by trustworthiness score
 * @param batchSize Batch size
 * @tparam math_t: data type of X and X_embedded (e.g. float or double)
 * @tparam distance_type: Distance type to consider
 * @return Trustworthiness score
 */
template <typename math_t, raft::distance::DistanceType distance_type>
double trustworthiness_score(const raft::handle_t& h,
                             const math_t* X,
                             math_t* X_embedded,
                             int n,
                             int m,
                             int d,
                             int n_neighbors,
                             int batchSize = 512);
} // namespace Metrics
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/random_projection/rproj_c.h | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/handle.hpp>
#include <rmm/device_uvector.hpp>
namespace ML {
/**
 * @defgroup paramsRPROJ structure holding parameters used by random projection model
 * @param n_samples: Number of samples
 * @param n_features: Number of features (original dimension)
 * @param n_components: Number of components (target dimension)
 * @param eps: error tolerance used to decide automatically of n_components
 * @param gaussian_method: boolean describing random matrix generation method
 * @param density: Density of the random matrix
 * @param dense_output: boolean describing sparsity of transformed matrix
 * @param random_state: seed used by random generator
 * @{
 */
struct paramsRPROJ {
  int n_samples;        // Number of samples
  int n_features;       // Number of features (original dimension)
  int n_components;     // Number of components (target dimension)
  double eps;           // error tolerance used to decide n_components automatically
  bool gaussian_method; // random matrix generation method (gaussian vs sparse)
  double density;       // Density of the random matrix
  bool dense_output;    // whether the transformed matrix is dense
  int random_state;     // seed used by the random generator
};
/** @} */

/** Storage format of a random projection matrix; `unset` marks an
 * uninitialized matrix. */
enum random_matrix_type { unset, dense, sparse };
/**
 * Container for a random projection matrix, stored either as a dense array
 * or in sparse CSC form depending on `type`. All device buffers are tied to
 * the stream passed at construction.
 *
 * @tparam math_t element type of the matrix values (e.g. float or double)
 */
template <typename math_t>
struct rand_mat {
  /** Construct an empty (unset) matrix; all buffers start with zero elements. */
  rand_mat(cudaStream_t stream)
    : dense_data(0, stream),
      indices(0, stream),
      indptr(0, stream),
      sparse_data(0, stream),
      stream(stream),
      type(unset)
  {
  }

  /** Releases all device memory on destruction. */
  ~rand_mat() { this->reset(); }

  // For dense matrices
  rmm::device_uvector<math_t> dense_data;

  // For sparse CSC matrices
  rmm::device_uvector<int> indices;
  rmm::device_uvector<int> indptr;
  rmm::device_uvector<math_t> sparse_data;

  cudaStream_t stream;

  random_matrix_type type;

  /** Release all device buffers and mark the matrix as unset. */
  void reset()
  {
    this->dense_data.release();
    this->indices.release();
    this->indptr.release();
    this->sparse_data.release();
    this->type = unset;
  }  // note: removed a stray trailing semicolon after this member function body
};
/**
 * Fits the random projection model: generates the random projection matrix
 * (dense or sparse) into `random_matrix` according to `params`.
 *
 * @param handle: raft handle
 * @param[out] random_matrix: the matrix container to fill
 * @param params: random projection hyper-parameters (see paramsRPROJ)
 */
template <typename math_t>
void RPROJfit(const raft::handle_t& handle, rand_mat<math_t>* random_matrix, paramsRPROJ* params);

/**
 * Applies the random projection: transforms `input` using a previously
 * fitted `random_matrix`, writing the projected data to `output`.
 *
 * @param handle: raft handle
 * @param input: data to project
 * @param random_matrix: matrix previously generated by RPROJfit
 * @param[out] output: projected data
 * @param params: random projection hyper-parameters (see paramsRPROJ)
 */
template <typename math_t>
void RPROJtransform(const raft::handle_t& handle,
                    math_t* input,
                    rand_mat<math_t>* random_matrix,
                    math_t* output,
                    paramsRPROJ* params);

/**
 * Computes the minimum number of components required to project n_samples
 * within error tolerance eps, per the Johnson-Lindenstrauss lemma.
 *
 * @param n_samples: number of samples to be projected
 * @param eps: error tolerance
 * @return the minimal target dimensionality
 */
size_t johnson_lindenstrauss_min_dim(size_t n_samples, double eps);
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/datasets/make_blobs.hpp | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
namespace raft {
class handle_t;
}
namespace ML {
namespace Datasets {
/**
 * @defgroup MakeBlobs scikit-learn-esq make_blobs
 *
 * @brief GPU-equivalent of sklearn.datasets.make_blobs
 *
 * @param[in]  handle cuML handle
 * @param[out] out generated data [on device]
 * [dim = n_rows x n_cols]
 * @param[out] labels labels for the generated data [on device]
 * [len = n_rows]
 * @param[in] n_rows number of rows in the generated data
 * @param[in] n_cols number of columns in the generated data
 * @param[in] n_clusters number of clusters (or classes) to generate
 * @param[in] row_major whether input `centers` and output `out`
 * buffers are to be stored in row or column
 * major layout
 * @param[in] centers centers of each of the cluster, pass a nullptr
 * if you need this also to be generated randomly
 * [on device] [dim = n_clusters x n_cols]
 * @param[in] cluster_std standard deviation of each cluster center,
 * pass a nullptr if this is to be read from the
 * `cluster_std_scalar`. [on device]
 * [len = n_clusters]
 * @param[in] cluster_std_scalar if 'cluster_std' is nullptr, then use this as
 * the std-dev across all dimensions.
 * @param[in] shuffle shuffle the generated dataset and labels
 * @param[in] center_box_min min value of box from which to pick cluster
 * centers. Useful only if 'centers' is nullptr
 * @param[in] center_box_max max value of box from which to pick cluster
 * centers. Useful only if 'centers' is nullptr
 * @param[in] seed seed for the RNG
 * @{
 */
void make_blobs(const raft::handle_t& handle,
                float* out,
                int64_t* labels,
                int64_t n_rows,
                int64_t n_cols,
                int64_t n_clusters,
                bool row_major                 = true,
                const float* centers           = nullptr,
                const float* cluster_std       = nullptr,
                const float cluster_std_scalar = 1.f,
                bool shuffle                   = true,
                float center_box_min           = -10.f,
                float center_box_max           = 10.f,
                uint64_t seed                  = 0ULL);
/** double-precision variant with int64 labels (parameters as documented above). */
void make_blobs(const raft::handle_t& handle,
                double* out,
                int64_t* labels,
                int64_t n_rows,
                int64_t n_cols,
                int64_t n_clusters,
                bool row_major                  = true,
                const double* centers           = nullptr,
                const double* cluster_std       = nullptr,
                const double cluster_std_scalar = 1.0,
                bool shuffle                    = true,
                double center_box_min           = -10.0,
                double center_box_max           = 10.0,
                uint64_t seed                   = 0ULL);
/** single-precision variant with 32-bit int labels (parameters as documented above). */
void make_blobs(const raft::handle_t& handle,
                float* out,
                int* labels,
                int n_rows,
                int n_cols,
                int n_clusters,
                bool row_major                 = true,
                const float* centers           = nullptr,
                const float* cluster_std       = nullptr,
                const float cluster_std_scalar = 1.f,
                bool shuffle                   = true,
                float center_box_min           = -10.f,
                // default was the double literal `10.0`; use a float literal for
                // consistency with the other float overload and this parameter's type
                float center_box_max           = 10.f,
                uint64_t seed                  = 0ULL);
/** double-precision variant with 32-bit int labels (parameters as documented above). */
void make_blobs(const raft::handle_t& handle,
                double* out,
                int* labels,
                int n_rows,
                int n_cols,
                int n_clusters,
                bool row_major                  = true,
                const double* centers           = nullptr,
                const double* cluster_std       = nullptr,
                const double cluster_std_scalar = 1.0,
                bool shuffle                    = true,
                double center_box_min           = -10.0,
                double center_box_max           = 10.0,
                uint64_t seed                   = 0ULL);
/** @} */
} // namespace Datasets
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/cpp/include/cuml | rapidsai_public_repos/cuml/cpp/include/cuml/datasets/make_arima.hpp | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuml/tsa/arima_common.h>
namespace raft {
class handle_t;
}
namespace ML {
namespace Datasets {
/**
 * Generates a dataset of time series by simulating an ARIMA process
 * of a given order.
 *
 * @note the layout of `out` (ordering of the batch_size x n_obs values) is
 * determined by the implementation and is not specified here.
 *
 * @param[in] handle cuML handle
 * @param[out] out Generated time series
 * @param[in] batch_size Batch size
 * @param[in] n_obs Number of observations per series
 * @param[in] order ARIMA order
 * @param[in] scale Scale used to draw the starting values
 * @param[in] noise_scale Scale used to draw the residuals
 * @param[in] intercept_scale Scale used to draw the intercept
 * @param[in] seed Seed for the random number generator
 * @{
 */
void make_arima(const raft::handle_t& handle,
                float* out,
                int batch_size,
                int n_obs,
                ARIMAOrder order,
                float scale           = 1.0f,
                float noise_scale     = 0.2f,
                float intercept_scale = 1.0f,
                uint64_t seed         = 0ULL);
/** double-precision variant of make_arima (parameters as documented above). */
void make_arima(const raft::handle_t& handle,
                double* out,
                int batch_size,
                int n_obs,
                ARIMAOrder order,
                double scale           = 1.0,
                double noise_scale     = 0.2,
                double intercept_scale = 1.0,
                uint64_t seed          = 0ULL);
/** @} */
} // namespace Datasets
} // namespace ML
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.