repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/python/cuml/_thirdparty/sklearn | rapidsai_public_repos/cuml/python/cuml/_thirdparty/sklearn/utils/extmath.py | # Original authors from Sckit-Learn:
# Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
# This code originates from the Scikit-Learn library,
# it was since modified to allow GPU acceleration.
# This code is under BSD 3 clause license.
# Authors mentioned above do not endorse or promote this production.
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
np = gpu_only_import('cupy')
cupyx = gpu_only_import('cupyx')
sparse = gpu_only_import_from('cupyx.scipy', 'sparse')
def row_norms(X, squared=False):
    """Compute the (squared) Euclidean norm of each row of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices. Performs no input validation.

    Parameters
    ----------
    X : array_like
        The input array
    squared : bool, optional (default = False)
        If True, return squared norms.

    Returns
    -------
    array_like
        The row-wise (squared) Euclidean norm of X.
    """
    if sparse.issparse(X):
        # Only formats whose ``.data`` buffer holds every stored value can
        # be squared element-wise; other sparse layouts are rejected.
        if not isinstance(X, (sparse.csr_matrix, sparse.csc_matrix,
                              sparse.coo_matrix)):
            raise ValueError('Sparse matrix not compatible')
        squared_X = X.copy()
        squared_X.data = np.square(squared_X.data)
        norms = squared_X.sum(axis=1).squeeze()
    else:
        # Row-wise self dot product: norms[i] = sum_j X[i, j] ** 2
        norms = np.einsum('ij,ij->i', X, X)
    if not squared:
        # In-place square root to avoid an extra allocation.
        np.sqrt(norms, norms)
    return norms
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
    """Calculate mean update and a Youngs and Cramer variance update.

    last_mean and last_variance are statistics computed at the last step by the
    function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    necessary for the calculation of the variance. last_n_samples_seen is the
    number of samples encountered until now.

    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data to use for variance update
    last_mean : array-like, shape: (n_features,)
    last_variance : array-like, shape: (n_features,)
    last_sample_count : array-like, shape (n_features,)

    Returns
    -------
    updated_mean : array, shape (n_features,)
    updated_variance : array, shape (n_features,)
        If None, only mean is computed
    updated_sample_count : array, shape (n_features,)

    Notes
    -----
    NaNs are ignored during the algorithm.

    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247

    Also, see the sparse implementation of this in
    `utils.sparsefuncs.incr_mean_variance_axis` and
    `utils.sparsefuncs_fast.incr_mean_variance_axis0`
    """
    # old = stats until now
    # new = the current increment
    # updated = the aggregated stats
    last_sum = last_mean * last_sample_count
    # nansum/nanvar run through _safe_accumulator_op so that narrow float
    # dtypes accumulate in float64 (avoids precision loss/overflow).
    new_sum = _safe_accumulator_op(np.nansum, X, axis=0)
    # Per-feature count of non-NaN samples in this batch.
    new_sample_count = np.sum(~np.isnan(X), axis=0)
    updated_sample_count = last_sample_count + new_sample_count
    updated_mean = (last_sum + new_sum) / updated_sample_count
    if last_variance is None:
        updated_variance = None
    else:
        # "Unnormalized variance" = variance * sample_count, i.e. the sum of
        # squared deviations; the Chan et al. update combines these directly.
        new_unnormalized_variance = (
            _safe_accumulator_op(np.nanvar, X, axis=0) * new_sample_count)
        last_unnormalized_variance = last_variance * last_sample_count
        # Suppress divide-by-zero / invalid warnings: features with
        # new_sample_count == 0 or last_sample_count == 0 produce inf/nan
        # here and are repaired below.
        with cupyx.errstate(divide=None, invalid=None):
            last_over_new_count = last_sample_count / new_sample_count
            updated_unnormalized_variance = (
                last_unnormalized_variance + new_unnormalized_variance +
                last_over_new_count / updated_sample_count *
                (last_sum / last_over_new_count - new_sum) ** 2)
        # Features never seen before: the combined formula is undefined
        # (division by last_sample_count == 0), so take the batch statistic.
        zeros = last_sample_count == 0
        updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
        updated_variance = updated_unnormalized_variance / updated_sample_count
    return updated_mean, updated_variance, updated_sample_count
# Use at least float64 for the accumulating functions to avoid precision issue
# see https://github.com/numpy/numpy/issues/9393. The float64 is also retained
# as it is in case the float overflows
def _safe_accumulator_op(op, x, *args, **kwargs):
"""
This function provides numpy accumulator functions with a float64 dtype
when used on a floating point input. This prevents accumulator overflow on
smaller floating point dtypes.
Parameters
----------
op : function
A numpy accumulator function such as np.mean or np.sum
x : numpy array
A numpy array to apply the accumulator function
*args : positional arguments
Positional arguments passed to the accumulator function after the
input x
**kwargs : keyword arguments
Keyword arguments passed to the accumulator function
Returns
-------
result : The output of the accumulator function passed to this function
"""
if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8:
result = op(x, *args, **kwargs, dtype=np.float64)
else:
result = op(x, *args, **kwargs)
return result
| 0 |
rapidsai_public_repos/cuml/python/cuml/_thirdparty/sklearn | rapidsai_public_repos/cuml/python/cuml/_thirdparty/sklearn/utils/__init__.py | # This code originates from the Scikit-Learn library,
# it was since modified to allow GPU acceleration.
# This code is under BSD 3 clause license.
from . import validation
from . import extmath
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/onehotencoder_mg.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.preprocessing.encoders import OneHotEncoder
import dask
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
DataFrame = gpu_only_import_from("cudf", "DataFrame")
class OneHotEncoderMG(OneHotEncoder):
    """
    A Multi-Node Multi-GPU implementation of OneHotEncoder

    Refer to the Dask OneHotEncoder implementation
    in `cuml.dask.preprocessing.encoders`.
    """

    def __init__(self, *, client=None, **kwargs):
        super().__init__(**kwargs)
        self.client = client

    def _check_input_fit(self, X, is_categories=False):
        """Helper function to check input of fit within the multi-gpu model"""
        if not isinstance(X, (dask.array.core.Array, cp.ndarray)):
            # Already dataframe-like; just record the input kind.
            self._set_input_type("df")
            return X
        self._set_input_type("array")
        if is_categories:
            # Category arrays arrive transposed relative to the data layout.
            X = X.transpose()
        if isinstance(X, cp.ndarray):
            return DataFrame(X)
        return to_dask_cudf(X, client=self.client)

    def _unique(self, inp):
        # Distributed unique: materialize the lazy result on the client.
        return inp.unique().compute()

    def _has_unknown(self, X_cat, encoder_cat):
        # True when any category in X_cat was not seen during fit.
        return not X_cat.isin(encoder_cat).all().compute()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/ordinalencoder_mg.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import dask
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import, gpu_only_import_from
from cuml.preprocessing.encoders import OrdinalEncoder
cp = gpu_only_import("cupy")
DataFrame = gpu_only_import_from("cudf", "DataFrame")
class OrdinalEncoderMG(OrdinalEncoder):
    """Multi-Node Multi-GPU helper subclass of OrdinalEncoder used by the
    Dask wrapper; overrides input checking and category discovery with
    distributed-aware implementations."""

    def __init__(self, *, client=None, **kwargs):
        super().__init__(**kwargs)
        self.client = client

    def _check_input_fit(self, X, is_categories=False):
        """Helper function to check input of fit within the multi-gpu model"""
        if not isinstance(X, (dask.array.core.Array, cp.ndarray)):
            # Already dataframe-like; just record the input kind.
            self._set_input_type("df")
            return X
        self._set_input_type("array")
        if is_categories:
            # Category arrays arrive transposed relative to the data layout.
            X = X.transpose()
        if isinstance(X, cp.ndarray):
            return DataFrame(X)
        return to_dask_cudf(X, client=self.client)

    def _unique(self, inp):
        # Distributed unique: materialize the lazy result on the client.
        return inp.unique().compute()

    def _has_unknown(self, X_cat, encoder_cat):
        # True when any category in X_cat was not seen during fit.
        return not X_cat.isin(encoder_cat).all().compute()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/LabelEncoder.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.exceptions import NotFittedError
from cuml.internals.safe_imports import cpu_only_import_from
from cuml import Base
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
pdSeries = cpu_only_import_from("pandas", "Series")
class LabelEncoder(Base):
    """
    An nvcategory based implementation of ordinal label encoding

    Parameters
    ----------
    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform or inverse transform, the resulting encoding will be null.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Examples
    --------
    Converting a categorical implementation to a numerical one

    >>> from cudf import DataFrame, Series
    >>> from cuml.preprocessing import LabelEncoder
    >>> data = DataFrame({'category': ['a', 'b', 'c', 'd']})
    >>> # There are two functionally equivalent ways to do this
    >>> le = LabelEncoder()
    >>> le.fit(data.category) # le = le.fit(data.category) also works
    LabelEncoder()
    >>> encoded = le.transform(data.category)
    >>> print(encoded)
    0 0
    1 1
    2 2
    3 3
    dtype: uint8
    >>> # This method is preferred
    >>> le = LabelEncoder()
    >>> encoded = le.fit_transform(data.category)
    >>> print(encoded)
    0 0
    1 1
    2 2
    3 3
    dtype: uint8
    >>> # We can assign this to a new column
    >>> data = data.assign(encoded=encoded)
    >>> print(data.head())
    category encoded
    0 a 0
    1 b 1
    2 c 2
    3 d 3
    >>> # We can also encode more data
    >>> test_data = Series(['c', 'a'])
    >>> encoded = le.transform(test_data)
    >>> print(encoded)
    0 2
    1 0
    dtype: uint8
    >>> # After train, ordinal label can be inverse_transform() back to
    >>> # string labels
    >>> ord_label = Series([0, 0, 1, 2, 1])
    >>> str_label = le.inverse_transform(ord_label)
    >>> print(str_label)
    0 a
    1 a
    2 b
    3 c
    4 b
    dtype: object
    """

    def __init__(
        self,
        *,
        handle_unknown="error",
        handle=None,
        verbose=False,
        output_type=None,
    ):
        super().__init__(
            handle=handle, verbose=verbose, output_type=output_type
        )
        # Sorted, de-duplicated categories captured by `fit`.
        self.classes_ = None
        # dtype of the original labels (``str`` for object dtype); used by
        # `inverse_transform` to restore the caller's dtype.
        self.dtype = None
        self._fitted: bool = False
        self.handle_unknown = handle_unknown

    def _check_is_fitted(self):
        # Raise NotFittedError unless fit/fit_transform completed.
        if not self._fitted:
            msg = (
                "This LabelEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator."
            )
            raise NotFittedError(msg)

    def _validate_keywords(self):
        # Only 'error' and 'ignore' are legal unknown-handling policies.
        if self.handle_unknown not in ("error", "ignore"):
            msg = (
                "handle_unknown should be either 'error' or 'ignore', "
                "got {0}.".format(self.handle_unknown)
            )
            raise ValueError(msg)

    def fit(self, y, _classes=None):
        """
        Fit a LabelEncoder (nvcategory) instance to a set of categories

        Parameters
        ----------
        y : cudf.Series, pandas.Series, cupy.ndarray or numpy.ndarray
            Series containing the categories to be encoded. It's elements
            may or may not be unique

        _classes: int or None.
            Passed by the dask client when dask LabelEncoder is used.

        Returns
        -------
        self : LabelEncoder
            A fitted instance of itself to allow method chaining
        """
        self._validate_keywords()
        if _classes is None:
            y = (
                self._to_cudf_series(y)
                .drop_duplicates()
                .sort_values(ignore_index=True)
            )  # dedupe and sort
            self.classes_ = y
        else:
            # Dask path: classes were pre-computed across workers.
            self.classes_ = _classes
        # NOTE(review): when `_classes` is given, `y` here is the raw input
        # (not converted via _to_cudf_series) — assumes it exposes `.dtype`.
        self.dtype = y.dtype if y.dtype != cp.dtype("O") else str
        self._fitted = True
        return self

    def transform(self, y) -> cudf.Series:
        """
        Transform an input into its categorical keys.

        This is intended for use with small inputs relative to the size of the
        dataset. For fitting and transforming an entire dataset, prefer
        `fit_transform`.

        Parameters
        ----------
        y : cudf.Series, pandas.Series, cupy.ndarray or numpy.ndarray
            Input keys to be transformed. Its values should match the
            categories given to `fit`

        Returns
        -------
        encoded : cudf.Series
            The ordinally encoded input series

        Raises
        ------
        KeyError
            if a category appears that was not seen in `fit`
        """
        y = self._to_cudf_series(y)
        self._check_is_fitted()
        y = y.astype("category")
        # Re-map the input's categories onto the fitted ones; values unseen
        # during `fit` come back as null codes.
        encoded = y.cat.set_categories(self.classes_)._column.codes
        encoded = cudf.Series(encoded, index=y.index)
        # Nulls mark unknown categories; raise or pass through per policy.
        if encoded.has_nulls and self.handle_unknown == "error":
            raise KeyError("Attempted to encode unseen key")
        return encoded

    def fit_transform(self, y, z=None) -> cudf.Series:
        """
        Simultaneously fit and transform an input

        This is functionally equivalent to (but faster than)
        `LabelEncoder().fit(y).transform(y)`
        """
        # NOTE(review): `z` is unused — presumably kept for signature
        # compatibility with a caller; confirm before removing.
        y = self._to_cudf_series(y)
        self.dtype = y.dtype if y.dtype != cp.dtype("O") else str
        # Categorizing once yields both the classes and the codes.
        y = y.astype("category")
        self.classes_ = y._column.categories
        self._fitted = True
        return cudf.Series(y._column.codes, index=y.index)

    def inverse_transform(self, y: cudf.Series) -> cudf.Series:
        """
        Revert ordinal label to original label

        Parameters
        ----------
        y : cudf.Series, pandas.Series, cupy.ndarray or numpy.ndarray
            dtype=int32
            Ordinal labels to be reverted

        Returns
        -------
        reverted : the same type as y
            Reverted labels
        """
        # check LabelEncoder is fitted
        self._check_is_fitted()
        # check input type is cudf.Series
        y = self._to_cudf_series(y)
        # check if ord_label out of bound
        ord_label = y.unique()
        category_num = len(self.classes_)
        if self.handle_unknown == "error":
            # Host-side loop over unique labels only (small by assumption).
            for ordi in ord_label.values_host:
                if ordi < 0 or ordi >= category_num:
                    raise ValueError(
                        "y contains previously unseen label {}".format(ordi)
                    )
        y = y.astype(self.dtype)
        # Map code i -> classes_[i] via a column-level find-and-replace.
        ran_idx = cudf.Series(cp.arange(len(self.classes_))).astype(self.dtype)
        reverted = y._column.find_and_replace(ran_idx, self.classes_, False)
        res = cudf.Series(reverted)
        return res

    def get_param_names(self):
        # Base params plus this estimator's only hyper-parameter.
        return super().get_param_names() + [
            "handle_unknown",
        ]

    def _to_cudf_series(self, y):
        # Normalize any supported input container to a cudf.Series.
        if isinstance(y, pdSeries):
            y = cudf.from_pandas(y)
        elif isinstance(y, cp.ndarray):
            y = cudf.Series(y)
        elif isinstance(y, np.ndarray):
            y = cudf.Series(y)
        elif not isinstance(y, cudf.Series):
            msg = (
                "input should be either 'cupy.ndarray'"
                " or 'numpy.ndarray' or 'pandas.Series',"
                " or 'cudf.Series'"
                "got {0}.".format(type(y))
            )
            raise TypeError(msg)
        return y
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/TargetEncoder.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from cuml.common.exceptions import NotFittedError
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
pandas = cpu_only_import("pandas")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def get_stat_func(stat):
    """Return a callable computing statistic ``stat`` of a series-like object.

    The returned function calls the object's own ``stat`` method (e.g.
    ``mean``, ``var``, ``median``) and raises ValueError when the object
    does not provide it.
    """

    def _apply(ds):
        if not hasattr(ds, stat):
            # implement stat
            raise ValueError(f"{stat} function is not implemented.")
        return getattr(ds, stat)()

    return _apply
class TargetEncoder:
    """
    A cudf based implementation of target encoding [1]_, which converts
    one or multiple categorical variables, 'Xs', with the average of
    corresponding values of the target variable, 'Y'. The input data is
    grouped by the columns `Xs` and the aggregated mean value of `Y` of
    each group is calculated to replace each value of `Xs`. Several
    optimizations are applied to prevent label leakage and parallelize
    the execution.

    Parameters
    ----------
    n_folds : int (default=4)
        Default number of folds for fitting training data. To prevent
        label leakage in `fit`, we split data into `n_folds` and
        encode one fold using the target variables of the remaining folds.
    smooth : int or float (default=0)
        Count of samples to smooth the encoding. 0 means no smoothing.
    seed : int (default=42)
        Random seed
    split_method : {'random', 'continuous', 'interleaved'}, \
        (default='interleaved')
        Method to split train data into `n_folds`.
        'random': random split.
        'continuous': consecutive samples are grouped into one folds.
        'interleaved': samples are assign to each fold in a round robin way.
        'customize': customize splitting by providing a `fold_ids` array
        in `fit()` or `fit_transform()` functions.
    output_type : {'cupy', 'numpy', 'auto'}, default = 'auto'
        The data type of output. If 'auto', it matches input data.
    stat : {'mean','var','median'}, default = 'mean'
        The statistic used in encoding, mean, variance or median of the
        target.

    References
    ----------
    .. [1] https://maxhalford.github.io/blog/target-encoding/

    Examples
    --------
    Converting a categorical implementation to a numerical one

    >>> from cudf import DataFrame, Series
    >>> from cuml.preprocessing import TargetEncoder
    >>> train = DataFrame({'category': ['a', 'b', 'b', 'a'],
    ...                    'label': [1, 0, 1, 1]})
    >>> test = DataFrame({'category': ['a', 'c', 'b', 'a']})
    >>> encoder = TargetEncoder()
    >>> train_encoded = encoder.fit_transform(train.category, train.label)
    >>> test_encoded = encoder.transform(test.category)
    >>> print(train_encoded)
    [1. 1. 0. 1.]
    >>> print(test_encoded)
    [1. 0.75 0.5 1. ]
    """

    def __init__(
        self,
        n_folds=4,
        smooth=0,
        seed=42,
        split_method="interleaved",
        output_type="auto",
        stat="mean",
    ):
        if smooth < 0:
            raise ValueError(f"smooth {smooth} is not zero or positive")
        # FIX: check the type first so a non-numeric n_folds raises this
        # ValueError instead of a TypeError from the comparison, and reject
        # 0 to match the "positive integer" contract stated in the message
        # (n_folds == 0 would later break fold assignment with a modulo by
        # zero).
        if not isinstance(n_folds, int) or n_folds <= 0:
            raise ValueError(
                "n_folds {} is not a positive integer".format(n_folds)
            )
        if output_type not in {"cupy", "numpy", "auto"}:
            msg = (
                "output_type should be either 'cupy'"
                " or 'numpy' or 'auto', "
                "got {0}.".format(output_type)
            )
            raise ValueError(msg)
        if stat not in {"mean", "var", "median"}:
            msg = "stat should be 'mean', 'var' or 'median'." f"got {stat}."
            raise ValueError(msg)
        if not isinstance(seed, int):
            raise ValueError("seed {} is not an integer".format(seed))
        if split_method not in {
            "random",
            "continuous",
            "interleaved",
            "customize",
        }:
            msg = (
                "split_method should be either 'random'"
                " or 'continuous' or 'interleaved', or 'customize'"
                "got {0}.".format(split_method)
            )
            raise ValueError(msg)
        self.n_folds = n_folds
        self.seed = seed
        self.smooth = smooth
        self.split_method = split_method
        # Internal column names used in the working dataframes.
        self.y_col = "__TARGET__"
        self.y_col2 = "__TARGET__SQUARE__"
        self.x_col = "__FEA__"
        self.out_col = "__TARGET_ENCODE__"
        self.out_col2 = "__TARGET_ENCODE__SQUARE__"
        self.fold_col = "__FOLD__"
        self.id_col = "__INDEX__"
        self.train = None
        self.output_type = output_type
        self.stat = stat
        # FIX: previously `_fitted` was never initialized, so calling
        # `transform` before `fit` raised AttributeError rather than
        # NotFittedError in `_check_is_fitted`.
        self._fitted = False

    def fit(self, x, y, fold_ids=None):
        """
        Fit a TargetEncoder instance to a set of categories

        Parameters
        ----------
        x : cudf.Series or cudf.DataFrame or cupy.ndarray
            categories to be encoded. It's elements may or may
            not be unique
        y : cudf.Series or cupy.ndarray
            Series containing the target variable.
        fold_ids : cudf.Series or cupy.ndarray
            Series containing the indices of the customized
            folds. Its values should be integers in range
            `[0, N-1]` to split data into `N` folds. If None,
            fold_ids is generated based on `split_method`.

        Returns
        -------
        self : TargetEncoder
            A fitted instance of itself to allow method chaining
        """
        if self.split_method == "customize" and fold_ids is None:
            raise ValueError(
                "`fold_ids` is required "
                "since split_method is set to"
                "'customize'."
            )
        if fold_ids is not None and self.split_method != "customize":
            # FIX: this was `self.split_method == "customize"` — a
            # comparison with no effect, so the warned-about switch to
            # 'customize' never actually happened.
            self.split_method = "customize"
            warnings.warn(
                "split_method is set to 'customize'"
                "since `fold_ids` are provided."
            )
        if fold_ids is not None and len(fold_ids) != len(x):
            raise ValueError(
                f"`fold_ids` length {len(fold_ids)}"
                "is different from input data length"
                f"{len(x)}"
            )
        res, train = self._fit_transform(x, y, fold_ids=fold_ids)
        self.train_encode = res
        self.train = train
        self._fitted = True
        return self

    def fit_transform(self, x, y, fold_ids=None):
        """
        Simultaneously fit and transform an input

        This is functionally equivalent to (but faster than)
        `TargetEncoder().fit(y).transform(y)`

        Parameters
        ----------
        x : cudf.Series or cudf.DataFrame or cupy.ndarray
            categories to be encoded. It's elements may or may
            not be unique
        y : cudf.Series or cupy.ndarray
            Series containing the target variable.
        fold_ids : cudf.Series or cupy.ndarray
            Series containing the indices of the customized
            folds. Its values should be integers in range
            `[0, N-1]` to split data into `N` folds. If None,
            fold_ids is generated based on `split_method`.

        Returns
        -------
        encoded : cupy.ndarray
            The ordinally encoded input series
        """
        self.fit(x, y, fold_ids=fold_ids)
        return self.train_encode

    def transform(self, x):
        """
        Transform an input into its categorical keys.

        This is intended for test data. For fitting and transforming
        the training data, prefer `fit_transform`.

        Parameters
        ----------
        x : cudf.Series
            Input keys to be transformed. Its values doesn't have to
            match the categories given to `fit`

        Returns
        -------
        encoded : cupy.ndarray
            The ordinally encoded input series
        """
        self._check_is_fitted()
        test = self._data_with_strings_to_cudf_dataframe(x)
        # Transforming the exact training data must reuse the leakage-free
        # per-fold encoding computed during fit.
        if self._is_train_df(test):
            return self.train_encode
        x_cols = [i for i in test.columns.tolist() if i != self.id_col]
        test = test.merge(self.encode_all, on=x_cols, how="left")
        return self._impute_and_sort(test)

    def _fit_transform(self, x, y, fold_ids):
        """
        Core function of target encoding
        """
        self.output_type = self._get_output_type(x)
        cp.random.seed(self.seed)
        train = self._data_with_strings_to_cudf_dataframe(x)
        x_cols = [i for i in train.columns.tolist() if i != self.id_col]
        train[self.y_col] = self._make_y_column(y)
        self.n_folds = min(self.n_folds, len(train))
        train[self.fold_col] = self._make_fold_column(len(train), fold_ids)
        # Global fallback statistic used to impute unseen categories.
        self.y_stat_val = get_stat_func(self.stat)(train[self.y_col])
        if self.stat in ["median"]:
            # median cannot be combined from per-fold aggregates, so it is
            # computed fold-by-fold.
            return self._fit_transform_for_loop(train, x_cols)
        self.mean = train[self.y_col].mean()
        if self.stat == "var":
            # Var is derived from E[y^2] - E[y]^2, so track y^2 as well.
            y_cols = [self.y_col, self.y_col2]
            train[self.y_col2] = self._make_y_column(y * y)
            self.mean2 = train[self.y_col2].mean()
        else:
            y_cols = [self.y_col]
        y_count_each_fold, y_count_all = self._groupby_agg(
            train, x_cols, op="count", y_cols=y_cols
        )
        y_sum_each_fold, y_sum_all = self._groupby_agg(
            train, x_cols, op="sum", y_cols=y_cols
        )
        """
        Note:
            encode_each_fold is used to encode train data.
            encode_all is used to encode test data.
        """
        cols = [self.fold_col] + x_cols
        encode_each_fold = self._compute_output(
            y_sum_each_fold,
            y_count_each_fold,
            cols,
            f"{self.y_col}_x",
            f"{self.y_col2}_x",
        )
        encode_all = self._compute_output(
            y_sum_all, y_count_all, x_cols, self.y_col, self.y_col2
        )
        self.encode_all = encode_all
        train = train.merge(encode_each_fold, on=cols, how="left")
        del encode_each_fold
        return self._impute_and_sort(train), train

    def _fit_transform_for_loop(self, train, x_cols):
        # Per-fold path for statistics (e.g. median) that cannot be
        # reconstructed from sums/counts: each fold is encoded with the
        # statistic of all *other* folds.
        def _rename_col(df, col):
            df.columns = [col]
            return df.reset_index()

        res = []
        for f in train[self.fold_col].unique().values_host:
            mask = train[self.fold_col] == f
            dg = train.loc[~mask].groupby(x_cols).agg({self.y_col: self.stat})
            dg = _rename_col(dg, self.out_col)
            res.append(train.loc[mask].merge(dg, on=x_cols, how="left"))
        res = cudf.concat(res, axis=0)
        self.encode_all = train.groupby(x_cols).agg({self.y_col: self.stat})
        self.encode_all = _rename_col(self.encode_all, self.out_col)
        return self._impute_and_sort(res), train

    def _make_y_column(self, y):
        """
        Create a target column given y
        """
        if isinstance(y, cudf.Series) or isinstance(y, pandas.Series):
            return y.values
        elif isinstance(y, cp.ndarray) or isinstance(y, np.ndarray):
            if len(y.shape) == 1:
                return y
            elif y.shape[1] == 1:
                # Accept a single-column 2-D array as a 1-D target.
                return y[:, 0]
            else:
                raise ValueError(
                    f"Input of shape {y.shape} " "is not a 1-D array."
                )
        else:
            raise TypeError(
                f"Input of type {type(y)} is not cudf.Series, "
                "or pandas.Series"
                "or numpy.ndarray"
                "or cupy.ndarray"
            )

    def _make_fold_column(self, len_train, fold_ids):
        """
        Create a fold id column for each split
        """
        if self.split_method == "random":
            return cp.random.randint(0, self.n_folds, len_train)
        elif self.split_method == "continuous":
            return (
                cp.arange(len_train) / (len_train / self.n_folds)
            ) % self.n_folds
        elif self.split_method == "interleaved":
            return cp.arange(len_train) % self.n_folds
        elif self.split_method == "customize":
            if fold_ids is None:
                raise ValueError(
                    "fold_ids can't be None"
                    "since split_method is set to"
                    "'customize'."
                )
            return fold_ids
        else:
            msg = (
                "split_method should be either 'random'"
                " or 'continuous' or 'interleaved', "
                "got {0}.".format(self.split_method)
            )
            raise ValueError(msg)

    def _compute_output(self, df_sum, df_count, cols, y_col, y_col2=None):
        """
        Compute the output encoding based on aggregated sum and count
        """
        df_sum = df_sum.merge(df_count, on=cols, how="left")
        smooth = self.smooth
        # Smoothed mean: (sum + smooth * global_mean) / (count + smooth).
        df_sum[self.out_col] = (df_sum[f"{y_col}_x"] + smooth * self.mean) / (
            df_sum[f"{y_col}_y"] + smooth
        )
        if self.stat == "var":
            df_sum[self.out_col2] = (
                df_sum[f"{y_col2}_x"] + smooth * self.mean2
            ) / (df_sum[f"{y_col2}_y"] + smooth)
            # var = E[y^2] - E[y]^2, then Bessel-corrected by n / (n - 1).
            df_sum[self.out_col] = (
                df_sum[self.out_col2] - df_sum[self.out_col] ** 2
            )
            df_sum[self.out_col] = (
                df_sum[self.out_col]
                * df_sum[f"{y_col2}_y"]
                / (df_sum[f"{y_col2}_y"] - 1)
            )
        return df_sum

    def _groupby_agg(self, train, x_cols, op, y_cols):
        """
        Compute aggregated value of each fold and overall dataframe
        grouped by `x_cols` and agg by `op`
        """
        cols = [self.fold_col] + x_cols
        df_each_fold = train.groupby(cols, as_index=False).agg(
            {y_col: op for y_col in y_cols}
        )
        df_all = df_each_fold.groupby(x_cols, as_index=False).agg(
            {y_col: "sum" for y_col in y_cols}
        )
        df_each_fold = df_each_fold.merge(df_all, on=x_cols, how="left")
        for y_col in y_cols:
            # Leakage-free aggregate for a fold = overall - this fold.
            df_each_fold[f"{y_col}_x"] = (
                df_each_fold[f"{y_col}_y"] - df_each_fold[f"{y_col}_x"]
            )
        return df_each_fold, df_all

    def _check_is_fitted(self):
        if not self._fitted or self.train is None:
            # FIX: the message previously said "LabelEncoder".
            msg = (
                "This TargetEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator."
            )
            raise NotFittedError(msg)

    def _is_train_df(self, df):
        """
        Return True if the dataframe `df` is the training dataframe, which
        is used in `fit_transform`
        """
        if len(df) != len(self.train):
            return False
        self.train = self.train.sort_values(self.id_col).reset_index(drop=True)
        for col in df.columns:
            if col not in self.train.columns:
                raise ValueError(
                    f"Input column {col} " "is not in train data."
                )
            if not (df[col] == self.train[col]).all():
                return False
        return True

    def _impute_and_sort(self, df):
        """
        Impute and sort the result encoding in the same row order as input
        """
        # NaNs (e.g. unseen categories after the left merge) are imputed
        # with the global target statistic.
        df[self.out_col] = df[self.out_col].nans_to_nulls()
        df[self.out_col] = df[self.out_col].fillna(self.y_stat_val)
        df = df.sort_values(self.id_col)
        res = df[self.out_col].values.copy()
        if self.output_type == "numpy":
            return cp.asnumpy(res)
        return res

    def _data_with_strings_to_cudf_dataframe(self, x):
        """
        Convert input data with strings to cudf dataframe.

        Supported data types are:
        1D or 2D numpy/cupy arrays
        pandas/cudf Series
        pandas/cudf DataFrame
        Input data could have one or more string columns.
        """
        if isinstance(x, cudf.DataFrame):
            df = x.copy()
        elif isinstance(x, cudf.Series):
            df = x.to_frame().copy()
        elif isinstance(x, cp.ndarray) or isinstance(x, np.ndarray):
            df = cudf.DataFrame()
            if len(x.shape) == 1:
                df[self.x_col] = x
            else:
                df = cudf.DataFrame(
                    x, columns=[f"{self.x_col}_{i}" for i in range(x.shape[1])]
                )
        elif isinstance(x, pandas.DataFrame):
            df = cudf.from_pandas(x)
        elif isinstance(x, pandas.Series):
            df = cudf.from_pandas(x.to_frame())
        else:
            raise TypeError(
                f"Input of type {type(x)} is not cudf.Series, cudf.DataFrame "
                "or pandas.Series or pandas.DataFrame"
                "or cupy.ndarray or numpy.ndarray"
            )
        # Remember the original row order so results can be restored to it.
        df[self.id_col] = cp.arange(len(x))
        return df.reset_index(drop=True)

    def _get_output_type(self, x):
        """
        Infer output type if 'auto'
        """
        if self.output_type != "auto":
            return self.output_type
        if (
            isinstance(x, np.ndarray)
            or isinstance(x, pandas.DataFrame)
            or isinstance(x, pandas.Series)
        ):
            return "numpy"
        return "cupy"

    def get_param_names(self):
        # Hyper-parameters exposed for cloning / param inspection.
        return [
            "n_folds",
            "smooth",
            "seed",
            "split_method",
        ]

    def get_params(self, deep=False):
        """
        Returns a dict of all params owned by this class.
        """
        params = dict()
        variables = self.get_param_names()
        for key in variables:
            var_value = getattr(self, key, None)
            params[key] = var_value
        return params
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/label.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.label import check_labels, invert_labels, make_monotonic
from cuml.internals.array_sparse import SparseCumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.common import CumlArray, has_scipy
import cuml.internals
from cuml import Base
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@cuml.internals.api_return_sparse_array()
def label_binarize(
    y, classes, neg_label=0, pos_label=1, sparse_output=False
) -> SparseCumlArray:
    """
    A stateless helper function to dummy encode multi-class labels.

    Parameters
    ----------
    y : array-like of size [n_samples,] or [n_samples, n_classes]
    classes : the set of unique classes in the input
    neg_label : integer the negative value for transformed output
    pos_label : integer the positive value for transformed output
    sparse_output : bool whether to return sparse array

    Returns
    -------
    A CSR sparse matrix when ``sparse_output`` is True, otherwise a dense
    array of the same dtype as ``y`` containing ``pos_label``/``neg_label``.

    Raises
    ------
    ValueError
        If ``y`` contains labels that are not present in ``classes``.
    """
    classes = cp.asarray(classes, dtype=classes.dtype)
    labels = cp.asarray(y, dtype=y.dtype)

    # Every label must map to one of the provided classes, otherwise the
    # one-hot column index below would be undefined.
    if not check_labels(labels, classes):
        raise ValueError("Unseen classes encountered in input")

    row_ind = cp.arange(0, labels.shape[0], 1, dtype=y.dtype)
    # Map each label to its class index; these are the column coordinates
    # of the one-hot matrix.
    col_ind, _ = make_monotonic(labels, classes, copy=True)

    # Convert from CumlArray to cupy
    col_ind = cp.asarray(col_ind)

    val = cp.full(row_ind.shape[0], pos_label, dtype=y.dtype)

    # NOTE(review): the row dimension is taken from col_ind.shape[0]; this
    # equals row_ind.shape[0] for 1-d input — confirm intent for 2-d input.
    sp = cupyx.scipy.sparse.coo_matrix(
        (val, (row_ind, col_ind)),
        shape=(col_ind.shape[0], classes.shape[0]),
        dtype=cp.float32,
    )

    cp.cuda.Stream.null.synchronize()

    if sparse_output:
        sp = sp.tocsr()
        return sp
    else:
        # Dense path: positions not set by the sparse encoding are zero;
        # rewrite them to the requested negative label.
        arr = sp.toarray().astype(y.dtype)
        arr[arr == 0] = neg_label
        return arr
class LabelBinarizer(Base):
    """
    A multi-class dummy encoder for labels.

    Parameters
    ----------
    neg_label : integer (default=0)
        label to be used as the negative binary label
    pos_label : integer (default=1)
        label to be used as the positive binary label
    sparse_output : bool (default=False)
        whether to return sparse arrays for transformed output
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Examples
    --------
    Create an array with labels and dummy encode them

    .. code-block:: python

        >>> import cupy as cp
        >>> import cupyx
        >>> from cuml.preprocessing import LabelBinarizer
        >>> labels = cp.asarray([0, 5, 10, 7, 2, 4, 1, 0, 0, 4, 3, 2, 1],
        ...                     dtype=cp.int32)
        >>> lb = LabelBinarizer()
        >>> encoded = lb.fit_transform(labels)
        >>> print(str(encoded))
        [[1 0 0 0 0 0 0 0]
        [0 0 0 0 0 1 0 0]
        [0 0 0 0 0 0 0 1]
        [0 0 0 0 0 0 1 0]
        [0 0 1 0 0 0 0 0]
        [0 0 0 0 1 0 0 0]
        [0 1 0 0 0 0 0 0]
        [1 0 0 0 0 0 0 0]
        [1 0 0 0 0 0 0 0]
        [0 0 0 0 1 0 0 0]
        [0 0 0 1 0 0 0 0]
        [0 0 1 0 0 0 0 0]
        [0 1 0 0 0 0 0 0]]
        >>> decoded = lb.inverse_transform(encoded)
        >>> print(str(decoded))
        [ 0  5 10  7  2  4  1  0  0  4  3  2  1]
    """

    # Learned class labels; exposed through cuML's array descriptor so the
    # configured output type is honored on access.
    classes_ = CumlArrayDescriptor()

    def __init__(
        self,
        *,
        neg_label=0,
        pos_label=1,
        sparse_output=False,
        handle=None,
        verbose=False,
        output_type=None,
    ):
        super().__init__(
            handle=handle, verbose=verbose, output_type=output_type
        )

        if neg_label >= pos_label:
            raise ValueError(
                "neg_label=%s must be less "
                "than pos_label=%s." % (neg_label, pos_label)
            )

        if sparse_output and (pos_label == 0 or neg_label != 0):
            # Sparse matrices can only efficiently represent the implicit
            # zero, so the negative label must be 0 and the positive nonzero.
            raise ValueError(
                "Sparse binarization is only supported "
                "with non-zero "
                "pos_label and zero neg_label, got pos_label=%s "
                "and neg_label=%s" % (pos_label, neg_label)
            )

        self.neg_label = neg_label
        self.pos_label = pos_label
        self.sparse_output = sparse_output
        self.classes_ = None

    def fit(self, y) -> "LabelBinarizer":
        """
        Fit label binarizer

        Parameters
        ----------
        y : array of shape [n_samples,] or [n_samples, n_classes]
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.

        Returns
        -------
        self : returns an instance of self.
        """
        if y.ndim > 2:
            raise ValueError("labels cannot be greater than 2 dimensions")

        if y.ndim == 2:
            unique_classes = cp.unique(y)
            # Bug fix: comparing a cupy array to a Python list with `!=`
            # produces an element-wise result whose truth value is ambiguous
            # (and raises for multi-element arrays). Use array_equal to
            # verify the 2-d input contains exactly the labels {0, 1}.
            if not cp.array_equal(unique_classes, cp.asarray([0, 1])):
                raise ValueError("2-d array must be binary")

            self.classes_ = cp.arange(0, y.shape[1])
        else:
            self.classes_ = cp.unique(y).astype(y.dtype)

        cp.cuda.Stream.null.synchronize()

        return self

    def fit_transform(self, y) -> SparseCumlArray:
        """
        Fit label binarizer and transform multi-class labels to their
        dummy-encoded representation.

        Parameters
        ----------
        y : array of shape [n_samples,] or [n_samples, n_classes]

        Returns
        -------
        arr : array with encoded labels
        """
        return self.fit(y).transform(y)

    def transform(self, y) -> SparseCumlArray:
        """
        Transform multi-class labels to their dummy-encoded representation
        labels.

        Parameters
        ----------
        y : array of shape [n_samples,] or [n_samples, n_classes]

        Returns
        -------
        arr : array with encoded labels
        """
        return label_binarize(
            y,
            self.classes_,
            pos_label=self.pos_label,
            neg_label=self.neg_label,
            sparse_output=self.sparse_output,
        )

    def inverse_transform(self, y, threshold=None) -> CumlArray:
        """
        Transform binary labels back to original multi-class labels

        Parameters
        ----------
        y : array of shape [n_samples, n_classes]
        threshold : float this value is currently ignored

        Returns
        -------
        arr : array with original labels
        """
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.internals.import_utils import (
                dummy_function_always_false as scipy_sparse_isspmatrix,
            )

        # If we are already given multi-class, just return it.
        if cupyx.scipy.sparse.isspmatrix(y):
            # For a sparse one-hot row, the single stored column index IS
            # the (monotonic) class index.
            y_mapped = y.tocsr().indices.astype(self.classes_.dtype)
        elif scipy_sparse_isspmatrix(y):
            y = y.tocsr()
            y_mapped = cp.array(y.indices, dtype=y.indices.dtype)
        else:
            # Dense path: the argmax over columns recovers the class index.
            y_mapped = cp.argmax(cp.asarray(y, dtype=y.dtype), axis=1).astype(
                y.dtype
            )

        return invert_labels(y_mapped, self.classes_)

    def get_param_names(self):
        return super().get_param_names() + [
            "neg_label",
            "pos_label",
            "sparse_output",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/encoders.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import List, Optional, TypeVar
import cuml.internals.logger as logger
from cudf import DataFrame, Series
from cuml import Base
from cuml.common.doc_utils import generate_docstring
from cuml.common.exceptions import NotFittedError
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
)
from cuml.preprocessing import LabelEncoder
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
GenericIndex = gpu_only_import_from("cudf", "GenericIndex")
class CheckFeaturesMixIn:
    """Mixin that records and validates the number of input features."""

    def _check_n_features(self, X, reset: bool = False):
        """Record (``reset=True``) or validate (``reset=False``) X's width.

        On reset, stores ``n_features_in_`` and, for column-labelled inputs,
        ``feature_names_in_``. Otherwise verifies X matches the fitted width.
        """
        num_cols = X.shape[1]
        if reset:
            self.n_features_in_ = num_cols
            if hasattr(X, "columns"):
                self.feature_names_in_ = [str(col) for col in X.columns]
            return
        if not hasattr(self, "n_features_in_"):
            raise RuntimeError(
                "The reset parameter is False but there is no "
                "n_features_in_ attribute. Is this estimator fitted?"
            )
        if num_cols != self.n_features_in_:
            raise ValueError(
                "X has {} features, but this {} is expecting {} features "
                "as input.".format(
                    num_cols,
                    self.__class__.__name__,
                    self.n_features_in_,
                )
            )
class BaseEncoder(Base, CheckFeaturesMixIn):
    """Base implementation for encoding categorical values, uses
    :py:class:`~cuml.preprocessing.LabelEncoder` for obtaining unique values.

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    """

    def _set_input_type(self, value):
        # Remember only the first observed input container so that fit and
        # transform report a consistent input type.
        if self.input_type is None:
            self.input_type = value

    def _check_input(self, X, is_categories=False):
        """If input is cupy, convert it to a DataFrame with 0 copies."""
        if isinstance(X, cp.ndarray):
            self._set_input_type("array")
            if is_categories:
                # Category arrays are supplied feature-major; transpose so
                # each feature becomes one DataFrame column.
                X = X.transpose()
            return DataFrame(X)
        else:
            self._set_input_type("df")
            return X

    def _check_input_fit(self, X, is_categories=False):
        """Helper function used in fit, can be overridden in subclasses."""
        self._check_n_features(X, reset=True)
        return self._check_input(X, is_categories=is_categories)

    def _unique(self, inp):
        """Helper function used in fit. Can be overridden in subclasses."""
        # Default implementation passes input through directly since this is
        # performed in `LabelEncoder.fit()`
        return inp

    def _fit(self, X, need_drop: bool):
        """Fit one LabelEncoder per feature.

        With ``categories='auto'`` the encoders learn categories from X;
        otherwise the user-supplied categories are used and, when
        ``handle_unknown='error'``, X is checked against them.
        ``need_drop`` additionally computes drop indices (OneHotEncoder).
        """
        X = self._check_input_fit(X)
        if type(self.categories) is str and self.categories == "auto":
            self._features = X.columns
            self._encoders = {
                feature: LabelEncoder(
                    handle=self.handle,
                    verbose=self.verbose,
                    output_type=self.output_type,
                    handle_unknown=self.handle_unknown,
                ).fit(self._unique(X[feature]))
                for feature in self._features
            }
        else:
            self.categories = self._check_input_fit(self.categories, True)
            self._features = self.categories.columns
            if len(self._features) != X.shape[1]:
                raise ValueError(
                    "Shape mismatch: if categories is not 'auto',"
                    " it has to be of shape (n_features, _)."
                )
            self._encoders = dict()
            for feature in self._features:
                le = LabelEncoder(
                    handle=self.handle,
                    verbose=self.verbose,
                    output_type=self.output_type,
                    handle_unknown=self.handle_unknown,
                )

                self._encoders[feature] = le.fit(self.categories[feature])

                if self.handle_unknown == "error":
                    # Fail early if X holds values absent from the
                    # user-provided category list.
                    if self._has_unknown(
                        X[feature], self._encoders[feature].classes_
                    ):
                        msg = (
                            "Found unknown categories in column {0}"
                            " during fit".format(feature)
                        )
                        raise KeyError(msg)

        if need_drop:
            self.drop_idx_ = self._compute_drop_idx()
        self._fitted = True

    @property
    def categories_(self):
        """Returns categories used for the one hot encoding in the correct order."""
        return [self._encoders[f].classes_ for f in self._features]
class OneHotEncoder(BaseEncoder):
    """
    Encode categorical features as a one-hot numeric array.

    The input to this estimator should be a :py:class:`cuDF.DataFrame` or a
    :py:class:`cupy.ndarray`, denoting the unique values taken on by categorical
    (discrete) features. The features are encoded using a one-hot (aka 'one-of-K' or
    'dummy') encoding scheme. This creates a binary column for each category and returns
    a sparse matrix or dense array (depending on the ``sparse`` parameter).

    By default, the encoder derives the categories based on the unique values
    in each feature. Alternatively, you can also specify the `categories`
    manually.

    .. note:: a one-hot encoding of y labels should use a LabelBinarizer
        instead.

    Parameters
    ----------
    categories : 'auto' an cupy.ndarray or a cudf.DataFrame, default='auto'
        Categories (unique values) per feature:

        - 'auto' : Determine categories automatically from the training data.

        - DataFrame/ndarray : ``categories[col]`` holds the categories expected
          in the feature col.

    drop : 'first', None, a dict or a list, default=None
        Specifies a methodology to use to drop one of the categories per
        feature. This is useful in situations where perfectly collinear
        features cause problems, such as when feeding the resulting data
        into a neural network or an unregularized regression.

        - None : retain all features (the default).

        - 'first' : drop the first category in each feature. If only one
          category is present, the feature will be dropped entirely.

        - dict/list : ``drop[col]`` is the category in feature col that
          should be dropped.

    sparse : bool, default=True
        This feature is not fully supported by cupy
        yet, causing incorrect values when computing one hot encodings.
        See https://github.com/cupy/cupy/issues/3223

    dtype : number type, default=np.float
        Desired datatype of transform's output.

    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros. In the inverse transform, an unknown category
        will be denoted as None.

    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    drop_idx_ : array of shape (n_features,)
        ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category to
        be dropped for each feature. None if all the transformed features will
        be retained.
    """

    def __init__(
        self,
        *,
        categories="auto",
        drop=None,
        sparse=True,
        dtype=np.float32,
        handle_unknown="error",
        handle=None,
        verbose=False,
        output_type=None,
    ):
        super().__init__(
            handle=handle, verbose=verbose, output_type=output_type
        )
        self.categories = categories
        self.sparse = sparse
        self.dtype = dtype
        self.handle_unknown = handle_unknown
        self.drop = drop
        self._fitted = False
        self.drop_idx_ = None
        self._features = None
        self._encoders = None
        self.input_type = None
        # Sparse output is restricted to float/complex dtype kind codes.
        if sparse and np.dtype(dtype) not in ["f", "d", "F", "D"]:
            raise ValueError(
                "Only float32, float64, complex64 and complex128 "
                "are supported when using sparse"
            )

    def _validate_keywords(self):
        if self.handle_unknown not in ("error", "ignore"):
            msg = (
                "handle_unknown should be either 'error' or 'ignore', "
                "got {0}.".format(self.handle_unknown)
            )
            raise ValueError(msg)
        # If we have both dropped columns and ignored unknown
        # values, there will be ambiguous cells. This creates difficulties
        # in interpreting the model.
        if self.drop is not None and self.handle_unknown != "error":
            raise ValueError(
                "`handle_unknown` must be 'error' when the drop parameter is "
                "specified, as both would create categories that are all "
                "zero."
            )

    def _check_is_fitted(self):
        # Guard used by transform/inverse_transform/get_feature_names.
        if not self._fitted:
            msg = (
                "This OneHotEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator."
            )
            raise NotFittedError(msg)

    def _compute_drop_idx(self):
        """Helper to compute indices to drop from category to drop."""
        if self.drop is None:
            return None
        elif isinstance(self.drop, str) and self.drop == "first":
            # Drop the first learned category of every feature.
            return {feature: 0 for feature in self._encoders.keys()}
        elif isinstance(self.drop, (dict, list)):
            # A list is positional: entry i applies to feature i.
            if isinstance(self.drop, list):
                self.drop = dict(zip(range(len(self.drop)), self.drop))
            if len(self.drop.keys()) != len(self._encoders):
                msg = (
                    "`drop` should have as many columns as the number "
                    "of features ({}), got {}"
                )
                raise ValueError(
                    msg.format(len(self._encoders), len(self.drop.keys()))
                )
            drop_idx = dict()
            for feature in self.drop.keys():
                self.drop[feature] = Series(self.drop[feature])
                if len(self.drop[feature]) != 1:
                    msg = (
                        "Trying to drop multiple values for feature {}, "
                        "this is not supported."
                    ).format(feature)
                    raise ValueError(msg)
                cats = self._encoders[feature].classes_
                if not self.drop[feature].isin(cats).all():
                    msg = (
                        "Some categories for feature {} were supposed "
                        "to be dropped, but were not found in the encoder "
                        "categories.".format(feature)
                    )
                    raise ValueError(msg)
                cats = Series(cats)
                # Position(s) of the dropped category within this feature's
                # learned category list.
                idx = cats.isin(self.drop[feature])
                drop_idx[feature] = cp.asarray(cats[idx].index)
            return drop_idx
        else:
            msg = (
                "Wrong input for parameter `drop`. Expected "
                "'first', None or a dict, got {}"
            )
            raise ValueError(msg.format(type(self.drop)))

    def _check_input_fit(self, X, is_categories=False):
        """Helper function used in fit. Can be overridden in subclasses."""
        return self._check_input(X, is_categories=is_categories)

    def _has_unknown(self, X_cat, encoder_cat):
        """Check if X_cat has categories that are not present in encoder_cat."""
        return not X_cat.isin(encoder_cat).all()

    @generate_docstring(y=None)
    def fit(self, X, y=None):
        """Fit OneHotEncoder to X."""
        self._validate_keywords()
        self._fit(X, True)
        return self

    @generate_docstring(
        y=None,
        return_values={
            "name": "X_out",
            "description": "Transformed input.",
            "type": "sparse matrix if sparse=True else a 2-d array",
        },
    )
    def fit_transform(self, X, y=None):
        """
        Fit OneHotEncoder to X, then transform X. Equivalent to fit(X).transform(X).
        """
        X = self._check_input(X)
        return self.fit(X).transform(X)

    @generate_docstring(
        return_values={
            "name": "X_out",
            "description": "Transformed input.",
            "type": "sparse matrix if sparse=True else a 2-d array",
        }
    )
    def transform(self, X):
        """Transform X using one-hot encoding."""
        self._check_is_fitted()
        X = self._check_input(X)

        # Build COO coordinates feature by feature; `j` tracks the running
        # offset of the current feature's columns in the output matrix.
        cols, rows = list(), list()
        col_idx = None
        j = 0

        try:
            for feature in X.columns:
                encoder = self._encoders[feature]
                col_idx = encoder.transform(X[feature])
                # Nulls mark unknown categories (handle_unknown='ignore');
                # those rows simply get no entry for this feature.
                idx_to_keep = col_idx.notnull().to_cupy()
                col_idx = col_idx.dropna().to_cupy()

                # Simple test to auto upscale col_idx type as needed
                # First, determine the maximum value we will add assuming
                # monotonically increasing up to len(encoder.classes_)
                # Ensure we dont go negative by clamping to 0
                max_value = int(max(len(encoder.classes_) - 1, 0) + j)

                # If we exceed the max value, upconvert
                if max_value > np.iinfo(col_idx.dtype).max:
                    col_idx = col_idx.astype(np.min_scalar_type(max_value))

                    logger.debug(
                        "Upconverting column: '{}', to dtype: '{}', "
                        "to support up to {} classes".format(
                            feature, np.min_scalar_type(max_value), max_value
                        )
                    )

                # increase indices to take previous features into account
                col_idx += j

                # Filter out rows with null values
                row_idx = cp.arange(len(X))[idx_to_keep]

                if self.drop_idx_ is not None:
                    drop_idx = self.drop_idx_[feature] + j
                    mask = cp.ones(col_idx.shape, dtype=bool)
                    mask[col_idx == drop_idx] = False
                    col_idx = col_idx[mask]
                    row_idx = row_idx[mask]
                    # account for dropped category in indices
                    col_idx[col_idx > drop_idx] -= 1
                    # account for dropped category in current cats number
                    j -= 1

                j += len(encoder.classes_)
                cols.append(col_idx)
                rows.append(row_idx)

            cols = cp.concatenate(cols)
            rows = cp.concatenate(rows)
            val = cp.ones(rows.shape[0], dtype=self.dtype)

            ohe = cupyx.scipy.sparse.coo_matrix(
                (val, (rows, cols)), shape=(len(X), j), dtype=self.dtype
            )

            if not self.sparse:
                ohe = ohe.toarray()

            return ohe

        except TypeError as e:
            # Append to cols to include the column that threw the error
            cols.append(col_idx)

            # Build a string showing what the types are
            input_types_str = ", ".join([str(x.dtype) for x in cols])

            raise TypeError(
                "A TypeError occurred while calculating column "
                "category indices, most likely due to integer overflow. This "
                "can occur when columns have a large difference in the number "
                "of categories, resulting in different category code dtypes "
                "for different columns."
                "Calculated column code dtypes: {}.\n"
                "Internal Error: {}".format(input_types_str, repr(e))
            )

    def inverse_transform(self, X):
        """Convert the data back to the original representation. In case unknown
        categories are encountered (all zeros in the one-hot encoding), ``None`` is used
        to represent this category.

        The return type is the same as the type of the input used by the first
        call to fit on this estimator instance.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : cudf.DataFrame or cupy.ndarray
            Inverse transformed array.
        """
        self._check_is_fitted()
        if cupyx.scipy.sparse.issparse(X):
            # cupyx.scipy.sparse 7.x does not support argmax,
            # when we upgrade cupy to 8.x, we should add a condition in the
            # if close: `and not cupyx.scipy.sparse.issparsecsc(X)`
            # and change the following line by `X = X.tocsc()`
            X = X.toarray()
        result = DataFrame(columns=self._encoders.keys())
        # `j` walks the encoded column blocks, one block per feature.
        j = 0
        for feature in self._encoders.keys():
            feature_enc = self._encoders[feature]
            cats = feature_enc.classes_

            if self.drop is not None:
                # Remove dropped categories
                dropped_class_idx = Series(self.drop_idx_[feature])
                dropped_class_mask = Series(cats).isin(cats[dropped_class_idx])
                if len(cats) == 1:
                    # Feature was reduced to a single (dropped) category:
                    # every row decodes to that category.
                    inv = Series(GenericIndex(cats[0]).repeat(X.shape[0]))
                    result[feature] = inv
                    continue
                cats = cats[~dropped_class_mask]

            enc_size = len(cats)
            x_feature = X[:, j : j + enc_size]
            idx = cp.argmax(x_feature, axis=1)
            inv = Series(cats.iloc[idx]).reset_index(drop=True)

            if self.handle_unknown == "ignore":
                # All-zero rows carried no known category for this feature.
                not_null_idx = x_feature.any(axis=1)
                inv.iloc[~not_null_idx] = None
            elif self.drop is not None:
                # drop will either be None or handle_unknown will be error. If
                # self.drop is not None, then we can safely assume that all of
                # the nulls in each column are the dropped value
                dropped_mask = cp.asarray(x_feature.sum(axis=1) == 0).flatten()
                if dropped_mask.any():
                    inv[dropped_mask] = feature_enc.inverse_transform(
                        Series(self.drop_idx_[feature])
                    )[0]

            result[feature] = inv

            j += enc_size
        if self.input_type == "array":
            try:
                result = result.to_cupy()
            except ValueError:
                warnings.warn(
                    "The input one hot encoding contains rows with "
                    "unknown categories. Since device arrays do not "
                    "support null values, the output will be "
                    "returned as a DataFrame "
                    "instead."
                )
        return result

    def get_feature_names(self, input_features=None):
        """Return feature names for output features.

        Parameters
        ----------
        input_features : list of str of shape (n_features,)
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : ndarray of shape (n_output_features,)
            Array of feature names.
        """
        self._check_is_fitted()
        cats = self.categories_
        if input_features is None:
            input_features = ["x%d" % i for i in range(len(cats))]
        elif len(input_features) != len(self.categories_):
            raise ValueError(
                "input_features should have length equal to number of "
                "features ({}), got {}".format(
                    len(self.categories_), len(input_features)
                )
            )

        feature_names = []
        for i in range(len(cats)):
            names = [
                input_features[i] + "_" + str(t) for t in cats[i].values_host
            ]
            # NOTE(review): drop_idx_ is keyed by feature identifier while `i`
            # here is a positional index — this lines up for array inputs
            # (integer column names); confirm behavior for DataFrame inputs
            # with string column names.
            if self.drop_idx_ is not None and self.drop_idx_[i] is not None:
                names.pop(self.drop_idx_[i])
            feature_names.extend(names)
        return np.array(feature_names, dtype=object)

    def get_param_names(self):
        return super().get_param_names() + [
            "categories",
            "drop",
            "sparse",
            "dtype",
            "handle_unknown",
        ]
def _slice_feat(X, i):
if hasattr(X, "iloc"):
return X[i]
return X[:, i]
def _get_output(
    output_type: Optional[str],
    input_type: Optional[str],
    out: DataFrame,
    dtype,
):
    """Convert the cuDF frame ``out`` into the requested output container.

    ``output_type='input'`` mirrors the container the estimator was fed
    (``input_type`` 'array' -> cupy, 'df' -> cudf); ``None`` defaults to
    cupy. Nulls become NaN in array outputs.
    """
    requested = output_type
    if requested == "input":
        if input_type == "array":
            requested = "cupy"
        elif input_type == "df":
            requested = "cudf"
    if requested is None:
        requested = "cupy"

    if requested == "cudf":
        return out
    if requested == "cupy":
        return out.astype(dtype).to_cupy(na_value=np.nan)
    if requested == "numpy":
        return cp.asnumpy(out.to_cupy(na_value=np.nan, dtype=dtype))
    if requested == "pandas":
        return out.to_pandas()
    raise ValueError("Unsupported output type.")
class OrdinalEncoder(BaseEncoder):
    def __init__(
        self,
        *,
        categories="auto",
        dtype=np.float64,
        handle_unknown="error",
        handle=None,
        verbose=False,
        output_type=None,
    ) -> None:
        """Encode categorical features as an integer array.

        The input to this transformer should be an :py:class:`cudf.DataFrame` or a
        :py:class:`cupy.ndarray`, denoting the unique values taken on by categorical
        (discrete) features. The features are converted to ordinal integers. This
        results in a single column of integers (0 to n_categories - 1) per feature.

        Parameters
        ----------
        categories : 'auto' an cupy.ndarray or a cudf.DataFrame, default='auto'
            Categories (unique values) per feature:

            - 'auto' : Determine categories automatically from the training data.

            - DataFrame/ndarray : ``categories[col]`` holds the categories expected
              in the feature col.

        handle_unknown : {'error', 'ignore'}, default='error'
            Whether to raise an error or ignore if an unknown categorical feature is
            present during transform (default is to raise). When this parameter is set
            to 'ignore' and an unknown category is encountered during transform, the
            resulting encoded value would be null when output type is cudf
            dataframe.
        handle : cuml.Handle
            Specifies the cuml.handle that holds internal CUDA state for computations in
            this model. Most importantly, this specifies the CUDA stream that will be
            used for the model's computations, so users can run different models
            concurrently in different streams by creating handles in several streams.
            If it is None, a new one is created.
        verbose : int or boolean, default=False
            Sets logging level. It must be one of `cuml.common.logger.level_*`. See
            :ref:`verbosity-levels` for more info.
        output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
            'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
            Return results and set estimator attributes to the indicated output
            type. If None, the output type set at the module level
            (`cuml.global_settings.output_type`) will be used. See
            :ref:`output-data-type-configuration` for more info.
        """
        super().__init__(
            handle=handle, verbose=verbose, output_type=output_type
        )

        self.categories = categories
        self.dtype = dtype
        self.handle_unknown = handle_unknown

        self.input_type = None

    @generate_docstring(y=None)
    def fit(self, X, y=None) -> "OrdinalEncoder":
        """Fit Ordinal to X."""
        # No category dropping for ordinal encoding, hence need_drop=False.
        self._fit(X, need_drop=False)
        return self

    @generate_docstring(
        return_values={
            "name": "X_out",
            "description": "Transformed input.",
            "type": "Type is specified by the `output_type` parameter.",
        }
    )
    def transform(self, X):
        """Transform X using ordinal encoding."""
        self._check_n_features(X, reset=False)

        # Encode each feature independently with its fitted LabelEncoder.
        result = {}
        for feature in self._features:
            Xi = _slice_feat(X, feature)
            col_idx = self._encoders[feature].transform(Xi)
            result[feature] = col_idx

        r = DataFrame(result)
        return _get_output(self.output_type, self.input_type, r, self.dtype)

    @generate_docstring(
        y=None,
        return_values={
            "name": "X_out",
            "description": "Transformed input.",
            "type": "Type is specified by the `output_type` parameter.",
        },
    )
    def fit_transform(self, X, y=None):
        """Fit OrdinalEncoder to X, then transform X. Equivalent to fit(X).transform(X)."""
        X = self._check_input(X)
        return self.fit(X).transform(X)

    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : Type is specified by the `output_type` parameter.
            Inverse transformed array.
        """
        self._check_n_features(X, reset=False)

        # Invert each feature's codes back to its original categories.
        result = {}
        for feature in self._features:
            Xi = _slice_feat(X, feature)
            inv = self._encoders[feature].inverse_transform(Xi)
            result[feature] = inv

        r = DataFrame(result)
        return _get_output(self.output_type, self.input_type, r, self.dtype)

    def get_param_names(self):
        return super().get_param_names() + [
            "categories",
            "dtype",
            "handle_unknown",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/preprocessing/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.model_selection import train_test_split
from cuml.preprocessing.LabelEncoder import LabelEncoder
from cuml.preprocessing.label import LabelBinarizer, label_binarize
from cuml.preprocessing.encoders import OneHotEncoder, OrdinalEncoder
from cuml.preprocessing.TargetEncoder import TargetEncoder
from cuml.preprocessing import text
from cuml._thirdparty.sklearn.preprocessing import (
Binarizer,
FunctionTransformer,
KBinsDiscretizer,
KernelCenterer,
MaxAbsScaler,
MinMaxScaler,
MissingIndicator,
Normalizer,
PolynomialFeatures,
PowerTransformer,
QuantileTransformer,
RobustScaler,
SimpleImputer,
StandardScaler,
)
from cuml._thirdparty.sklearn.preprocessing import (
add_dummy_feature,
binarize,
maxabs_scale,
minmax_scale,
normalize,
power_transform,
quantile_transform,
robust_scale,
scale,
)
__all__ = [
# Classes
"Binarizer",
"FunctionTransformer",
"KBinsDiscretizer",
"KernelCenterer",
"LabelBinarizer",
"LabelEncoder",
"MaxAbsScaler",
"MinMaxScaler",
"MissingIndicator",
"Normalizer",
"OneHotEncoder",
"OrdinalEncoder",
"PolynomialFeatures",
"PowerTransformer",
"QuantileTransformer",
"RobustScaler",
"SimpleImputer",
"StandardScaler",
"TargetEncoder",
# Functions
"add_dummy_feature",
"binarize",
"label_binarize",
"maxabs_scale",
"minmax_scale",
"normalize",
"power_transform",
"quantile_transform",
"robust_scale",
"scale",
"train_test_split",
# Modules
"text",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text import stem
__all__ = ["stem"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .porter_stemmer_utils.measure_utils import (
has_positive_measure,
measure_gt_n,
measure_eq_n,
)
from .porter_stemmer_utils.len_flags_utils import (
len_eq_n,
len_gt_n,
)
from .porter_stemmer_utils.consonant_vowel_utils import (
contains_vowel,
is_consonant,
)
from .porter_stemmer_utils.porter_stemmer_rules import (
ends_with_suffix,
ends_with_double_constant,
last_char_not_in,
last_char_in,
ends_cvc,
)
from .porter_stemmer_utils.suffix_utils import (
get_stem_series,
get_str_replacement_series,
replace_suffix,
)
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
# Implementation based on nltk//stem/porter.html
# https://www.nltk.org/_modules/nltk/stem/porter.html
class PorterStemmer:
    """
    A word stemmer based on the Porter stemming algorithm.
    Porter, M. "An algorithm for suffix stripping."
    Program 14.3 (1980): 130-137.
    See http://www.tartarus.org/~martin/PorterStemmer/ for the homepage
    of the algorithm.
    Martin Porter has endorsed several modifications to the Porter
    algorithm since writing his original paper, and those extensions are
    included in the implementations on his website. Additionally, others
    have proposed further improvements to the algorithm, including NLTK
    contributors. Only below mode is supported currently
    PorterStemmer.NLTK_EXTENSIONS
    - Implementation that includes further improvements devised by
      NLTK contributors or taken from other modified implementations
      found on the web.

    Unlike NLTK's word-at-a-time stemmer, every step here operates on a
    whole cudf string series at once.  A boolean "can_replace_mask"
    series is threaded through the steps: True means the word may still
    be modified, and it is cleared once a rule with a matching suffix
    has been applied (or deliberately skipped), mirroring NLTK's early
    returns.

    Parameters
    ----------
    mode: Modes of stemming (Only supports (NLTK_EXTENSIONS) currently)
        default("NLTK_EXTENSIONS")

    Examples
    --------
    .. code-block:: python

        >>> import cudf
        >>> from cuml.preprocessing.text.stem import PorterStemmer
        >>> stemmer = PorterStemmer()
        >>> word_str_ser = cudf.Series(['revival','singing','adjustable'])
        >>> print(stemmer.stem(word_str_ser))
        0     reviv
        1      sing
        2    adjust
        dtype: object
    """

    def __init__(self, mode="NLTK_EXTENSIONS"):
        # Only the NLTK-extended variant of the algorithm is implemented.
        if mode != "NLTK_EXTENSIONS":
            raise ValueError(
                "Only PorterStemmer.NLTK_EXTENSIONS is supported currently"
            )
        self.mode = mode

    def stem(self, word_str_ser):
        """
        Stem Words using Porter stemmer

        Parameters
        ----------
        word_str_ser : cudf.Series
            A string series of words to stem

        Returns
        -------
        stemmed_ser : cudf.Series
            Stemmed words strings series
        """
        # this is only for NLTK_EXTENSIONS
        # remove the length condition for original algorithm
        # do not stem if len(word) <= 2:
        can_replace_mask = len_gt_n(word_str_ser, 2)
        word_str_ser = word_str_ser.str.lower()

        # Irregular forms (e.g. 'dying'->'die') are replaced wholesale and
        # excluded from all later rules via the mask.
        word_str_ser, can_replace_mask = map_irregular_forms(
            word_str_ser, can_replace_mask
        )

        # apply step 1
        word_str_ser = self._step1a(word_str_ser, can_replace_mask)
        word_str_ser = self._step1b(word_str_ser, can_replace_mask)
        word_str_ser = self._step1c(word_str_ser, can_replace_mask)
        # apply step 2
        word_str_ser = self._step2(word_str_ser, can_replace_mask)
        # apply step 3
        word_str_ser = self._step3(word_str_ser, can_replace_mask)
        # apply step 4
        word_str_ser = self._step4(word_str_ser, can_replace_mask)
        # apply step 5
        word_str_ser = self._step5a(word_str_ser, can_replace_mask)
        word_str_ser = self._step5b(word_str_ser, can_replace_mask)
        return word_str_ser

    def _step1a(self, word_str_ser, can_replace_mask=None):
        """Implements Step 1a from "An algorithm for suffix stripping"
        From the paper:
            SSES -> SS      caresses -> caress
            IES  -> I       ponies   -> poni
                            ties     -> ti
            (### this is for original impl)
            SS   -> SS      caress   -> caress
            S    ->         cats     -> cat
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        # this NLTK-only rule extends the original algorithm, so
        # that 'flies'->'fli' but 'dies'->'die' etc
        # ties -> tie
        if self.mode == "NLTK_EXTENSIONS":
            # equivalent to
            # word.endswith('ies') and len(word) == 4:
            suffix_mask = ends_with_suffix(word_str_ser, "ies")
            len_mask = len_eq_n(word_str_ser, 4)
            condition_mask = suffix_mask & len_mask
            valid_mask = can_replace_mask & condition_mask
            word_str_ser = replace_suffix(
                word_str_ser, "ies", "ie", valid_mask
            )
            # update can replace mask
            can_replace_mask &= ~condition_mask

        return apply_rule_list(
            word_str_ser,
            [
                ("sses", "ss", None),  # SSES -> SS
                ("ies", "i", None),  # IES -> I
                ("ss", "ss", None),  # SS -> SS
                ("s", "", None),  # S ->
            ],
            can_replace_mask,
        )[0]

    def _step1b(self, word_str_ser, can_replace_mask=None):
        """Implements Step 1b from "An algorithm for suffix stripping"
        From the paper:
            (m>0) EED -> EE     feed      -> feed
                                agreed    -> agree
            (*v*) ED  ->        plastered -> plaster
                                bled      -> bled
            (*v*) ING ->        motoring  -> motor
                                sing      -> sing
        If the second or third of the rules in Step 1b is successful,
        the following is done:
            AT -> ATE   conflat(ed) -> conflate
            BL -> BLE   troubl(ed)  -> trouble
            IZ -> IZE   siz(ed)     -> size
            (*d and not (*L or *S or *Z))
               -> single letter
                        hopp(ing)   -> hop
                        tann(ed)    -> tan
                        fall(ing)   -> fall
                        hiss(ing)   -> hiss
                        fizz(ed)    -> fizz
            (m=1 and *o) -> E
                        fail(ing)   -> fail
                        fil(ing)    -> file
        The rule to map to a single letter causes the removal of one of
        the double letter pair. The -E is put back on -AT, -BL and -IZ,
        so that the suffixes -ATE, -BLE and -IZE can be recognised
        later. This E may be removed in step 4.
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        # this NLTK-only block extends the original algorithm, so that
        # 'spied'->'spi' but 'died'->'die' etc
        if self.mode == "NLTK_EXTENSIONS":
            # word.endswith('ied'):
            suffix_mask = ends_with_suffix(word_str_ser, "ied")
            len_mask = len_eq_n(word_str_ser, 4)
            condition_mask = suffix_mask & len_mask
            valid_mask = can_replace_mask & condition_mask
            word_str_ser = replace_suffix(
                word_str_ser, "ied", "ie", valid_mask
            )
            # update can replace mask
            can_replace_mask &= ~condition_mask

            # remaining 'ied' words (length != 4) get 'ied' -> 'i'
            condition_mask = suffix_mask
            valid_mask = can_replace_mask & condition_mask
            word_str_ser = replace_suffix(word_str_ser, "ied", "i", valid_mask)
            # update can replace mask
            can_replace_mask &= ~condition_mask

        # (m>0) EED -> EE
        # if suffix ==eed we stop processing
        # to be consistent with nltk
        suffix_mask = ends_with_suffix(word_str_ser, "eed")
        valid_mask = suffix_mask & can_replace_mask
        stem = replace_suffix(word_str_ser, "eed", "", valid_mask)
        measure_mask = measure_gt_n(stem, 0)
        valid_mask = measure_mask & suffix_mask & can_replace_mask
        # adding ee series to stem
        word_str_ser = replace_suffix(word_str_ser, "eed", "ee", valid_mask)
        # to be consistent with nltk we dont replace
        # if word.endswith('eed') we stop processing
        can_replace_mask &= ~suffix_mask

        # rule 2
        # (*v*) ED ->   plastered -> plaster
        #               bled      -> bled
        ed_suffix_mask = ends_with_suffix(word_str_ser, "ed")
        intermediate_stem = replace_suffix(
            word_str_ser, "ed", "", ed_suffix_mask & can_replace_mask
        )
        vowel_mask = contains_vowel(intermediate_stem)
        rule_2_mask = vowel_mask & ed_suffix_mask & can_replace_mask

        # rule 3
        # (*v*) ING ->  motoring -> motor
        #               sing     -> sing
        ing_suffix_mask = ends_with_suffix(word_str_ser, "ing")
        intermediate_stem = replace_suffix(
            word_str_ser, "ing", "", ing_suffix_mask & can_replace_mask
        )
        vowel_mask = contains_vowel(intermediate_stem)
        rule_3_mask = vowel_mask & ing_suffix_mask & can_replace_mask

        rule_2_or_rule_3_mask = rule_2_mask | rule_3_mask

        # replace masks only if rule_2_or_rule_3_mask
        intermediate_stem_1 = replace_suffix(
            word_str_ser, "ed", "", rule_2_mask
        )
        intermediate_stem_2 = replace_suffix(
            intermediate_stem_1, "ing", "", rule_3_mask
        )

        # the cleanup rules below only apply to words that matched rule 2/3
        can_replace_mask = can_replace_mask & rule_2_or_rule_3_mask
        return apply_rule_list(
            intermediate_stem_2,
            [
                ("at", "ate", None),  # AT -> ATE
                ("bl", "ble", None),  # BL -> BLE
                ("iz", "ize", None),  # IZ -> IZE
                # (*d and not (*L or *S or *Z))
                # -> single letter
                (
                    "*d",
                    -1,  # intermediate_stem[-1],
                    lambda stem: last_char_not_in(
                        stem, characters=["l", "s", "z"]
                    ),
                ),
                # (m=1 and *o) -> E
                (
                    "",
                    "e",
                    lambda stem: measure_eq_n(stem, n=1) & ends_cvc(stem),
                ),
            ],
            can_replace_mask,
        )[0]

    def _step1c(self, word_str_ser, can_replace_mask=None):
        """Implements Step 1c from "An algorithm for suffix stripping"
        From the paper:
        Step 1c
            (*v*) Y -> I    happy -> happi
                            sky   -> sky
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )

        def nltk_condition(stem):
            """
            This has been modified from the original Porter algorithm so
            that y->i is only done when y is preceded by a consonant,
            but not if the stem is only a single consonant, i.e.
            (*c and not c) Y -> I
            So 'happy' -> 'happi', but
            'enjoy' -> 'enjoy' etc
            This is a much better rule. Formerly 'enjoy'->'enjoi' and
            'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
            with this modification that no longer really matters.
            Also, the removal of the contains_vowel(z) condition means
            that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
            conflate with 'spied', 'tried', 'flies' ...
            """
            # equivalent to
            # len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)
            len_gt_1_mask = len_gt_n(stem, 1)
            last_char_is_consonant_mask = is_consonant(stem, -1)
            return len_gt_1_mask & last_char_is_consonant_mask

        def original_condition(stem):
            # original paper: (*v*) Y -> I
            return contains_vowel(stem)

        return apply_rule_list(
            word_str_ser,
            [
                (
                    "y",
                    "i",
                    nltk_condition
                    if self.mode == "NLTK_EXTENSIONS"
                    else original_condition,
                )
            ],
            can_replace_mask,
        )[0]

    def _step2(self, word_str_ser, can_replace_mask=None):
        """Implements Step 2 from "An algorithm for suffix stripping"
        From the paper:
        Step 2
            (m>0) ATIONAL -> ATE    relational     -> relate
            (m>0) TIONAL  -> TION   conditional    -> condition
                                    rational       -> rational
            (m>0) ENCI    -> ENCE   valenci        -> valence
            (m>0) ANCI    -> ANCE   hesitanci      -> hesitance
            (m>0) IZER    -> IZE    digitizer      -> digitize
            (m>0) ABLI    -> ABLE   conformabli    -> conformable
            (m>0) ALLI    -> AL     radicalli      -> radical
            (m>0) ENTLI   -> ENT    differentli    -> different
            (m>0) ELI     -> E      vileli         -> vile
            (m>0) OUSLI   -> OUS    analogousli    -> analogous
            (m>0) IZATION -> IZE    vietnamization -> vietnamize
            (m>0) ATION   -> ATE    predication    -> predicate
            (m>0) ATOR    -> ATE    operator       -> operate
            (m>0) ALISM   -> AL     feudalism      -> feudal
            (m>0) IVENESS -> IVE    decisiveness   -> decisive
            (m>0) FULNESS -> FUL    hopefulness    -> hopeful
            (m>0) OUSNESS -> OUS    callousness    -> callous
            (m>0) ALITI   -> AL     formaliti      -> formal
            (m>0) IVITI   -> IVE    sensitiviti    -> sensitive
            (m>0) BILITI  -> BLE    sensibiliti    -> sensible
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        if self.mode == "NLTK_EXTENSIONS":
            # Instead of applying the ALLI -> AL rule after '(a)bli' per
            # the published algorithm, instead we apply it first, and,
            # if it succeeds, run the result through step2 again.
            alli_suffix_flag = ends_with_suffix(word_str_ser, "alli")
            stem_ser = replace_suffix(
                word_str_ser, "alli", "", alli_suffix_flag & can_replace_mask
            )
            positive_measure_flag = has_positive_measure(stem_ser)
            word_str_ser = replace_suffix(
                word_str_ser,
                "alli",
                "al",
                alli_suffix_flag & positive_measure_flag & can_replace_mask,
            )
            # not updating flag because nltk does not return

        bli_rule = ("bli", "ble", has_positive_measure)
        abli_rule = ("abli", "able", has_positive_measure)

        rules = [
            ("ational", "ate", has_positive_measure),
            ("tional", "tion", has_positive_measure),
            ("enci", "ence", has_positive_measure),
            ("anci", "ance", has_positive_measure),
            ("izer", "ize", has_positive_measure),
            abli_rule if self.mode == "ORIGINAL_ALGORITHM" else bli_rule,
            ("alli", "al", has_positive_measure),
            ("entli", "ent", has_positive_measure),
            ("eli", "e", has_positive_measure),
            ("ousli", "ous", has_positive_measure),
            ("ization", "ize", has_positive_measure),
            ("ation", "ate", has_positive_measure),
            ("ator", "ate", has_positive_measure),
            ("alism", "al", has_positive_measure),
            ("iveness", "ive", has_positive_measure),
            ("fulness", "ful", has_positive_measure),
            ("ousness", "ous", has_positive_measure),
            ("aliti", "al", has_positive_measure),
            ("iviti", "ive", has_positive_measure),
            ("biliti", "ble", has_positive_measure),
        ]

        if self.mode == "NLTK_EXTENSIONS":
            rules.append(("fulli", "ful", has_positive_measure))

            word_str_ser, can_replace_mask = apply_rule_list(
                word_str_ser, rules, can_replace_mask
            )
            # The 'l' of the 'logi' -> 'log' rule is put with the stem,
            # so that short stems like 'geo' 'theo' etc work like
            # 'archaeo' 'philo' etc.
            logi_suffix_flag = ends_with_suffix(word_str_ser, "logi")
            stem = word_str_ser.str.slice(stop=-3)
            measure_flag = has_positive_measure(stem)
            valid_flag = measure_flag & logi_suffix_flag & can_replace_mask
            # NOTE: in NLTK_EXTENSIONS mode (the only supported mode) we
            # return here, so the MARTIN_EXTENSIONS branch below is dead
            # code kept for parity with NLTK's implementation.
            return replace_suffix(word_str_ser, "logi", "log", valid_flag)

        # as below works on word rather than stem i don't
        # send it to apply rules but do it here
        # rules.append(
        #     ("logi", "log", lambda stem:
        #       self._has_positive_measure(word[:-3])
        #     ))

        if self.mode == "MARTIN_EXTENSIONS":
            rules.append(("logi", "log", has_positive_measure))

        return apply_rule_list(word_str_ser, rules, can_replace_mask)[0]

    def _step3(self, word_str_ser, can_replace_mask=None):
        """Implements Step 3 from "An algorithm for suffix stripping"
        From the paper:
        Step 3
            (m>0) ICATE -> IC   triplicate  -> triplic
            (m>0) ATIVE ->      formative   -> form
            (m>0) ALIZE -> AL   formalize   -> formal
            (m>0) ICITI -> IC   electriciti -> electric
            (m>0) ICAL  -> IC   electrical  -> electric
            (m>0) FUL   ->      hopeful     -> hope
            (m>0) NESS  ->      goodness    -> good
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        return apply_rule_list(
            word_str_ser,
            [
                ("icate", "ic", has_positive_measure),
                ("ative", "", has_positive_measure),
                ("alize", "al", has_positive_measure),
                ("iciti", "ic", has_positive_measure),
                ("ical", "ic", has_positive_measure),
                ("ful", "", has_positive_measure),
                ("ness", "", has_positive_measure),
            ],
            can_replace_mask,
        )[0]

    def _step4(self, word_str_ser, can_replace_mask=None):
        """Implements Step 4 from "An algorithm for suffix stripping"
        Step 4
            (m>1) AL    ->  revival     -> reviv
            (m>1) ANCE  ->  allowance   -> allow
            (m>1) ENCE  ->  inference   -> infer
            (m>1) ER    ->  airliner    -> airlin
            (m>1) IC    ->  gyroscopic  -> gyroscop
            (m>1) ABLE  ->  adjustable  -> adjust
            (m>1) IBLE  ->  defensible  -> defens
            (m>1) ANT   ->  irritant    -> irrit
            (m>1) EMENT ->  replacement -> replac
            (m>1) MENT  ->  adjustment  -> adjust
            (m>1) ENT   ->  dependent   -> depend
            (m>1 and (*S or *T)) ION -> adoption -> adopt
            (m>1) OU    ->  homologou   -> homolog
            (m>1) ISM   ->  communism   -> commun
            (m>1) ATE   ->  activate    -> activ
            (m>1) ITI   ->  angulariti  -> angular
            (m>1) OUS   ->  homologous  -> homolog
            (m>1) IVE   ->  effective   -> effect
            (m>1) IZE   ->  bowdlerize  -> bowdler
        The suffixes are now removed. All that remains is a little
        tidying up.
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )

        def measure_gt_1(ser):
            # shared (m>1) condition used by every rule in this step
            return measure_gt_n(ser, 1)

        return apply_rule_list(
            word_str_ser,
            [
                ("al", "", measure_gt_1),
                ("ance", "", measure_gt_1),
                ("ence", "", measure_gt_1),
                ("er", "", measure_gt_1),
                ("ic", "", measure_gt_1),
                ("able", "", measure_gt_1),
                ("ible", "", measure_gt_1),
                ("ant", "", measure_gt_1),
                ("ement", "", measure_gt_1),
                ("ment", "", measure_gt_1),
                ("ent", "", measure_gt_1),
                # (m>1 and (*S or *T)) ION ->
                (
                    "ion",
                    "",
                    lambda stem: measure_gt_n(stem, 1)
                    & last_char_in(stem, characters=["s", "t"]),
                ),
                ("ou", "", measure_gt_1),
                ("ism", "", measure_gt_1),
                ("ate", "", measure_gt_1),
                ("iti", "", measure_gt_1),
                ("ous", "", measure_gt_1),
                ("ive", "", measure_gt_1),
                ("ize", "", measure_gt_1),
            ],
            can_replace_mask,
        )[0]

    def _step5a(self, word_str_ser, can_replace_mask=None):
        """Implements Step 5a from "An algorithm for suffix stripping"
        From the paper:
        Step 5a
            (m>1) E ->              probate -> probat
                                    rate    -> rate
            (m=1 and not *o) E ->   cease   -> ceas
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        # Note that Martin's test vocabulary and reference
        # implementations are inconsistent in how they handle the case
        # where two rules both refer to a suffix that matches the word
        # to be stemmed, but only the condition of the second one is
        # true.
        # Earlier in step2b we had the rules:
        #     (m>0) EED -> EE
        #     (*v*) ED  ->
        # but the examples in the paper included "feed"->"feed", even
        # though (*v*) is true for "fe" and therefore the second rule
        # alone would map "feed"->"fe".
        # However, in THIS case, we need to handle the consecutive rules
        # differently and try both conditions (obviously; the second
        # rule here would be redundant otherwise). Martin's paper makes
        # no explicit mention of the inconsistency; you have to infer it
        # from the examples.
        # For this reason, we can't use _apply_rule_list here.
        ##
        # logic is equivalent to below
        # if word.endswith('e'):
        #     stem = self._replace_suffix(word, 'e', '')
        #     if self._measure(stem) > 1:
        #         return stem  rule_1
        #     if self._measure(stem) == 1 and not self._ends_cvc(stem):
        #         return stem  rule_2
        #
        e_suffix_flag = ends_with_suffix(word_str_ser, "e")
        stem = replace_suffix(
            word_str_ser, "e", "", e_suffix_flag & can_replace_mask
        )
        measure_gt_1_flag = measure_gt_n(stem, 1)
        # if self._measure(stem) > 1:
        rule_1_flag = measure_gt_1_flag
        # if measure==1 and not self._ends_cvc(stem):
        measure_eq_1_flag = measure_eq_n(stem, 1)
        does_not_ends_with_cvc_flag = ~ends_cvc(stem)
        rule_2_flag = measure_eq_1_flag & does_not_ends_with_cvc_flag

        overall_rule_flag = (
            (rule_1_flag | rule_2_flag) & e_suffix_flag & can_replace_mask
        )
        return replace_suffix(word_str_ser, "e", "", overall_rule_flag)

    def _step5b(self, word_str_ser, can_replace_mask=None):
        """Implements Step 5b from "An algorithm for suffix stripping"
        From the paper:
        Step 5b
            (m > 1 and *d and *L) -> single letter
                controll -> control
                roll     -> roll
        """
        can_replace_mask = build_can_replace_mask(
            len_mask=len(word_str_ser), mask=can_replace_mask
        )
        # word, [('ll', 'l', lambda stem: self._measure(word[:-1]) > 1)]
        # because here we are applying rule on word instead of stem
        # so, unlike nltk we don't use apply rules
        ll_suffix_flag = ends_with_suffix(word_str_ser, "ll")
        # NOTE(review): slice() takes the whole word rather than word[:-1]
        # as in NLTK; for words ending in 'll' the Porter measure is the
        # same either way (the trailing consonant run stays non-empty),
        # but the full-copy slice looks unintentional — confirm.
        stem = word_str_ser.str.slice()
        measure_gt_1_flag = measure_gt_n(stem, 1)
        valid_flag = measure_gt_1_flag & ll_suffix_flag & can_replace_mask
        return replace_suffix(word_str_ser, "ll", "l", valid_flag)
def map_irregular_forms(word_str_ser, can_replace_mask):
    """Rewrite words that the stemmer treats as irregular, then freeze them.

    For every word that exactly equals one of the known irregular forms,
    the word is replaced by its fixed stem and its entry in
    ``can_replace_mask`` is cleared, so none of the later Porter rules
    touch it (mirroring NLTK's early return for its ``pool``).

    Parameters
    ----------
    word_str_ser : cudf.Series of str
        Lower-cased words.
    can_replace_mask : cudf.Series of bool
        True where a word may still be modified.

    Returns
    -------
    (word_str_ser, can_replace_mask)
        Updated word series and mask.
    """
    # replaces all strings and stop rules
    # need to process it
    irregular_forms = {
        "sky": ["sky", "skies"],
        "die": ["dying"],
        "lie": ["lying"],
        "tie": ["tying"],
        "news": ["news"],
        "inning": ["innings", "inning"],
        "outing": ["outings", "outing"],
        "canning": ["cannings", "canning"],
        "howe": ["howe"],
        "proceed": ["proceed"],
        "exceed": ["exceed"],
        "succeed": ["succeed"],
    }

    for replacement, form_ls in irregular_forms.items():
        for form in form_ls:
            equal_flag = word_str_ser == form
            # strip the whole matched word (stem becomes empty there) ...
            stem_ser = get_stem_series(
                word_str_ser, len(form), can_replace_mask & equal_flag
            )
            # ... and concatenate the canonical replacement in its place
            replacement_ser = get_str_replacement_series(
                replacement, can_replace_mask & equal_flag
            )
            word_str_ser = stem_ser.str.cat(replacement_ser)
            # matched words are now final: exclude them from later rules
            can_replace_mask &= ~equal_flag

    return word_str_ser, can_replace_mask
def get_condition_flag(word_str_ser, condition):
    """Evaluate an optional rule condition over a word series.

    ``condition`` is either ``None`` (the rule is unconditional) or a
    callable returning a boolean series; the result marks the rows for
    which the rule's condition holds.
    """
    if condition is not None:
        return condition(word_str_ser)
    # unconditional rule: every row qualifies
    return cudf.Series(cp.ones(len(word_str_ser), bool))
def apply_rule(word_str_ser, rule, w_in_c_flag):
    """Applies the first applicable suffix-removal rule to the word
    Takes a word and a list of suffix-removal rules represented as
    3-tuples, with the first element being the suffix to remove,
    the second element being the string to replace it with, and the
    final element being the condition for the rule to be applicable,
    or None if the rule is unconditional.

    Parameters
    ----------
    word_str_ser : cudf.Series of str
    rule : tuple (suffix, replacement, condition)
        ``suffix`` may be the special marker "*d" (doubled consonant).
        ``replacement`` is a string, or an index (int) for "*d" rules.
    w_in_c_flag : cudf.Series of bool
        True where a word may still be modified.

    Returns
    -------
    (word_str_ser, w_in_c_flag)
        Updated series and mask; rows whose suffix matched are cleared
        from the mask (mirroring NLTK's early return per word).
    """
    suffix, replacement, condition = rule
    if suffix == "*d":
        double_consonant_mask = ends_with_double_constant(word_str_ser)
        # all flags needed here
        # with **d in nltk we pass word_series rather than stem_series
        # see below:
        # lambda stem: intermediate_stem[-1] not in ('l', 's', 'z'),
        # condition is on intermediate_stem
        intermediate_stem = word_str_ser.str.slice(stop=-1)
        condition_mask = get_condition_flag(intermediate_stem, condition)
        # mask where replacement will happen
        valid_mask = double_consonant_mask & condition_mask & w_in_c_flag
        # new series with updated valid_mask
        word_str_ser = replace_suffix(
            word_str_ser, suffix, replacement, valid_mask
        )
        w_in_c_flag &= ~double_consonant_mask
    else:
        suffix_mask = ends_with_suffix(word_str_ser, suffix)
        valid_mask = suffix_mask & w_in_c_flag
        # evaluate the condition on the stem (word minus suffix)
        stem_ser = replace_suffix(word_str_ser, suffix, "", valid_mask)
        condition_mask = get_condition_flag(stem_ser, condition)
        # mask where replacement will happen
        valid_mask = condition_mask & suffix_mask & w_in_c_flag
        word_str_ser = replace_suffix(
            word_str_ser, suffix, replacement, valid_mask
        )
        # we wont apply further rules if it has a matching suffix
        w_in_c_flag &= ~suffix_mask

    return word_str_ser, w_in_c_flag
def apply_rule_list(word_str_ser, rules, condition_flag):
    """Apply a sequence of suffix-removal rules to a word series.

    Each rule is a 3-tuple ``(suffix, replacement, condition)`` as
    accepted by ``apply_rule``; rules are applied in order, with the
    mask returned by each rule feeding the next so that a word is only
    rewritten by the first rule whose suffix matches.

    Returns the updated word series and the final mask.
    """
    current_ser = word_str_ser
    current_flag = condition_flag
    for suffix_rule in rules:
        current_ser, current_flag = apply_rule(
            current_ser, suffix_rule, current_flag
        )
    return current_ser, current_flag
def build_can_replace_mask(len_mask, mask):
    """Return ``mask`` unchanged, or an all-True boolean series.

    When ``mask`` is None a fresh cudf boolean series of length
    ``len_mask`` is created with every entry True (all words are
    eligible for replacement).
    """
    if mask is not None:
        return mask
    return cudf.Series(cp.ones(len_mask, dtype=bool))
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer import PorterStemmer
import cuml.preprocessing.text.stem.porter_stemmer_utils
__all__ = ["PorterStemmer"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/porter_stemmer_rules.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .consonant_vowel_utils import is_vowel, is_consonant
from .len_flags_utils import len_gt_n, len_eq_n
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
def ends_with_double_constant(string_ser):
    """Boolean mask for the paper's *d condition: the word ends in a
    doubled consonant (e.g. 'hopp', 'tann').

    (The public name keeps the historical 'constant' spelling because
    callers depend on it.)
    """
    ends_in_consonant = is_consonant(string_ser, -1)
    same_last_two = string_ser.str.get(-1) == string_ser.str.get(-2)
    long_enough = len_gt_n(string_ser, 1)
    return long_enough & same_last_two & ends_in_consonant
def last_char_in(string_ser, characters):
    """Boolean mask: the final character of each string is one of
    ``characters`` (OR across all candidates)."""
    last_char_ser = cudf.Series(string_ser.str.get(-1))
    match_flag = None
    for candidate in characters:
        hit = last_char_ser == candidate
        match_flag = hit if match_flag is None else (match_flag | hit)
    return match_flag
def last_char_not_in(string_ser, characters):
    """Boolean mask: the final character of each string is none of
    ``characters`` (AND across all candidates)."""
    last_char_ser = cudf.Series(string_ser.str.get(-1))
    miss_flag = None
    for candidate in characters:
        differs = last_char_ser != candidate
        miss_flag = differs if miss_flag is None else (miss_flag & differs)
    return miss_flag
def ends_cvc(string_ser, mode="NLTK_EXTENSIONS"):
    """Implements condition *o from the paper

    From the paper:
        *o - the stem ends cvc, where the second c is not W, X or Y
             (e.g. -WIL, -HOP).

    Parameters
    ----------
    string_ser : cudf.Series of str
    mode : str, default "NLTK_EXTENSIONS"
        Only "NLTK_EXTENSIONS" is implemented.

    Returns
    -------
    cudf.Series of bool
        Mask of stems satisfying the (NLTK-extended) *o condition.

    Raises
    ------
    NotImplementedError
        For any mode other than "NLTK_EXTENSIONS".
    """
    if mode != "NLTK_EXTENSIONS":
        # Bug fix: this used to be ``assert NotImplementedError`` which
        # always passes (the exception class is truthy) and silently
        # returned None; unsupported modes must raise.
        raise NotImplementedError(
            "ends_cvc only supports mode='NLTK_EXTENSIONS'"
        )

    # rule_1
    # len(word) >= 3
    # and self._is_consonant(word, len(word) - 3)
    # and not self._is_consonant(word, len(word) - 2)
    # and self._is_consonant(word, len(word) - 1)
    # and word[-1] not in ("w", "x", "y")
    len_flag = len_gt_n(string_ser, 2)
    first_consonant = is_consonant(string_ser, -3)
    middle_vowel = is_vowel(string_ser, -2)
    last_consonant = is_consonant(string_ser, -1)

    last_char_strs = string_ser.str.get(-1)
    # converting to series to all strings
    last_char_ser = cudf.Series(last_char_strs)
    last_char_flag = None
    for char in ("w", "x", "y"):
        differs = last_char_ser != char
        last_char_flag = (
            differs if last_char_flag is None else last_char_flag & differs
        )

    rule_1 = (
        len_flag
        & first_consonant
        & middle_vowel
        & last_consonant
        & last_char_flag
    )

    # rule_2 (NLTK extension):
    # len(word) == 2
    # and not self._is_consonant(word, 0)
    # and self._is_consonant(word, 1)
    len_flag = len_eq_n(string_ser, 2)
    first_char = ~is_consonant(string_ser, 0)
    second_char = is_consonant(string_ser, 1)
    rule_2 = len_flag & first_char & second_char

    return rule_1 | rule_2
def ends_with_suffix(str_ser, suffix):
    """Boolean mask: each string in ``str_ser`` ends with ``suffix``."""
    suffix_flags = str_ser.str.endswith(suffix)
    return suffix_flags
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/len_flags_utils.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def len_gt_n(word_str_ser, n):
    """Boolean mask: each string is strictly longer than ``n`` characters."""
    word_lengths = word_str_ser.str.len()
    return word_lengths > n
def len_eq_n(word_str_ser, n):
    """Boolean mask: each string is exactly ``n`` characters long."""
    word_lengths = word_str_ser.str.len()
    return word_lengths == n
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/measure_utils.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def has_positive_measure(word_ser):
    """Boolean mask: each word's Porter measure m is greater than zero.

    Relies on cudf's ``str.porter_stemmer_measure`` accessor.
    """
    return word_ser.str.porter_stemmer_measure() > 0
def measure_gt_n(word_ser, n):
    """Boolean mask: each word's Porter measure m is strictly greater
    than ``n``."""
    return word_ser.str.porter_stemmer_measure() > n
def measure_eq_n(word_ser, n):
    """Boolean mask: each word's Porter measure m equals ``n``."""
    return word_ser.str.porter_stemmer_measure() == n
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/suffix_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from("numba", "cuda")
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
def get_str_replacement_series(replacement, bool_mask):
    """Build a string series that equals ``replacement`` where
    ``bool_mask`` is True and the empty string everywhere else."""
    out_ser = cudf.Series(cudf.core.column.full(len(bool_mask), ""))
    out_ser.iloc[bool_mask] = replacement
    return out_ser
def get_index_replacement_series(word_str_ser, replacment_index, bool_mask):
    """Build a series holding the character at ``replacment_index`` of each
    word where ``bool_mask`` is True, and the empty string elsewhere.

    Used by the "*d" rule, which re-appends one letter of the doubled
    consonant pair.
    """
    char_ser = cudf.Series(word_str_ser.str.get(replacment_index))
    # blank out the rows that are not being replaced
    char_ser.iloc[~bool_mask] = ""
    return char_ser
def replace_suffix(word_str_ser, suffix, replacement, can_replace_mask):
    """Replace ``suffix`` with ``replacement`` on the rows marked True.

    Parameters
    ----------
    word_str_ser : cudf.Series of str
    suffix : str
        Suffix whose length is stripped from marked rows.
    replacement : str or int
        A literal replacement string, or an index into the original word
        (used by the "*d" rule to keep one letter of a doubled pair).
    can_replace_mask : cudf.Series of bool
        True where the replacement applies.

    Returns
    -------
    cudf.Series of str

    Raises
    ------
    ValueError
        If ``replacement`` is neither a string nor an int.
    """
    # Validate up front.  The previous version had a mis-chained
    # ``if``/``if``/``else`` whose else-branch ran for string
    # replacements, and used ``assert ValueError(...)`` which never
    # raises (the instance is truthy) — unsupported types then crashed
    # later with a NameError instead of a clear error.
    if not isinstance(replacement, (str, int)):
        raise ValueError(
            "replacement: {} value should be a string or a int".format(
                replacement
            )
        )

    len_suffix = len(suffix)
    # the stem is the word with ``len_suffix`` characters dropped on
    # marked rows (unmarked rows are left intact)
    stem_ser = get_stem_series(word_str_ser, len_suffix, can_replace_mask)
    if replacement == "":
        return stem_ser

    if isinstance(replacement, str):
        replacement_ser = get_str_replacement_series(
            replacement, can_replace_mask
        )
    else:
        replacement_ser = get_index_replacement_series(
            word_str_ser, replacement, can_replace_mask
        )
    return stem_ser + replacement_ser
@cuda.jit()
def subtract_valid(input_array, valid_bool_array, sub_val):
    """CUDA kernel: subtract ``sub_val`` in place from each element of
    ``input_array`` whose matching ``valid_bool_array`` entry is True.

    One thread per element; launched by ``get_stem_series`` to shorten
    string end offsets by the suffix length on replaceable rows only.
    """
    pos = cuda.grid(1)
    if pos < input_array.size:  # guard threads past the end of the array
        if valid_bool_array[pos]:
            input_array[pos] = input_array[pos] - sub_val
@cudf.core.buffer.acquire_spill_lock()
def get_stem_series(word_str_ser, suffix_len, can_replace_mask):
    """
    word_str_ser: input string column
    suffix_len: length of suffix to replace
    can_replace_mask: bool array marking strings where to replace
    """
    # Launch configuration: one thread per string, grid sized to cover
    # the whole column.
    NTHRD = 1024
    NBLCK = int(np.ceil(float(len(word_str_ser)) / float(NTHRD)))

    start_series = cudf.Series(cp.zeros(len(word_str_ser), dtype=cp.int32))
    end_ser = word_str_ser.str.len()

    # Raw device views of the per-string lengths and the mask; the numba
    # kernel mutates the end offsets in place, shortening marked strings
    # by ``suffix_len``.  (The spill lock decorator keeps the buffers
    # resident while the raw views are alive.)
    end_ar = end_ser._column.data_array_view(mode="read")
    can_replace_mask_ar = can_replace_mask._column.data_array_view(mode="read")
    subtract_valid[NBLCK, NTHRD](end_ar, can_replace_mask_ar, suffix_len)

    # Slice each string from 0 to its (possibly shortened) end.
    # NOTE(review): null strings have a null length; fillna(0) makes the
    # slice empty for those rows — confirm that matches caller intent.
    return word_str_ser.str.slice_from(
        starts=start_series, stops=end_ser.fillna(0)
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/consonant_vowel_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def is_consonant(str_ser, i):
    """Returns True if word[i] is a consonant, False otherwise

    A consonant is defined in the paper as follows:
        A consonant in a word is a letter other than A, E, I, O or
        U, and other than Y preceded by a consonant. (The fact that
        the term `consonant' is defined to some extent in terms of
        itself does not make it ambiguous.) So in TOY the consonants
        are T and Y, and in SYZYGY they are S, Z and G. If a letter
        is not a consonant it is a vowel.

    Delegates to cudf's ``str.is_consonant`` accessor.
    """
    consonant_mask = str_ser.str.is_consonant(i)
    return consonant_mask
def is_vowel(str_ser, i):
    """Element-wise check for a vowel at character position ``i``.

    The complement of :func:`is_consonant`; see that function for the
    Porter-paper definition. Returns a boolean column.
    """
    accessor = str_ser.str
    return accessor.is_vowel(i)
def contains_vowel(stem_ser):
    """Element-wise check for whether each stem contains any vowel.

    ORs together the per-position vowel masks for every character
    position up to the length of the longest string in the column.
    Returns ``None`` when the longest string is empty (the loop body
    never runs), mirroring the accumulator's initial value.
    """
    longest = stem_ser.str.len().max()
    flag = None
    for pos in range(longest):
        mask = is_vowel(stem_ser, pos)
        flag = mask if flag is None else (flag | mask)
    return flag
| 0 |
rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem | rapidsai_public_repos/cuml/python/cuml/preprocessing/text/stem/porter_stemmer_utils/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/pca_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.decomposition import PCA
from cuml.decomposition.base_mg import BaseDecompositionMG, MGSolver
from cuml.decomposition.utils cimport *
from cuml.decomposition.utils_mg cimport *
cdef extern from "cuml/decomposition/pca_mg.hpp" namespace "ML::PCA::opg":
cdef void fit(handle_t& handle,
vector[floatData_t *] input_data,
PartDescriptor &input_desc,
float *components,
float *explained_var,
float *explained_var_ratio,
float *singular_vals,
float *mu,
float *noise_vars,
paramsPCAMG &prms,
bool verbose) except +
cdef void fit(handle_t& handle,
vector[doubleData_t *] input_data,
PartDescriptor &input_desc,
double *components,
double *explained_var,
double *explained_var_ratio,
double *singular_vals,
double *mu,
double *noise_vars,
paramsPCAMG &prms,
bool verbose) except +
class PCAMG(BaseDecompositionMG, PCA):
    """Multi-node multi-GPU (one process per GPU) PCA.

    Thin adapter that maps the distributed fit orchestration provided by
    ``BaseDecompositionMG`` onto the overloaded ``ML::PCA::opg::fit``
    C++ routines, while reusing the single-GPU ``PCA`` parameter and
    attribute handling.
    """

    def __init__(self, **kwargs):
        super(PCAMG, self).__init__(**kwargs)

    def _get_algorithm_c_name(self, algorithm):
        # Translate the user-facing solver name into the multi-GPU enum.
        # Note: unlike single-GPU PCA, 'auto' maps to the Jacobi solver here.
        algo_map = {
            'full': MGSolver.COV_EIG_DQ,
            'auto': MGSolver.COV_EIG_JACOBI,
            'jacobi': MGSolver.COV_EIG_JACOBI,
            # 'arpack': NOT_SUPPORTED,
            # 'randomized': NOT_SUPPORTED,
        }
        if algorithm not in algo_map:
            msg = "algorithm {!r} is not supported"
            raise TypeError(msg.format(algorithm))

        return algo_map[algorithm]

    def _build_params(self, n_rows, n_cols):
        # Heap-allocate the C++ parameter struct and hand its address back
        # as an integer; _call_fit casts it back before use.
        # NOTE(review): `new paramsPCAMG()` has no matching `del` visible in
        # this file — presumably freed by the caller; confirm ownership.
        cdef paramsPCAMG *params = new paramsPCAMG()
        params.n_components = self.n_components_
        params.n_rows = n_rows
        params.n_cols = n_cols
        params.whiten = self.whiten
        params.tol = self.tol
        # Convert the Python-level enum value into the C++ `mg_solver` enum.
        params.algorithm = <mg_solver> (<underlying_type_t_solver> (
            self.c_algorithm))
        self.n_features_ = n_cols

        return <size_t>params

    @cuml.internals.api_base_return_any_skipall
    def _call_fit(self, X, rank, part_desc, arg_params):
        # Raw device pointers into the preallocated output arrays; the
        # C++ fit writes its results through these in place.
        cdef uintptr_t comp_ptr = self.components_.ptr
        cdef uintptr_t explained_var_ptr = self.explained_variance_.ptr
        cdef uintptr_t explained_var_ratio_ptr = \
            self.explained_variance_ratio_.ptr
        cdef uintptr_t singular_vals_ptr = self.singular_values_.ptr
        cdef uintptr_t mean_ptr = self.mean_.ptr
        cdef uintptr_t noise_vars_ptr = self.noise_variance_.ptr

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef paramsPCAMG *params = <paramsPCAMG*><size_t>arg_params

        # Dispatch on dtype: the extern `fit` is overloaded for float
        # and double partitioned data.
        if self.dtype == np.float32:
            fit(handle_[0],
                deref(<vector[floatData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>part_desc),
                <float*> comp_ptr,
                <float*> explained_var_ptr,
                <float*> explained_var_ratio_ptr,
                <float*> singular_vals_ptr,
                <float*> mean_ptr,
                <float*> noise_vars_ptr,
                deref(params),
                False)
        else:
            fit(handle_[0],
                deref(<vector[doubleData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>part_desc),
                <double*> comp_ptr,
                <double*> explained_var_ptr,
                <double*> explained_var_ratio_ptr,
                <double*> singular_vals_ptr,
                <double*> mean_ptr,
                <double*> noise_vars_ptr,
                deref(params),
                False)

        # Block until the scheduled GPU work has completed.
        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/pca.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
cupyx = gpu_only_import('cupyx')
scipy = cpu_only_import('scipy')
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import UniversalBase
from cuml.common.doc_utils import generate_docstring
import cuml.internals.logger as logger
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.input_utils import input_to_cupy_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.common import using_output_type
from cuml.prims.stats import cov
from cuml.internals.input_utils import sparse_scipy_to_cp
from cuml.common.exceptions import NotFittedError
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.internals.mixins import SparseInputTagMixin
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
from enum import IntEnum
from cython.operator cimport dereference as deref
from cuml.decomposition.utils cimport *
from pylibraft.common.handle cimport handle_t
cdef extern from "cuml/decomposition/pca.hpp" namespace "ML":
cdef void pcaFit(handle_t& handle,
float *input,
float *components,
float *explained_var,
float *explained_var_ratio,
float *singular_vals,
float *mu,
float *noise_vars,
const paramsPCA &prms) except +
cdef void pcaFit(handle_t& handle,
double *input,
double *components,
double *explained_var,
double *explained_var_ratio,
double *singular_vals,
double *mu,
double *noise_vars,
const paramsPCA &prms) except +
cdef void pcaInverseTransform(handle_t& handle,
float *trans_input,
float *components,
float *singular_vals,
float *mu,
float *input,
const paramsPCA &prms) except +
cdef void pcaInverseTransform(handle_t& handle,
double *trans_input,
double *components,
double *singular_vals,
double *mu,
double *input,
const paramsPCA &prms) except +
cdef void pcaTransform(handle_t& handle,
float *input,
float *components,
float *trans_input,
float *singular_vals,
float *mu,
const paramsPCA &prms) except +
cdef void pcaTransform(handle_t& handle,
double *input,
double *components,
double *trans_input,
double *singular_vals,
double *mu,
const paramsPCA &prms) except +
    class Solver(IntEnum):
        # Compile-time mirror of the C++ `ML::solver` enum so Python code
        # can pass solver choices across the Cython boundary.
        COV_EIG_DQ = <underlying_type_t_solver> solver.COV_EIG_DQ
        COV_EIG_JACOBI = <underlying_type_t_solver> solver.COV_EIG_JACOBI
class PCA(UniversalBase,
FMajorInputTagMixin,
SparseInputTagMixin):
"""
PCA (Principal Component Analysis) is a fundamental dimensionality
reduction technique used to combine features in X in linear combinations
such that each new component captures the most information or variance of
the data. N_components is usually small, say at 3, where it can be used for
data visualization, data compression and exploratory analysis.
cuML's PCA expects an array-like object or cuDF DataFrame, and provides 2
algorithms Full and Jacobi. Full (default) uses a full eigendecomposition
then selects the top K eigenvectors. The Jacobi algorithm is much faster
as it iteratively tries to correct the top K eigenvectors, but might be
less accurate.
Examples
--------
.. code-block:: python
>>> # Both import methods supported
>>> from cuml import PCA
>>> from cuml.decomposition import PCA
>>> import cudf
>>> import cupy as cp
>>> gdf_float = cudf.DataFrame()
>>> gdf_float['0'] = cp.asarray([1.0,2.0,5.0], dtype = cp.float32)
>>> gdf_float['1'] = cp.asarray([4.0,2.0,1.0], dtype = cp.float32)
>>> gdf_float['2'] = cp.asarray([4.0,2.0,1.0], dtype = cp.float32)
>>> pca_float = PCA(n_components = 2)
>>> pca_float.fit(gdf_float)
PCA()
>>> print(f'components: {pca_float.components_}') # doctest: +SKIP
components: 0 1 2
0 0.69225764 -0.5102837 -0.51028395
1 -0.72165036 -0.48949987 -0.4895003
>>> print(f'explained variance: {pca_float.explained_variance_}')
explained variance: 0 8.510...
1 0.489...
dtype: float32
>>> exp_var = pca_float.explained_variance_ratio_
>>> print(f'explained variance ratio: {exp_var}')
explained variance ratio: 0 0.9456...
1 0.054...
dtype: float32
>>> print(f'singular values: {pca_float.singular_values_}')
singular values: 0 4.125...
1 0.989...
dtype: float32
>>> print(f'mean: {pca_float.mean_}')
mean: 0 2.666...
1 2.333...
2 2.333...
dtype: float32
>>> print(f'noise variance: {pca_float.noise_variance_}')
noise variance: 0 0.0
dtype: float32
>>> trans_gdf_float = pca_float.transform(gdf_float)
>>> print(f'Inverse: {trans_gdf_float}') # doctest: +SKIP
Inverse: 0 1
0 -2.8547091 -0.42891636
1 -0.121316016 0.80743366
2 2.9760244 -0.37851727
>>> input_gdf_float = pca_float.inverse_transform(trans_gdf_float)
>>> print(f'Input: {input_gdf_float}') # doctest: +SKIP
Input: 0 1 2
0 1.0 4.0 4.0
1 2.0 2.0 2.0
2 5.0 1.0 1.0
Parameters
----------
copy : boolean (default = True)
If True, then copies data then removes mean from data. False might
cause data to be overwritten with its mean centered version.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
iterated_power : int (default = 15)
Used in Jacobi solver. The more iterations, the more accurate, but
slower.
n_components : int (default = None)
The number of top K singular vectors / values you want.
Must be <= number(columns). If n_components is not set, then all
components are kept:
``n_components = min(n_samples, n_features)``
random_state : int / None (default = None)
If you want results to be the same when you restart Python, select a
state.
svd_solver : 'full' or 'jacobi' or 'auto' (default = 'full')
        Full uses an eigendecomposition of the covariance matrix then discards
components.
Jacobi is much faster as it iteratively corrects, but is less accurate.
tol : float (default = 1e-7)
Used if algorithm = "jacobi". Smaller tolerance can increase accuracy,
        but will slow down the algorithm's convergence.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
whiten : boolean (default = False)
If True, de-correlates the components. This is done by dividing them by
the corresponding singular values then multiplying by sqrt(n_samples).
Whitening allows each component to have unit variance and removes
multi-collinearity. It might be beneficial for downstream
tasks like LinearRegression where correlated features cause problems.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
components_ : array
The top K components (VT.T[:,:n_components]) in U, S, VT = svd(X)
explained_variance_ : array
How much each component explains the variance in the data given by S**2
explained_variance_ratio_ : array
How much in % the variance is explained given by S**2/sum(S**2)
singular_values_ : array
The top K singular values. Remember all singular values >= 0
mean_ : array
The column wise mean of X. Used to mean - center the data first.
noise_variance_ : float
From Bishop 1999's Textbook. Used in later tasks like calculating the
estimated covariance of X.
Notes
-----
PCA considers linear combinations of features, specifically those that
maximize global variance structure. This means PCA is fantastic for global
structure analyses, but weak for local relationships. Consider UMAP or
T-SNE for a locally important embedding.
**Applications of PCA**
PCA is used extensively in practice for data visualization and data
compression. It has been used to visualize extremely large word
embeddings like Word2Vec and GloVe in 2 or 3 dimensions, large
datasets of everyday objects and images, and used to distinguish
between cancerous cells from healthy cells.
For additional docs, see `scikitlearn's PCA
<http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
"""
_cpu_estimator_import_path = 'sklearn.decomposition.PCA'
components_ = CumlArrayDescriptor(order='F')
explained_variance_ = CumlArrayDescriptor(order='F')
explained_variance_ratio_ = CumlArrayDescriptor(order='F')
singular_values_ = CumlArrayDescriptor(order='F')
mean_ = CumlArrayDescriptor(order='F')
noise_variance_ = CumlArrayDescriptor(order='F')
trans_input_ = CumlArrayDescriptor(order='F')
    @device_interop_preparation
    def __init__(self, *, copy=True, handle=None, iterated_power=15,
                 n_components=None, random_state=None, svd_solver='auto',
                 tol=1e-7, verbose=False, whiten=False,
                 output_type=None):
        # Hyperparameters; see the class docstring for their meaning.
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self.copy = copy
        self.iterated_power = iterated_power
        self.n_components = n_components
        self.random_state = random_state
        self.svd_solver = svd_solver
        self.tol = tol
        self.whiten = whiten
        # Resolving the solver eagerly makes an invalid `svd_solver`
        # fail fast with a TypeError at construction time.
        self.c_algorithm = self._get_algorithm_c_name(self.svd_solver)

        # internal array attributes: populated by fit(); None means
        # "not fitted yet" (checked by _check_is_fitted).
        self.components_ = None
        self.trans_input_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.singular_values_ = None
        self.mean_ = None
        self.noise_variance_ = None

        # This variable controls whether a sparse model was fit
        # This can be removed once there is more inter-operability
        # between cuml.array and cupy.ndarray
        self._sparse_model = None
def _get_algorithm_c_name(self, algorithm):
IF GPUBUILD == 1:
algo_map = {
'full': Solver.COV_EIG_DQ,
'auto': Solver.COV_EIG_DQ,
# 'arpack': NOT_SUPPORTED,
# 'randomized': NOT_SUPPORTED,
'jacobi': Solver.COV_EIG_JACOBI
}
if algorithm not in algo_map:
msg = "algorithm {!r} is not supported"
raise TypeError(msg.format(algorithm))
return algo_map[algorithm]
    def _build_params(self, n_rows, n_cols):
        # Populate the C++ paramsPCA struct and return its heap address
        # as an integer; fit() casts it back before calling pcaFit.
        IF GPUBUILD == 1:
            # NOTE(review): allocated with `new` and never visibly freed in
            # this file — confirm ownership/cleanup at the call site.
            cdef paramsPCA *params = new paramsPCA()
            params.n_components = self.n_components_
            params.n_rows = n_rows
            params.n_cols = n_cols
            params.whiten = self.whiten
            params.n_iterations = self.iterated_power
            params.tol = self.tol
            # Convert the Python-level enum value to the C++ `solver` enum.
            params.algorithm = <solver> (<underlying_type_t_solver> (
                self.c_algorithm))

            return <size_t>params
def _initialize_arrays(self, n_components, n_rows, n_cols):
self.components_ = CumlArray.zeros((n_components, n_cols),
dtype=self.dtype)
self.explained_variance_ = CumlArray.zeros(n_components,
dtype=self.dtype)
self.explained_variance_ratio_ = CumlArray.zeros(n_components,
dtype=self.dtype)
self.mean_ = CumlArray.zeros(n_cols, dtype=self.dtype)
self.singular_values_ = CumlArray.zeros(n_components,
dtype=self.dtype)
self.noise_variance_ = CumlArray.zeros(1, dtype=self.dtype)
    def _sparse_fit(self, X):
        """Fit on a (cupy) sparse matrix via covariance eigendecomposition.

        Computes the feature covariance with the ``cov`` prim, takes its
        eigendecomposition, and derives all fitted attributes from the
        eigenvalues/eigenvectors.
        """
        self._sparse_model = True

        self.n_samples_ = X.shape[0]
        self.n_features_in_ = X.shape[1] if X.ndim == 2 else 1
        self.dtype = X.dtype

        # NOTE: All intermediate calculations are done using cupy.ndarray and
        # then converted to CumlArray at the end to minimize conversions
        # between types
        covariance, self.mean_, _ = cov(X, X, return_mean=True)

        self.explained_variance_, self.components_ = \
            cp.linalg.eigh(covariance, UPLO='U')

        # NOTE: We reverse the eigen vector and eigen values here
        # because cupy provides them in ascending order. Make a copy otherwise
        # it is not C_CONTIGUOUS anymore and would error when converting to
        # CumlArray
        self.explained_variance_ = self.explained_variance_[::-1]

        self.components_ = cp.flip(self.components_, axis=1)

        # Keep only the leading n_components_ eigenvectors, as rows.
        self.components_ = self.components_.T[:self.n_components_, :]

        self.explained_variance_ratio_ = self.explained_variance_ / cp.sum(
            self.explained_variance_)

        # Noise variance is the mean of the discarded eigenvalues
        # (Bishop 1999); zero when no components are discarded.
        if self.n_components_ < min(self.n_samples_, self.n_features_in_):
            self.noise_variance_ = \
                self.explained_variance_[self.n_components_:].mean()
        else:
            self.noise_variance_ = cp.array([0.0])

        self.explained_variance_ = \
            self.explained_variance_[:self.n_components_]

        self.explained_variance_ratio_ = \
            self.explained_variance_ratio_[:self.n_components_]

        # Truncating negative explained variance values to 0
        self.singular_values_ = \
            cp.where(self.explained_variance_ < 0, 0,
                     self.explained_variance_)
        # Recover singular values from eigenvalues: s = sqrt(var * (n-1)).
        self.singular_values_ = \
            cp.sqrt(self.singular_values_ * (self.n_samples_ - 1))

        return self
@generate_docstring(X='dense_sparse')
@enable_device_interop
def fit(self, X, y=None) -> "PCA":
"""
Fit the model with X. y is currently ignored.
"""
if self.n_components is None:
logger.warn(
'Warning(`fit`): As of v0.16, PCA invoked without an'
' n_components argument defaults to using'
' min(n_samples, n_features) rather than 1'
)
n_rows = X.shape[0]
n_cols = X.shape[1]
self.n_components_ = min(n_rows, n_cols)
else:
self.n_components_ = self.n_components
if cupyx.scipy.sparse.issparse(X):
return self._sparse_fit(X)
elif scipy.sparse.issparse(X):
X = sparse_scipy_to_cp(X, dtype=None)
return self._sparse_fit(X)
X_m, self.n_samples_, self.n_features_in_, self.dtype = \
input_to_cuml_array(X, check_dtype=[np.float32, np.float64])
cdef uintptr_t _input_ptr = X_m.ptr
self.feature_names_in_ = X_m.index
IF GPUBUILD == 1:
cdef paramsPCA *params = <paramsPCA*><size_t> \
self._build_params(self.n_samples_, self.n_features_in_)
if params.n_components > self.n_features_in_:
raise ValueError('Number of components should not be greater than'
'the number of columns in the data')
# Calling _initialize_arrays, guarantees everything is CumlArray
self._initialize_arrays(params.n_components,
params.n_rows, params.n_cols)
cdef uintptr_t comp_ptr = self.components_.ptr
cdef uintptr_t explained_var_ptr = \
self.explained_variance_.ptr
cdef uintptr_t explained_var_ratio_ptr = \
self.explained_variance_ratio_.ptr
cdef uintptr_t singular_vals_ptr = \
self.singular_values_.ptr
cdef uintptr_t _mean_ptr = self.mean_.ptr
cdef uintptr_t noise_vars_ptr = \
self.noise_variance_.ptr
cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
if self.dtype == np.float32:
pcaFit(handle_[0],
<float*> _input_ptr,
<float*> comp_ptr,
<float*> explained_var_ptr,
<float*> explained_var_ratio_ptr,
<float*> singular_vals_ptr,
<float*> _mean_ptr,
<float*> noise_vars_ptr,
deref(params))
else:
pcaFit(handle_[0],
<double*> _input_ptr,
<double*> comp_ptr,
<double*> explained_var_ptr,
<double*> explained_var_ratio_ptr,
<double*> singular_vals_ptr,
<double*> _mean_ptr,
<double*> noise_vars_ptr,
deref(params))
# make sure the previously scheduled gpu tasks are complete before the
# following transfers start
self.handle.sync()
return self
@generate_docstring(X='dense_sparse',
return_values={'name': 'trans',
'type': 'dense_sparse',
'description': 'Transformed values',
'shape': '(n_samples, n_components)'})
@cuml.internals.api_base_return_array_skipall
@enable_device_interop
def fit_transform(self, X, y=None) -> CumlArray:
"""
Fit the model with X and apply the dimensionality reduction on X.
"""
return self.fit(X).transform(X)
    @cuml.internals.api_base_return_array_skipall
    def _sparse_inverse_transform(self, X, return_sparse=False,
                                  sparse_tol=1e-10) -> CumlArray:
        """Project transformed data back to feature space (sparse path).

        Temporarily un-whitens ``self.components_`` in place when
        ``whiten`` is enabled, and restores it afterwards.
        """
        # NOTE: All intermediate calculations are done using cupy.ndarray and
        # then converted to CumlArray at the end to minimize conversions
        # between types
        if self.whiten:
            cp.multiply(self.components_,
                        (1 / cp.sqrt(self.n_samples_ - 1)),
                        out=self.components_)
            cp.multiply(self.components_,
                        self.singular_values_.reshape((-1, 1)),
                        out=self.components_)

        X_inv = cp.dot(X, self.components_)
        cp.add(X_inv, self.mean_, out=X_inv)

        if self.whiten:
            # Undo the in-place scaling applied above.
            self.components_ /= self.singular_values_.reshape((-1, 1))
            self.components_ *= cp.sqrt(self.n_samples_ - 1)

        if return_sparse:
            # NOTE(review): `X_inv < sparse_tol` zeroes every entry below
            # the tolerance, including all negative values — presumably
            # `cp.abs(X_inv) < sparse_tol` was intended; confirm.
            X_inv = cp.where(X_inv < sparse_tol, 0, X_inv)
            X_inv = cupyx.scipy.sparse.csr_matrix(X_inv)

            return X_inv

        return X_inv
    @generate_docstring(X='dense_sparse',
                        return_values={'name': 'X_inv',
                                       'type': 'dense_sparse',
                                       'description': 'Transformed values',
                                       'shape': '(n_samples, n_features)'})
    @enable_device_interop
    def inverse_transform(self, X, convert_dtype=False,
                          return_sparse=False, sparse_tol=1e-10) -> CumlArray:
        """
        Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.
        """
        self._check_is_fitted('components_')
        dtype = self.components_.dtype

        # Sparse input — or a model fitted on sparse data — is handled
        # entirely on the cupy path.
        if cupyx.scipy.sparse.issparse(X):
            return self._sparse_inverse_transform(X,
                                                  return_sparse=return_sparse,
                                                  sparse_tol=sparse_tol)
        elif scipy.sparse.issparse(X):
            X = sparse_scipy_to_cp(X, dtype=None)
            return self._sparse_inverse_transform(X,
                                                  return_sparse=return_sparse,
                                                  sparse_tol=sparse_tol)
        elif self._sparse_model:
            X, _, _, _ = \
                input_to_cupy_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])
            return self._sparse_inverse_transform(X,
                                                  return_sparse=return_sparse,
                                                  sparse_tol=sparse_tol)

        X_m, _n_rows, _, dtype = \
            input_to_cuml_array(X, check_dtype=dtype,
                                convert_to_dtype=(dtype if convert_dtype
                                                  else None)
                                )

        cdef uintptr_t _trans_input_ptr = X_m.ptr

        IF GPUBUILD == 1:
            # todo: check n_cols and dtype
            cdef paramsPCA params
            params.n_components = self.n_components_
            params.n_rows = _n_rows
            params.n_cols = self.n_features_in_
            params.whiten = self.whiten

            # Output buffer for the reconstructed (de-projected) data.
            input_data = CumlArray.zeros((params.n_rows, params.n_cols),
                                         dtype=dtype.type)

            cdef uintptr_t input_ptr = input_data.ptr
            cdef uintptr_t components_ptr = self.components_.ptr
            cdef uintptr_t singular_vals_ptr = self.singular_values_.ptr
            cdef uintptr_t _mean_ptr = self.mean_.ptr

            cdef handle_t* h_ = <handle_t*><size_t>self.handle.getHandle()
            # pcaInverseTransform is overloaded on float/double data.
            if dtype.type == np.float32:
                pcaInverseTransform(h_[0],
                                    <float*> _trans_input_ptr,
                                    <float*> components_ptr,
                                    <float*> singular_vals_ptr,
                                    <float*> _mean_ptr,
                                    <float*> input_ptr,
                                    params)
            else:
                pcaInverseTransform(h_[0],
                                    <double*> _trans_input_ptr,
                                    <double*> components_ptr,
                                    <double*> singular_vals_ptr,
                                    <double*> _mean_ptr,
                                    <double*> input_ptr,
                                    params)

            # make sure the previously scheduled gpu tasks are complete before the
            # following transfers start
            self.handle.sync()

            return input_data
    @cuml.internals.api_base_return_array_skipall
    def _sparse_transform(self, X) -> CumlArray:
        """Project X onto the components (sparse-model path).

        When ``whiten`` is enabled the components are temporarily
        whitened in place and restored afterwards.
        """
        # NOTE: All intermediate calculations are done using cupy.ndarray and
        # then converted to CumlArray at the end to minimize conversions
        # between types
        with using_output_type("cupy"):

            if self.whiten:
                self.components_ *= cp.sqrt(self.n_samples_ - 1)
                self.components_ /= self.singular_values_.reshape((-1, 1))

            X = X - self.mean_
            X_transformed = X.dot(self.components_.T)

            if self.whiten:
                # Undo the in-place scaling applied above.
                self.components_ *= self.singular_values_.reshape((-1, 1))
                self.components_ *= (1 / cp.sqrt(self.n_samples_ - 1))

        return X_transformed
    @generate_docstring(X='dense_sparse',
                        return_values={'name': 'trans',
                                       'type': 'dense_sparse',
                                       'description': 'Transformed values',
                                       'shape': '(n_samples, n_components)'})
    @enable_device_interop
    def transform(self, X, convert_dtype=False) -> CumlArray:
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.
        """
        self._check_is_fitted('components_')
        dtype = self.components_.dtype

        # Sparse input — or a model fitted on sparse data — is handled
        # entirely on the cupy path.
        if cupyx.scipy.sparse.issparse(X):
            return self._sparse_transform(X)
        elif scipy.sparse.issparse(X):
            X = sparse_scipy_to_cp(X, dtype=None)
            return self._sparse_transform(X)
        elif self._sparse_model:
            X, _, _, _ = \
                input_to_cupy_array(X, order='K',
                                    check_dtype=[cp.float32, cp.float64])
            return self._sparse_transform(X)

        X_m, _n_rows, _n_cols, dtype = \
            input_to_cuml_array(X, check_dtype=dtype,
                                convert_to_dtype=(dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_features_in_)

        cdef uintptr_t _input_ptr = X_m.ptr

        IF GPUBUILD == 1:
            # todo: check dtype
            cdef paramsPCA params
            params.n_components = self.n_components_
            params.n_rows = _n_rows
            params.n_cols = _n_cols
            params.whiten = self.whiten

            # Output buffer for the projected data; keeps the input index.
            t_input_data = \
                CumlArray.zeros((params.n_rows, params.n_components),
                                dtype=dtype.type, index=X_m.index)

            cdef uintptr_t _trans_input_ptr = t_input_data.ptr
            cdef uintptr_t components_ptr = self.components_.ptr
            cdef uintptr_t singular_vals_ptr = \
                self.singular_values_.ptr
            cdef uintptr_t _mean_ptr = self.mean_.ptr

            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

            # pcaTransform is overloaded on float/double data.
            if dtype.type == np.float32:
                pcaTransform(handle_[0],
                             <float*> _input_ptr,
                             <float*> components_ptr,
                             <float*> _trans_input_ptr,
                             <float*> singular_vals_ptr,
                             <float*> _mean_ptr,
                             params)
            else:
                pcaTransform(handle_[0],
                             <double*> _input_ptr,
                             <double*> components_ptr,
                             <double*> _trans_input_ptr,
                             <double*> singular_vals_ptr,
                             <double*> _mean_ptr,
                             params)

            # make sure the previously scheduled gpu tasks are complete before the
            # following transfers start
            self.handle.sync()

            return t_input_data
def get_param_names(self):
return super().get_param_names() + \
["copy", "iterated_power", "n_components", "svd_solver", "tol",
"whiten", "random_state"]
def _check_is_fitted(self, attr):
if not hasattr(self, attr) or (getattr(self, attr) is None):
msg = ("This instance is not fitted yet. Call 'fit' "
"with appropriate arguments before using this estimator.")
raise NotFittedError(msg)
def get_attr_names(self):
return ['components_', 'explained_variance_',
'explained_variance_ratio_', 'singular_values_',
'mean_', 'n_components_', 'noise_variance_',
'n_samples_', 'n_features_in_', 'feature_names_in_']
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/utils.pxd | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from libcpp cimport bool
ctypedef int underlying_type_t_solver
cdef extern from "cuml/decomposition/params.hpp" namespace "ML" nogil:
ctypedef enum solver "ML::solver":
COV_EIG_DQ "ML::solver::COV_EIG_DQ"
COV_EIG_JACOBI "ML::solver::COV_EIG_JACOBI"
cdef cppclass params:
size_t n_rows
size_t n_cols
int gpu_id
cdef cppclass paramsSolver(params):
float tol
unsigned n_iterations
int verbose
cdef cppclass paramsTSVD(paramsSolver):
size_t n_components
solver algorithm # = solver::COV_EIG_DQ
cdef cppclass paramsPCA(paramsTSVD):
bool copy
bool whiten
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources built for every configuration; the *_mg modules are
# appended below only for multi-GPU builds.
set(cython_sources "")
add_module_gpu_default("pca.pyx" ${pca_algo} ${decomposition_algo})
add_module_gpu_default("tsvd.pyx" ${tsvd_algo} ${decomposition_algo})

# Multi-GPU (OPG) modules are only compiled when SINGLEGPU is off.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
       base_mg.pyx
       pca_mg.pyx
       tsvd_mg.pyx
  )
endif()

# Compile the collected sources as C++ extension modules linked against
# the (multi-GPU capable) cuml libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX decomposition_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/tsvd_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
from pylibraft.common.handle cimport handle_t
import cuml.internals
from cuml.common.opg_data_utils_mg cimport *
from cuml.decomposition.utils cimport *
from cuml.decomposition.utils_mg cimport *
from cuml.decomposition import TruncatedSVD
from cuml.decomposition.base_mg import BaseDecompositionMG
cdef extern from "cuml/decomposition/tsvd_mg.hpp" namespace "ML::TSVD::opg":
cdef void fit_transform(handle_t& handle,
vector[floatData_t *] input_data,
PartDescriptor &input_desc,
vector[floatData_t *] trans_data,
PartDescriptor &trans_desc,
float *components,
float *explained_var,
float *explained_var_ratio,
float *singular_vals,
paramsTSVDMG &prms,
bool verbose) except +
cdef void fit_transform(handle_t& handle,
vector[doubleData_t *] input_data,
PartDescriptor &input_desc,
vector[doubleData_t *] trans_data,
PartDescriptor &trans_desc,
double *components,
double *explained_var,
double *explained_var_ratio,
double *singular_vals,
paramsTSVDMG &prms,
bool verbose) except +
class TSVDMG(BaseDecompositionMG, TruncatedSVD):
    """Multi-node multi-GPU (one process per GPU) TruncatedSVD.

    Maps the distributed orchestration from ``BaseDecompositionMG`` onto
    the overloaded ``ML::TSVD::opg::fit_transform`` C++ routines.
    """

    def __init__(self, **kwargs):
        super(TSVDMG, self).__init__(**kwargs)

    def _build_params(self, n_rows, n_cols):
        # Heap-allocate the C++ parameter struct; its address is returned
        # as an integer and cast back in _call_fit.
        # NOTE(review): no matching `del params` is visible in this file —
        # presumably freed by the caller; confirm ownership.
        cdef paramsTSVDMG *params = new paramsTSVDMG()
        params.n_components = self.n_components_
        params.n_rows = n_rows
        params.n_cols = n_cols
        params.n_iterations = self.n_iter
        params.tol = self.tol
        params.algorithm = <mg_solver> (<underlying_type_t_solver> (
            self.c_algorithm))

        return <size_t>params

    @cuml.internals.api_base_return_any_skipall
    def _call_fit(self, X, trans, rank, input_desc,
                  trans_desc, arg_params):
        # Raw device pointers of the preallocated output arrays; the C++
        # routine fills them in place.
        cdef uintptr_t comp_ptr = self.components_.ptr
        cdef uintptr_t explained_var_ptr = self.explained_variance_.ptr
        cdef uintptr_t explained_var_ratio_ptr = \
            self.explained_variance_ratio_.ptr
        cdef uintptr_t singular_vals_ptr = self.singular_values_.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef paramsTSVDMG *params = <paramsTSVDMG*><size_t>arg_params

        # fit_transform is overloaded on float/double partitioned data.
        if self.dtype == np.float32:
            fit_transform(handle_[0],
                          deref(<vector[floatData_t*]*><uintptr_t>X),
                          deref(<PartDescriptor*><uintptr_t>input_desc),
                          deref(<vector[floatData_t*]*><uintptr_t>trans),
                          deref(<PartDescriptor*><uintptr_t>trans_desc),
                          <float*> comp_ptr,
                          <float*> explained_var_ptr,
                          <float*> explained_var_ratio_ptr,
                          <float*> singular_vals_ptr,
                          deref(params),
                          <bool>False)
        else:
            fit_transform(handle_[0],
                          deref(<vector[doubleData_t*]*><uintptr_t>X),
                          deref(<PartDescriptor*><uintptr_t>input_desc),
                          deref(<vector[doubleData_t*]*><uintptr_t>trans),
                          deref(<PartDescriptor*><uintptr_t>trans_desc),
                          <double*> comp_ptr,
                          <double*> explained_var_ptr,
                          <double*> explained_var_ratio_ptr,
                          <double*> singular_vals_ptr,
                          deref(params),
                          <bool>False)

        # Wait for the scheduled GPU work before returning.
        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/incremental_pca.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.decomposition import PCA
from cuml.internals.array import CumlArray
import cuml.internals
from cuml.internals.input_utils import input_to_cupy_array
from cuml.common import input_to_cuml_array
from cuml import Base
from cuml.internals.safe_imports import cpu_only_import
import numbers
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
scipy = cpu_only_import("scipy")
class IncrementalPCA(PCA):
    """
    Based on sklearn.decomposition.IncrementalPCA from scikit-learn 0.23.1

    Incremental principal components analysis (IPCA).

    Linear dimensionality reduction using Singular Value Decomposition of
    the data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space. The input data is
    centered but not scaled for each feature before applying the SVD.

    Depending on the size of the input data, this algorithm can be much
    more memory efficient than a PCA, and allows sparse input.

    This algorithm has constant memory complexity, on the order of
    :py:`batch_size * n_features`, enabling use of np.memmap files without
    loading the entire file into memory. For sparse matrices, the input
    is converted to dense in batches (in order to be able to subtract the
    mean) which avoids storing the entire dense matrix at any one time.

    The computational overhead of each SVD is
    :py:`O(batch_size * n_features ** 2)`, but only 2 * batch_size samples
    remain in memory at a time. There will be :py:`n_samples / batch_size`
    SVD computations to get the principal components, versus 1 large SVD
    of complexity :py:`O(n_samples * n_features ** 2)` for PCA.

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    n_components : int or None, (default=None)
        Number of components to keep. If `n_components` is ``None``,
        then `n_components` is set to :py:`min(n_samples, n_features)`.
    whiten : bool, optional
        If True, de-correlates the components. This is done by dividing them by
        the corresponding singular values then multiplying by sqrt(n_samples).
        Whitening allows each component to have unit variance and removes
        multi-collinearity. It might be beneficial for downstream
        tasks like LinearRegression where correlated features cause problems.
    copy : bool, (default=True)
        If False, X will be overwritten. :py:`copy=False` can be used to
        save memory but is unsafe for general use.
    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when calling
        `fit`. If `batch_size` is ``None``, then `batch_size`
        is inferred from the data and set to :py:`5 * n_features`, to provide a
        balance between approximation accuracy and memory consumption.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.
    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If all components are stored, the sum of explained variances is equal
        to 1.0.
    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the `n_components`
        variables in the lower-dimensional space.
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to `partial_fit`.
    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to
        `partial_fit`.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from [4]_.
    n_components_ : int
        The estimated number of components. Relevant when
        `n_components=None`.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across `partial_fit` calls.
    batch_size_ : int
        Inferred batch size from `batch_size`.

    Notes
    -----
    Implements the incremental PCA model from [1]_. This model is an extension
    of the Sequential Karhunen-Loeve Transform from [2]_. We have specifically
    abstained from an optimization used by authors of both papers, a QR
    decomposition used in specific situations to reduce the algorithmic
    complexity of the SVD. The source for this technique is [3]_. This
    technique has been omitted because it is advantageous only when decomposing
    a matrix with :py:`n_samples >= 5/3 * n_features` where `n_samples` and
    `n_features` are the matrix rows and columns, respectively. In addition,
    it hurts the readability of the implemented algorithm. This would be a good
    opportunity for future optimization, if it is deemed necessary.

    References
    ----------
    .. [1] `D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust
        Visual Tracking, International Journal of Computer Vision, Volume 77,
        Issue 1-3, pp. 125-141, May 2008.
        <https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf>`_

    .. [2] `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis
        Extraction and its Application to Images, IEEE Transactions on Image
        Processing, Volume 9, Number 8, pp. 1371-1374, August 2000.
        <https://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf>`_

    .. [3] G. Golub and C. Van Loan. Matrix Computations, Third Edition,
        Chapter 5, Section 5.4.4, pp. 252-253.

    .. [4] `C. Bishop, 1999. "Pattern Recognition and Machine Learning",
        Section 12.2.1, pp. 574
        <http://www.miketipping.com/papers/met-mppca.pdf>`_

    Examples
    --------
    .. code-block:: python

        >>> from cuml.decomposition import IncrementalPCA
        >>> import cupy as cp
        >>> import cupyx
        >>>
        >>> X = cupyx.scipy.sparse.random(1000, 4, format='csr',
        ...                               density=0.07, random_state=5)
        >>> ipca = IncrementalPCA(n_components=2, batch_size=200)
        >>> ipca.fit(X)
        IncrementalPCA()
        >>>
        >>> # Components:
        >>> ipca.components_ # doctest: +SKIP
        array([[ 0.23698335, -0.06073393,  0.04310868,  0.9686547 ],
               [ 0.27040346, -0.57185116,  0.76248786, -0.13594291]])
        >>>
        >>> # Singular Values:
        >>> ipca.singular_values_ # doctest: +SKIP
        array([5.06637586, 4.59406975])
        >>>
        >>> # Explained Variance:
        >>> ipca.explained_variance_ # doctest: +SKIP
        array([0.02569386, 0.0211266 ])
        >>>
        >>> # Explained Variance Ratio:
        >>> ipca.explained_variance_ratio_ # doctest: +SKIP
        array([0.30424536, 0.25016372])
        >>>
        >>> # Mean:
        >>> ipca.mean_ # doctest: +SKIP
        array([0.02693948, 0.0326928 , 0.03818463, 0.03861492])
        >>>
        >>> # Noise Variance:
        >>> ipca.noise_variance_.item() # doctest: +SKIP
        0.0037122774558343763
    """

    def __init__(
        self,
        *,
        handle=None,
        n_components=None,
        whiten=False,
        copy=True,
        batch_size=None,
        verbose=False,
        output_type=None,
    ):
        super().__init__(
            handle=handle,
            n_components=n_components,
            whiten=whiten,
            copy=copy,
            verbose=verbose,
            output_type=output_type,
        )
        self.batch_size = batch_size
        # Hyperparameters reported by get_param_names().
        self._hyperparams = ["n_components", "whiten", "copy", "batch_size"]
        self._sparse_model = True

    def fit(self, X, y=None) -> "IncrementalPCA":
        """
        Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # fit() always restarts the running statistics, unlike
        # partial_fit(), which accumulates across calls.
        self.n_samples_seen_ = 0
        self.mean_ = 0.0
        self.var_ = 0.0

        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            X = _validate_sparse_input(X)
        else:
            # NOTE: While we cast the input to a cupy array here, we still
            # respect the `output_type` parameter in the constructor. This
            # is done by PCA, which IncrementalPCA inherits from. PCA's
            # transform and inverse transform convert the output to the
            # required type.
            X, n_samples, n_features, self.dtype = input_to_cupy_array(
                X, order="K", check_dtype=[cp.float32, cp.float64]
            )

        n_samples, n_features = X.shape

        if self.batch_size is None:
            # Default heuristic balancing accuracy and memory consumption.
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        for batch in _gen_batches(
            n_samples, self.batch_size_, min_batch_size=self.n_components or 0
        ):
            X_batch = X[batch]
            # Sparse batches are densified one at a time so the full dense
            # matrix never has to be materialized.
            if cupyx.scipy.sparse.issparse(X_batch):
                X_batch = X_batch.toarray()

            self.partial_fit(X_batch, check_input=False)

        return self

    @cuml.internals.api_base_return_any_skipall
    def partial_fit(self, X, y=None, check_input=True) -> "IncrementalPCA":
        """
        Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        check_input : bool
            Run check_array on X.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if check_input:
            if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches."
                )

            self._set_output_type(X)

            X, n_samples, n_features, self.dtype = input_to_cupy_array(
                X, order="K", check_dtype=[cp.float32, cp.float64]
            )
        else:
            n_samples, n_features = X.shape

        if not hasattr(self, "components_"):
            self.components_ = None

        # Resolve the effective number of components for this batch.
        if self.n_components is None:
            if self.components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self.components_.shape[0]
        elif not 1 <= self.n_components <= n_features:
            raise ValueError(
                "n_components=%r invalid for n_features=%d, need "
                "more rows than columns for IncrementalPCA "
                "processing" % (self.n_components, n_features)
            )
        elif not self.n_components <= n_samples:
            raise ValueError(
                "n_components=%r must be less or equal to "
                "the batch number of samples "
                "%d." % (self.n_components, n_samples)
            )
        else:
            self.n_components_ = self.n_components

        if (self.components_ is not None) and (
            self.components_.shape[0] != self.n_components_
        ):
            raise ValueError(
                "Number of input features has changed from %i "
                "to %i between calls to partial_fit! Try "
                "setting n_components to a fixed value."
                % (self.components_.shape[0], self.n_components_)
            )
        # This is the first partial_fit
        if not hasattr(self, "n_samples_seen_"):
            self.n_samples_seen_ = 0
            self.mean_ = 0.0
            self.var_ = 0.0

        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = _incremental_mean_and_var(
            X,
            last_mean=self.mean_,
            last_variance=self.var_,
            last_sample_count=cp.repeat(
                cp.asarray([self.n_samples_seen_]), X.shape[1]
            ),
        )
        # The per-feature counts are all equal here; take the first.
        n_total_samples = n_total_samples[0]

        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X = X - col_mean
        else:
            col_batch_mean = cp.mean(X, axis=0)
            X = X - col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = cp.sqrt(
                (self.n_samples_seen_ * n_samples) / n_total_samples
            ) * (self.mean_ - col_batch_mean)
            X = cp.vstack(
                (
                    self.singular_values_.reshape((-1, 1)) * self.components_,
                    X,
                    mean_correction,
                )
            )

        U, S, V = cp.linalg.svd(X, full_matrices=False)
        # Deterministic sign convention, decided on the rows of V.
        U, V = _svd_flip(U, V, u_based_decision=False)
        explained_variance = S**2 / (n_total_samples - 1)
        explained_variance_ratio = S**2 / cp.sum(col_var * n_total_samples)

        self.n_samples_ = n_total_samples
        self.n_samples_seen_ = n_total_samples
        self.components_ = V[: self.n_components_]
        self.singular_values_ = S[: self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[: self.n_components_]
        self.explained_variance_ratio_ = explained_variance_ratio[
            : self.n_components_
        ]
        # Variance not captured by the kept components is treated as noise
        # (Probabilistic PCA model).
        if self.n_components_ < n_features:
            self.noise_variance_ = explained_variance[
                self.n_components_ :
            ].mean()
        else:
            self.noise_variance_ = 0.0
        return self

    def transform(self, X, convert_dtype=False) -> CumlArray:
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set, using minibatches of size batch_size if X is
        sparse.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        convert_dtype : bool, optional (default = False)
            When set to True, the transform method will automatically
            convert the input to the data type which was used to train the
            model. This will increase memory used for the method.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        if scipy.sparse.issparse(X) or cupyx.scipy.sparse.issparse(X):
            X = _validate_sparse_input(X)

            n_samples = X.shape[0]
            output = []
            # batch_size_ is set during fit(); sparse transform therefore
            # requires a previously fitted estimator.
            for batch in _gen_batches(
                n_samples,
                self.batch_size_,
                min_batch_size=self.n_components or 0,
            ):
                output.append(super().transform(X[batch]))
            output, _, _, _ = input_to_cuml_array(cp.vstack(output), order="K")

            return output
        else:
            return super().transform(X)

    def get_param_names(self):
        # Skip super() since we dont pass any extra parameters in __init__
        return Base.get_param_names(self) + self._hyperparams
def _validate_sparse_input(X):
"""
Validate the format and dtype of sparse inputs.
This function throws an error for any cupyx.scipy.sparse object that is not
of type cupyx.scipy.sparse.csr_matrix or cupyx.scipy.sparse.csc_matrix.
It also validates the dtype of the input to be 'float32' or 'float64'
Parameters
----------
X : scipy.sparse or cupyx.scipy.sparse object
A sparse input
Returns
-------
X : The input converted to a cupyx.scipy.sparse.csr_matrix object
"""
acceptable_dtypes = ("float32", "float64")
# NOTE: We can include cupyx.scipy.sparse.csc.csc_matrix
# once it supports indexing in cupy 8.0.0b5
acceptable_cupy_sparse_formats = cupyx.scipy.sparse.csr_matrix
if X.dtype not in acceptable_dtypes:
raise TypeError(
"Expected input to be of type float32 or float64."
" Received %s" % X.dtype
)
if scipy.sparse.issparse(X):
return cupyx.scipy.sparse.csr_matrix(X)
elif cupyx.scipy.sparse.issparse(X):
if not isinstance(X, acceptable_cupy_sparse_formats):
raise TypeError(
"Expected input to be of type"
" cupyx.scipy.sparse.csr_matrix or"
" cupyx.scipy.sparse.csc_matrix. Received %s" % type(X)
)
else:
return X
def _gen_batches(n, batch_size, min_batch_size=0):
"""
Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Parameters
----------
n : int
batch_size : int
Number of element in each batch
min_batch_size : int, default=0
Minimum batch size to produce.
Yields
------
slice of batch_size elements
"""
if not isinstance(batch_size, numbers.Integral):
raise TypeError(
"gen_batches got batch_size=%s, must be an" " integer" % batch_size
)
if batch_size <= 0:
raise ValueError(
"gen_batches got batch_size=%s, must be" " positive" % batch_size
)
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
if end + min_batch_size > n:
continue
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def _safe_accumulator_op(op, x, *args, **kwargs):
"""
This function provides numpy accumulator functions with a float64 dtype
when used on a floating point input. This prevents accumulator overflow on
smaller floating point dtypes.
Parameters
----------
op : function
A cupy accumulator function such as cp.mean or cp.sum
x : cupy array
A numpy array to apply the accumulator function
*args : positional arguments
Positional arguments passed to the accumulator function after the
input x
**kwargs : keyword arguments
Keyword arguments passed to the accumulator function
Returns
-------
result : The output of the accumulator function passed to this function
"""
if cp.issubdtype(x.dtype, cp.floating) and x.dtype.itemsize < 8:
result = op(x, *args, **kwargs, dtype=cp.float64).astype(cp.float32)
else:
result = op(x, *args, **kwargs)
return result
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
    """
    Calculate mean update and a Youngs and Cramer variance update.

    last_mean and last_variance are statistics computed at the last step by the
    function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    necessary for the calculation of the variance. last_n_samples_seen is the
    number of samples encountered until now.

    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data to use for variance update
    last_mean : array-like, shape: (n_features,)
    last_variance : array-like, shape: (n_features,)
    last_sample_count : array-like, shape (n_features,)

    Returns
    -------
    updated_mean : array, shape (n_features,)
    updated_variance : array, shape (n_features,)
        If None, only mean is computed
    updated_sample_count : array, shape (n_features,)

    Notes
    -----
    NaNs are ignored during the algorithm.

    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247
    """
    # old = stats until now
    # new = the current increment
    # updated = the aggregated stats
    last_sum = last_mean * last_sample_count
    # nansum/nanvar below: NaN entries are excluded from the statistics,
    # hence the per-feature sample counts rather than a scalar count.
    new_sum = _safe_accumulator_op(cp.nansum, X, axis=0)

    new_sample_count = cp.sum(~cp.isnan(X), axis=0)
    updated_sample_count = last_sample_count + new_sample_count

    updated_mean = (last_sum + new_sum) / updated_sample_count

    if last_variance is None:
        updated_variance = None
    else:
        new_unnormalized_variance = (
            _safe_accumulator_op(cp.nanvar, X, axis=0) * new_sample_count
        )
        last_unnormalized_variance = last_variance * last_sample_count

        # NOTE: The scikit-learn implementation has a np.errstate check
        # here for ignoring invalid divides. This is not implemented in
        # cupy as of 7.6.0
        last_over_new_count = last_sample_count / new_sample_count
        # Chan et al. pairwise combination of the two partitions'
        # unnormalized variances.
        updated_unnormalized_variance = (
            last_unnormalized_variance
            + new_unnormalized_variance
            + last_over_new_count
            / updated_sample_count
            * (last_sum / last_over_new_count - new_sum) ** 2
        )

        # Features never seen before: the combination formula divides by
        # zero there, so take the new variance verbatim.
        zeros = last_sample_count == 0
        updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
        updated_variance = updated_unnormalized_variance / updated_sample_count

    return updated_mean, updated_variance, updated_sample_count
def _svd_flip(u, v, u_based_decision=True):
"""
Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u : cupy.ndarray
u and v are the output of `cupy.linalg.svd`
v : cupy.ndarray
u and v are the output of `cupy.linalg.svd`
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = cp.argmax(cp.abs(u), axis=0)
signs = cp.sign(u[max_abs_cols, list(range(u.shape[1]))])
u *= signs
v *= signs[:, cp.newaxis]
else:
# rows of v, columns of u
max_abs_rows = cp.argmax(cp.abs(v), axis=1)
signs = cp.sign(v[list(range(v.shape[0])), max_abs_rows])
u *= signs
v *= signs[:, cp.newaxis]
return u, v
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/tsvd.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t
from cuml.internals.array import CumlArray
from cuml.internals.base import UniversalBase
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
    from enum import IntEnum
    from cython.operator cimport dereference as deref
    from cuml.decomposition.utils cimport *
    # NOTE(review): duplicate cimport below — harmless, candidate cleanup.
    from cuml.decomposition.utils cimport *
    from pylibraft.common.handle cimport handle_t

    # Single-GPU C++ TSVD entry points. Each routine has a float and a
    # double overload; paramsTSVD carries the problem dimensions and
    # solver settings.
    cdef extern from "cuml/decomposition/tsvd.hpp" namespace "ML":

        # Fit only: computes components and singular values.
        cdef void tsvdFit(handle_t& handle,
                          float *input,
                          float *components,
                          float *singular_vals,
                          const paramsTSVD &prms) except +

        cdef void tsvdFit(handle_t& handle,
                          double *input,
                          double *components,
                          double *singular_vals,
                          const paramsTSVD &prms) except +

        # Fit and project the training data in one pass; also fills the
        # explained variance (and ratio) outputs.
        cdef void tsvdFitTransform(handle_t& handle,
                                   float *input,
                                   float *trans_input,
                                   float *components,
                                   float *explained_var,
                                   float *explained_var_ratio,
                                   float *singular_vals,
                                   const paramsTSVD &prms) except +

        cdef void tsvdFitTransform(handle_t& handle,
                                   double *input,
                                   double *trans_input,
                                   double *components,
                                   double *explained_var,
                                   double *explained_var_ratio,
                                   double *singular_vals,
                                   const paramsTSVD &prms) except +

        # Back-project reduced data to the original feature space.
        cdef void tsvdInverseTransform(handle_t& handle,
                                       float *trans_input,
                                       float *components,
                                       float *input,
                                       const paramsTSVD &prms) except +

        cdef void tsvdInverseTransform(handle_t& handle,
                                       double *trans_input,
                                       double *components,
                                       double *input,
                                       const paramsTSVD &prms) except +

        # Project new data onto previously fitted components.
        cdef void tsvdTransform(handle_t& handle,
                                float *input,
                                float *components,
                                float *trans_input,
                                const paramsTSVD &prms) except +

        cdef void tsvdTransform(handle_t& handle,
                                double *input,
                                double *components,
                                double *trans_input,
                                const paramsTSVD &prms) except +

    class Solver(IntEnum):
        # Python-visible mirror of the C-level `solver` enum.
        COV_EIG_DQ = <underlying_type_t_solver> solver.COV_EIG_DQ
        COV_EIG_JACOBI = <underlying_type_t_solver> solver.COV_EIG_JACOBI
class TruncatedSVD(UniversalBase,
FMajorInputTagMixin):
"""
TruncatedSVD is used to compute the top K singular values and vectors of a
large matrix X. It is much faster when n_components is small, such as in
the use of PCA when 3 components is used for 3D visualization.
cuML's TruncatedSVD an array-like object or cuDF DataFrame, and provides 2
algorithms Full and Jacobi. Full (default) uses a full eigendecomposition
then selects the top K singular vectors. The Jacobi algorithm is much
faster as it iteratively tries to correct the top K singular vectors, but
might be less accurate.
Examples
--------
.. code-block:: python
>>> # Both import methods supported
>>> from cuml import TruncatedSVD
>>> from cuml.decomposition import TruncatedSVD
>>> import cudf
>>> import cupy as cp
>>> gdf_float = cudf.DataFrame()
>>> gdf_float['0'] = cp.asarray([1.0,2.0,5.0], dtype=cp.float32)
>>> gdf_float['1'] = cp.asarray([4.0,2.0,1.0], dtype=cp.float32)
>>> gdf_float['2'] = cp.asarray([4.0,2.0,1.0], dtype=cp.float32)
>>> tsvd_float = TruncatedSVD(n_components = 2, algorithm = "jacobi",
... n_iter = 20, tol = 1e-9)
>>> tsvd_float.fit(gdf_float)
TruncatedSVD()
>>> print(f'components: {tsvd_float.components_}') # doctest: +SKIP
components: 0 1 2
0 0.587259 0.572331 0.572331
1 0.809399 -0.415255 -0.415255
>>> exp_var = tsvd_float.explained_variance_
>>> print(f'explained variance: {exp_var}')
explained variance: 0 0.494...
1 5.505...
dtype: float32
>>> exp_var_ratio = tsvd_float.explained_variance_ratio_
>>> print(f'explained variance ratio: {exp_var_ratio}')
explained variance ratio: 0 0.082...
1 0.917...
dtype: float32
>>> sing_values = tsvd_float.singular_values_
>>> print(f'singular values: {sing_values}')
singular values: 0 7.439...
1 4.081...
dtype: float32
>>> trans_gdf_float = tsvd_float.transform(gdf_float)
>>> print(f'Transformed matrix: {trans_gdf_float}') # doctest: +SKIP
Transformed matrix: 0 1
0 5.165910 -2.512643
1 3.463844 -0.042223
2 4.080960 3.216484
>>> input_gdf_float = tsvd_float.inverse_transform(trans_gdf_float)
>>> print(f'Input matrix: {input_gdf_float}')
Input matrix: 0 1 2
0 1.0 4.0 4.0
1 2.0 2.0 2.0
2 5.0 1.0 1.0
Parameters
----------
algorithm : 'full' or 'jacobi' or 'auto' (default = 'full')
Full uses a eigendecomposition of the covariance matrix then discards
components.
Jacobi is much faster as it iteratively corrects, but is less accurate.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
n_components : int (default = 1)
The number of top K singular vectors / values you want.
Must be <= number(columns).
n_iter : int (default = 15)
Used in Jacobi solver. The more iterations, the more accurate, but
slower.
random_state : int / None (default = None)
If you want results to be the same when you restart Python, select a
state.
tol : float (default = 1e-7)
Used if algorithm = "jacobi". Smaller tolerance can increase accuracy,
but but will slow down the algorithm's convergence.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
components_ : array
The top K components (VT.T[:,:n_components]) in U, S, VT = svd(X)
explained_variance_ : array
How much each component explains the variance in the data given by S**2
explained_variance_ratio_ : array
How much in % the variance is explained given by S**2/sum(S**2)
singular_values_ : array
The top K singular values. Remember all singular values >= 0
Notes
-----
TruncatedSVD (the randomized version [Jacobi]) is fantastic when the number
of components you want is much smaller than the number of features. The
approximation to the largest singular values and vectors is very robust,
however, this method loses a lot of accuracy when you want many, many
components.
**Applications of TruncatedSVD**
TruncatedSVD is also known as Latent Semantic Indexing (LSI) which
tries to find topics of a word count matrix. If X previously was
centered with mean removal, TruncatedSVD is the same as TruncatedPCA.
TruncatedSVD is also used in information retrieval tasks,
recommendation systems and data compression.
For additional documentation, see `scikitlearn's TruncatedSVD docs
<http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html>`_.
"""
    # Dotted path of the equivalent CPU estimator, used for CPU/GPU
    # device interop dispatch.
    _cpu_estimator_import_path = 'sklearn.decomposition.TruncatedSVD'

    # Fitted attributes stored as F-ordered arrays; the descriptor handles
    # lazy conversion to the user's requested output_type.
    components_ = CumlArrayDescriptor(order='F')
    explained_variance_ = CumlArrayDescriptor(order='F')
    explained_variance_ratio_ = CumlArrayDescriptor(order='F')
    singular_values_ = CumlArrayDescriptor(order='F')
    @device_interop_preparation
    def __init__(self, *, algorithm='full', handle=None, n_components=1,
                 n_iter=15, random_state=None, tol=1e-7,
                 verbose=False, output_type=None):
        """Store hyperparameters and reset the fitted-attribute slots."""
        # params
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol
        # Resolve the public algorithm name to the C-level solver enum up
        # front so an invalid name fails fast at construction time.
        self.c_algorithm = self._get_algorithm_c_name(self.algorithm)

        # internal array attributes
        self.components_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.singular_values_ = None
    def _get_algorithm_c_name(self, algorithm):
        """Map the public algorithm name to the C-level solver enum value.

        Raises TypeError for unrecognized names. Only compiled in GPU
        builds (compile-time GPUBUILD flag).
        """
        IF GPUBUILD == 1:
            algo_map = {
                'full': Solver.COV_EIG_DQ,
                'auto': Solver.COV_EIG_DQ,  # 'auto' falls back to the full solver
                'jacobi': Solver.COV_EIG_JACOBI
            }
            if algorithm not in algo_map:
                msg = "algorithm {!r} is not supported"
                raise TypeError(msg.format(algorithm))

            return algo_map[algorithm]
    def _build_params(self, n_rows, n_cols):
        """Allocate and populate a C-level paramsTSVD struct.

        The pointer is returned as a size_t; the caller casts it back.
        NOTE(review): allocated with `new` — confirm the caller releases it.
        """
        IF GPUBUILD == 1:
            cdef paramsTSVD *params = new paramsTSVD()
            params.n_components = self.n_components
            params.n_rows = n_rows
            params.n_cols = n_cols
            params.n_iterations = self.n_iter
            params.tol = self.tol
            params.algorithm = <solver> (<underlying_type_t_solver> (
                self.c_algorithm))

            return <size_t>params
def _initialize_arrays(self, n_components, n_rows, n_cols):
self.components_ = CumlArray.zeros((n_components, n_cols),
dtype=self.dtype)
self.explained_variance_ = CumlArray.zeros(n_components,
dtype=self.dtype)
self.explained_variance_ratio_ = CumlArray.zeros(n_components,
dtype=self.dtype)
self.singular_values_ = CumlArray.zeros(n_components,
dtype=self.dtype)
@generate_docstring()
@enable_device_interop
def fit(self, X, y=None) -> "TruncatedSVD":
    """
    Fit LSI model on training cudf DataFrame X. y is currently ignored.
    """
    # Delegate to fit_transform; the transformed output is discarded.
    self.fit_transform(X)
    return self
@generate_docstring(return_values={'name': 'trans',
                                   'type': 'dense',
                                   'description': 'Reduced version of X',
                                   'shape': '(n_samples, n_components)'})
@enable_device_interop
def fit_transform(self, X, y=None) -> CumlArray:
    """
    Fit LSI model to X and perform dimensionality reduction on X.
    y is currently ignored.
    """
    X_m, self.n_rows, self.n_features_in_, self.dtype = \
        input_to_cuml_array(X, check_dtype=[np.float32, np.float64])
    cdef uintptr_t _input_ptr = X_m.ptr

    # Validate before allocating any output arrays. The check is a strict
    # ">", so equality is allowed -- hence "<=" in the message (the old
    # message incorrectly said "< n_features").
    if self.n_components > self.n_features_in_:
        raise ValueError('n_components must be <= n_features')

    self._initialize_arrays(self.n_components, self.n_rows,
                            self.n_features_in_)
    cdef uintptr_t _comp_ptr = self.components_.ptr
    cdef uintptr_t _explained_var_ptr = \
        self.explained_variance_.ptr
    cdef uintptr_t _explained_var_ratio_ptr = \
        self.explained_variance_ratio_.ptr
    cdef uintptr_t _singular_vals_ptr = \
        self.singular_values_.ptr

    IF GPUBUILD == 1:
        cdef paramsTSVD *params = <paramsTSVD*><size_t> \
            self._build_params(self.n_rows, self.n_features_in_)

        # Output buffer for the transformed data, indexed like the input.
        _trans_input_ = CumlArray.zeros((params.n_rows, params.n_components),
                                        dtype=self.dtype, index=X_m.index)
        cdef uintptr_t t_input_ptr = _trans_input_.ptr

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if self.dtype == np.float32:
            tsvdFitTransform(handle_[0],
                             <float*> _input_ptr,
                             <float*> t_input_ptr,
                             <float*> _comp_ptr,
                             <float*> _explained_var_ptr,
                             <float*> _explained_var_ratio_ptr,
                             <float*> _singular_vals_ptr,
                             deref(params))
        else:
            tsvdFitTransform(handle_[0],
                             <double*> _input_ptr,
                             <double*> t_input_ptr,
                             <double*> _comp_ptr,
                             <double*> _explained_var_ptr,
                             <double*> _explained_var_ratio_ptr,
                             <double*> _singular_vals_ptr,
                             deref(params))

        # make sure the previously scheduled gpu tasks are complete before
        # the following transfers start
        self.handle.sync()

        return _trans_input_
@generate_docstring(return_values={'name': 'X_original',
                                   'type': 'dense',
                                   'description': 'X in original space',
                                   'shape': '(n_samples, n_features)'})
@enable_device_interop
def inverse_transform(self, X, convert_dtype=False) -> CumlArray:
    """
    Transform X back to its original space.
    Returns X_original whose transform would be X.

    Parameters
    ----------
    X : dense array, shape (n_samples, n_components)
        Data in the reduced space.
    convert_dtype : bool, default=False
        When True, convert X to the dtype of the fitted components.
    """
    dtype = self.components_.dtype
    _X_m, _n_rows, _, dtype = \
        input_to_cuml_array(X, check_dtype=dtype,
                            convert_to_dtype=(dtype if convert_dtype
                                              else None))

    IF GPUBUILD == 1:
        # Stack-allocated params: the inverse transform is a plain matrix
        # product and needs no solver configuration beyond the shapes.
        cdef paramsTSVD params
        params.n_components = self.n_components
        params.n_rows = _n_rows
        params.n_cols = self.n_features_in_

        # Output buffer in the original feature space.
        input_data = CumlArray.zeros((params.n_rows, params.n_cols),
                                     dtype=dtype, index=_X_m.index)

        cdef uintptr_t trans_input_ptr = _X_m.ptr
        cdef uintptr_t input_ptr = input_data.ptr
        cdef uintptr_t components_ptr = self.components_.ptr

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if dtype.type == np.float32:
            tsvdInverseTransform(handle_[0],
                                 <float*> trans_input_ptr,
                                 <float*> components_ptr,
                                 <float*> input_ptr,
                                 params)
        else:
            tsvdInverseTransform(handle_[0],
                                 <double*> trans_input_ptr,
                                 <double*> components_ptr,
                                 <double*> input_ptr,
                                 params)

        # make sure the previously scheduled gpu tasks are complete before
        # the following transfers start
        self.handle.sync()

        return input_data
@generate_docstring(return_values={'name': 'X_new',
                                   'type': 'dense',
                                   'description': 'Reduced version of X',
                                   'shape': '(n_samples, n_components)'})
@enable_device_interop
def transform(self, X, convert_dtype=False) -> CumlArray:
    """
    Perform dimensionality reduction on X.

    Parameters
    ----------
    X : dense array, shape (n_samples, n_features)
    convert_dtype : bool, default=False
        When True, convert X to the dtype of the fitted components.
    """
    dtype = self.components_.dtype
    # Refresh n_features_in_ from the fitted components on every call.
    # NOTE(review): presumably this supports models whose components were
    # set via interop rather than fit() -- confirm.
    self.n_features_in_ = self.components_.shape[1]
    _X_m, _n_rows, _, dtype = \
        input_to_cuml_array(X, check_dtype=dtype,
                            convert_to_dtype=(dtype if convert_dtype
                                              else None),
                            check_cols=self.n_features_in_)

    IF GPUBUILD == 1:
        # Stack-allocated params: only the shapes are needed here.
        cdef paramsTSVD params
        params.n_components = self.n_components
        params.n_rows = _n_rows
        params.n_cols = self.n_features_in_

        # Output buffer in the reduced space, indexed like the input.
        t_input_data = \
            CumlArray.zeros((params.n_rows, params.n_components),
                            dtype=dtype, index=_X_m.index)

        cdef uintptr_t input_ptr = _X_m.ptr
        cdef uintptr_t trans_input_ptr = t_input_data.ptr
        cdef uintptr_t components_ptr = self.components_.ptr

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if dtype.type == np.float32:
            tsvdTransform(handle_[0],
                          <float*> input_ptr,
                          <float*> components_ptr,
                          <float*> trans_input_ptr,
                          params)
        else:
            tsvdTransform(handle_[0],
                          <double*> input_ptr,
                          <double*> components_ptr,
                          <double*> trans_input_ptr,
                          params)

        # make sure the previously scheduled gpu tasks are complete before
        # the following transfers start
        self.handle.sync()

        return t_input_data
def get_param_names(self):
    """Return the hyperparameter names accepted by ``__init__``."""
    extra = ["algorithm", "n_components", "n_iter", "random_state", "tol"]
    return super().get_param_names() + extra
def get_attr_names(self):
    """Return fitted-attribute names transferred during CPU/GPU interop."""
    return [
        'components_',
        'explained_variance_',
        'explained_variance_ratio_',
        'singular_values_',
        'n_features_in_',
        'feature_names_in_',
    ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/base_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t
import cuml.common.opg_data_utils_mg as opg
import cuml.internals
from cuml.decomposition.utils cimport *
from cuml.decomposition.utils_mg cimport *
from cuml.common import input_to_cuml_array
from cuml.common.opg_data_utils_mg cimport *
from enum import IntEnum
class MGSolver(IntEnum):
    """Multi-GPU solver choices, mirroring the C++ ``ML::mg_solver`` enum.

    Values are cast to the enum's underlying integral type so they can be
    handed straight to the C++ layer.
    """
    COV_EIG_DQ = <underlying_type_t_solver> mg_solver.COV_EIG_DQ
    COV_EIG_JACOBI = <underlying_type_t_solver> mg_solver.COV_EIG_JACOBI
    QR = <underlying_type_t_solver> mg_solver.QR
class BaseDecompositionMG(object):
    """Mixin adding a multi-GPU (OPG) ``fit`` to decomposition estimators.

    Subclasses are expected to provide ``_build_params``,
    ``_initialize_arrays`` and ``_call_fit``.
    """

    def __init__(self, **kwargs):
        super(BaseDecompositionMG, self).__init__(**kwargs)

    @cuml.internals.api_base_return_any_skipall
    def fit(self, X, total_rows, n_cols, partsToRanks, rank,
            _transform=False):
        """
        Fit function for PCA MG. This not meant to be used as
        part of the public API.
        :param X: array of local dataframes / array partitions
        :param total_rows: total number of rows
        :param n_cols: total number of cols
        :param partsToRanks: array of tuples in the format: [(rank,size)]
        :param rank: this worker's rank
        :param _transform: when True, also compute and return the
            transformed partitions
        :return: self (or transformed partitions when ``_transform``)
        """
        self._set_output_type(X[0])
        self._set_n_features_in(n_cols)
        if self.n_components is None:
            # Default: keep every possible component.
            self.n_components_ = min(total_rows, n_cols)
        else:
            self.n_components_ = self.n_components

        X_arys = []
        for i in range(len(X)):
            if i == 0:
                check_dtype = [np.float32, np.float64]
            else:
                # Partitions after the first must match its dtype.
                check_dtype = self.dtype
            X_m, _, self.n_cols, _ = \
                input_to_cuml_array(X[i], check_dtype=check_dtype)
            X_arys.append(X_m)
            if i == 0:
                self.dtype = X_m.dtype

        # Build C++-side descriptors for the distributed data layout.
        cdef uintptr_t X_arg = opg.build_data_t(X_arys)
        cdef uintptr_t rank_to_sizes = opg.build_rank_size_pair(partsToRanks,
                                                                rank)
        cdef uintptr_t part_desc = opg.build_part_descriptor(total_rows,
                                                             self.n_cols,
                                                             rank_to_sizes,
                                                             rank)
        cdef uintptr_t trans_part_desc
        if _transform:
            # Output partitions for the transformed data (column-major).
            trans_arys = opg.build_pred_or_trans_arys(X_arys, "F", self.dtype)
            trans_arg = opg.build_data_t(trans_arys)
            trans_part_desc = opg.build_part_descriptor(total_rows,
                                                        self.n_components_,
                                                        rank_to_sizes,
                                                        rank)

        self._initialize_arrays(self.n_components_, total_rows, n_cols)
        decomp_params = self._build_params(total_rows, n_cols)

        if _transform:
            self._call_fit(
                X_arg, trans_arg, rank, part_desc, trans_part_desc,
                decomp_params)
        else:
            self._call_fit(X_arg, rank, part_desc, decomp_params)

        # Release the C++-side descriptor/data structures created above.
        opg.free_rank_size_pair(rank_to_sizes)
        opg.free_part_descriptor(part_desc)
        opg.free_data_t(X_arg, self.dtype)

        if _transform:
            trans_out = []
            for i in range(len(trans_arys)):
                trans_out.append(trans_arys[i].to_output(
                    output_type=self._get_output_type(X[0])))
            opg.free_data_t(trans_arg, self.dtype)
            opg.free_part_descriptor(trans_part_desc)
            return trans_out
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.decomposition.pca import PCA
from cuml.decomposition.tsvd import TruncatedSVD
from cuml.decomposition.incremental_pca import IncrementalPCA
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/decomposition/utils_mg.pxd | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcpp cimport bool
from cuml.decomposition.utils cimport *
# Cython declarations mirroring the multi-GPU decomposition parameter
# structs/enums defined in the C++ header.
cdef extern from "cuml/decomposition/params.hpp" namespace "ML" nogil:

    # Solver selection for the multi-GPU code path.
    ctypedef enum mg_solver "ML::mg_solver":
        COV_EIG_DQ "ML::mg_solver::COV_EIG_DQ"
        COV_EIG_JACOBI "ML::mg_solver::COV_EIG_JACOBI"
        QR "ML::mg_solver::QR"

    # Parameters for multi-GPU truncated SVD.
    cdef cppclass paramsTSVDMG(paramsSolver):
        size_t n_components
        mg_solver algorithm  # = solver::COV_EIG_DQ

    # Parameters for multi-GPU PCA (extends the TSVD parameters).
    cdef cppclass paramsPCAMG(paramsTSVDMG):
        bool copy
        bool whiten
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/model_selection/_split.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional, Union
from cuml.common import input_to_cuml_array
from cuml.internals.array import array_to_memory_order
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
)
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
def _stratify_split(
    X, stratify, labels, n_train, n_test, x_numba, y_numba, random_state
):
    """
    Function to perform a stratified split based on stratify column.
    Based on scikit-learn stratified split implementation.

    Parameters
    ----------
    X, labels : shuffled input data and labels
    stratify : column to be stratified on
    n_train : number of samples in the train set
    n_test : number of samples in the test set
    x_numba : whether X should be returned as a numba device array
    y_numba : whether the labels should be returned as numba device arrays
    random_state : cupy/numpy RandomState used to permute each class

    Returns
    -------
    X_train, X_test: Data X divided into train and test sets
    y_train, y_test: Labels divided into train and test sets
    """
    x_cudf = False
    labels_cudf = False

    if isinstance(X, cudf.DataFrame):
        x_cudf = True
    elif hasattr(X, "__cuda_array_interface__"):
        X = cp.asarray(X)
    x_order = array_to_memory_order(X)

    # Normalize labels to a cupy array (labels and stratify are handled as
    # cupy arrays from here on).
    if isinstance(labels, cudf.Series):
        labels_cudf = True
        labels = labels.values
    elif hasattr(labels, "__cuda_array_interface__"):
        labels = cp.asarray(labels)
    elif isinstance(labels, cudf.DataFrame):
        # Fixed: this branch previously tested `stratify`, so a labels
        # DataFrame was never normalized here.
        # ensuring it has just one column
        if labels.shape[1] != 1:
            raise ValueError(
                "Expected one column for labels, but found df "
                "with shape = %s" % (labels.shape,)
            )
        labels_cudf = True
        labels = labels[0].values

    labels_order = array_to_memory_order(labels)

    # Converting to cupy array removes the need to add an if-else block
    # for the stratify column
    if isinstance(stratify, cudf.Series):
        stratify = stratify.values
    elif hasattr(stratify, "__cuda_array_interface__"):
        stratify = cp.asarray(stratify)
    elif isinstance(stratify, cudf.DataFrame):
        # ensuring it has just one column
        if stratify.shape[1] != 1:
            raise ValueError(
                "Expected one column, but found column "
                "with shape = %s" % (stratify.shape,)
            )
        stratify = stratify[0].values

    classes, stratify_indices = cp.unique(stratify, return_inverse=True)

    n_classes = classes.shape[0]
    class_counts = cp.bincount(stratify_indices)
    if cp.min(class_counts) < 2:
        raise ValueError(
            "The least populated class in y has only 1"
            " member, which is too few. The minimum"
            " number of groups for any class cannot"
            " be less than 2."
        )

    if n_train < n_classes:
        raise ValueError(
            "The train_size = %d should be greater or "
            "equal to the number of classes = %d" % (n_train, n_classes)
        )

    # Per-class row indices, ordered by class id.
    class_indices = cp.split(
        cp.argsort(stratify_indices), cp.cumsum(class_counts)[:-1].tolist()
    )

    X_train = None

    # random_state won't be None or int, that's handled earlier
    if isinstance(random_state, np.random.RandomState):
        random_state = cp.random.RandomState(seed=random_state.get_state()[1])

    # Break ties
    n_i = _approximate_mode(class_counts, n_train, random_state)
    class_counts_remaining = class_counts - n_i
    t_i = _approximate_mode(class_counts_remaining, n_test, random_state)

    for i in range(n_classes):
        permutation = random_state.permutation(class_counts[i].item())
        perm_indices_class_i = class_indices[i].take(permutation)

        y_train_i = cp.array(
            labels[perm_indices_class_i[: n_i[i]]], order=labels_order
        )
        y_test_i = cp.array(
            labels[perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]],
            order=labels_order,
        )
        if hasattr(X, "__cuda_array_interface__") or isinstance(
            X, cupyx.scipy.sparse.csr_matrix
        ):
            X_train_i = cp.array(
                X[perm_indices_class_i[: n_i[i]]], order=x_order
            )
            X_test_i = cp.array(
                X[perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]],
                order=x_order,
            )

            if X_train is None:
                X_train = cp.array(X_train_i, order=x_order)
                y_train = cp.array(y_train_i, order=labels_order)
                X_test = cp.array(X_test_i, order=x_order)
                y_test = cp.array(y_test_i, order=labels_order)
            else:
                X_train = cp.concatenate([X_train, X_train_i], axis=0)
                X_test = cp.concatenate([X_test, X_test_i], axis=0)
                y_train = cp.concatenate([y_train, y_train_i], axis=0)
                y_test = cp.concatenate([y_test, y_test_i], axis=0)
        elif x_cudf:
            X_train_i = X.iloc[perm_indices_class_i[: n_i[i]]]
            X_test_i = X.iloc[perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]]

            if X_train is None:
                X_train = X_train_i
                y_train = y_train_i
                X_test = X_test_i
                y_test = y_test_i
            else:
                X_train = cudf.concat([X_train, X_train_i],
                                      ignore_index=False)
                X_test = cudf.concat([X_test, X_test_i], ignore_index=False)
                y_train = cp.concatenate([y_train, y_train_i], axis=0)
                y_test = cp.concatenate([y_test, y_test_i], axis=0)

    # Convert back to the caller's container types.
    if x_numba:
        X_train = cuda.as_cuda_array(X_train)
        X_test = cuda.as_cuda_array(X_test)
    elif x_cudf:
        X_train = cudf.DataFrame(X_train)
        X_test = cudf.DataFrame(X_test)

    if y_numba:
        y_train = cuda.as_cuda_array(y_train)
        y_test = cuda.as_cuda_array(y_test)
    elif labels_cudf:
        y_train = cudf.Series(y_train)
        y_test = cudf.Series(y_test)

    return X_train, X_test, y_train, y_test
def _approximate_mode(class_counts, n_draws, rng):
    """
    CuPy implementataiton based on scikit-learn approximate_mode method.
    https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/__init__.py#L984

    It is the mostly likely outcome of drawing n_draws many
    samples from the population given by class_counts.

    Parameters
    ----------
    class_counts : ndarray of int
        Population per class.
    n_draws : int
        Number of draws (samples to draw) from the overall population.
    rng : random state
        Used to break ties.

    Returns
    -------
    sampled_classes : cupy array of int
        Number of samples drawn from each class.
        np.sum(sampled_classes) == n_draws
    """
    # Bad-but-cheap approximation to the mode of the multivariate
    # hypergeometric given by class_counts and n_draws: start from the
    # ideal (fractional) allocation per class, then floor it.
    expected = n_draws * class_counts / class_counts.sum()
    drawn = cp.floor(expected)
    # Flooring undershoots n_draws; hand out the leftovers to the classes
    # with the largest fractional remainders, breaking ties randomly.
    remaining = int(n_draws - drawn.sum())
    if remaining > 0:
        fractional = expected - drawn
        for tie_value in cp.sort(cp.unique(fractional))[::-1]:
            (candidates,) = cp.where(fractional == tie_value)
            # If fewer leftovers than candidates at this remainder level,
            # pick randomly among them; otherwise take them all and move
            # on to the next level.
            take = min(len(candidates), remaining)
            chosen = rng.choice(candidates, size=take, replace=False)
            drawn[chosen] += 1
            remaining -= take
            if remaining == 0:
                break
    return drawn.astype(int)
def train_test_split(
    X,
    y=None,
    test_size: Optional[Union[float, int]] = None,
    train_size: Optional[Union[float, int]] = None,
    shuffle: bool = True,
    random_state: Optional[
        Union[int, cp.random.RandomState, np.random.RandomState]
    ] = None,
    stratify=None,
):
    """
    Partitions device data into four collated objects, mimicking
    Scikit-learn's `train_test_split
    <https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html>`_.

    Parameters
    ----------
    X : cudf.DataFrame or cuda_array_interface compliant device array
        Data to split, has shape (n_samples, n_features)
    y : str, cudf.Series or cuda_array_interface compliant device array
        Set of labels for the data, either a series of shape (n_samples) or
        the string label of a column in X (if it is a cuDF DataFrame)
        containing the labels
    test_size : float or int, optional
        If float, represents the proportion [0, 1] of the data to be
        assigned to the test set. If an int, represents the number of
        instances in the test set. If None, it is set to the complement
        of train_size.
    train_size : float or int, optional
        If float, represents the proportion [0, 1] of the data
        to be assigned to the training set. If an int, represents the number
        of instances to be assigned to the training set. If both train_size
        and test_size are None, defaults to 0.75.
    shuffle : bool, optional
        Whether or not to shuffle inputs before splitting
    random_state : int, CuPy RandomState or NumPy RandomState optional
        If shuffle is true, seeds the generator. Unseeded by default
    stratify : cudf.Series or cuda_array_interface compliant device array,
        optional. When passed, the input is split using this as the column
        to stratify on. Default=None

    Returns
    -------
    X_train, X_test, y_train, y_test : cudf.DataFrame or array-like objects
        Partitioned dataframes if X and y were cuDF objects. If `y` was
        provided as a column name, the column was dropped from `X`.
        Partitioned numba device arrays if X and y were Numba device arrays.
        Partitioned CuPy arrays for any other input.
    """
    if isinstance(y, str):
        # Use the named column of X as y and drop it from X.
        if isinstance(X, cudf.DataFrame):
            name = y
            y = X[name]
            X = X.drop(name, axis=1)
        else:
            raise TypeError(
                "X needs to be a cuDF Dataframe when y is a string"
            )

    # todo: this check will be replaced with upcoming improvements
    # to input_utils
    if y is not None:
        if not hasattr(X, "__cuda_array_interface__") and not isinstance(
            X, cudf.DataFrame
        ):
            raise TypeError(
                "X needs to be either a cuDF DataFrame, Series or "
                "a cuda_array_interface compliant array."
            )
        if not hasattr(y, "__cuda_array_interface__") and not isinstance(
            y, cudf.DataFrame
        ):
            raise TypeError(
                "y needs to be either a cuDF DataFrame, Series or "
                "a cuda_array_interface compliant array."
            )
        if X.shape[0] != y.shape[0]:
            raise ValueError(
                "X and y must have the same first dimension "
                "(found {} and {})".format(X.shape[0], y.shape[0])
            )
    else:
        if not hasattr(X, "__cuda_array_interface__") and not isinstance(
            X, cudf.DataFrame
        ):
            raise TypeError(
                "X needs to be either a cuDF DataFrame, Series or "
                "a cuda_array_interface compliant object."
            )

    # Validate sizes: floats are proportions in [0, 1], ints are absolute
    # instance counts bounded by the number of rows.
    if isinstance(train_size, float):
        if not 0 <= train_size <= 1:
            raise ValueError(
                "proportion train_size should be between "
                "0 and 1 (found {})".format(train_size)
            )

    if isinstance(train_size, int):
        if not 0 <= train_size <= X.shape[0]:
            raise ValueError(
                "Number of instances train_size should be between 0 and "
                "the first dimension of X (found {})".format(train_size)
            )

    if isinstance(test_size, float):
        if not 0 <= test_size <= 1:
            # Fixed: this message previously reported train_size.
            raise ValueError(
                "proportion test_size should be between "
                "0 and 1 (found {})".format(test_size)
            )

    if isinstance(test_size, int):
        if not 0 <= test_size <= X.shape[0]:
            raise ValueError(
                "Number of instances test_size should be between 0 and "
                "the first dimension of X (found {})".format(test_size)
            )

    x_numba = cuda.devicearray.is_cuda_ndarray(X)
    y_numba = cuda.devicearray.is_cuda_ndarray(y)

    # Determining sizes of splits
    if isinstance(train_size, float):
        train_size = int(X.shape[0] * train_size)

    if test_size is None:
        if train_size is None:
            train_size = int(X.shape[0] * 0.75)

        test_size = X.shape[0] - train_size

    if isinstance(test_size, float):
        test_size = int(X.shape[0] * test_size)
        if train_size is None:
            train_size = X.shape[0] - test_size

    elif isinstance(test_size, int):
        if train_size is None:
            train_size = X.shape[0] - test_size

    if shuffle:
        # Build an index permutation on the device matching random_state.
        if random_state is None or isinstance(random_state, int):
            idxs = cp.arange(X.shape[0])
            random_state = cp.random.RandomState(seed=random_state)

        elif isinstance(random_state, cp.random.RandomState):
            idxs = cp.arange(X.shape[0])

        elif isinstance(random_state, np.random.RandomState):
            idxs = np.arange(X.shape[0])

        else:
            raise TypeError(
                "`random_state` must be an int, NumPy RandomState "
                "or CuPy RandomState."
            )

        random_state.shuffle(idxs)

        if isinstance(X, cudf.DataFrame) or isinstance(X, cudf.Series):
            X = X.iloc[idxs]

        elif hasattr(X, "__cuda_array_interface__"):
            # numba (and therefore rmm device_array) does not support
            # fancy indexing
            X = cp.asarray(X)[idxs]

        if isinstance(y, cudf.DataFrame) or isinstance(y, cudf.Series):
            y = y.iloc[idxs]

        elif hasattr(y, "__cuda_array_interface__"):
            y = cp.asarray(y)[idxs]

        if stratify is not None:
            if isinstance(stratify, cudf.DataFrame) or isinstance(
                stratify, cudf.Series
            ):
                stratify = stratify.iloc[idxs]

            elif hasattr(stratify, "__cuda_array_interface__"):
                stratify = cp.asarray(stratify)[idxs]

            split_return = _stratify_split(
                X,
                stratify,
                y,
                train_size,
                test_size,
                x_numba,
                y_numba,
                random_state,
            )
            return split_return

    # If not stratified, perform train_test_split splicing
    x_order = array_to_memory_order(X)

    if y is None:
        y_order = None
    else:
        y_order = array_to_memory_order(y)
    if hasattr(X, "__cuda_array_interface__") or isinstance(
        X, cupyx.scipy.sparse.csr_matrix
    ):
        X_train = cp.array(X[0:train_size], order=x_order)
        X_test = cp.array(X[-1 * test_size :], order=x_order)
        if y is not None:
            y_train = cp.array(y[0:train_size], order=y_order)
            y_test = cp.array(y[-1 * test_size :], order=y_order)
    elif isinstance(X, cudf.DataFrame):
        X_train = X.iloc[0:train_size]
        X_test = X.iloc[-1 * test_size :]
        if y is not None:
            if isinstance(y, cudf.Series):
                y_train = y.iloc[0:train_size]
                y_test = y.iloc[-1 * test_size :]
            elif hasattr(y, "__cuda_array_interface__") or isinstance(
                y, cupyx.scipy.sparse.csr_matrix
            ):
                y_train = cp.array(y[0:train_size], order=y_order)
                y_test = cp.array(y[-1 * test_size :], order=y_order)

    if x_numba:
        X_train = cuda.as_cuda_array(X_train)
        X_test = cuda.as_cuda_array(X_test)

    if y_numba:
        y_train = cuda.as_cuda_array(y_train)
        y_test = cuda.as_cuda_array(y_test)

    if y is not None:
        return X_train, X_test, y_train, y_test
    else:
        return X_train, X_test
class StratifiedKFold:
    """
    A cudf based implementation of Stratified K-Folds cross-validator.

    Provides train/test indices to split data into stratified K folds.
    The percentage of samples for each class are maintained in each
    fold.

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.
    shuffle : boolean, default=False
        Whether to shuffle each class's samples before splitting.
    random_state : int (default=None)
        Random seed

    Examples
    --------
    Splitting X,y into stratified K folds

    .. code-block:: python

        import cupy
        X = cupy.random.rand(12,10)
        y = cupy.arange(12)%4
        kf = StratifiedKFold(n_splits=3)
        for tr,te in kf.split(X,y):
            print(tr, te)

    Output:

    .. code-block:: python

        [ 4  5  6  7  8  9 10 11] [0 1 2 3]
        [ 0  1  2  3  8  9 10 11] [4 5 6 7]
        [0 1 2 3 4 5 6 7] [ 8  9 10 11]

    """

    def __init__(self, n_splits=5, shuffle=False, random_state=None):
        # Check the type before comparing: with the original order a
        # non-numeric n_splits raised TypeError from `n_splits < 2` before
        # the isinstance() check could run.
        if not isinstance(n_splits, int) or n_splits < 2:
            raise ValueError(
                f"n_splits {n_splits} is not a integer at least 2"
            )

        if random_state is not None and not isinstance(random_state, int):
            raise ValueError(f"random_state {random_state} is not an integer")

        self.n_splits = n_splits
        self.shuffle = shuffle
        self.seed = random_state

    def get_n_splits(self, X=None, y=None):
        """Return the number of splitting iterations (X and y ignored)."""
        return self.n_splits

    def split(self, x, y):
        """Yield (train_indices, test_indices) for each of the K folds."""
        if len(x) != len(y):
            raise ValueError("Expecting same length of x and y")
        y = input_to_cuml_array(y).array.to_output("cupy")
        if len(cp.unique(y)) < 2:
            raise ValueError("number of unique classes cannot be less than 2")
        df = cudf.DataFrame()
        ids = cp.arange(y.shape[0])
        if self.shuffle:
            cp.random.seed(self.seed)
            cp.random.shuffle(ids)
            y = y[ids]
        df["y"] = y
        df["ids"] = ids
        grpby = df.groupby(["y"])
        # Per-class member counts; every class must hold >= n_splits rows.
        dg = grpby.agg({"y": "count"})
        col = dg.columns[0]
        msg = (
            f"n_splits={self.n_splits} cannot be greater "
            + "than the number of members in each class."
        )
        if self.n_splits > dg[col].min():
            raise ValueError(msg)

        def get_order_in_group(y, ids, order):
            # Device function compiled by apply_grouped: numbers the rows
            # within each class group (0, 1, 2, ...).
            for i in range(cuda.threadIdx.x, len(y), cuda.blockDim.x):
                order[i] = i

        got = grpby.apply_grouped(
            get_order_in_group,
            incols=["y", "ids"],
            outcols={"order": "int32"},
            tpb=64,
        )
        got = got.sort_values("ids")

        # Row k of each class belongs to fold k % n_splits.
        for i in range(self.n_splits):
            mask = got["order"] % self.n_splits == i
            train = got.loc[~mask, "ids"].values
            test = got.loc[mask, "ids"].values
            if len(test) == 0:
                break
            yield train, test

    def _check_array_shape(self, y):
        """Raise ValueError if ``y`` is missing or not one-dimensional."""
        if y is None:
            raise ValueError("Expecting 1D array, got None")
        elif hasattr(y, "shape") and len(y.shape) > 1 and y.shape[1] > 1:
            raise ValueError(f"Expecting 1D array, got {y.shape}")
        else:
            pass
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/model_selection/__init__.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.model_selection._split import train_test_split
from cuml.model_selection._split import StratifiedKFold
from cuml.internals.import_utils import has_sklearn
# Re-export scikit-learn's GridSearchCV (when scikit-learn is installed) so
# it is reachable under the familiar cuml.model_selection namespace.
if has_sklearn():
    from sklearn.model_selection import GridSearchCV

    GridSearchCV.__doc__ = (
        """
    This code is developed and maintained by scikit-learn and imported
    by cuML to maintain the familiar sklearn namespace structure.
    cuML includes tests to ensure full compatibility of these wrappers
    with CUDA-based data and cuML estimators, but all of the underlying code
    is due to the scikit-learn developers.\n\n"""
        + GridSearchCV.__doc__
    )

# NOTE(review): "GridSearchCV" is listed unconditionally even though it is
# only imported when scikit-learn is available -- a star-import would raise
# AttributeError without scikit-learn installed; confirm this is intended.
__all__ = ["train_test_split", "GridSearchCV", "StratifiedKFold"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/prims/array.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.kernel_utils import cuda_kernel_factory
import math
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def _binarize_kernel(x_dtype):
    """Compile the element-wise thresholding CUDA kernel for ``x_dtype``.

    The kernel maps each element to 1 if it exceeds the threshold, else 0,
    writing the result back in place.
    """
    kernel_source = r"""({0} *x, float threshold, int x_n) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if(tid >= x_n) return;
    {0} val = x[tid];
    if(val > threshold)
        val = 1;
    else
        val = 0;
    x[tid] = val;
    }"""
    return cuda_kernel_factory(
        kernel_source, (x_dtype,), "binarize_kernel"
    )
def binarize(x, threshold, copy=False):
    """
    Binarizes an array by converting values
    greater than a threshold to 1s and less
    than a threshold to 0s.

    Parameters
    ----------
    x : array-like
        Array to binarize
    threshold : float
        The cut-off point for values to be converted to 1s.
    copy : bool
        Should the operation be done in place or a copy made

    Returns
    -------
    The binarized array (the input itself when ``copy`` is False).
    """
    arr = cp.asarray(x, dtype=x.dtype)
    if copy:
        arr = arr.copy()

    tpb = 512
    binarizer = _binarize_kernel(arr.dtype)
    # Launch on `arr` (not `x`): when copy=True the copy must receive the
    # result instead of mutating the caller's input. The original code
    # passed `x`, which clobbered the input and returned the untouched copy.
    binarizer((math.ceil(arr.size / tpb),), (tpb,), (arr, threshold, arr.size))

    return arr
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/prims/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml/prims | rapidsai_public_repos/cuml/python/cuml/prims/stats/covariance.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.kernel_utils import cuda_kernel_factory
import cuml.internals
import math
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
cov_kernel_str = r"""
({0} *cov_values, {0} *gram_matrix, {0} *mean_x, {0} *mean_y, int n_cols) {
int rid = blockDim.x * blockIdx.x + threadIdx.x;
int cid = blockDim.y * blockIdx.y + threadIdx.y;
if(rid >= n_cols || cid >= n_cols) return;
cov_values[rid * n_cols + cid] = \
gram_matrix[rid * n_cols + cid] - mean_x[rid] * mean_y[cid];
}
"""
gramm_kernel_csr = r"""
(const int *indptr, const int *index, {0} *data, int nrows, int ncols, {0} *out) {
int row = blockIdx.x;
int col = threadIdx.x;
if(row >= nrows) return;
int start = indptr[row];
int end = indptr[row + 1];
for (int idx1 = start; idx1 < end; idx1++){
int index1 = index[idx1];
{0} data1 = data[idx1];
for(int idx2 = idx1 + col; idx2 < end; idx2 += blockDim.x){
int index2 = index[idx2];
{0} data2 = data[idx2];
atomicAdd(&out[index1 * ncols + index2], data1 * data2);
}
}
}
"""
gramm_kernel_coo = r"""
(const int *rows, const int *cols, {0} *data, int nnz, int ncols, int nrows, {0} * out) {
int i = blockIdx.x;
if (i >= nnz) return;
int row1 = rows[i];
int col1 = cols[i];
{0} data1 = data[i];
int limit = min(i + nrows, nnz);
for(int j = i + threadIdx.x; j < limit; j += blockDim.x){
if(row1 < rows[j]) return;
if(col1 <= cols[j]){
atomicAdd(&out[col1 * ncols + cols[j]], data1 * data[j]);
}
}
}
"""
copy_kernel = r"""
({0} *out, int ncols) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= ncols || col >= ncols) return;
if (row > col) {
out[row * ncols + col] = out[col * ncols + row];
}
}
"""
def _cov_kernel(dtype):
    """Compile the elementwise covariance kernel for ``dtype``."""
    return cuda_kernel_factory(cov_kernel_str, (dtype,), "cov_kernel")
def _gramm_kernel_csr(dtype):
    """Compile the CSR gram-matrix accumulation kernel for ``dtype``."""
    return cuda_kernel_factory(gramm_kernel_csr, (dtype,), "gramm_kernel_csr")
def _gramm_kernel_coo(dtype):
    """Compile the COO gram-matrix accumulation kernel for ``dtype``."""
    return cuda_kernel_factory(gramm_kernel_coo, (dtype,), "gramm_kernel_coo")
def _copy_kernel(dtype):
    """Compile the upper-to-lower-triangle mirror kernel for ``dtype``."""
    return cuda_kernel_factory(copy_kernel, (dtype,), "copy_kernel")
@cuml.internals.api_return_any()
def cov(x, y, mean_x=None, mean_y=None, return_gram=False, return_mean=False):
    """
    Computes a covariance between two matrices using
    the form Cov(X, Y) = E(XY) - E(X)E(Y)

    This function prevents the need to explicitly
    compute the outer product E(X)E(Y) by taking
    advantage of the symmetry of that matrix
    and computes per element in a kernel.

    When E(XY) is approximately equal to E(X)E(Y),
    this method is prone to catastrophic cancellation.
    In such cases, a rectangular solver should be
    preferred.

    Parameters
    ----------
    x : device-array or cupyx.scipy.sparse of size (m, n)
    y : device-array or cupyx.scipy.sparse of size (m, n)
    mean_x : device-array of size (n,) (default = None)
        the mean of x across rows
    mean_y : device-array of size (n,) (default = None)
        the mean of y across rows
    return_gram : boolean (default = False)
        If True, gram matrix of the form (1 / n) * X.T.dot(Y)
        will be returned.
        When True, a copy will be created
        to store the results of the covariance.
        When False, the local gram matrix result
        will be overwritten
    return_mean: boolean (default = False)
        If True, the Maximum Likelihood Estimate used to
        calculate the mean of X and Y will be returned,
        of the form (1 / n) * mean(X) and (1 / n) * mean(Y)

    Returns
    -------
    result : cov(X, Y) when return_gram and return_mean are False
        cov(X, Y), gram(X, Y) when return_gram is True,
        return_mean is False
        cov(X, Y), mean(X), mean(Y) when return_gram is False,
        return_mean is True
        cov(X, Y), gram(X, Y), mean(X), mean(Y)
        when return_gram is True and return_mean is True
    """
    if x.dtype != y.dtype:
        raise ValueError(
            "X and Y must have same dtype (%s != %s)" % (x.dtype, y.dtype)
        )
    if x.shape != y.shape:
        raise ValueError(
            "X and Y must have same shape %s != %s" % (x.shape, y.shape)
        )
    # Fix for cuml issue #5475 & cupy issue #7699
    # addressing problems with sparse matrix multiplication (spGEMM)
    if (
        x is y
        and cupyx.scipy.sparse.issparse(x)
        and mean_x is None
        and mean_y is None
    ):
        return _cov_sparse(x, return_gram=return_gram, return_mean=return_mean)
    if mean_x is not None and mean_y is not None:
        if mean_x.dtype != mean_y.dtype:
            raise ValueError(
                "Mean of X and Mean of Y must have same dtype"
                "(%s != %s)" % (mean_x.dtype, mean_y.dtype)
            )
        if mean_x.shape != mean_y.shape:
            raise ValueError(
                "Mean of X and Mean of Y must have same shape"
                "%s != %s" % (mean_x.shape, mean_y.shape)
            )
    # E(XY): gram matrix scaled by 1/m; densified if the product is sparse.
    gram_matrix = x.T.dot(y) * (1 / x.shape[0])
    if cupyx.scipy.sparse.issparse(gram_matrix):
        gram_matrix = gram_matrix.todense()
    if mean_x is None:
        mean_x = x.sum(axis=0) * (1 / x.shape[0])
    if mean_y is None:
        mean_y = y.sum(axis=0) * (1 / y.shape[0])
    if return_gram:
        cov_result = cp.zeros(
            (gram_matrix.shape[0], gram_matrix.shape[1]),
            dtype=gram_matrix.dtype,
        )
    else:
        # Overwrite the gram matrix in place to avoid a second allocation.
        cov_result = gram_matrix
    # Subtract the outer product of the means element-wise on device.
    # gram is (n, n) square (x, y are both (m, n)), so passing shape[0]
    # as n_cols is correct.
    compute_cov = _cov_kernel(x.dtype)
    block_size = (32, 32)
    grid_size = (
        math.ceil(gram_matrix.shape[0] / 32),
        math.ceil(gram_matrix.shape[1] / 32),
    )
    compute_cov(
        grid_size,
        block_size,
        (cov_result, gram_matrix, mean_x, mean_y, gram_matrix.shape[0]),
    )
    if not return_gram and not return_mean:
        return cov_result
    elif return_gram and not return_mean:
        return cov_result, gram_matrix
    elif not return_gram and return_mean:
        return cov_result, mean_x, mean_y
    elif return_gram and return_mean:
        return cov_result, gram_matrix, mean_x, mean_y
@cuml.internals.api_return_any()
def _cov_sparse(x, return_gram=False, return_mean=False):
    """
    Computes the mean and the covariance of matrix X of
    the form Cov(X, X) = E(XX) - E(X)E(X)

    This is a temporary fix for
    cuml issue #5475 and cupy issue #7699,
    where the operation `x.T.dot(x)` did not work for
    larger sparse matrices.

    Parameters
    ----------
    x : cupyx.scipy.sparse of size (m, n)
    return_gram : boolean (default = False)
        If True, gram matrix of the form (1 / n) * X.T.dot(X)
        will be returned.
        When True, a copy will be created
        to store the results of the covariance.
        When False, the local gram matrix result
        will be overwritten
    return_mean: boolean (default = False)
        If True, the Maximum Likelihood Estimate used to
        calculate the mean of X and X will be returned,
        of the form (1 / n) * mean(X) and (1 / n) * mean(X)

    Returns
    -------
    result : cov(X, X) when return_gram and return_mean are False
        cov(X, X), gram(X, X) when return_gram is True,
        return_mean is False
        cov(X, X), mean(X), mean(X) when return_gram is False,
        return_mean is True
        cov(X, X), gram(X, X), mean(X), mean(X)
        when return_gram is True and return_mean is True
    """
    # Accumulate the upper triangle of X.T.dot(X) with a format-specific
    # kernel, then mirror it into the lower triangle with the copy kernel.
    gram_matrix = cp.zeros((x.shape[1], x.shape[1]), dtype=x.data.dtype)

    if cupyx.scipy.sparse.isspmatrix_csr(x):
        # The CSR kernel maps blockIdx.x directly to a row, so the grid
        # must have one block per row.
        block = (128,)
        grid = (x.shape[0],)
        compute_mean_cov = _gramm_kernel_csr(x.data.dtype)
        compute_mean_cov(
            grid,
            block,
            (
                x.indptr,
                x.indices,
                x.data,
                x.shape[0],
                x.shape[1],
                gram_matrix,
            ),
        )
    elif cupyx.scipy.sparse.isspmatrix_coo(x):
        # The COO kernel requires canonical format (sorted, no duplicates).
        x.sum_duplicates()
        nnz = len(x.row)
        block = (128,)
        grid = (nnz,)
        compute_gram_coo = _gramm_kernel_coo(x.data.dtype)
        compute_gram_coo(
            grid,
            block,
            (x.row, x.col, x.data, nnz, x.shape[1], x.shape[0], gram_matrix),
        )
    else:
        # Any other sparse format: convert to CSR and reuse the CSR kernel.
        x = x.tocsr()
        block = (128,)
        # BUGFIX: the CSR kernel uses blockIdx.x as the row index, so the
        # grid needs one block per row. The previous
        # ``math.ceil(x.shape[0] / block[0])`` launched far too few blocks
        # and left all rows past the first ceil(nrows/128) unprocessed.
        grid = (x.shape[0],)
        compute_mean_cov = _gramm_kernel_csr(x.data.dtype)
        compute_mean_cov(
            grid,
            block,
            (
                x.indptr,
                x.indices,
                x.data,
                x.shape[0],
                x.shape[1],
                gram_matrix,
            ),
        )

    # Mirror the accumulated upper triangle into the lower triangle.
    copy_gram = _copy_kernel(x.data.dtype)
    block = (32, 32)
    grid = (math.ceil(x.shape[1] / block[0]), math.ceil(x.shape[1] / block[1]))
    copy_gram(
        grid,
        block,
        (gram_matrix, x.shape[1]),
    )

    mean_x = x.sum(axis=0) * (1 / x.shape[0])
    gram_matrix *= 1 / x.shape[0]

    if return_gram:
        cov_result = cp.zeros(
            (gram_matrix.shape[0], gram_matrix.shape[0]),
            dtype=gram_matrix.dtype,
        )
    else:
        # Overwrite the gram matrix in place to avoid a second allocation.
        cov_result = gram_matrix

    compute_cov = _cov_kernel(x.dtype)

    block_size = (32, 32)
    # One thread per output element. Size the grid from the block size;
    # the previous ``/ 8`` launched 16x more (bounds-checked) blocks
    # than needed.
    grid_size = (math.ceil(gram_matrix.shape[0] / block_size[0]),) * 2
    compute_cov(
        grid_size,
        block_size,
        (cov_result, gram_matrix, mean_x, mean_x, gram_matrix.shape[0]),
    )

    if not return_gram and not return_mean:
        return cov_result
    elif return_gram and not return_mean:
        return cov_result, gram_matrix
    elif not return_gram and return_mean:
        return cov_result, mean_x, mean_x
    elif return_gram and return_mean:
        return cov_result, gram_matrix, mean_x, mean_x
| 0 |
rapidsai_public_repos/cuml/python/cuml/prims | rapidsai_public_repos/cuml/python/cuml/prims/stats/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.stats.covariance import cov
| 0 |
rapidsai_public_repos/cuml/python/cuml/prims | rapidsai_public_repos/cuml/python/cuml/prims/label/classlabels.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.kernel_utils import cuda_kernel_factory
from cuml.internals.array import CumlArray
import cuml.internals
from cuml.internals.input_utils import input_to_cupy_array
import math
import typing
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
map_kernel_str = r"""
({0} *x, int x_n, {0} *labels, int n_labels) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ {0} label_cache[];
for(int i = threadIdx.x; i < n_labels; i+=blockDim.x)
label_cache[i] = labels[i];
if(tid >= x_n) return;
__syncthreads();
{0} unmapped_label = x[tid];
for(int i = 0; i < n_labels; i++) {
if(label_cache[i] == unmapped_label) {
x[tid] = i;
return;
}
}
x[tid] = n_labels+1;
}
"""
validate_kernel_str = r"""
({0} *x, int x_n, {0} *labels, int n_labels, int *out) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ {0} label_cache[];
for(int i = threadIdx.x; i < n_labels; i+=blockDim.x)
label_cache[i] = labels[i];
if(tid >= x_n) return;
__syncthreads();
int unmapped_label = x[tid];
bool found = false;
for(int i = 0; i < n_labels; i++) {
if(label_cache[i] == unmapped_label) {
found = true;
break;
}
}
if(!found) out[0] = 0;
}
"""
inverse_map_kernel_str = r"""
({0} *labels, int n_labels, {0} *x, int x_n) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ {0} label_cache[];
for(int i = threadIdx.x; i < n_labels; i+=blockDim.x) {
label_cache[i] = labels[i];
}
if(tid >= x_n) return;
__syncthreads();
{0} mapped_label = x[tid];
{0} original_label = label_cache[mapped_label];
x[tid] = original_label;
}
"""
def _map_kernel(dtype):
    """Compile the label -> monotonic-index mapping kernel for ``dtype``."""
    return cuda_kernel_factory(map_kernel_str, (dtype,), "map_labels_kernel")
def _inverse_map_kernel(dtype):
    """Compile the monotonic-index -> original-label kernel for ``dtype``."""
    return cuda_kernel_factory(
        inverse_map_kernel_str, (dtype,), "inv_map_labels_kernel"
    )
def _validate_kernel(dtype):
    """Compile the label-validation kernel for ``dtype``."""
    return cuda_kernel_factory(
        validate_kernel_str, (dtype,), "validate_labels_kernel"
    )
@cuml.internals.api_return_generic(input_arg="labels", get_output_type=True)
def make_monotonic(
    labels, classes=None, copy=False
) -> typing.Tuple[CumlArray, CumlArray]:
    """
    Takes a set of labels that might not be drawn from the
    set [0, n-1] and renumbers them to be drawn that
    interval.

    Replaces labels not present in classes by len(classes)+1.

    Parameters
    ----------
    labels : array-like of size (n,) labels to convert
    classes : array-like of size (n_classes,) the unique
              set of classes in the set of labels
    copy : boolean if true, a copy will be returned and the
           operation will not be done in place.

    Returns
    -------
    mapped_labels : array-like of size (n,)
    classes : array-like of size (n_classes,)
    """
    labels = input_to_cupy_array(labels, deepcopy=copy).array
    if labels.ndim != 1:
        raise ValueError("Labels array must be 1D")
    if classes is None:
        classes = cp.unique(labels)
    else:
        classes = input_to_cupy_array(classes).array
    # Shared memory holds one copy of the class array per block.
    smem = labels.dtype.itemsize * int(classes.shape[0])
    map_labels = _map_kernel(labels.dtype)
    # In-place mapping: labels[i] becomes the index of labels[i] in classes
    # (or n_classes+1 when not found -- see map_kernel_str).
    map_labels(
        (math.ceil(labels.shape[0] / 32),),
        (32,),
        (labels, labels.shape[0], classes, classes.shape[0]),
        shared_mem=smem,
    )
    return labels, classes
@cuml.internals.api_return_any()
def check_labels(labels, classes) -> bool:
    """
    Validates that a set of labels is drawn from the unique
    set of given classes.

    Parameters
    ----------
    labels : array-like of size (n,) labels to validate
    classes : array-like of size (n_classes,) the unique
              set of classes to verify

    Returns
    -------
    result : boolean
    """
    labels = input_to_cupy_array(labels, order="K").array
    classes = input_to_cupy_array(classes, order="K").array
    if labels.dtype != classes.dtype:
        raise ValueError(
            "Labels and classes must have same dtype (%s != %s)"
            % (labels.dtype, classes.dtype)
        )
    if labels.ndim != 1:
        raise ValueError("Labels array must be 1D")
    # The validation kernel writes through an `int *`, so the flag must be
    # a 32-bit integer; the previous default (int64) only produced correct
    # results by accident on little-endian devices.
    valid = cp.array([1], dtype=cp.int32)
    # Shared memory caches the class array once per block.
    smem = labels.dtype.itemsize * int(classes.shape[0])
    validate = _validate_kernel(labels.dtype)
    validate(
        (math.ceil(labels.shape[0] / 32),),
        (32,),
        (labels, labels.shape[0], classes, classes.shape[0], valid),
        shared_mem=smem,
    )
    return valid[0] == 1
@cuml.internals.api_return_array(input_arg="labels", get_output_type=True)
def invert_labels(labels, classes, copy=False) -> CumlArray:
    """
    Takes a set of labels that have been mapped to be drawn
    from a monotonically increasing set and inverts them to
    back to the original set of classes.

    Parameters
    ----------
    labels : array-like of size (n,) labels to invert
    classes : array-like of size (n_classes,) the unique set
              of classes for inversion. It is assumed that
              the classes are ordered by their corresponding
              monotonically increasing label.
    copy : boolean if true, a copy will be returned and the
           operation will not be done in place.

    Returns
    -------
    inverted labels : array-like of size (n,)
    """
    mapped = input_to_cupy_array(labels, deepcopy=copy).array
    class_arr = input_to_cupy_array(classes).array

    if mapped.dtype != class_arr.dtype:
        raise ValueError(
            "Labels and classes must have same dtype (%s != %s"
            % (mapped.dtype, class_arr.dtype)
        )

    n_labels = len(mapped)
    n_classes = len(class_arr)

    # Each block caches the full class array in shared memory.
    shared_bytes = mapped.dtype.itemsize * n_classes
    kernel = _inverse_map_kernel(mapped.dtype)
    grid = (math.ceil(n_labels / 32),)
    # In-place: mapped[i] becomes class_arr[mapped[i]].
    kernel(
        grid,
        (32,),
        (class_arr, n_classes, mapped, n_labels),
        shared_mem=shared_bytes,
    )
    return mapped
| 0 |
rapidsai_public_repos/cuml/python/cuml/prims | rapidsai_public_repos/cuml/python/cuml/prims/label/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.label.classlabels import make_monotonic # NOQA
from cuml.prims.label.classlabels import check_labels # NOQA
from cuml.prims.label.classlabels import invert_labels # NOQA
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/fil/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# NOTE(review): cython_sources starts empty and appears to be populated by
# add_module_gpu_default when the listed algorithms are enabled -- confirm
# against the helper's definition.
set(cython_sources "")
add_module_gpu_default("fil.pyx" ${fil_algo} ${randomforestclassifier_algo} ${randomforestregressor_algo})
# Link the generated extension against the single-GPU cuML libraries and
# the Treelite target used by the Python package.
set(linked_libraries
  "${cuml_sg_libraries}"
  "${CUML_PYTHON_TREELITE_TARGET}"
)
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}"
  MODULE_PREFIX fil_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/fil/README.md | # FIL - RAPIDS Forest Inference Library
The Forest Inference Library provides a lightweight, flexible API to
infer (predict) results from a tree-based model ensemble on GPU. The
tree ensemble can be either gradient-boosted decision tree (GBDT) or
random forest (RF) models trained in XGBoost, cuML, scikit-learn, or
LightGBM.
# Code sample
Starting with an XGBoost classification model saved in the file
"xgb.mod," we want to use that model to infer on a large dataset of
test samples.
```python
from cuml import ForestInference
fm = ForestInference.load(filename=model_path,
output_class=True,
threshold=0.50,
model_type='xgboost')
X = ... load test samples as a numpy or cupy array ...
y_out = fm.predict(X)
```
See [the sample notebook](https://github.com/rapidsai/cuml/blob/main/notebooks/forest_inference_demo.ipynb) for much more detail and runnable samples.
Additionally, FIL can be called directly from C or C++ code. See [the API docs here](https://docs.rapids.ai/api/libcuml/nightly/namespaceML_1_1fil.html)
# Features
* Input model source: XGBoost (binary format), cuML RandomForest, scikit-learn RandomForest and similar classes, LightGBM
* Model types: Regression, Binary Classification, Multi-class Classification
* Tree storage types: Dense or sparse tree storage (see Sparse Forests with FIL blog below)
* Input formats: Dense, row-major, FP32 arrays on GPU or CPU (e.g. NumPy, cuPy, or other data formats supported by cuML). Trees are expected to be trained for float32 inputs. There may be rounding differences if trees were trained for float64 inputs.
* High performance batch inference
* Input parsing based on [Treelite](https://github.com/dmlc/treelite)
Upcoming features:
* Support for multi-class random forests from scikit-learn
* Support for 8-byte sparse nodes to reduce memory usage for small trees is experimental
* Categorical features for LightGBM models
# Benchmarks and performance notes
(1) The core data format supported by FIL is an FP32, [row-major](https://en.wikipedia.org/wiki/Row-_and_column-major_order) array on
GPU. All other input types will be automatically converted to this
format internally, but you will get the lowest latency if you use that
format to start with.
(2) FIL is optimized for high-throughput, batch inference, so its
performance benefits become more pronounced as the size of the test
data X grows. Larger, more complex models (e.g. those with more trees)
will also see a greater boost as they can fully occupy a large GPU.
The chart below shows how performance (measured in microseconds per
row) varies as the number of input rows increases, comparing both
CPU-based inference (XGBoost CPU inference, and the optimized treelite
library) and GPU-based inference (XGBoost and FIL).

(_Benchmarks were run on a DGX1-Volta system with 2x 20-core
Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz CPUs and a single V100-32gb
GPU, using FIL 0.9._)
# Blogs and further references
* [RAPIDS Forest Inference Library: Prediction at 100 million rows per second](https://medium.com/rapids-ai/rapids-forest-inference-library-prediction-at-100-million-rows-per-second-19558890bc35)
* [Sparse Forests with FIL](https://medium.com/rapids-ai/sparse-forests-with-fil-ffbb42b0c7e3)
* [GBM Inferencing on GPU, 2018 talk (earlier research work)](https://on-demand.gputechconf.com/gtc/2018/presentation/s8873-gbm-inferencing-on-gpu-v2.pdf)
* [Sample Notebook](https://github.com/rapidsai/cuml/blob/branch-0.16/notebooks/forest_inference_demo.ipynb)
* [GTC 2021 talk](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31296/)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/fil/fil.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
pd = cpu_only_import('pandas')
from inspect import getdoc
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from libc.stdlib cimport free
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from cuml.internals import logger
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.common.doc_utils import _parameters_docstrings
from rmm._lib.memory_resource cimport DeviceMemoryResource
from rmm._lib.memory_resource cimport get_current_device_resource
import treelite.sklearn as tl_skl
cimport cuml.common.cuda
cdef extern from "treelite/c_api.h":
ctypedef void* ModelHandle
cdef int TreeliteLoadXGBoostModelEx(const char* filename,
const char* config_json,
ModelHandle* out) except +
cdef int TreeliteLoadXGBoostJSONEx(const char* filename,
const char* config_json,
ModelHandle* out) except +
cdef int TreeliteFreeModel(ModelHandle handle) except +
cdef int TreeliteQueryNumTree(ModelHandle handle, size_t* out) except +
cdef int TreeliteQueryNumFeature(ModelHandle handle, size_t* out) except +
cdef int TreeliteQueryNumClass(ModelHandle handle, size_t* out) except +
cdef int TreeliteLoadLightGBMModelEx(const char* filename,
const char* config_json,
ModelHandle* out) except +
cdef int TreeliteSerializeModel(const char* filename,
ModelHandle handle) except +
cdef int TreeliteDeserializeModel(const char* filename,
ModelHandle handle) except +
cdef const char* TreeliteGetLastError()
cdef class TreeliteModel():
    """
    Wrapper for Treelite-loaded forest

    .. note:: This is only used for loading saved models into ForestInference,
        it does not actually perform inference. Users typically do
        not need to access TreeliteModel instances directly.

    Attributes
    ----------
    handle : ModelHandle
        Opaque pointer to Treelite model
    """
    # Raw C pointer to the Treelite model.
    cdef ModelHandle handle
    # Whether __dealloc__ should free the model.
    cdef bool owns_handle

    def __cinit__(self, owns_handle=True):
        """If owns_handle is True, free the handle's model in destructor.
        Set this to False if another owner will free the model."""
        self.handle = <ModelHandle>NULL
        self.owns_handle = owns_handle

    cdef set_handle(self, ModelHandle new_handle):
        # C-only setter; does not change ownership.
        self.handle = new_handle

    cdef ModelHandle get_handle(self):
        # C-only accessor for the raw handle.
        return self.handle

    @property
    def handle(self):
        # Python-level view of the raw pointer as an integer.
        return <uintptr_t>(self.handle)

    def __dealloc__(self):
        # Free the underlying model only if this wrapper owns it.
        if self.handle != NULL and self.owns_handle:
            TreeliteFreeModel(self.handle)

    @property
    def num_trees(self):
        # Number of trees in the ensemble, queried from Treelite.
        assert self.handle != NULL
        cdef size_t out
        TreeliteQueryNumTree(self.handle, &out)
        return out

    @property
    def num_features(self):
        # Number of input features the model expects.
        assert self.handle != NULL
        cdef size_t out
        TreeliteQueryNumFeature(self.handle, &out)
        return out

    @classmethod
    def free_treelite_model(cls, model_handle):
        """Free a raw Treelite model handle (an integer pointer value)."""
        cdef uintptr_t model_ptr = <uintptr_t>model_handle
        TreeliteFreeModel(<ModelHandle> model_ptr)

    @classmethod
    def from_filename(cls, filename, model_type="xgboost"):
        """
        Returns a TreeliteModel object loaded from `filename`

        Parameters
        ----------
        filename : string
            Path to treelite model file to load
        model_type : string
            Type of model: 'xgboost', 'xgboost_json', or 'lightgbm'
        """
        filename_bytes = filename.encode("UTF-8")
        # Empty JSON config: use Treelite's default parser settings.
        config_bytes = "{}".encode("UTF-8")
        cdef ModelHandle handle
        if model_type == "xgboost":
            res = TreeliteLoadXGBoostModelEx(filename_bytes, config_bytes, &handle)
            if res < 0:
                err = TreeliteGetLastError()
                raise RuntimeError("Failed to load %s (%s)" % (filename, err))
        elif model_type == "xgboost_json":
            res = TreeliteLoadXGBoostJSONEx(filename_bytes, config_bytes, &handle)
            if res < 0:
                err = TreeliteGetLastError()
                raise RuntimeError("Failed to load %s (%s)" % (filename, err))
        elif model_type == "lightgbm":
            logger.warn("Treelite currently does not support float64 model"
                        " parameters. Accuracy may degrade slightly relative"
                        " to native LightGBM invocation.")
            res = TreeliteLoadLightGBMModelEx(filename_bytes, config_bytes, &handle)
            if res < 0:
                err = TreeliteGetLastError()
                raise RuntimeError("Failed to load %s (%s)" % (filename, err))
        else:
            raise ValueError("Unknown model type %s" % model_type)
        model = TreeliteModel()
        model.set_handle(handle)
        return model

    def to_treelite_checkpoint(self, filename):
        """
        Serialize to a Treelite binary checkpoint

        Parameters
        ----------
        filename : string
            Path to Treelite binary checkpoint
        """
        assert self.handle != NULL
        filename_bytes = filename.encode("UTF-8")
        TreeliteSerializeModel(filename_bytes, self.handle)

    @classmethod
    def from_treelite_model_handle(cls,
                                   treelite_handle,
                                   take_handle_ownership=False):
        """Wrap an existing raw handle; ownership transfer is optional."""
        cdef ModelHandle handle = <ModelHandle> <size_t> treelite_handle
        model = TreeliteModel(owns_handle=take_handle_ownership)
        model.set_handle(handle)
        return model
cdef extern from "variant" namespace "std":
cdef cppclass variant[T1, T2]:
variant()
variant(T1)
size_t index()
cdef T& get[T, T1, T2](variant[T1, T2]& v)
cdef extern from "cuml/fil/fil.h" namespace "ML::fil":
cdef enum algo_t:
ALGO_AUTO,
NAIVE,
TREE_REORG,
BATCH_TREE_REORG
cdef enum storage_type_t:
AUTO,
DENSE,
SPARSE,
SPARSE8
cdef enum precision_t:
PRECISION_NATIVE,
PRECISION_FLOAT32,
PRECISION_FLOAT64
cdef cppclass forest[real_t]:
pass
ctypedef forest[float]* forest32_t
ctypedef forest[double]* forest64_t
ctypedef variant[forest32_t, forest64_t] forest_variant
# TODO(canonizer): use something like
# ctypedef forest[real_t]* forest_t[real_t]
# once it is supported in Cython
cdef struct treelite_params_t:
algo_t algo
bool output_class
float threshold
# changing the parameters below may speed up inference
# tree storage format, tradeoffs in big O(), node size
# not all formats fit all models
storage_type_t storage_type
# limit number of CUDA blocks launched per GPU SM (or unlimited if 0)
int blocks_per_sm
# multiple (neighboring) threads infer on the same tree within a block
# this improves memory bandwidth near tree root (but uses more shared
# memory)
int threads_per_tree
# n_items is how many input samples (items) any thread processes.
# if 0 is given, FIL chooses itself
int n_items
# this affects inference performance and will become configurable soon
char** pforest_shape_str
precision_t precision
cdef void free[real_t](handle_t& handle,
forest[real_t]*)
cdef void predict[real_t](handle_t& handle,
forest[real_t]*,
real_t*,
real_t*,
size_t,
bool) except +
cdef void from_treelite(handle_t& handle,
forest_variant*,
ModelHandle,
treelite_params_t*) except +
cdef class ForestInference_impl():
cdef object handle
cdef forest_variant forest_data
cdef size_t num_class
cdef bool output_class
cdef char* shape_str
cdef DeviceMemoryResource mr
cdef forest32_t get_forest32(self):
return get[forest32_t, forest32_t, forest64_t](self.forest_data)
cdef forest64_t get_forest64(self):
return get[forest64_t, forest32_t, forest64_t](self.forest_data)
def __cinit__(self,
handle=None):
self.handle = handle
self.forest_data = forest_variant(<forest32_t> NULL)
self.shape_str = NULL
self.mr = get_current_device_resource()
def get_shape_str(self):
if self.shape_str:
return unicode(self.shape_str, 'utf-8')
return None
def get_dtype(self):
dtype_array = [np.float32, np.float64]
return dtype_array[self.forest_data.index()]
def get_algo(self, algo_str):
algo_dict={'AUTO': algo_t.ALGO_AUTO,
'auto': algo_t.ALGO_AUTO,
'NAIVE': algo_t.NAIVE,
'naive': algo_t.NAIVE,
'BATCH_TREE_REORG': algo_t.BATCH_TREE_REORG,
'batch_tree_reorg': algo_t.BATCH_TREE_REORG,
'TREE_REORG': algo_t.TREE_REORG,
'tree_reorg': algo_t.TREE_REORG}
if algo_str not in algo_dict.keys():
raise Exception(' Wrong algorithm selected please refer'
' to the documentation')
return algo_dict[algo_str]
def get_storage_type(self, storage_type):
storage_type_str = str(storage_type)
storage_type_dict={'auto': storage_type_t.AUTO,
'False': storage_type_t.DENSE,
'dense': storage_type_t.DENSE,
'True': storage_type_t.SPARSE,
'sparse': storage_type_t.SPARSE,
'sparse8': storage_type_t.SPARSE8}
if storage_type_str not in storage_type_dict.keys():
raise ValueError(
"The value entered for storage_type is not "
"supported. Please refer to the documentation at"
"(https://docs.rapids.ai/api/cuml/nightly/api.html#"
"forest-inferencing) to see the accepted values.")
if storage_type_str == 'sparse8':
logger.info('storage_type=="sparse8" is an experimental feature')
return storage_type_dict[storage_type_str]
def get_precision(self, precision):
precision_dict = {'native': precision_t.PRECISION_NATIVE,
'float32': precision_t.PRECISION_FLOAT32,
'float64': precision_t.PRECISION_FLOAT64}
if precision not in precision_dict:
raise ValueError(
"The value entered for precision is not "
"supported. Please refer to the documentation at"
"(https://docs.rapids.ai/api/cuml/nightly/api.html#"
"forest-inferencing) to see the accepted values.")
return precision_dict[precision]
def predict(self, X,
output_dtype=None,
predict_proba=False,
preds=None,
safe_dtype_conversion=False):
"""
Returns the results of forest inference on the examples in X
Parameters
----------
X : float32 array-like (device or host) shape = (n_samples, n_features)
For optimal performance, pass a device array with C-style layout.
For categorical features: category < 0.0 or category > 16'777'214
is equivalent to out-of-dictionary category (not matching).
-0.0 represents category 0.
If float(int(category)) != category, we will discard the
fractional part. E.g. 3.8 represents category 3 regardless of
max_matching value. FIL will reject a model where an integer
within [0, max_matching + 1] cannot be represented precisely
as a float32.
NANs work the same between numerical and categorical inputs:
they are missing values and follow Treelite's DefaultLeft.
preds : float32 device array, shape = n_samples
predict_proba : bool, whether to output class probabilities(vs classes)
Supported only for binary classification. output format
matches sklearn
Returns
-------
Predicted results of type as defined by the output_type variable
"""
# Set the output_dtype. None is fine here
cuml.internals.set_api_output_dtype(output_dtype)
if (not self.output_class) and predict_proba:
raise NotImplementedError("Predict_proba function is not available"
" for Regression models. If you are "
" using a Classification model, please "
" set `output_class=True` while creating"
" the FIL model.")
fil_dtype = self.get_dtype()
cdef uintptr_t X_ptr
X_m, n_rows, _n_cols, _dtype = \
input_to_cuml_array(X, order='C',
convert_to_dtype=fil_dtype,
safe_dtype_conversion=safe_dtype_conversion,
check_dtype=fil_dtype)
X_ptr = X_m.ptr
cdef handle_t* handle_ =\
<handle_t*><size_t>self.handle.getHandle()
if preds is None:
shape = (n_rows, )
if predict_proba:
if self.num_class <= 2:
shape += (2,)
else:
shape += (self.num_class,)
preds = CumlArray.empty(shape=shape, dtype=fil_dtype, order='C',
index=X_m.index)
else:
if not hasattr(preds, "__cuda_array_interface__"):
raise ValueError("Invalid type for output preds,"
" need GPU array")
preds.index = X_m.index
cdef uintptr_t preds_ptr
preds_ptr = preds.ptr
if fil_dtype == np.float32:
predict(handle_[0],
self.get_forest32(),
<float*> preds_ptr,
<float*> X_ptr,
<size_t> n_rows,
<bool> predict_proba)
elif fil_dtype == np.float64:
predict(handle_[0],
self.get_forest64(),
<double*> preds_ptr,
<double*> X_ptr,
<size_t> n_rows,
<bool> predict_proba)
else:
# should not reach here
assert False, 'invalid fil_dtype, must be np.float32 or np.float64'
self.handle.sync()
# special case due to predict and predict_proba
# both coming from the same CUDA/C++ function
if predict_proba:
cuml.internals.set_api_output_dtype(None)
return preds
def load_from_treelite_model_handle(self, **kwargs):
    """Load a forest into this impl from a raw treelite ``ModelHandle``.

    Initializes ``forest_data`` to a NULL float32 forest variant before
    loading; the effective precision is decided later by
    ``load_using_treelite_handle`` from the ``precision`` kwarg.
    """
    self.forest_data = forest_variant(<forest32_t> NULL)
    return self.load_using_treelite_handle(**kwargs)
def load_from_treelite_model(self, **kwargs):
    """Load a forest from a ``TreeliteModel`` wrapper object.

    Extracts the underlying C handle from ``kwargs['model']`` and forwards
    all keyword arguments (plus ``model_handle``) to
    ``load_from_treelite_model_handle``.
    """
    cdef TreeliteModel model = kwargs['model']
    return self.load_from_treelite_model_handle(
        model_handle=<uintptr_t>model.handle, **kwargs)
def load_using_treelite_handle(self, **kwargs):
    """Build the device-side FIL forest from a treelite model handle.

    Translates the Python keyword arguments into a C ``treelite_params_t``
    struct, imports the model through the C++ ``from_treelite`` routine,
    and records the number of output classes for later prediction calls.

    Returns ``self`` so loading calls can be chained.
    """
    cdef treelite_params_t treelite_params
    self.output_class = kwargs['output_class']
    treelite_params.output_class = self.output_class
    treelite_params.threshold = kwargs['threshold']
    treelite_params.algo = self.get_algo(kwargs['algo'])
    treelite_params.storage_type =\
        self.get_storage_type(kwargs['storage_type'])
    treelite_params.blocks_per_sm = kwargs['blocks_per_sm']
    treelite_params.n_items = kwargs['n_items']
    treelite_params.threads_per_tree = kwargs['threads_per_tree']
    if kwargs['compute_shape_str']:
        # Free any shape string left over from a previous load before the
        # C++ side allocates a new one into self.shape_str.
        if self.shape_str:
            free(self.shape_str)
        treelite_params.pforest_shape_str = &self.shape_str
    else:
        # NULL tells the C++ side not to produce a shape description.
        treelite_params.pforest_shape_str = NULL
    treelite_params.precision = self.get_precision(kwargs['precision'])
    cdef handle_t* handle_ =\
        <handle_t*><size_t>self.handle.getHandle()
    cdef uintptr_t model_ptr = <uintptr_t>kwargs['model_handle']
    from_treelite(handle_[0],
                  &self.forest_data,
                  <ModelHandle> model_ptr,
                  &treelite_params)
    # Query the class count so predict() can size probability outputs.
    TreeliteQueryNumClass(<ModelHandle> model_ptr,
                          &self.num_class)
    return self
def __dealloc__(self):
    """Release the device-side forest owned by this object.

    Dispatches on the loaded precision and calls the C++ ``free`` for the
    matching forest pointer, if one was ever allocated.
    """
    cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
    fil_dtype = self.get_dtype()
    if fil_dtype == np.float32:
        if self.get_forest32() != NULL:
            free[float](handle_[0], self.get_forest32())
    elif fil_dtype == np.float64:
        if self.get_forest64() != NULL:
            free[double](handle_[0], self.get_forest64())
    else:
        # should not reach here
        assert False, 'invalid fil_dtype, must be np.float32 or np.float64'
class ForestInference(Base,
                      CMajorInputTagMixin):
    """
    ForestInference provides GPU-accelerated inference (prediction)
    for random forest and boosted decision tree models.

    This module does not support training models. Rather, users should
    train a model in another package and save it in a
    treelite-compatible format. (See https://github.com/dmlc/treelite)
    Currently, LightGBM, XGBoost and SKLearn GBDT and random forest models
    are supported.

    Users typically create a ForestInference object by loading a saved model
    file with ForestInference.load. It is also possible to create it from an
    SKLearn model using ForestInference.load_from_sklearn. The resulting object
    provides a `predict` method for carrying out inference.

    **Known limitations**:

     * A single row of data should fit into the shared memory of a thread
       block, otherwise (starting from 5000-12288 features) FIL might infer
       slower
     * From sklearn.ensemble, only
       `{RandomForest,GradientBoosting,ExtraTrees}{Classifier,Regressor}`
       models are supported. Other sklearn.ensemble models are currently not
       supported.
     * Importing large SKLearn models can be slow, as it is done in Python.
     * LightGBM categorical features are not supported.
     * Inference uses a dense matrix format, which is efficient for many
       problems but can be suboptimal for sparse datasets.
     * Only classification and regression are supported.
     * Many other random forest implementations including LightGBM, and SKLearn
       GBDTs make use of 64-bit floating point parameters, but the underlying
       library for ForestInference uses only 32-bit parameters. Because of the
       truncation that will occur when loading such models into
       ForestInference, you may observe a slight degradation in accuracy.

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Examples
    --------
    In the example below, synthetic data is copied to the host before
    inference. ForestInference can also accept a numpy array directly at the
    cost of a slight performance overhead.

    .. code-block:: python

        >>> # Assume that the file 'xgb.model' contains a classifier model
        >>> # that was previously saved by XGBoost's save_model function.
        >>> import sklearn, sklearn.datasets
        >>> import numpy as np
        >>> from numba import cuda
        >>> from cuml import ForestInference
        >>> model_path = 'xgb.model'
        >>> X_test, y_test = sklearn.datasets.make_classification()
        >>> X_gpu = cuda.to_device(
        ...     np.ascontiguousarray(X_test.astype(np.float32)))
        >>> fm = ForestInference.load(
        ...     model_path, output_class=True) # doctest: +SKIP
        >>> fil_preds_gpu = fm.predict(X_gpu) # doctest: +SKIP
        >>> accuracy_score = sklearn.metrics.accuracy_score(y_test,
        ...     np.asarray(fil_preds_gpu)) # doctest: +SKIP

    Notes
    -----
    For additional usage examples, see the sample notebook at
    https://github.com/rapidsai/cuml/blob/main/notebooks/forest_inference_demo.ipynb
    """

    def common_load_params_docstring(func):
        # Class-body decorator: append the shared loading-parameter docs to
        # ``func``'s docstring via str.format on its ``{}`` placeholder.
        func.__doc__ = getdoc(func).format("""
    output_class: boolean (default=False)
        For a Classification model `output_class` must be True.
        For a Regression model `output_class` must be False.
    algo : string (default='auto')
        Name of the algo from (from algo_t enum):

         - ``'AUTO'`` or ``'auto'``: Choose the algorithm automatically.
           Currently 'BATCH_TREE_REORG' is used for dense storage,
           and 'NAIVE' for sparse storage
         - ``'NAIVE'`` or ``'naive'``: Simple inference using shared memory
         - ``'TREE_REORG'`` or ``'tree_reorg'``: Similar to naive but trees
           rearranged to be more coalescing-friendly
         - ``'BATCH_TREE_REORG'`` or ``'batch_tree_reorg'``: Similar to
           TREE_REORG but predicting multiple rows per thread block
    threshold : float (default=0.5)
        Threshold is used to for classification. It is applied
        only if ``output_class == True``, else it is ignored.
    storage_type : string or boolean (default='auto')
        In-memory storage format to be used for the FIL model:

         - ``'auto'``: Choose the storage type automatically
           (currently DENSE is always used)
         - ``False``: Create a dense forest
         - ``True``: Create a sparse forest. Requires algo='NAIVE' or
           algo='AUTO'
    blocks_per_sm : integer (default=0)
        (experimental) Indicates how the number of thread blocks to launch
        for the inference kernel is determined.

         - ``0`` (default): Launches the number of blocks proportional to
           the number of data rows
         - ``>= 1``: Attempts to launch blocks_per_sm blocks per SM. This
           will fail if blocks_per_sm blocks result in more threads than the
           maximum supported number of threads per GPU. Even if successful,
           it is not guaranteed that blocks_per_sm blocks will run on an SM
           concurrently.
    compute_shape_str : boolean (default=False)
        if True or equivalent, creates a ForestInference.shape_str
        (writes a human-readable forest shape description as a
        multiline ascii string)
    precision : string (default='native')
        precision of weights and thresholds of the FIL model loaded from
        the treelite model.

         - ``'native'``: load in float64 if the treelite model contains float64
           weights or thresholds, otherwise load in float32
         - ``'float32'``: always load in float32, may lead to loss of precision
           if the treelite model contains float64 weights or thresholds
         - ``'float64'``: always load in float64
    """)
        return func

    def common_predict_params_docstring(func):
        # Class-body decorator: inject the shared dense-input parameter docs.
        func.__doc__ = getdoc(func).format(
            _parameters_docstrings['dense'].format(
                name='X', shape='(n_samples, n_features)') +
            '\n For optimal performance, pass a float device array '
            'with C-style layout')
        return func

    def __init__(self, *,
                 handle=None,
                 output_type=None,
                 verbose=False):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        # All loading/prediction work is delegated to the Cython impl object,
        # which owns the device-side forest.
        self._impl = ForestInference_impl(self.handle)

    @common_predict_params_docstring
    def predict(self, X, preds=None,
                safe_dtype_conversion=False) -> CumlArray:
        """
        Predicts the labels for X with the loaded forest model.
        By default, the result is the raw floating point output
        from the model, unless `output_class` was set to True
        during model loading.

        See the documentation of `ForestInference.load` for details.

        Parameters
        ----------
        preds : gpuarray or cudf.Series, shape = (n_samples,)
            Optional 'out' location to store inference results

        safe_dtype_conversion : bool (default = False)
            FIL converts data to np.float32 when needed. Set this parameter to
            True to enable checking for information loss during that
            conversion, but note that this check can have a significant
            performance penalty. Parameter will be dropped in a future
            version.

        Returns
        -------
        GPU array of length n_samples with inference results
        (or 'preds' filled with inference results if preds was specified)
        """
        # BUGFIX: forward the user-supplied output buffer. Previously
        # ``preds=None`` was always passed down, so a caller-provided
        # out-array was silently ignored despite being documented.
        return self._impl.predict(X, predict_proba=False, preds=preds,
                                  safe_dtype_conversion=safe_dtype_conversion)

    @common_predict_params_docstring
    def predict_proba(self, X, preds=None,
                      safe_dtype_conversion=False) -> CumlArray:
        """
        Predicts the class probabilities for X with the loaded forest model.
        The result is the raw floating point output
        from the model.

        Parameters
        ----------
        preds : gpuarray or cudf.Series, shape = (n_samples,2)
            Binary probability output
            Optional 'out' location to store inference results

        safe_dtype_conversion : bool (default = False)
            FIL converts data to np.float32 when needed. Set this parameter to
            True to enable checking for information loss during that
            conversion, but note that this check can have a significant
            performance penalty. Parameter will be dropped in a future
            version.

        Returns
        -------
        GPU array of shape (n_samples,2) with inference results
        (or 'preds' filled with inference results if preds was specified)
        """
        # BUGFIX: forward the user-supplied output buffer (see predict()).
        return self._impl.predict(X, predict_proba=True, preds=preds,
                                  safe_dtype_conversion=safe_dtype_conversion)

    @common_load_params_docstring
    def load_from_treelite_model(self, model, output_class=False,
                                 algo='auto',
                                 threshold=0.5,
                                 storage_type='auto',
                                 blocks_per_sm=0,
                                 threads_per_tree=1,
                                 n_items=0,
                                 compute_shape_str=False,
                                 precision='native'):
        """Creates a FIL model using the treelite model
        passed to the function.

        Parameters
        ----------
        model
            the trained model information in the treelite format
            loaded from a saved model using the treelite API
            https://treelite.readthedocs.io/en/latest/treelite-api.html
        {}

        Returns
        -------
        fil_model
            A Forest Inference model which can be used to perform
            inferencing on the random forest/ XGBoost model.
        """
        # NOTE: locals() deliberately captures all parameters (and self) so
        # the impl receives the full set of loading kwargs.  Do not introduce
        # extra local variables before these calls.
        if isinstance(model, TreeliteModel):
            # TreeliteModel defined in this file
            self._impl.load_from_treelite_model(**locals())
        else:
            # assume it is treelite.Model
            self._impl.load_from_treelite_model_handle(
                model_handle=model.handle.value, **locals())
        self.shape_str = self._impl.get_shape_str()
        return self

    @classmethod
    def load_from_sklearn(cls, skl_model,
                          output_class=False,
                          threshold=0.50,
                          algo='auto',
                          storage_type='auto',
                          blocks_per_sm=0,
                          threads_per_tree=1,
                          n_items=0,
                          compute_shape_str=False,
                          precision='native',
                          handle=None):
        """
        Creates a FIL model using the scikit-learn model passed to the
        function. This function requires Treelite 1.0.0+ to be installed.

        Parameters
        ----------
        skl_model
            The scikit-learn model from which to build the FIL version.
        output_class: boolean (default=False)
            For a Classification model `output_class` must be True.
            For a Regression model `output_class` must be False.
        algo : string (default='auto')
            Name of the algo from (from algo_t enum):

             - ``'AUTO'`` or ``'auto'``: Choose the algorithm automatically.
               Currently 'BATCH_TREE_REORG' is used for dense storage,
               and 'NAIVE' for sparse storage
             - ``'NAIVE'`` or ``'naive'``: Simple inference using shared memory
             - ``'TREE_REORG'`` or ``'tree_reorg'``: Similar to naive but trees
               rearranged to be more coalescing-friendly
             - ``'BATCH_TREE_REORG'`` or ``'batch_tree_reorg'``: Similar to
               TREE_REORG but predicting multiple rows per thread block
        threshold : float (default=0.5)
            Threshold is used to for classification. It is applied
            only if ``output_class == True``, else it is ignored.
        storage_type : string or boolean (default='auto')
            In-memory storage format to be used for the FIL model:

             - ``'auto'``: Choose the storage type automatically
               (currently DENSE is always used)
             - ``False``: Create a dense forest
             - ``True``: Create a sparse forest. Requires algo='NAIVE' or
               algo='AUTO'
        blocks_per_sm : integer (default=0)
            (experimental) Indicates how the number of thread blocks to launch
            for the inference kernel is determined.

             - ``0`` (default): Launches the number of blocks proportional to
               the number of data rows
             - ``>= 1``: Attempts to launch blocks_per_sm blocks per SM. This
               will fail if blocks_per_sm blocks result in more threads than
               the maximum supported number of threads per GPU. Even if
               successful, it is not guaranteed that blocks_per_sm blocks will
               run on an SM concurrently.
        compute_shape_str : boolean (default=False)
            if True or equivalent, creates a ForestInference.shape_str
            (writes a human-readable forest shape description as a
            multiline ascii string)
        precision : string (default='native')
            precision of weights and thresholds of the FIL model loaded from
            the treelite model.

             - ``'native'``: load in float64 if the treelite model contains
               float64 weights or thresholds, otherwise load in float32
             - ``'float32'``: always load in float32, may lead to loss of
               precision if the treelite model contains float64 weights or
               thresholds
             - ``'float64'``: always load in float64

        Returns
        -------
        fil_model
            A Forest Inference model created from the scikit-learn
            model passed.
        """
        cuml_fm = ForestInference(handle=handle)
        logger.warn("Treelite currently does not support float64 model"
                    " parameters. Accuracy may degrade slightly relative to"
                    " native sklearn invocation.")
        tl_model = tl_skl.import_model(skl_model)
        cuml_fm.load_from_treelite_model(
            model=tl_model,
            output_class=output_class,
            threshold=threshold,
            algo=algo,
            storage_type=storage_type,
            blocks_per_sm=blocks_per_sm,
            threads_per_tree=threads_per_tree,
            n_items=n_items,
            compute_shape_str=compute_shape_str,
            precision=precision
        )
        return cuml_fm

    @classmethod
    def load(cls,
             filename,
             output_class=False,
             threshold=0.50,
             algo='auto',
             storage_type='auto',
             blocks_per_sm=0,
             threads_per_tree=1,
             n_items=0,
             compute_shape_str=False,
             precision='native',
             model_type="xgboost",
             handle=None):
        """
        Returns a FIL instance containing the forest saved in `filename`
        This uses Treelite to load the saved model.

        Parameters
        ----------
        filename : string
            Path to saved model file in a treelite-compatible format
            (See https://treelite.readthedocs.io/en/latest/treelite-api.html
            for more information)
        output_class: boolean (default=False)
            For a Classification model `output_class` must be True.
            For a Regression model `output_class` must be False.
        algo : string (default='auto')
            Name of the algo from (from algo_t enum):

             - ``'AUTO'`` or ``'auto'``: Choose the algorithm automatically.
               Currently 'BATCH_TREE_REORG' is used for dense storage,
               and 'NAIVE' for sparse storage
             - ``'NAIVE'`` or ``'naive'``: Simple inference using shared memory
             - ``'TREE_REORG'`` or ``'tree_reorg'``: Similar to naive but trees
               rearranged to be more coalescing-friendly
             - ``'BATCH_TREE_REORG'`` or ``'batch_tree_reorg'``: Similar to
               TREE_REORG but predicting multiple rows per thread block
        threshold : float (default=0.5)
            Threshold is used to for classification. It is applied
            only if ``output_class == True``, else it is ignored.
        storage_type : string or boolean (default='auto')
            In-memory storage format to be used for the FIL model:

             - ``'auto'``: Choose the storage type automatically
               (currently DENSE is always used)
             - ``False``: Create a dense forest
             - ``True``: Create a sparse forest. Requires algo='NAIVE' or
               algo='AUTO'
        blocks_per_sm : integer (default=0)
            (experimental) Indicates how the number of thread blocks to launch
            for the inference kernel is determined.

             - ``0`` (default): Launches the number of blocks proportional to
               the number of data rows
             - ``>= 1``: Attempts to launch blocks_per_sm blocks per SM. This
               will fail if blocks_per_sm blocks result in more threads than
               the maximum supported number of threads per GPU. Even if
               successful, it is not guaranteed that blocks_per_sm blocks will
               run on an SM concurrently.
        compute_shape_str : boolean (default=False)
            if True or equivalent, creates a ForestInference.shape_str
            (writes a human-readable forest shape description as a
            multiline ascii string)
        precision : string (default='native')
            precision of weights and thresholds of the FIL model loaded from
            the treelite model.

             - ``'native'``: load in float64 if the treelite model contains
               float64 weights or thresholds, otherwise load in float32
             - ``'float32'``: always load in float32, may lead to loss of
               precision if the treelite model contains float64 weights or
               thresholds
             - ``'float64'``: always load in float64
        model_type : string (default="xgboost")
            Format of the saved treelite model to be load.
            It can be 'xgboost', 'xgboost_json', 'lightgbm'.

        Returns
        -------
        fil_model
            A Forest Inference model which can be used to perform
            inferencing on the model read from the file.
        """
        cuml_fm = ForestInference(handle=handle)
        tl_model = TreeliteModel.from_filename(filename, model_type=model_type)
        cuml_fm.load_from_treelite_model(
            model=tl_model,
            output_class=output_class,
            threshold=threshold,
            algo=algo,
            storage_type=storage_type,
            blocks_per_sm=blocks_per_sm,
            threads_per_tree=threads_per_tree,
            n_items=n_items,
            compute_shape_str=compute_shape_str,
            precision=precision
        )
        return cuml_fm

    @common_load_params_docstring
    def load_using_treelite_handle(self,
                                   model_handle,
                                   output_class=False,
                                   algo='auto',
                                   storage_type='auto',
                                   threshold=0.50,
                                   blocks_per_sm=0,
                                   threads_per_tree=1,
                                   n_items=0,
                                   compute_shape_str=False,
                                   precision='native'
                                   ):
        """
        Returns a FIL instance by converting a treelite model to
        FIL model by using the treelite ModelHandle passed.

        Parameters
        ----------
        model_handle : Modelhandle to the treelite forest model
            (See https://treelite.readthedocs.io/en/latest/treelite-api.html
            for more information)
        {}

        Returns
        -------
        fil_model
            A Forest Inference model which can be used to perform
            inferencing on the random forest model.
        """
        # locals() captures every parameter (and self) for the impl call;
        # do not introduce extra locals above this line.
        self._impl.load_using_treelite_handle(**locals())
        self.shape_str = self._impl.get_shape_str()
        # DO NOT RETURN self._impl here!!
        return self
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/fil/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.fil.fil import ForestInference
from cuml.fil import fil
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/distance_type.pxd | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Cython mirror of the C++ enum raft::distance::DistanceType.  Each entry's
# quoted string is the exact C++ symbol emitted in generated code, so Cython
# modules can pass these values straight to RAFT/cuML C++ APIs.
cdef extern from "raft/distance/distance_types.hpp" namespace "raft::distance":
    ctypedef enum DistanceType:
        L2Expanded "raft::distance::DistanceType::L2Expanded"
        L2SqrtExpanded "raft::distance::DistanceType::L2SqrtExpanded"
        CosineExpanded "raft::distance::DistanceType::CosineExpanded"
        L1 "raft::distance::DistanceType::L1"
        L2Unexpanded "raft::distance::DistanceType::L2Unexpanded"
        L2SqrtUnexpanded "raft::distance::DistanceType::L2SqrtUnexpanded"
        InnerProduct "raft::distance::DistanceType::InnerProduct"
        Linf "raft::distance::DistanceType::Linf"
        Canberra "raft::distance::DistanceType::Canberra"
        LpUnexpanded "raft::distance::DistanceType::LpUnexpanded"
        CorrelationExpanded "raft::distance::DistanceType::CorrelationExpanded"
        JaccardExpanded "raft::distance::DistanceType::JaccardExpanded"
        HellingerExpanded "raft::distance::DistanceType::HellingerExpanded"
        Haversine "raft::distance::DistanceType::Haversine"
        BrayCurtis "raft::distance::DistanceType::BrayCurtis"
        JensenShannon "raft::distance::DistanceType::JensenShannon"
        HammingUnexpanded "raft::distance::DistanceType::HammingUnexpanded"
        KLDivergence "raft::distance::DistanceType::KLDivergence"
        RusselRaoExpanded "raft::distance::DistanceType::RusselRaoExpanded"
        DiceExpanded "raft::distance::DistanceType::DiceExpanded"
        Precomputed "raft::distance::DistanceType::Precomputed"
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Collect the Cython sources for the metrics package.  Each
# add_module_gpu_default() call presumably appends its .pyx file to
# cython_sources when the named algorithm/metrics options are enabled —
# TODO confirm against the helper's definition in the parent CMake files.
set(cython_sources "")
add_module_gpu_default("accuracy.pyx" ${accuracy_algo} ${metrics_algo})
add_module_gpu_default("hinge_loss.pyx" ${hinge_loss_algo} ${metrics_algo})
add_module_gpu_default("kl_divergence.pyx" ${kl_divergence_algo} ${metrics_algo})
add_module_gpu_default("pairwise_distances.pyx" ${pairwise_distances_algo} ${metrics_algo})
add_module_gpu_default("regression.pyx" ${regression_algo} ${metrics_algo})
add_module_gpu_default("trustworthiness.pyx" ${trustworthiness_algo} ${metrics_algo})
# Build one C++ extension module per collected source, linked against the
# single-GPU cuML libraries, with module names prefixed "metrics_".
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX metrics_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/kl_divergence.pyx | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
import cuml.internals
from cuml.common import input_to_cuml_array
from libc.stdint cimport uintptr_t
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
# C++ overloads of ML::Metrics::kl_divergence for double and float inputs;
# `except +` translates any C++ exception into a Python exception.
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    double c_kl_divergence "ML::Metrics::kl_divergence"(
        const handle_t &handle,
        const double *y,
        const double *y_hat,
        int n) except +
    float c_kl_divergence "ML::Metrics::kl_divergence"(
        const handle_t &handle,
        const float *y,
        const float *y_hat,
        int n) except +
@cuml.internals.api_return_any()
def kl_divergence(P, Q, handle=None, convert_dtype=True):
    """
    Calculates the "Kullback-Leibler" Divergence

    The KL divergence tells us how well the probability distribution Q
    approximates the probability distribution P
    It is often also used as a 'distance metric' between two probability
    distributions (not symmetric)

    Parameters
    ----------
    P : Dense array of probabilities corresponding to distribution P
        shape = (n_samples, 1)
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy.
    Q : Dense array of probabilities corresponding to distribution Q
        shape = (n_samples, 1)
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy.
    handle : cuml.Handle
    convert_dtype : bool, optional (default = True)
        When set to True, the method will, convert P and
        Q to be the same data type: float32. This
        will increase memory used for the method.

    Returns
    -------
    float
        The KL Divergence value
    """
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()
    # NOTE: input_to_cuml_array returns (array, n_rows, n_cols, dtype), so
    # despite its name `n_features_p` holds the number of rows (samples);
    # check_cols=1 forces the inputs to be single-column.
    P_m, n_features_p, _, dtype_p = \
        input_to_cuml_array(P, check_cols=1,
                            convert_to_dtype=(np.float32 if convert_dtype
                                              else None),
                            check_dtype=[np.float32, np.float64])
    # Q must match P's dtype exactly (converted when convert_dtype=True).
    Q_m, n_features_q, _, _ = \
        input_to_cuml_array(Q, check_cols=1,
                            convert_to_dtype=(dtype_p if convert_dtype
                                              else None),
                            check_dtype=[dtype_p])
    if n_features_p != n_features_q:
        raise ValueError("Incompatible dimension for P and Q arrays: \
                         P.shape == ({}) while Q.shape == ({})"
                         .format(n_features_p, n_features_q))
    cdef uintptr_t d_P_ptr = P_m.ptr
    cdef uintptr_t d_Q_ptr = Q_m.ptr
    # Dispatch to the matching C++ overload based on the input precision.
    if (dtype_p == np.float32):
        res = c_kl_divergence(handle_[0],
                              <float*> d_P_ptr,
                              <float*> d_Q_ptr,
                              <int> n_features_p)
    else:
        res = c_kl_divergence(handle_[0],
                              <double*> d_P_ptr,
                              <double*> d_Q_ptr,
                              <int> n_features_p)
    return res
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/accuracy.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.input_utils import input_to_cuml_array
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
cimport cuml.common.cuda
# C++ entry point computing classification accuracy on int32 label arrays;
# `except +` translates any C++ exception into a Python exception.
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    float accuracy_score_py(handle_t &handle,
                            int *predictions,
                            int *ref_predictions,
                            int n) except +
@cuml.internals.api_return_any()
def accuracy_score(ground_truth, predictions, handle=None, convert_dtype=True):
    """
    Calculates the accuracy score of a classification model.

    Parameters
    ----------
    ground_truth : NumPy ndarray, Numba device ndarray
        The ground truth labels of the test dataset
    predictions : NumPy ndarray or Numba device ndarray
        The labels predicted by the model for the test dataset
    handle : cuml.Handle, optional
        cuml handle holding the CUDA state used for the computation;
        a new one is created when None.
    convert_dtype : bool, optional (default = True)
        When True, both inputs are converted to int32 before comparison.

    Returns
    -------
    float
        The accuracy of the model used for prediction
    """
    handle = Handle() \
        if handle is None else handle
    cdef handle_t* handle_ =\
        <handle_t*><size_t>handle.getHandle()
    cdef uintptr_t preds_ptr, ground_truth_ptr
    preds_m, n_rows, _, _ = \
        input_to_cuml_array(predictions,
                            convert_to_dtype=np.int32
                            if convert_dtype else None)
    preds_ptr = preds_m.ptr
    ground_truth_m, _, _, _ =\
        input_to_cuml_array(ground_truth,
                            convert_to_dtype=np.int32
                            if convert_dtype else None)
    ground_truth_ptr = ground_truth_m.ptr
    acc = accuracy_score_py(handle_[0],
                            <int*> preds_ptr,
                            <int*> ground_truth_ptr,
                            <int> n_rows)
    return acc
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/trustworthiness.pyx | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.input_utils import input_to_cuml_array
from pylibraft.common.handle import Handle
from pylibraft.common.handle cimport handle_t
# Minimal view of raft's DistanceType: only the euclidean member (value 5)
# is declared here, as it is the single metric this module dispatches on.
cdef extern from "raft/distance/distance_types.hpp" namespace "raft::distance":
    ctypedef int DistanceType
    ctypedef DistanceType euclidean "(raft::distance::DistanceType)5"
# Templated C++ trustworthiness computation (templated on the scalar type
# and the distance metric); `except +` propagates C++ exceptions to Python.
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    cdef double trustworthiness_score[T, DistanceType](const handle_t& h,
                                                       T* X,
                                                       T* X_embedded,
                                                       int n, int m,
                                                       int d,
                                                       int n_neighbors,
                                                       int batchSize) \
        except +
def _get_array_ptr(obj):
"""
Get ctype pointer of a numba style device array
"""
return obj.device_ctypes_pointer.value
@cuml.internals.api_return_any()
def trustworthiness(X, X_embedded, handle=None, n_neighbors=5,
                    metric='euclidean',
                    convert_dtype=True, batch_size=512) -> float:
    """
    Expresses to what extent the local structure is retained in embedding.
    The score is defined in the range [0, 1].

    Parameters
    ----------
    X : array-like (device or host) shape = (n_samples, n_features)
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    X_embedded : array-like (device or host) shape= (n_samples, n_features)
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    handle : cuml.Handle, optional
        cuml handle holding the CUDA state used for the computation;
        a new one is created when None.
    n_neighbors : int, optional (default=5)
        Number of neighbors considered
    metric : str in ['euclidean'] (default='euclidean')
        Metric used to compute the trustworthiness. For the moment only
        'euclidean' is supported.
    convert_dtype : bool, optional (default=True)
        When set to True, the trustworthiness method will automatically
        convert the inputs to np.float32.
    batch_size : int (default=512)
        The number of samples to use for each batch.

    Returns
    -------
    trustworthiness score : double
        Trustworthiness of the low-dimensional embedding
    """
    # Validate once (the original duplicated this check and the handle
    # initialization below).
    if n_neighbors > X.shape[0]:
        raise ValueError("n_neighbors must be <= the number of rows.")

    handle = Handle() if handle is None else handle

    cdef uintptr_t d_X_ptr
    cdef uintptr_t d_X_embedded_ptr

    X_m, n_samples, n_features, _ = \
        input_to_cuml_array(X, order='C', check_dtype=np.float32,
                            convert_to_dtype=(np.float32 if convert_dtype
                                              else None))
    d_X_ptr = X_m.ptr

    X_m2, _, n_components, _ = \
        input_to_cuml_array(X_embedded, order='C',
                            check_dtype=np.float32,
                            convert_to_dtype=(np.float32 if convert_dtype
                                              else None))
    d_X_embedded_ptr = X_m2.ptr

    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()

    if metric == 'euclidean':
        ret = trustworthiness_score[float, euclidean](handle_[0],
                                                      <float*>d_X_ptr,
                                                      <float*>d_X_embedded_ptr,
                                                      n_samples,
                                                      n_features,
                                                      n_components,
                                                      n_neighbors,
                                                      batch_size)
        # Block until the asynchronous GPU computation has finished.
        handle.sync()
    else:
        raise Exception("Unknown metric")
    return ret
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/hinge_loss.pyx | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import cuml.internals
from cuml.internals.input_utils import determine_array_type
from cuml.preprocessing import LabelEncoder, LabelBinarizer
cp = gpu_only_import('cupy')
cudf = gpu_only_import('cudf')
@cuml.internals.api_return_any()
def hinge_loss(y_true,
               pred_decision,
               labels=None,
               sample_weights=None) -> float:
    """
    Calculates non-regularized hinge loss. Adapted from scikit-learn
    hinge loss.

    Parameters
    ----------
    y_true : cuDF Series or cuPy array of shape (n_samples,)
        True labels, consisting of labels for the classes.
        In binary classification, the positive label must be
        greater than negative class
    pred_decision : cuDF DataFrame or cuPy array of shape (n_samples,) or \
            (n_samples, n_classes)
        Predicted decisions, as output by decision_function (floats)
    labels : cuDF Series or cuPy array, default=None
        In multiclass problems, this must include all class labels.
    sample_weights : cupy array of shape (n_samples,), default=None
        Sample weights to be used for computing the average

    Returns
    -------
    loss : float
        The average hinge loss.
    """
    yt_type = determine_array_type(y_true)
    pd_type = determine_array_type(pred_decision)

    if yt_type not in ['cupy', 'numba', 'cudf']:
        raise TypeError("y_true needs to be either a cuDF Series or \
                        a cuPy/numba array.")

    if pd_type not in ['cupy', 'numba', 'cudf']:
        raise TypeError("pred_decision needs to be either a cuDF DataFrame or \
                        a cuPy/numba array.")

    # `labels` is optional: only type-check it when the caller provided one.
    # (Checking unconditionally would reject the default labels=None.)
    if labels is not None:
        labels_type = determine_array_type(labels)
        if labels_type not in ['cupy', 'numba', 'cudf']:
            raise TypeError("labels needs to be either a cuDF Series or \
                        a cuPy/numba array.")

    if y_true.shape[0] != pred_decision.shape[0]:
        raise ValueError("y_true and pred_decision must have the same"
                         " number of rows(found {} and {})".format(
                          y_true.shape[0],
                          pred_decision.shape[0]))

    # Compare against None explicitly: truth-testing a multi-element
    # CuPy array (`if sample_weights:`) is ambiguous and raises.
    if sample_weights is not None \
            and sample_weights.shape[0] != y_true.shape[0]:
        raise ValueError("y_true and sample_weights must have the same "
                         "number of rows (found {} and {})".format(
                          y_true.shape[0],
                          sample_weights.shape[0]))

    # Normalize to cuDF Series for the label encoding below. Keep
    # labels=None as None so the "all labels from y_true" path works.
    if labels is not None and not isinstance(labels, cudf.Series):
        labels = cudf.Series(labels)
    if not isinstance(y_true, cudf.Series):
        y_true = cudf.Series(y_true)

    y_true_unique = cp.unique(labels if labels is not None else y_true)

    if y_true_unique.size > 2:
        # Multiclass case: margin is the score of the true class minus
        # the best score among the other classes.
        if (labels is None and pred_decision.ndim > 1 and
                (cp.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        le = LabelEncoder(output_type="cudf")
        le.fit(labels)
        y_true = le.transform(y_true)
        if isinstance(pred_decision, cudf.DataFrame):
            pred_decision = pred_decision.values

        mask = cp.ones_like(pred_decision, dtype=bool)
        mask[cp.arange(y_true.shape[0]), y_true.values] = False
        margin = pred_decision[~mask]
        margin -= cp.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        if isinstance(pred_decision, cudf.DataFrame):
            pred_decision = pred_decision.values
        pred_decision = cp.ravel(pred_decision)

        lbin = LabelBinarizer(neg_label=-1, output_type="cupy")
        y_true = lbin.fit_transform(y_true)[:, 1]

        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")

    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    cp.clip(losses, 0, None, out=losses)
    return cp.average(losses, weights=sample_weights)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/_classification.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.input_utils import input_to_cupy_array
import cuml.internals
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@cuml.internals.api_return_any()
def log_loss(
    y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None
) -> float:
    """Log loss, aka logistic loss or cross-entropy loss.

    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of a logistic model that returns ``y_pred`` probabilities
    for its training data ``y_true``. The log loss is only defined for two
    or more labels.

    Parameters
    ----------
    y_true : array-like, shape = (n_samples,)
    y_pred : array-like of float,
        shape = (n_samples, n_classes) or (n_samples,)
    eps : float (default=1e-15)
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float

    Examples
    --------
    .. code-block:: python

        >>> from cuml.metrics import log_loss
        >>> import cupy as cp
        >>> log_loss(cp.array([1, 0, 0, 1]),
        ...          cp.array([[.1, .9], [.9, .1], [.8, .2], [.35, .65]]))
        0.21616...

    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_true, n_rows, _, _ = input_to_cupy_array(
        y_true, check_dtype=[np.int32, np.int64, np.float32, np.float64]
    )

    # Labels must be non-negative integers (possibly stored as floats).
    if y_true.dtype.kind == "f" and np.any(y_true != y_true.astype(int)):
        raise ValueError("'y_true' can only have integer values")
    if y_true.min() < 0:
        raise ValueError("'y_true' cannot have negative values")

    y_pred, _, _, _ = input_to_cupy_array(
        y_pred,
        check_dtype=[np.int32, np.int64, np.float32, np.float64],
        check_rows=n_rows,
    )

    # A 1-D y_pred encodes the probability of the positive class only, so
    # the labels must be binary; a 2-D y_pred needs a column per class.
    max_label = y_true.max()
    if y_pred.ndim == 1:
        shape_mismatch = max_label > 1
    else:
        shape_mismatch = y_pred.shape[1] <= max_label
    if shape_mismatch:
        raise ValueError(
            "The shape of y_pred doesn't match the number of classes"
        )

    y_true = y_true.astype("int32")
    probs = cp.clip(y_pred, eps, 1 - eps)
    if probs.ndim == 1:
        probs = cp.expand_dims(probs, axis=1)
    if probs.shape[1] == 1:
        probs = cp.hstack([1 - probs, probs])
    # Renormalize rows so clipped probabilities still sum to one.
    probs /= cp.sum(probs, axis=1, keepdims=True)

    per_sample_loss = -cp.log(probs)[cp.arange(probs.shape[0]), y_true]
    return _weighted_sum(per_sample_loss, sample_weight, normalize).item()
def _weighted_sum(sample_score, sample_weight, normalize):
    """Reduce per-sample scores to a scalar: weighted mean or (weighted) sum.

    ``cp.average`` treats ``weights=None`` as uniform weighting, so the
    normalize branch handles both weighted and unweighted means.
    """
    if normalize:
        return cp.average(sample_score, weights=sample_weight)
    if sample_weight is None:
        return sample_score.sum()
    return cp.dot(sample_score, sample_weight)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/regression.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from libc.stdint cimport uintptr_t
import cuml.internals
from pylibraft.common.handle import Handle
from pylibraft.common.handle cimport handle_t
from cuml.metrics cimport regression
from cuml.internals.input_utils import input_to_cuml_array
@cuml.internals.api_return_any()
def r2_score(y, y_hat, convert_dtype=True, handle=None) -> float:
    """
    Calculates r2 score between y and y_hat

    Parameters
    ----------
    y : array-like (device or host) shape = (n_samples, 1)
        Dense vector (floats or doubles) of shape (n_samples, 1).
        Acceptable formats: cuDF Series, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    y_hat : array-like (device or host) shape = (n_samples, 1)
        Dense vector (floats or doubles) of shape (n_samples, 1).
        Acceptable formats: cuDF Series, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    convert_dtype : bool, optional (default = True)
        When set to True, the fit method will, when necessary, convert
        y_hat to be the same data type as y if they differ. This
        will increase memory used for the method.
    handle : raft Handle, optional (default = None)
        A new handle is created when none is provided.

    Returns
    -------
    r2 score : double
        The coefficient of determination of y_hat with respect to y.
    """
    handle = Handle() if handle is None else handle
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    # Validate y: one column, float32/float64; remember rows and dtype.
    y_m, n_rows, _, ytype = \
        input_to_cuml_array(y, check_dtype=[np.float32, np.float64],
                            check_cols=1)
    cdef uintptr_t y_ptr = y_m.ptr
    # y_hat must match y's dtype (converted when convert_dtype=True) and
    # its number of rows.
    y_m2, *_ = \
        input_to_cuml_array(y_hat, check_dtype=ytype,
                            convert_to_dtype=(ytype if convert_dtype
                                              else None),
                            check_rows=n_rows, check_cols=1)
    cdef uintptr_t y_hat_ptr = y_m2.ptr
    cdef float result_f32
    cdef double result_f64
    n = len(y)
    # Dispatch to the C++ overload that matches the validated dtype.
    if y_m.dtype == 'float32':
        result_f32 = regression.r2_score_py(handle_[0],
                                            <float*> y_ptr,
                                            <float*> y_hat_ptr,
                                            <int> n)
        result = result_f32
    else:
        result_f64 = regression.r2_score_py(handle_[0],
                                            <double*> y_ptr,
                                            <double*> y_hat_ptr,
                                            <int> n)
        result = result_f64
    # Drop the device-side copies before returning the Python float.
    del y_m
    del y_m2
    return result
def _prepare_input_reg(y_true, y_pred, sample_weight, multioutput):
    """
    Shared input-preparation helper for the regression metrics.

    Converts ``y_true``/``y_pred``/``sample_weight`` to CumlArray (with
    matching shapes and dtypes) and validates the ``multioutput`` argument,
    returning it in a form directly usable as ``cp.average`` weights.
    """
    allowed_d_types = [np.float32, np.float64, np.int32, np.int64]

    if len(y_true.shape) > 1:
        y_true = y_true.squeeze()
    y_true, n_rows, n_cols, _ = \
        input_to_cuml_array(y_true, check_dtype=allowed_d_types)

    if len(y_pred.shape) > 1:
        y_pred = y_pred.squeeze()
    y_pred, _, _, _ = \
        input_to_cuml_array(y_pred, check_dtype=allowed_d_types,
                            check_rows=n_rows, check_cols=n_cols)

    if sample_weight is not None:
        sample_weight, _, _, _ = \
            input_to_cuml_array(sample_weight, check_dtype=allowed_d_types,
                                check_rows=n_rows, check_cols=n_cols)

    raw_multioutput = False
    allowed_multioutput_str = ('raw_values', 'uniform_average',
                               'variance_weighted')
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError("Allowed 'multioutput' string values are {}. "
                             "You provided multioutput={!r}"
                             .format(allowed_multioutput_str, multioutput))
        if multioutput == 'raw_values':
            raw_multioutput = True
        elif multioutput == 'uniform_average':
            # pass None as weights to np.average: uniform mean
            multioutput = None
    elif multioutput is not None:
        # Array-like multioutput: custom per-output averaging weights.
        multioutput, _, _, _ = \
            input_to_cuml_array(multioutput, check_dtype=allowed_d_types)
        if n_cols == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
    return y_true, y_pred, sample_weight, multioutput, raw_multioutput
def _mse(y_true, y_pred, sample_weight, multioutput, squared, raw_multioutput):
    """Compute the (root) mean squared error shared by the public metrics."""
    diff = cp.subtract(y_true, y_pred)
    # Per-output mean of squared residuals, optionally sample-weighted.
    per_output = cp.average(cp.multiply(diff, diff), axis=0,
                            weights=sample_weight)
    if raw_multioutput:
        return per_output
    result = cp.average(per_output, weights=multioutput)
    if not squared:
        result = cp.sqrt(result)
    return result
@cuml.internals.api_return_any()
def mean_squared_error(y_true, y_pred,
                       sample_weight=None,
                       multioutput='uniform_average',
                       squared=True):
    """Mean squared error regression loss.

    Be careful when using this metric with float32 inputs as the result can be
    slightly incorrect because of floating point precision if the input is
    large enough. float64 will have lower numerical error.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average'] \
        (default='uniform_average')
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.
    squared : boolean value, optional (default = True)
        If True returns MSE value, if False returns RMSE value.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    (y_true, y_pred, sample_weight,
     multioutput, raw_multioutput) = _prepare_input_reg(
        y_true, y_pred, sample_weight, multioutput)
    return _mse(y_true, y_pred, sample_weight, multioutput,
                squared, raw_multioutput)
@cuml.internals.api_return_any()
def mean_absolute_error(y_true, y_pred,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss.

    Be careful when using this metric with float32 inputs as the result can be
    slightly incorrect because of floating point precision if the input is
    large enough. float64 will have lower numerical error.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately. If multioutput is 'uniform_average' or an
        ndarray of weights, then the weighted average of all output errors is
        returned.
        MAE output is non-negative floating point. The best value is 0.0.
    """
    (y_true, y_pred, sample_weight,
     multioutput, raw_multioutput) = _prepare_input_reg(
        y_true, y_pred, sample_weight, multioutput)

    # Per-output mean of |y_pred - y_true|, optionally sample-weighted.
    abs_diff = cp.abs(cp.subtract(y_pred, y_true))
    per_output = cp.average(abs_diff, axis=0, weights=sample_weight)

    if raw_multioutput:
        return per_output
    return cp.average(per_output, weights=multioutput)
@cuml.internals.api_return_any()
def mean_squared_log_error(y_true, y_pred,
                           sample_weight=None,
                           multioutput='uniform_average',
                           squared=True):
    """Mean squared log error regression loss.

    Be careful when using this metric with float32 inputs as the result can be
    slightly incorrect because of floating point precision if the input is
    large enough. float64 will have lower numerical error.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.
    squared : boolean value, optional (default = True)
        If True returns the squared-log error, if False returns its
        square root.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    """
    (y_true, y_pred, sample_weight,
     multioutput, raw_multioutput) = _prepare_input_reg(
        y_true, y_pred, sample_weight, multioutput)

    # log1p is undefined for values below -1; like scikit-learn, reject any
    # negative target outright.
    if cp.less(y_true, 0).any() or cp.less(y_pred, 0).any():
        raise ValueError("Mean Squared Logarithmic Error cannot be used when "
                         "targets contain negative values.")

    log_true = cp.log1p(y_true)
    log_pred = cp.log1p(y_pred)
    return _mse(log_true, log_pred, sample_weight, multioutput,
                squared, raw_multioutput)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/regression.pxd | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pylibraft.common.handle cimport handle_t
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    # Single-precision R^2 (coefficient of determination) of y_hat against
    # the ground truth y; n is the number of samples. `except +` translates
    # C++ exceptions into Python exceptions.
    float r2_score_py(const handle_t& handle,
                      float *y,
                      float *y_hat,
                      int n) except +
    # Double-precision overload of the same entry point.
    double r2_score_py(const handle_t& handle,
                       double *y,
                       double *y_hat,
                       int n) except +
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/confusion_matrix.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.label import make_monotonic
from cuml.metrics.utils import sorted_unique_labels
from cuml.internals.input_utils import input_to_cupy_array
from cuml.internals.array import CumlArray
from cuml.common import using_output_type
from cuml.common import input_to_cuml_array
import cuml.internals
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@cuml.internals.api_return_any()
def confusion_matrix(
    y_true,
    y_pred,
    labels=None,
    sample_weight=None,
    normalize=None,
    convert_dtype=False,
) -> CumlArray:
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Parameters
    ----------
    y_true : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    labels : array-like (device or host) shape = (n_classes,), optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If None is given, those that appear at
        least once in y_true or y_pred are used in sorted order.
    sample_weight : array-like (device or host) shape = (n_samples,), optional
        Sample weights.
    normalize : string in ['true', 'pred', 'all'] or None (default=None)
        Normalizes confusion matrix over the true (rows), predicted (columns)
        conditions or all the population. If None, confusion matrix will not
        be normalized.
    convert_dtype : bool, optional (default=False)
        When set to True, the confusion matrix method will automatically
        convert the predictions, ground truth, and labels arrays to np.int32.

    Returns
    -------
    C : array-like (device or host) shape = (n_classes, n_classes)
        Confusion matrix.
    """
    # Fail fast on an invalid `normalize` before any device arrays are
    # allocated or converted (previously this was validated only after all
    # of the input conversions below).
    if normalize not in ["true", "pred", "all", None]:
        msg = (
            "normalize must be one of "
            f"{{'true', 'pred', 'all', None}}, got {normalize}."
        )
        raise ValueError(msg)

    y_true, n_rows, n_cols, dtype = input_to_cuml_array(
        y_true,
        check_dtype=[cp.int32, cp.int64],
        convert_to_dtype=(cp.int32 if convert_dtype else None),
    )

    y_pred, _, _, _ = input_to_cuml_array(
        y_pred,
        check_dtype=[cp.int32, cp.int64],
        check_rows=n_rows,
        check_cols=n_cols,
        convert_to_dtype=(cp.int32 if convert_dtype else None),
    )

    if labels is None:
        labels = sorted_unique_labels(y_true, y_pred)
        n_labels = len(labels)
    else:
        labels, n_labels, _, _ = input_to_cupy_array(
            labels,
            check_dtype=[cp.int32, cp.int64],
            convert_to_dtype=(cp.int32 if convert_dtype else None),
            check_cols=1,
        )

    if sample_weight is None:
        # Unweighted counts: every sample contributes 1.
        sample_weight = cp.ones(n_rows, dtype=dtype)
    else:
        sample_weight, _, _, _ = input_to_cupy_array(
            sample_weight,
            check_dtype=[cp.float32, cp.float64, cp.int32, cp.int64],
            check_rows=n_rows,
            check_cols=n_cols,
        )

    with using_output_type("cupy"):
        # Remap labels to the contiguous range [0, n_labels) so they can be
        # used directly as matrix indices.
        y_true, _ = make_monotonic(y_true, labels, copy=True)
        y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]

    # Accumulate weights into an (n_labels, n_labels) matrix via a sparse
    # COO construction; duplicate (row, col) coordinates are summed.
    cm = cupyx.scipy.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)),
        shape=(n_labels, n_labels),
        dtype=np.float64,
    ).toarray()

    # Choose the accumulator dtype to always have high precision
    if sample_weight.dtype.kind in {"i", "u", "b"}:
        cm = cm.astype(np.int64)

    with np.errstate(all="ignore"):
        if normalize == "true":
            cm = cp.divide(cm, cm.sum(axis=1, keepdims=True))
        elif normalize == "pred":
            cm = cp.divide(cm, cm.sum(axis=0, keepdims=True))
        elif normalize == "all":
            cm = cp.divide(cm, cm.sum())
        # Rows/columns with zero support produce 0/0 = NaN; map those to 0.
        cm = cp.nan_to_num(cm)

    return cm
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/pairwise_distances.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import warnings
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
pd = cpu_only_import('pandas')
cudf = gpu_only_import('cudf')
scipy = cpu_only_import('scipy')
cupyx = gpu_only_import('cupyx')
import cuml.internals
from cuml.common import (input_to_cuml_array, CumlArray)
from cuml.internals.input_utils import sparse_scipy_to_cp
from cuml.common.sparse_utils import is_sparse
from cuml.internals.array_sparse import SparseCumlArray
from cuml.metrics.distance_type cimport DistanceType
from cuml.thirdparty_adapters import _get_mask
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    # Dense pairwise distances between the m rows of x and the n rows of y
    # (k features each), written to dist. Double-precision overload.
    void pairwise_distance(const handle_t &handle, const double *x,
                           const double *y, double *dist, int m, int n, int k,
                           DistanceType metric, bool isRowMajor,
                           double metric_arg) except +
    # Single-precision overload.
    void pairwise_distance(const handle_t &handle, const float *x,
                           const float *y, float *dist, int m, int n, int k,
                           DistanceType metric, bool isRowMajor,
                           float metric_arg) except +
    # Sparse (CSR: indptr/indices/data triplets) pairwise distances,
    # single precision.
    void pairwiseDistance_sparse(const handle_t &handle, float *x, float *y,
                                 float *dist, int x_nrows, int y_nrows,
                                 int n_cols, int x_nnz, int y_nnz,
                                 int* x_indptr, int* y_indptr,
                                 int* x_indices, int* y_indices,
                                 DistanceType metric,
                                 float metric_arg) except +
    # Sparse pairwise distances, double precision.
    # NOTE(review): metric_arg is declared `float` even in this double
    # overload — confirm this matches cuml/metrics/metrics.hpp.
    void pairwiseDistance_sparse(const handle_t &handle, double *x, double *y,
                                 double *dist, int x_nrows, int y_nrows,
                                 int n_cols, int x_nnz, int y_nnz,
                                 int* x_indptr, int* y_indptr,
                                 int* x_indices, int* y_indices,
                                 DistanceType metric,
                                 float metric_arg) except +
# List of available distance metrics in `pairwise_distances`
# Maps each user-facing metric name to the raft DistanceType enum value used
# for *dense* inputs. Several names are aliases of the same distance
# ('cityblock' == 'l1' == 'manhattan').
PAIRWISE_DISTANCE_METRICS = {
    "cityblock": DistanceType.L1,
    "cosine": DistanceType.CosineExpanded,
    "euclidean": DistanceType.L2SqrtUnexpanded,
    "l1": DistanceType.L1,
    "l2": DistanceType.L2SqrtUnexpanded,
    "manhattan": DistanceType.L1,
    "sqeuclidean": DistanceType.L2Expanded,
    "canberra": DistanceType.Canberra,
    "chebyshev": DistanceType.Linf,
    "minkowski": DistanceType.LpUnexpanded,
    "hellinger": DistanceType.HellingerExpanded,
    "correlation": DistanceType.CorrelationExpanded,
    "jensenshannon": DistanceType.JensenShannon,
    "hamming": DistanceType.HammingUnexpanded,
    "kldivergence": DistanceType.KLDivergence,
    "russellrao": DistanceType.RusselRaoExpanded,
    # NOTE(review): 'nan_euclidean' maps to plain L2Expanded here; the
    # missing-value adjustment is done in Python by nan_euclidean_distances.
    # Confirm this entry is only reached through that code path.
    "nan_euclidean": DistanceType.L2Expanded
}

# Metric-name -> DistanceType mapping for *sparse* (CSR) inputs. Note that
# the euclidean variants use the expanded form here, unlike the dense table.
PAIRWISE_DISTANCE_SPARSE_METRICS = {
    "cityblock": DistanceType.L1,
    "cosine": DistanceType.CosineExpanded,
    "euclidean": DistanceType.L2SqrtExpanded,
    "l1": DistanceType.L1,
    "l2": DistanceType.L2SqrtExpanded,
    "manhattan": DistanceType.L1,
    "sqeuclidean": DistanceType.L2Expanded,
    "canberra": DistanceType.Canberra,
    "inner_product": DistanceType.InnerProduct,
    "minkowski": DistanceType.LpUnexpanded,
    "jaccard": DistanceType.JaccardExpanded,
    "hellinger": DistanceType.HellingerExpanded,
    "chebyshev": DistanceType.Linf,
    "dice": DistanceType.DiceExpanded
}
def _determine_metric(metric_str, is_sparse_=False):
# Available options in scikit-learn and their pairs. See
# sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS:
# 'cityblock': L1
# 'cosine': CosineExpanded
# 'euclidean': L2SqrtUnexpanded
# 'haversine': N/A
# 'l2': L2SqrtUnexpanded
# 'l1': L1
# 'manhattan': L1
# 'nan_euclidean': N/A
# 'sqeuclidean': L2Unexpanded
# Note: many are duplicates following this:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/pairwise.py#L1321
if metric_str == 'haversine':
raise ValueError(" The metric: '{}', is not supported at this time."
.format(metric_str))
if not is_sparse_ and (metric_str not in PAIRWISE_DISTANCE_METRICS):
if metric_str in PAIRWISE_DISTANCE_SPARSE_METRICS:
raise ValueError(" The metric: '{}', is only available on "
"sparse data.".format(metric_str))
else:
raise ValueError("Unknown metric: {}".format(metric_str))
elif is_sparse_ and (metric_str not in PAIRWISE_DISTANCE_SPARSE_METRICS):
raise ValueError("Unknown metric: {}".format(metric_str))
if is_sparse_:
return PAIRWISE_DISTANCE_SPARSE_METRICS[metric_str]
else:
return PAIRWISE_DISTANCE_METRICS[metric_str]
def nan_euclidean_distances(
    X, Y=None, *, squared=False, missing_values=cp.nan
):
    """Calculate the euclidean distances in the presence of missing values.
    Compute the euclidean distance between each pair of samples in X and Y,
    where Y=X is assumed if Y=None. When calculating the distance between a
    pair of samples, this formulation ignores feature coordinates with a
    missing value in either sample and scales up the weight of the remaining
    coordinates:
    dist(x,y) = sqrt(weight * sq. distance from present coordinates)
    where,
    weight = Total # of coordinates / # of present coordinates
    For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
    is:
    .. math::
        \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
    If all the coordinates are missing or if there are no common present
    coordinates then NaN is returned for that pair.
    Parameters
    ----------
    X : Dense matrix of shape (n_samples_X, n_features)
        Acceptable formats: cuDF DataFrame, Pandas DataFrame, NumPy ndarray,
        cuda array interface compliant array like CuPy.
    Y : Dense matrix of shape (n_samples_Y, n_features), default=None
        Acceptable formats: cuDF DataFrame, Pandas DataFrame, NumPy ndarray,
        cuda array interface compliant array like CuPy.
    squared : bool, default=False
        Return squared Euclidean distances.
    missing_values : np.nan or int, default=np.nan
        Representation of missing value.
    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.
    """
    # NOTE(review): fillna(0, inplace=True) mutates the *caller's* DataFrame,
    # and zeroing the NaNs here happens before the missing-value masks below
    # are computed — for DataFrame inputs with NaN missing_values the masks
    # would then be empty. Confirm whether this is the intended behavior.
    if isinstance(X, cudf.DataFrame) or isinstance(X, pd.DataFrame):
        if (X.isnull().any()).any():
            X.fillna(0, inplace=True)
    if isinstance(Y, cudf.DataFrame) or isinstance(Y, pd.DataFrame):
        if (Y.isnull().any()).any():
            Y.fillna(0, inplace=True)
    X_m, _n_samples_x, _n_features_x, dtype_x = \
        input_to_cuml_array(X, order="K", check_dtype=[np.float32, np.float64])
    if Y is None:
        # Self-distance case: reuse the converted X.
        Y = X_m
    Y_m, _n_samples_y, _n_features_y, _dtype_y = \
        input_to_cuml_array(
            Y, order=X_m.order, convert_to_dtype=dtype_x,
            check_dtype=[dtype_x])
    X_m = cp.asarray(X_m)
    Y_m = cp.asarray(Y_m)
    # Get missing mask for X
    missing_X = _get_mask(X_m, missing_values)
    # Get missing mask for Y
    # NOTE(review): `Y is X` compares the converted X (CumlArray) with the
    # original input object, so when the caller passed Y explicitly (or X was
    # converted) this recomputes the mask even for identical data — verify.
    missing_Y = missing_X if Y is X else _get_mask(Y_m, missing_values)
    # set missing values to zero
    X_m[missing_X] = 0
    Y_m[missing_Y] = 0
    # Adjust distances for squared
    if X_m.shape == Y_m.shape:
        if (X_m == Y_m).all():
            distances = cp.asarray(pairwise_distances(
                X_m, metric="sqeuclidean"))
        else:
            distances = cp.asarray(pairwise_distances(
                X_m, Y_m, metric="sqeuclidean"))
    else:
        distances = cp.asarray(pairwise_distances(
            X_m, Y_m, metric="sqeuclidean"))
    # Adjust distances for missing values
    # Remove the spurious contribution of present coordinates that face a
    # missing coordinate in the other sample (their value was squared into
    # the distance against an artificial 0).
    XX = X_m * X_m
    YY = Y_m * Y_m
    distances -= cp.dot(XX, missing_Y.T)
    distances -= cp.dot(missing_X, YY.T)
    cp.clip(distances, 0, None, out=distances)
    # NOTE(review): X_m and Y_m were rebound by cp.asarray above, so this
    # identity check may never hold even in the Y=None case — confirm.
    if X_m is Y_m:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        cp.fill_diagonal(distances, 0.0)
    # Count, per pair, how many coordinates are present in both samples.
    present_X = 1 - missing_X
    present_Y = present_X if Y_m is X_m else ~missing_Y
    present_count = cp.dot(present_X, present_Y.T)
    # Pairs with no common present coordinate are undefined -> NaN.
    distances[present_count == 0] = cp.nan
    # avoid divide by zero
    cp.maximum(1, present_count, out=present_count)
    # Rescale: total #features / #present coordinates.
    distances /= present_count
    distances *= X_m.shape[1]
    if not squared:
        cp.sqrt(distances, out=distances)
    return distances
@cuml.internals.api_return_array(get_output_type=True)
def pairwise_distances(X, Y=None, metric="euclidean", handle=None,
                       convert_dtype=True, metric_arg=2, **kwds):
    """
    Compute the distance matrix from a vector array `X` and optional `Y`.
    This method takes either one or two vector arrays, and returns a distance
    matrix.
    If `Y` is given (default is `None`), then the returned matrix is the
    pairwise distance between the arrays from both `X` and `Y`.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', \
        'manhattan'].
      Sparse matrices are supported, see 'sparse_pairwise_distances'.
    - From scipy.spatial.distance: ['sqeuclidean']
      See the documentation for scipy.spatial.distance for details on this
      metric. Sparse matrices are supported.
    Parameters
    ----------
    X : Dense or sparse matrix (device or host) of shape
        (n_samples_x, n_features)
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy, or
        cupyx.scipy.sparse for sparse input
    Y : array-like (device or host) of shape (n_samples_y, n_features),\
        optional
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    metric : {"cityblock", "cosine", "euclidean", "l1", "l2", "manhattan", \
        "sqeuclidean"}
        The metric to use when calculating distance between instances in a
        feature array.
    convert_dtype : bool, optional (default = True)
        When set to True, the method will, when necessary, convert
        Y to be the same data type as X if they differ. This
        will increase memory used for the method.
    Returns
    -------
    D : array [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix `X`, if `Y` is None.
        If `Y` is not `None`, then D_{i, j} is the distance between the ith
        array from `X` and the jth array from `Y`.
    Examples
    --------
    >>> import cupy as cp
    >>> from cuml.metrics import pairwise_distances
    >>> X = cp.array([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    >>> Y = cp.array([[1.0, 0.0], [2.0, 1.0]])
    >>> # Euclidean Pairwise Distance, Single Input:
    >>> pairwise_distances(X, metric='euclidean')
    array([[0.   , 2.236..., 5.830...],
        [2.236..., 0.   , 3.605...],
        [5.830..., 3.605..., 0.   ]])
    >>> # Cosine Pairwise Distance, Multi-Input:
    >>> pairwise_distances(X, Y, metric='cosine')
    array([[0.445...  , 0.131...],
        [0.485..., 0.156...],
        [0.470..., 0.146...]])
    >>> # Manhattan Pairwise Distance, Multi-Input:
    >>> pairwise_distances(X, Y, metric='manhattan')
    array([[ 4.,  2.],
        [ 7.,  5.],
        [12., 10.]])
    """
    # Sparse inputs take a completely separate code path.
    if is_sparse(X):
        return sparse_pairwise_distances(X, Y, metric, handle,
                                         convert_dtype, **kwds)
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()
    # nan_euclidean is implemented in Python/CuPy, not in the C++ primitive,
    # so it bypasses the pointer-based path below entirely.
    if metric in ['nan_euclidean']:
        return nan_euclidean_distances(X, Y, **kwds)
    # russellrao is defined on boolean data: coerce non-{0,1} input and warn.
    if metric in ['russellrao'] and not np.all(X.data == 1.):
        warnings.warn("X was converted to boolean for metric {}"
                      .format(metric))
        X = np.where(X != 0., 1.0, 0.0)
    # Get the input arrays, preserve order and type where possible
    X_m, n_samples_x, n_features_x, dtype_x = \
        input_to_cuml_array(X, order="K", check_dtype=[np.float32, np.float64])
    # Get the order from the CumlArray
    input_order = X_m.order
    cdef uintptr_t d_X_ptr
    cdef uintptr_t d_Y_ptr
    cdef uintptr_t d_dest_ptr
    if (Y is not None):
        # Check for the odd case where one dimension of X is 1. In this case,
        # CumlArray always returns order=="C" so instead get the order from Y
        if (n_samples_x == 1 or n_features_x == 1):
            input_order = "K"
        if metric in ['russellrao'] and not np.all(Y.data == 1.):
            warnings.warn("Y was converted to boolean for metric {}"
                          .format(metric))
            Y = np.where(Y != 0., 1.0, 0.0)
        Y_m, n_samples_y, n_features_y, dtype_y = \
            input_to_cuml_array(Y, order=input_order,
                                convert_to_dtype=(dtype_x if convert_dtype
                                                  else None),
                                check_dtype=[dtype_x])
        # Get the order from Y if necessary (It's possible to set order="F" in
        # input_to_cuml_array and have Y_m.order=="C")
        if (input_order == "K"):
            input_order = Y_m.order
    else:
        # Shallow copy X variables
        Y_m = X_m
        n_samples_y = n_samples_x
        n_features_y = n_features_x
        dtype_y = dtype_x
    # The C++ primitive needs to know the memory layout of the buffers.
    is_row_major = input_order == "C"
    # Check feature sizes are equal
    if (n_features_x != n_features_y):
        raise ValueError("Incompatible dimension for X and Y matrices: \
                         X.shape[1] == {} while Y.shape[1] == {}"
                         .format(n_features_x, n_features_y))
    # Get the metric string to int
    metric_val = _determine_metric(metric)
    # Create the output array
    dest_m = CumlArray.zeros((n_samples_x, n_samples_y), dtype=dtype_x,
                             order=input_order)
    d_X_ptr = X_m.ptr
    d_Y_ptr = Y_m.ptr
    d_dest_ptr = dest_m.ptr
    # Now execute the functions. The two branches are identical apart from
    # the element type of the buffers.
    if (dtype_x == np.float32):
        pairwise_distance(handle_[0],
                          <float*> d_X_ptr,
                          <float*> d_Y_ptr,
                          <float*> d_dest_ptr,
                          <int> n_samples_x,
                          <int> n_samples_y,
                          <int> n_features_x,
                          <DistanceType> metric_val,
                          <bool> is_row_major,
                          <float> metric_arg)
    elif (dtype_x == np.float64):
        pairwise_distance(handle_[0],
                          <double*> d_X_ptr,
                          <double*> d_Y_ptr,
                          <double*> d_dest_ptr,
                          <int> n_samples_x,
                          <int> n_samples_y,
                          <int> n_features_x,
                          <DistanceType> metric_val,
                          <bool> is_row_major,
                          <double> metric_arg)
    else:
        raise NotImplementedError("Unsupported dtype: {}".format(dtype_x))
    # Sync on the stream before exiting. pairwise_distance does not sync.
    handle.sync()
    del X_m
    del Y_m
    return dest_m
@cuml.internals.api_return_array(get_output_type=True)
def sparse_pairwise_distances(X, Y=None, metric="euclidean", handle=None,
                              convert_dtype=True, metric_arg=2, **kwds):
    """
    Compute the distance matrix from a vector array `X` and optional `Y`.
    This method takes either one or two sparse vector arrays, and returns a
    dense distance matrix.
    If `Y` is given (default is `None`), then the returned matrix is the
    pairwise distance between the arrays from both `X` and `Y`.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', \
        'manhattan'].
    - From scipy.spatial.distance: ['sqeuclidean', 'canberra', 'minkowski', \
        'jaccard', 'chebyshev', 'dice']
      See the documentation for scipy.spatial.distance for details on these
      metrics.
    - ['inner_product', 'hellinger']
    Parameters
    ----------
    X : array-like (device or host) of shape (n_samples_x, n_features)
        Acceptable formats: SciPy or Cupy sparse array
    Y : array-like (device or host) of shape (n_samples_y, n_features),\
        optional
        Acceptable formats: SciPy or Cupy sparse array
    metric : {"cityblock", "cosine", "euclidean", "l1", "l2", "manhattan", \
        "sqeuclidean", "canberra", "lp", "inner_product", "minkowski", \
        "jaccard", "hellinger", "chebyshev", "linf", "dice"}
        The metric to use when calculating distance between instances in a
        feature array.
    convert_dtype : bool, optional (default = True)
        When set to True, the method will, when necessary, convert
        Y to be the same data type as X if they differ. This
        will increase memory used for the method.
    metric_arg : float, optional (default = 2)
        Additional metric-specific argument.
        For Minkowski it's the p-norm to apply.
    Returns
    -------
    D : array [n_samples_x, n_samples_x] or [n_samples_x, n_samples_y]
        A dense distance matrix D such that D_{i, j} is the distance between
        the ith and jth vectors of the given matrix `X`, if `Y` is None.
        If `Y` is not `None`, then D_{i, j} is the distance between the ith
        array from `X` and the jth array from `Y`.
    Examples
    --------
    .. code-block:: python
        >>> import cupyx
        >>> from cuml.metrics import sparse_pairwise_distances
        >>> X = cupyx.scipy.sparse.random(2, 3, density=0.5, random_state=9)
        >>> Y = cupyx.scipy.sparse.random(1, 3, density=0.5, random_state=9)
        >>> X.todense()
        array([[0.8098...,  0.537..., 0. ],
            [0.        ,  0.856..., 0. ]])
        >>> Y.todense()
        array([[0.        , 0.        , 0.993...]])
        >>> # Cosine Pairwise Distance, Single Input:
        >>> sparse_pairwise_distances(X, metric='cosine')
        array([[0.      , 0.447...],
            [0.447..., 0.      ]])
        >>> # Squared euclidean Pairwise Distance, Multi-Input:
        >>> sparse_pairwise_distances(X, Y, metric='sqeuclidean')
        array([[1.931...],
            [1.720...]])
        >>> # Canberra Pairwise Distance, Multi-Input:
        >>> sparse_pairwise_distances(X, Y, metric='canberra')
        array([[3.],
            [2.]])
    """
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()
    if (not is_sparse(X)) or (Y is not None and not is_sparse(Y)):
        raise ValueError("Input matrices are not sparse.")
    # Only float32/float64 are supported by the C++ primitive below.
    dtype_x = X.data.dtype
    if dtype_x not in [cp.float32, cp.float64]:
        raise TypeError("Unsupported dtype: {}".format(dtype_x))
    # Host (SciPy) sparse input is moved to device memory first.
    if scipy.sparse.issparse(X):
        X = sparse_scipy_to_cp(X, dtype=None)
    # jaccard/dice are set-based metrics: coerce data to {0, 1} and warn.
    if metric in ['jaccard', 'dice'] and not cp.all(X.data == 1.):
        warnings.warn("X was converted to boolean for metric {}"
                      .format(metric))
        X.data = (X.data != 0.).astype(dtype_x)
    X_m = SparseCumlArray(X)
    n_samples_x, n_features_x = X_m.shape
    if Y is None:
        # Self-distance case: reuse X's device buffers.
        Y_m = X_m
        dtype_y = dtype_x
    else:
        if scipy.sparse.issparse(Y):
            Y = sparse_scipy_to_cp(Y, dtype=dtype_x if convert_dtype else None)
        if convert_dtype:
            Y = Y.astype(dtype_x)
        elif dtype_x != Y.data.dtype:
            raise TypeError("Different data types unsupported when "
                            "convert_dtypes=False")
        if metric in ['jaccard', 'dice'] and not cp.all(Y.data == 1.):
            dtype_y = Y.data.dtype
            warnings.warn("Y was converted to boolean for metric {}"
                          .format(metric))
            Y.data = (Y.data != 0.).astype(dtype_y)
        Y_m = SparseCumlArray(Y)
    n_samples_y, n_features_y = Y_m.shape
    # Check feature sizes are equal
    if n_features_x != n_features_y:
        raise ValueError("Incompatible dimension for X and Y matrices: \
                         X.shape[1] == {} while Y.shape[1] == {}"
                         .format(n_features_x, n_features_y))
    # Get the metric string to a distance enum
    metric_val = _determine_metric(metric, is_sparse_=True)
    # Row counts derived from CSR indptr; equal to n_samples_x/n_samples_y.
    x_nrows, y_nrows = X_m.indptr.shape[0] - 1, Y_m.indptr.shape[0] - 1
    dest_m = CumlArray.zeros((x_nrows, y_nrows), dtype=dtype_x)
    cdef uintptr_t d_dest_ptr = dest_m.ptr
    cdef uintptr_t d_X_ptr = X_m.data.ptr
    cdef uintptr_t X_m_indptr = X_m.indptr.ptr
    cdef uintptr_t X_m_indices = X_m.indices.ptr
    cdef uintptr_t d_Y_ptr = Y_m.data.ptr
    cdef uintptr_t Y_m_indptr = Y_m.indptr.ptr
    cdef uintptr_t Y_m_indices = Y_m.indices.ptr
    if (dtype_x == np.float32):
        pairwiseDistance_sparse(handle_[0],
                                <float*> d_X_ptr,
                                <float*> d_Y_ptr,
                                <float*> d_dest_ptr,
                                <int> x_nrows,
                                <int> y_nrows,
                                <int> n_features_x,
                                <int> X_m.nnz,
                                <int> Y_m.nnz,
                                <int*> X_m_indptr,
                                <int*> Y_m_indptr,
                                <int*> X_m_indices,
                                <int*> Y_m_indices,
                                <DistanceType> metric_val,
                                <float> metric_arg)
    elif (dtype_x == np.float64):
        # NOTE(review): this branch passes n_samples_x/n_samples_y where the
        # float32 branch passes x_nrows/y_nrows. The values are equal (both
        # come from the same CSR shape), but the branches should use the same
        # names for consistency. Also note metric_arg is cast to <float> even
        # in the double instantiation -- confirm against the C++ signature.
        pairwiseDistance_sparse(handle_[0],
                                <double*> d_X_ptr,
                                <double*> d_Y_ptr,
                                <double*> d_dest_ptr,
                                <int> n_samples_x,
                                <int> n_samples_y,
                                <int> n_features_x,
                                <int> X_m.nnz,
                                <int> Y_m.nnz,
                                <int*> X_m_indptr,
                                <int*> Y_m_indptr,
                                <int*> X_m_indices,
                                <int*> Y_m_indices,
                                <DistanceType> metric_val,
                                <float> metric_arg)
    # No else branch: dtype was already validated at the top of the function.
    # Sync on the stream before exiting.
    handle.sync()
    del X_m
    del Y_m
    return dest_m
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/pairwise_kernels.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.input_utils import input_to_cupy_array
from cuml.metrics import pairwise_distances
import cuml.internals
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
import inspect
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from("numba", "cuda")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def linear_kernel(X, Y):
    """Compute the linear kernel K(X, Y) = X @ Y.T between row vectors."""
    return cp.dot(X, Y.T)
def polynomial_kernel(X, Y, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel (gamma * <x, y> + coef0) ** degree.

    When ``gamma`` is None it defaults to ``1 / n_features``.
    """
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    result = cp.dot(X, Y.T)
    result *= gamma
    result += coef0
    result **= degree
    return result
def sigmoid_kernel(X, Y, gamma=None, coef0=1):
    """Compute the sigmoid kernel tanh(gamma * <x, y> + coef0).

    When ``gamma`` is None it defaults to ``1 / n_features``.
    """
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    result = cp.dot(X, Y.T)
    result *= gamma
    result += coef0
    # In-place tanh keeps memory usage flat.
    cp.tanh(result, result)
    return result
def rbf_kernel(X, Y, gamma=None):
    """Compute the RBF kernel exp(-gamma * ||x - y||^2).

    When ``gamma`` is None it defaults to ``1 / n_features``.
    """
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    sq_dists = cp.asarray(pairwise_distances(X, Y, metric="sqeuclidean"))
    sq_dists *= -gamma
    # In-place exp keeps memory usage flat.
    cp.exp(sq_dists, sq_dists)
    return sq_dists
def laplacian_kernel(X, Y, gamma=None):
    """Compute the Laplacian kernel exp(-gamma * ||x - y||_1).

    When ``gamma`` is None it defaults to ``1 / n_features``.
    """
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    l1_dists = cp.asarray(pairwise_distances(X, Y, metric="manhattan"))
    result = -gamma * l1_dists
    cp.exp(result, result)
    return result
def cosine_similarity(X, Y):
    """Compute cosine similarity, i.e. 1 - cosine distance.

    NaNs produced by zero-norm rows are mapped to 0 in place.
    """
    sim = 1.0 - cp.asarray(pairwise_distances(X, Y, metric="cosine"))
    return cp.nan_to_num(sim, copy=False)
@cuda.jit(device=True)
def additive_chi2_kernel_element(x, y):
    # Additive chi-squared kernel for a single row pair:
    #   k(x, y) = -sum_i (x[i] - y[i])^2 / (x[i] + y[i])
    # skipping terms whose denominator is zero.
    # NOTE(review): the local names are swapped relative to their roles --
    # ``denom`` holds the numerator's difference and ``nom`` the denominator.
    res = 0.0
    for i in range(len(x)):
        denom = x[i] - y[i]
        nom = x[i] + y[i]
        if nom != 0.0:
            res += denom * denom / nom
    return -res
def additive_chi2_kernel(X, Y):
    # Evaluate the element-wise device function across all row pairs.
    return custom_kernel(X, Y, additive_chi2_kernel_element)
def chi2_kernel(X, Y, gamma=1.0):
    """Compute the exponential chi-squared kernel exp(gamma * k_add(x, y)).

    ``k_add`` is the (non-positive) additive chi-squared kernel, so the
    result lies in (0, 1].
    """
    result = additive_chi2_kernel(X, Y)
    result *= gamma
    # In-place exp keeps memory usage flat.
    return cp.exp(result, result)
# Mapping of metric-name strings accepted by ``pairwise_kernels`` to their
# kernel implementations ("poly" is an alias of "polynomial").
PAIRWISE_KERNEL_FUNCTIONS = {
    "linear": linear_kernel,
    "additive_chi2": additive_chi2_kernel,
    "chi2": chi2_kernel,
    "cosine": cosine_similarity,
    "laplacian": laplacian_kernel,
    "polynomial": polynomial_kernel,
    "poly": polynomial_kernel,
    "rbf": rbf_kernel,
    "sigmoid": sigmoid_kernel,
}
def _filter_params(func, filter_params, **kwds):
# get all the possible extra function arguments, excluding x, y
py_func = func.py_func if hasattr(func, "py_func") else func
all_func_kwargs = list(inspect.signature(py_func).parameters.values())
if len(all_func_kwargs) < 2:
raise ValueError("Expected at least two arguments to kernel function.")
extra_arg_names = set(arg.name for arg in all_func_kwargs[2:])
if not filter_params:
if not set(kwds.keys()) <= extra_arg_names:
raise ValueError(
"kwds contains arguments not used by kernel function"
)
return {k: v for k, v in kwds.items() if k in extra_arg_names}
def _kwds_to_tuple_args(func, **kwds):
# Returns keyword arguments formed as a tuple
# (numba kernels cannot deal with kwargs as a dict)
if not hasattr(func, "py_func"):
raise TypeError("Kernel function should be a numba device function.")
# get all the possible extra function arguments, excluding x, y
all_func_kwargs = list(inspect.signature(func.py_func).parameters.values())
if len(all_func_kwargs) < 2:
raise ValueError("Expected at least two arguments to kernel function.")
all_func_kwargs = all_func_kwargs[2:]
if any(p.default is inspect.Parameter.empty for p in all_func_kwargs):
raise ValueError(
"Extra kernel parameters must be passed as keyword arguments."
)
all_func_kwargs = [(k.name, k.default) for k in all_func_kwargs]
kwds_tuple = tuple(
kwds[k] if k in kwds.keys() else v for (k, v) in all_func_kwargs
)
return kwds_tuple
# Cache of JIT-compiled pairwise kernels, keyed by
# (device function, extra-arg tuple, X dtype, Y dtype); see custom_kernel.
_kernel_cache = {}
def custom_kernel(X, Y, func, **kwds):
    # Evaluate a user-supplied numba device function over every row pair of
    # X and Y, returning the dense kernel matrix K (float64).
    kwds_tuple = _kwds_to_tuple_args(func, **kwds)
    def evaluate_pairwise_kernels(X, Y, K):
        # One CUDA thread per output element; the flat thread index is
        # mapped to a (row, col) coordinate of K.
        idx = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
        X_m = X.shape[0]
        Y_m = Y.shape[0]
        row = idx // Y_m
        col = idx % Y_m
        if idx < X_m * Y_m:
            if X is Y and row <= col:
                # matrix is symmetric, reuse half the evaluations
                k = func(X[row], Y[col], *kwds_tuple)
                K[row, col] = k
                K[col, row] = k
            else:
                k = func(X[row], Y[col], *kwds_tuple)
                K[row, col] = k
    if Y is None:
        Y = X
    if X.shape[1] != Y.shape[1]:
        raise ValueError("X and Y have different dimensions.")
    # Here we force K to use 64 bit, even if the input is 32 bit
    # 32 bit K results in serious numerical stability problems
    K = cp.zeros((X.shape[0], Y.shape[0]), dtype=np.float64)
    # Reuse a previously compiled kernel when the device function, its extra
    # arguments, and the input dtypes all match; the closure captured by a
    # cached kernel carries the same func/kwds_tuple values as the key.
    key = (func, kwds_tuple, X.dtype, Y.dtype)
    if key in _kernel_cache:
        compiled_kernel = _kernel_cache[key]
    else:
        compiled_kernel = cuda.jit(evaluate_pairwise_kernels)
        _kernel_cache[key] = compiled_kernel
    # Launch with one thread per element of K.
    compiled_kernel.forall(X.shape[0] * Y.shape[0])(X, Y, K)
    return K
@cuml.internals.api_return_array(get_output_type=True)
def pairwise_kernels(
    X,
    Y=None,
    metric="linear",
    *,
    filter_params=False,
    convert_dtype=True,
    **kwds,
):
    """
    Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.

    If Y is given (default is None), then the returned matrix is the
    pairwise kernel between the arrays from both X and Y.

    Valid string values for metric are: ['additive_chi2', 'chi2', 'linear',
    'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine'].
    Alternatively, metric may be 'precomputed' or a numba device function.

    Parameters
    ----------
    X : Dense matrix (device or host) of shape (n_samples_X, n_samples_X) \
        or (n_samples_X, n_features)
        Array of pairwise kernels between samples, or a feature array.
        The shape is (n_samples_X, n_samples_X) if metric == "precomputed"
        and (n_samples_X, n_features) otherwise.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    Y : Dense matrix (device or host) of shape (n_samples_Y, n_features), \
        default=None
        A second feature array, only if X has shape (n_samples_X,
        n_features). Same acceptable formats as X.
    metric : str or callable (numba device function), default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is "precomputed", X is assumed to be a
        kernel matrix. If metric is a callable, it is invoked on each pair
        of rows and must return the kernel value as a single number.
    filter_params : bool, default=False
        Whether to filter invalid parameters or not.
    convert_dtype : bool, optional (default = True)
        When set to True, the method will, when necessary, convert
        Y to be the same data type as X if they differ. This
        will increase memory used for the method.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : ndarray of shape (n_samples_X, n_samples_X) or \
        (n_samples_X, n_samples_Y)
        Kernel matrix such that K_{i, j} is the kernel between the ith row
        of X and the jth row of Y (or of X itself when Y is None).

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    X = input_to_cupy_array(X).array
    Y = X if Y is None else input_to_cupy_array(Y).array
    if X.shape[1] != Y.shape[1]:
        raise ValueError("X and Y have different dimensions.")
    # A precomputed kernel matrix is passed through untouched.
    if metric == "precomputed":
        return X
    if metric in PAIRWISE_KERNEL_FUNCTIONS:
        kernel = PAIRWISE_KERNEL_FUNCTIONS[metric]
        accepted = _filter_params(kernel, filter_params, **kwds)
        return kernel(X, Y, **accepted)
    if isinstance(metric, str):
        raise ValueError("Unknown kernel %r" % metric)
    # Callable metric: compile and launch a custom numba kernel.
    accepted = _filter_params(metric, filter_params, **kwds)
    return custom_kernel(X, Y, metric, **accepted)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/_ranking.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
from cuml.internals.input_utils import input_to_cupy_array
from cuml.internals.array import CumlArray
import cuml.internals
from cuml.internals.safe_imports import cpu_only_import
import typing
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@cuml.internals.api_return_generic(get_output_type=True)
def precision_recall_curve(
    y_true, probs_pred
) -> typing.Tuple[CumlArray, CumlArray, CumlArray]:
    """
    Compute precision-recall pairs for different probability thresholds
    .. note:: this implementation is restricted to the binary classification
        task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the
        number of true positives and ``fp`` the number of false positives. The
        precision is intuitively the ability of the classifier not to label as
        positive a sample that is negative.
        The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
        of true positives and ``fn`` the number of false negatives. The recall
        is intuitively the ability of the classifier to find all the positive
        samples. The last precision and recall values are 1. and 0.
        respectively and do not have a corresponding threshold. This ensures
        that the graph starts on the y axis.
    Read more in the scikit-learn's `User Guide
    <https://scikit-learn.org/stable/modules/model_evaluation.html#precision-recall-f-measure-metrics>`_.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels, {0, 1}.
    probs_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probs_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.
    Examples
    --------
    .. code-block:: python
        >>> import cupy as cp
        >>> from cuml.metrics import precision_recall_curve
        >>> y_true = cp.array([0, 0, 1, 1])
        >>> y_scores = cp.array([0.1, 0.4, 0.35, 0.8])
        >>> precision, recall, thresholds = precision_recall_curve(
        ...     y_true, y_scores)
        >>> print(precision)
        [0.666... 0.5  1.  1. ]
        >>> print(recall)
        [1.  0.5 0.5 0. ]
        >>> print(thresholds)
        [0.35 0.4  0.8 ]
    """
    y_true, n_rows, n_cols, ytype = input_to_cupy_array(
        y_true, check_dtype=[np.int32, np.int64, np.float32, np.float64]
    )
    y_score, _, _, _ = input_to_cupy_array(
        probs_pred,
        check_dtype=[np.int32, np.int64, np.float32, np.float64],
        check_rows=n_rows,
        check_cols=n_cols,
    )
    # With no positive samples recall is undefined everywhere.
    if cp.any(y_true) == 0:
        raise ValueError(
            "precision_recall_curve cannot be used when " "y_true is all zero."
        )
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # _binary_clf_curve accumulates by decreasing score; flip so the arrays
    # run in increasing-threshold order, matching `thresholds`.
    precision = cp.flip(tps / (tps + fps), axis=0)
    recall = cp.flip(tps / tps[-1], axis=0)
    # Drop all but the last of the leading points with full recall, so the
    # curve starts at the highest threshold that still has recall == 1.
    n = (recall == 1).sum()
    if n > 1:
        precision = precision[n - 1 :]
        recall = recall[n - 1 :]
        thresholds = thresholds[n - 1 :]
    # Append the (precision=1, recall=0) end point, which has no threshold.
    precision = cp.concatenate([precision, cp.ones(1)])
    recall = cp.concatenate([recall, cp.zeros(1)])
    return precision, recall, thresholds
@cuml.internals.api_return_any()
def roc_auc_score(y_true, y_score):
    """
    Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)
    from prediction scores.

    .. note:: this implementation can only be used with binary
        classification.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True binary labels.
    y_score : array-like of shape (n_samples,)
        Target scores: probability estimates or non-thresholded decision
        values (as returned by `decision_function` on some classifiers) for
        the class with the greater label.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from cuml.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> print(roc_auc_score(y_true, y_scores))
    0.75
    """
    # Move both inputs onto the device; scores must match the label shape.
    labels, n_rows, n_cols, _ = input_to_cupy_array(
        y_true, check_dtype=[np.int32, np.int64, np.float32, np.float64]
    )
    scores, _, _, _ = input_to_cupy_array(
        y_score,
        check_dtype=[np.int32, np.int64, np.float32, np.float64],
        check_rows=n_rows,
        check_cols=n_cols,
    )
    return _binary_roc_auc_score(labels, scores)
def _binary_clf_curve(y_true, y_score):
    """Cumulative false/true positive counts per distinct score threshold.

    Returns ``(fps, tps, thresholds)`` where ``fps[i]``/``tps[i]`` count
    negatives/positives with a score at least as high as the ith-highest
    distinct score, and ``thresholds`` is the ascending set of unique scores.
    """
    if y_true.dtype.kind == "f" and np.any(y_true != y_true.astype(int)):
        raise ValueError("Continuous format of y_true is not supported.")
    # Order samples by decreasing score.
    order = cp.argsort(-y_score)
    sorted_score = y_score[order]
    positives = y_true[order].astype("float32")  # true-positive increments
    negatives = 1 - positives                    # false-positive increments
    # Samples that share a score fall into the same threshold group.
    group = _group_same_scores(sorted_score)
    n_groups = int(group[-1])
    tps = _addup_x_in_group(group, positives,
                            cp.zeros(n_groups, dtype="float32"))
    fps = _addup_x_in_group(group, negatives,
                            cp.zeros(n_groups, dtype="float32"))
    tps = cp.cumsum(tps)
    fps = cp.cumsum(fps)
    thresholds = cp.unique(y_score)
    return fps, tps, thresholds
def _binary_roc_auc_score(y_true, y_score):
    """Compute binary roc_auc_score using cupy"""
    # AUC is undefined when only a single class is present.
    if cp.unique(y_true).shape[0] == 1:
        raise ValueError(
            "roc_auc_score cannot be used when "
            "only one class present in y_true. ROC AUC score "
            "is not defined in that case."
        )
    # A constant score ranks every sample equally: chance-level AUC.
    if cp.unique(y_score).shape[0] == 1:
        return 0.5
    fps, tps, _ = _binary_clf_curve(y_true, y_score)
    true_pos_rate = tps / tps[-1]
    false_pos_rate = fps / fps[-1]
    return _calculate_area_under_curve(false_pos_rate, true_pos_rate).item()
def _addup_x_in_group(group, x, result):
    # Scatter-add each x[i] into result[group[i] - 1] on the GPU.
    # Group ids are 1-based (see _group_same_scores), hence the "- 1" in the
    # kernel; atomicAdd handles concurrent updates to the same bin.
    addup_x_in_group_kernel = cp.RawKernel(
        r"""
        extern "C" __global__
        void addup_x_in_group(const int* group, const float* x,
            float* result, int N)
        {
        int tid = blockDim.x * blockIdx.x + threadIdx.x;
        if(tid<N){
            atomicAdd(result + group[tid] - 1, x[tid]);
        }
        }
        """,
        "addup_x_in_group",
    )
    N = x.shape[0]
    tpb = 256  # threads per block
    bpg = math.ceil(N / tpb)  # blocks per grid, covering all N elements
    addup_x_in_group_kernel((bpg,), (tpb,), (group, x, result, N))
    return result
def _group_same_scores(sorted_score):
mask = cp.empty(sorted_score.shape, dtype=cp.bool_)
mask[0] = True
mask[1:] = sorted_score[1:] != sorted_score[:-1]
group = cp.cumsum(mask, dtype=cp.int32)
return group
def _calculate_area_under_curve(fpr, tpr):
"""helper function to calculate area under curve given fpr & tpr arrays"""
return (
cp.sum((fpr[1:] - fpr[:-1]) * (tpr[1:] + tpr[:-1])) / 2
+ tpr[0] * fpr[0] / 2
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/__init__.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.metrics.trustworthiness import trustworthiness
from cuml.metrics.regression import r2_score
from cuml.metrics.regression import mean_squared_error
from cuml.metrics.regression import mean_squared_log_error
from cuml.metrics.regression import mean_absolute_error
from cuml.metrics.accuracy import accuracy_score
from cuml.metrics.cluster.adjusted_rand_index import adjusted_rand_score
from cuml.metrics._ranking import roc_auc_score
from cuml.metrics._ranking import precision_recall_curve
from cuml.metrics._classification import log_loss
from cuml.metrics.cluster.homogeneity_score import (
cython_homogeneity_score as homogeneity_score,
)
from cuml.metrics.cluster.completeness_score import (
cython_completeness_score as completeness_score,
)
from cuml.metrics.cluster.mutual_info_score import (
cython_mutual_info_score as mutual_info_score,
)
from cuml.metrics.confusion_matrix import confusion_matrix
from cuml.metrics.cluster.entropy import cython_entropy as entropy
from cuml.metrics.pairwise_distances import pairwise_distances
from cuml.metrics.pairwise_distances import sparse_pairwise_distances
from cuml.metrics.pairwise_distances import nan_euclidean_distances
from cuml.metrics.pairwise_distances import PAIRWISE_DISTANCE_METRICS
from cuml.metrics.pairwise_distances import PAIRWISE_DISTANCE_SPARSE_METRICS
from cuml.metrics.pairwise_kernels import pairwise_kernels
from cuml.metrics.pairwise_kernels import PAIRWISE_KERNEL_FUNCTIONS
from cuml.metrics.hinge_loss import hinge_loss
from cuml.metrics.kl_divergence import kl_divergence
from cuml.metrics.cluster.v_measure import cython_v_measure as v_measure_score
# Public API of ``cuml.metrics``.
# NOTE(review): PAIRWISE_DISTANCE_METRICS, PAIRWISE_DISTANCE_SPARSE_METRICS
# and PAIRWISE_KERNEL_FUNCTIONS are imported above but not listed here --
# confirm whether that omission is intentional.
__all__ = [
    "trustworthiness",
    "r2_score",
    "mean_squared_error",
    "mean_squared_log_error",
    "mean_absolute_error",
    "accuracy_score",
    "adjusted_rand_score",
    "roc_auc_score",
    "precision_recall_curve",
    "log_loss",
    "homogeneity_score",
    "completeness_score",
    "mutual_info_score",
    "confusion_matrix",
    "entropy",
    "nan_euclidean_distances",
    "pairwise_distances",
    "sparse_pairwise_distances",
    "pairwise_kernels",
    "hinge_loss",
    "kl_divergence",
    "v_measure_score",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/metrics/utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def sorted_unique_labels(*ys):
    """Return the sorted array of distinct labels found across *ys*.

    Each input array is first reduced to its unique values on device, then
    the per-array results are concatenated and deduplicated once more, so
    the result is a single sorted 1-D array covering every label seen in
    any of the inputs.
    """
    per_array_uniques = [cp.unique(labels) for labels in ys]
    return cp.unique(cp.concatenate(per_array_uniques))
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# List of Cython sources to build for cuml.metrics.cluster.
# NOTE(review): add_module_gpu_default is defined elsewhere in the build;
# it appears to append each .pyx file to ${cython_sources} when the given
# algorithm flags are enabled — confirm against its definition.
set(cython_sources "")
add_module_gpu_default("adjusted_rand_index.pyx" ${adjusted_rand_index_algo} ${metrics_algo})
add_module_gpu_default("completeness_score.pyx" ${completeness_score_algo} ${metrics_algo})
add_module_gpu_default("entropy.pyx" ${entropy_algo} ${metrics_algo})
add_module_gpu_default("homogeneity_score.pyx" ${homogeneity_score_algo} ${metrics_algo})
add_module_gpu_default("mutual_info_score.pyx" ${mutual_info_score_algo} ${metrics_algo})
add_module_gpu_default("silhouette_score.pyx" ${silhouette_score_algo} ${metrics_algo})
add_module_gpu_default("utils.pyx" ${utils_algo} ${metrics_algo})
add_module_gpu_default("v_measure.pyx" ${v_measure_algo} ${metrics_algo})

# Compile the selected sources as C++ extension modules linked against the
# single-GPU cuML libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/v_measure.pyx | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from pylibraft.common.handle cimport handle_t
from libc.stdint cimport uintptr_t
from cuml.metrics.cluster.utils import prepare_cluster_metric_inputs
from pylibraft.common.handle import Handle
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
double v_measure(const handle_t & handle,
const int * y,
const int * y_hat,
const int n,
const int lower_class_range,
const int upper_class_range,
const double beta) except +
@cuml.internals.api_return_any()
def cython_v_measure(labels_true, labels_pred, beta=1.0, handle=None) -> float:
    """
    V-measure metric of a cluster labeling given a ground truth.

    The V-measure is the harmonic mean between homogeneity and
    completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    The score is independent of the absolute label values: permuting class
    or cluster ids leaves it unchanged. It is also symmetric — swapping
    ``labels_true`` and ``labels_pred`` returns the same value, which makes
    it useful for comparing two independent labelings when no ground truth
    is available.

    Parameters
    ----------
    labels_pred : array-like (device or host) shape = (n_samples,)
        The labels predicted by the model for the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    labels_true : array-like (device or host) shape = (n_samples,)
        The ground truth labels (ints) of the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        Values greater than 1 weight ``completeness`` more strongly;
        values less than 1 weight ``homogeneity`` more strongly.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model, in particular the CUDA stream used.
        If it is None, a new one is created.

    Returns
    -------
    v_measure_value : float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
    """
    if handle is None:
        handle = Handle()
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    # Remap both labelings onto a shared contiguous class range so the C++
    # kernel can assume a dense label domain.
    y_true, y_pred, n_rows, lower_bound, upper_bound = \
        prepare_cluster_metric_inputs(labels_true, labels_pred)

    cdef uintptr_t truth_ptr = y_true.ptr
    cdef uintptr_t pred_ptr = y_pred.ptr

    score = v_measure(handle_[0],
                      <int*> truth_ptr,
                      <int*> pred_ptr,
                      <int> n_rows,
                      <int> lower_bound,
                      <int> upper_bound,
                      <double> beta)
    return score
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/utils.pyx | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
import cuml.internals
from cuml.metrics.utils import sorted_unique_labels
from cuml.prims.label import make_monotonic
from cuml.common import input_to_cuml_array
cp = gpu_only_import('cupy')
@cuml.internals.api_return_generic(get_output_type=True)
def prepare_cluster_metric_inputs(labels_true, labels_pred):
    """Validate and normalize a pair of labelings for the cluster metrics.

    Both inputs are copied into device arrays (deep copies, because the
    labels are remapped in place below) and remapped onto a shared set of
    classes, so the resulting label values form the contiguous interval
    ``[0, n_classes - 1]``.

    Returns
    -------
    tuple
        ``(y_true, y_pred, n_rows, lower_class_range, upper_class_range)``
    """
    # Deep copies: make_monotonic below rewrites the arrays in place.
    truth, n_rows, _, dtype = input_to_cuml_array(
        labels_true,
        check_dtype=[cp.int32, cp.int64],
        check_cols=1,
        deepcopy=True,
    )
    preds, _, _, _ = input_to_cuml_array(
        labels_pred,
        check_dtype=dtype,
        check_rows=n_rows,
        check_cols=1,
        deepcopy=True,
    )

    # Remap both labelings onto the same contiguous class ids.
    classes = sorted_unique_labels(truth, preds)
    make_monotonic(truth, classes=classes, copy=False)
    make_monotonic(preds, classes=classes, copy=False)

    # The bounds are exact because make_monotonic produced ids in
    # [0, len(classes)).
    return truth, preds, n_rows, 0, len(classes) - 1
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/entropy.pyx | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import math
import typing
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from libc.stdint cimport uintptr_t
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common import CumlArray
from cuml.internals.input_utils import input_to_cupy_array
from pylibraft.common.handle import Handle
cimport cuml.common.cuda
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
double entropy(const handle_t &handle,
const int *y,
const int n,
const int lower_class_range,
const int upper_class_range) except +
@cuml.internals.api_return_generic()
def _prepare_cluster_input(cluster) -> typing.Tuple[CumlArray, int, int, int]:
    """Validate a single clustering and compute its label range.

    Returns the device array of labels, the number of rows, and the
    minimum and maximum label values, which the C++ metric kernels take
    as the (inclusive) class range.
    """
    # NOTE(review): the first element comes from input_to_cupy_array, which
    # by its name returns a CuPy array rather than the CumlArray declared in
    # the annotation — confirm the annotation against its return type.
    cluster_m, n_rows, _, _ = input_to_cupy_array(
        cluster,
        check_dtype=np.int32,  # the C++ kernels take int32 labels
        check_cols=1
    )
    # Device-side reductions; .item() copies each scalar back to the host.
    lower_class_range = cp.min(cluster_m).item()
    upper_class_range = cp.max(cluster_m).item()
    return cluster_m, n_rows, lower_class_range, upper_class_range
@cuml.internals.api_return_any()
def cython_entropy(clustering, base=None, handle=None) -> float:
    """
    Computes the entropy of a distribution for given probability values.

    Parameters
    ----------
    clustering : array-like (device or host) shape = (n_samples,)
        Clustering of labels. Probabilities are computed based on occurrences
        of labels. For instance, to represent a fair coin (2 equally possible
        outcomes), the clustering could be [0,1]. For a biased coin with 2/3
        probability for tail, the clustering could be [0, 0, 1].
    base: float, optional
        The logarithmic base to use, defaults to e (natural logarithm).
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    S : float
        The calculated entropy.
    """
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    (clustering, n_rows,
     lower_class_range, upper_class_range) = _prepare_cluster_input(clustering)

    cdef uintptr_t clustering_ptr = clustering.ptr

    # The C++ kernel returns the entropy in nats (natural logarithm).
    S = entropy(handle_[0],
                <int*> clustering_ptr,
                <int> n_rows,
                <int> lower_class_range,
                <int> upper_class_range)

    if base is not None:
        # Change of logarithm base: log_b(e^S) == S / ln(b).  Dividing
        # directly is equivalent to the former
        # ``math.log(math.exp(S), base)`` but avoids the exp/log
        # round-trip, which loses precision and can overflow math.exp
        # for very large S.
        S = S / math.log(base)
    return S
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/homogeneity_score.pyx | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from pylibraft.common.handle cimport handle_t
from libc.stdint cimport uintptr_t
from cuml.metrics.cluster.utils import prepare_cluster_metric_inputs
from pylibraft.common.handle import Handle
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
double homogeneity_score(const handle_t & handle, const int *y,
const int *y_hat, const int n,
const int lower_class_range,
const int upper_class_range) except +
@cuml.internals.api_return_any()
def cython_homogeneity_score(labels_true, labels_pred, handle=None) -> float:
    """
    Computes the homogeneity metric of a cluster labeling given a ground truth.

    A clustering satisfies homogeneity if every cluster contains only data
    points that are members of a single class.

    The score is independent of the absolute label values: permuting class
    or cluster ids leaves it unchanged. It is *not* symmetric — swapping
    ``labels_true`` and ``labels_pred`` yields the completeness score,
    which differs in general.

    The labels in labels_pred and labels_true are assumed to be drawn from a
    contiguous set (Ex: drawn from {2, 3, 4}, but not from {2, 4}). If your
    set of labels looks like {2, 4}, convert them to something like {0, 1}.

    Parameters
    ----------
    labels_pred : array-like (device or host) shape = (n_samples,)
        The labels predicted by the model for the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    labels_true : array-like (device or host) shape = (n_samples,)
        The ground truth labels (ints) of the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model, in particular the CUDA stream used.
        If it is None, a new one is created.

    Returns
    -------
    float
        The homogeneity of the predicted labeling given the ground truth.
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.
    """
    if handle is None:
        handle = Handle()
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    # Remap both labelings onto a shared contiguous class range for the
    # C++ kernel.
    y_true, y_pred, n_rows, lower_bound, upper_bound = \
        prepare_cluster_metric_inputs(labels_true, labels_pred)

    cdef uintptr_t truth_ptr = y_true.ptr
    cdef uintptr_t pred_ptr = y_pred.ptr

    return homogeneity_score(handle_[0],
                             <int*> truth_ptr,
                             <int*> pred_ptr,
                             <int> n_rows,
                             <int> lower_bound,
                             <int> upper_bound)
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/mutual_info_score.pyx | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from pylibraft.common.handle cimport handle_t
from libc.stdint cimport uintptr_t
from cuml.metrics.cluster.utils import prepare_cluster_metric_inputs
from pylibraft.common.handle import Handle
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
double mutual_info_score(const handle_t &handle,
const int *y,
const int *y_hat,
const int n,
const int lower_class_range,
const int upper_class_range) except +
@cuml.internals.api_return_any()
def cython_mutual_info_score(labels_true, labels_pred, handle=None) -> float:
    """
    Computes the Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels of
    the same data.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the score
    value in any way.

    This metric is furthermore symmetric: switching label_true with label_pred
    will return the same score value. This can be useful to measure the
    agreement of two independent label assignments strategies on the same
    dataset when the real ground truth is not known.

    The labels in labels_pred and labels_true are assumed to be drawn from a
    contiguous set (Ex: drawn from {2, 3, 4}, but not from {2, 4}). If your
    set of labels looks like {2, 4}, convert them to something like {0, 1}.

    Parameters
    ----------
    labels_pred : array-like (device or host) shape = (n_samples,)
        A clustering of the data (ints) into disjoint subsets.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    labels_true : array-like (device or host) shape = (n_samples,)
        A clustering of the data (ints) into disjoint subsets.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model, in particular the CUDA stream used.
        If it is None, a new one is created.

    Returns
    -------
    float
        Mutual information, a non-negative value
    """
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    # Remap both labelings onto a shared contiguous class range so the C++
    # kernel can assume a dense label domain.
    (y_true, y_pred, n_rows,
     lower_class_range, upper_class_range) = prepare_cluster_metric_inputs(
        labels_true,
        labels_pred
    )

    cdef uintptr_t ground_truth_ptr = y_true.ptr
    cdef uintptr_t preds_ptr = y_pred.ptr

    mi = mutual_info_score(handle_[0],
                           <int*> ground_truth_ptr,
                           <int*> preds_ptr,
                           <int> n_rows,
                           <int> lower_class_range,
                           <int> upper_class_range)

    return mi
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/silhouette_score.pyx | #
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
from cuml.common import input_to_cuml_array
from cuml.metrics.pairwise_distances import _determine_metric
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from cuml.metrics.distance_type cimport DistanceType
from cuml.prims.label.classlabels import make_monotonic, check_labels
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics::Batched":
float silhouette_score(
const handle_t &handle,
float *y,
int n_rows,
int n_cols,
int *labels,
int n_labels,
float *sil_scores,
int chunk,
DistanceType metric) except +
double silhouette_score(
const handle_t &handle,
double *y,
int n_rows,
int n_cols,
int *labels,
int n_labels,
double *sil_scores,
int chunk,
DistanceType metric) except +
def _silhouette_coeff(
        X, labels, metric='euclidean', sil_scores=None, chunksize=None,
        handle=None):
    """Function wrapped by silhouette_score and silhouette_samples to compute
    silhouette coefficients.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The feature vectors for all samples.
    labels : array-like, shape = (n_samples,)
        The assigned cluster labels for each sample.
    metric : string
        A string representation of the distance metric to use for evaluating
        the silhouette score. Available options are "cityblock", "cosine",
        "euclidean", "l1", "l2", "manhattan", and "sqeuclidean".
    sil_scores : array_like, shape = (1, n_samples), dtype='float64'
        An optional array in which to store the silhouette score for each
        sample.
    chunksize : integer (default = None)
        An integer, 1 <= chunksize <= n_samples to tile the pairwise distance
        matrix computations, so as to reduce the quadratic memory usage of
        having the entire pairwise distance matrix in GPU memory.
        If None, chunksize will automatically be set to 40000, which through
        experiments has proved to be a safe number for the computation
        to run on a GPU with 16 GB VRAM.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    """
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    if chunksize is None:
        # Default tile size; see the chunksize docstring entry above.
        chunksize = 40000

    # check_dtype restricts the input to the two dtypes handled by the
    # float/double overloads dispatched at the bottom of this function.
    data, n_rows, n_cols, dtype = input_to_cuml_array(
        X,
        order='C',
        check_dtype=[np.float32, np.float64],
    )

    labels, _, _, _ = input_to_cuml_array(
        labels,
        order='C',
        convert_to_dtype=np.int32
    )

    # Number of distinct cluster labels present in the input.
    n_labels = cp.unique(
        labels.to_output(output_type='cupy', output_dtype='int')
    ).shape[0]

    # The C++ kernel expects labels drawn from the contiguous range
    # [0, n_labels); remap them (into a copy) only when necessary.
    if not check_labels(labels, cp.arange(n_labels, dtype=np.int32)):
        mono_labels, _ = make_monotonic(labels, copy=True)
        mono_labels, _, _, _ = input_to_cuml_array(
            mono_labels,
            order='C',
            convert_to_dtype=np.int32
        )
    else:
        mono_labels = labels

    # Optional per-sample output buffer: a NULL pointer tells the kernel
    # to skip writing individual scores and only return the mean.
    cdef uintptr_t scores_ptr
    if sil_scores is None:
        scores_ptr = <uintptr_t> NULL
    else:
        # The buffer must match the input dtype so the matching overload
        # below can write into it directly.
        sil_scores = input_to_cuml_array(
            sil_scores,
            check_dtype=dtype)[0]

        scores_ptr = sil_scores.ptr

    # Translate the metric name into the DistanceType enum value.
    metric = _determine_metric(metric)

    # Dispatch on dtype to the matching C++ overload; exactly one branch
    # is taken because check_dtype above restricted dtype to these two.
    if dtype == np.float32:
        return silhouette_score(handle_[0],
                                <float*> <uintptr_t> data.ptr,
                                <int> n_rows,
                                <int> n_cols,
                                <int*> <uintptr_t> mono_labels.ptr,
                                <int> n_labels,
                                <float*> scores_ptr,
                                <int> chunksize,
                                <DistanceType> metric)
    elif dtype == np.float64:
        return silhouette_score(handle_[0],
                                <double*> <uintptr_t> data.ptr,
                                <int> n_rows,
                                <int> n_cols,
                                <int*> <uintptr_t> mono_labels.ptr,
                                <int> n_labels,
                                <double*> scores_ptr,
                                <int> chunksize,
                                <DistanceType> metric)
def cython_silhouette_score(
        X,
        labels,
        metric='euclidean',
        chunksize=None,
        handle=None):
    """Calculate the mean silhouette coefficient for the provided data.

    Given a set of cluster labels for every sample in the provided data,
    compute the mean intra-cluster distance (a) and the mean
    nearest-cluster distance (b) for each sample. The silhouette
    coefficient for a sample is then ``(b - a) / max(a, b)``, and this
    function returns the mean of those coefficients.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The feature vectors for all samples.
    labels : array-like, shape = (n_samples,)
        The assigned cluster labels for each sample.
    metric : string
        A string representation of the distance metric to use for evaluating
        the silhouette score. Available options are "cityblock", "cosine",
        "euclidean", "l1", "l2", "manhattan", and "sqeuclidean".
    chunksize : integer (default = None)
        An integer, 1 <= chunksize <= n_samples to tile the pairwise distance
        matrix computations, so as to reduce the quadratic memory usage of
        having the entire pairwise distance matrix in GPU memory.
        If None, chunksize will automatically be set to 40000, which through
        experiments has proved to be a safe number for the computation
        to run on a GPU with 16 GB VRAM.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model, in particular the CUDA stream used.
        If it is None, a new one is created.
    """
    # No per-sample output buffer is passed, so the helper only returns
    # the mean silhouette coefficient.
    return _silhouette_coeff(
        X,
        labels,
        metric=metric,
        chunksize=chunksize,
        handle=handle,
    )
def cython_silhouette_samples(
        X,
        labels,
        metric='euclidean',
        chunksize=None,
        handle=None):
    """Calculate the silhouette coefficient for each sample in the provided data.

    Given a set of cluster labels for every sample in the provided data,
    compute the mean intra-cluster distance (a) and the mean nearest-cluster
    distance (b) for each sample. The silhouette coefficient for a sample is
    then (b - a) / max(a, b).

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The feature vectors for all samples.
    labels : array-like, shape = (n_samples,)
        The assigned cluster labels for each sample.
    metric : string
        A string representation of the distance metric to use for evaluating
        the silhouette score. Available options are "cityblock", "cosine",
        "euclidean", "l1", "l2", "manhattan", and "sqeuclidean".
    chunksize : integer (default = None)
        An integer, 1 <= chunksize <= n_samples to tile the pairwise distance
        matrix computations, so as to reduce the quadratic memory usage of
        having the entire pairwise distance matrix in GPU memory.
        If None, chunksize will automatically be set to 40000, which through
        experiments has proved to be a safe number for the computation
        to run on a GPU with 16 GB VRAM.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    """
    # NOTE(review): unlike the wrapped helper, this reads X.shape and
    # X.dtype directly, so X must already be an array object (not e.g. a
    # plain list) — confirm whether list inputs should be supported here.
    sil_scores = cp.empty((X.shape[0],), dtype=X.dtype)

    # The helper fills sil_scores in place via the device pointer.
    _silhouette_coeff(
        X, labels, chunksize=chunksize, metric=metric, sil_scores=sil_scores,
        handle=handle
    )

    return sil_scores
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/adjusted_rand_index.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from libc.stdint cimport uintptr_t
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from pylibraft.common.handle import Handle
cimport cuml.common.cuda
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
    # `except +` translates C++ exceptions thrown by the kernel into
    # Python exceptions instead of terminating; this matches every other
    # metric declaration in this package (entropy, mutual_info_score,
    # homogeneity_score, completeness_score, v_measure).
    double adjusted_rand_index(handle_t &handle,
                               int *y,
                               int *y_hat,
                               int n) except +
@cuml.internals.api_return_any()
def adjusted_rand_score(labels_true, labels_pred, handle=None,
                        convert_dtype=True) -> float:
    """
    Adjusted_rand_score is a clustering similarity metric based on the Rand
    index and is corrected for chance.

    Parameters
    ----------
    labels_true : Ground truth labels to be used as a reference
    labels_pred : Array of predicted labels used to evaluate the model
    handle : cuml.Handle
        Internal CUDA state (in particular the stream) for the
        computation; a new one is created when None.
    convert_dtype : bool (default = True)
        When True, inputs are converted to int32 if needed; when False,
        non-int32 inputs raise instead.

    Returns
    -------
    float
        The adjusted rand index value between -1.0 and 1.0
    """
    if handle is None:
        handle = Handle()
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    # Both labelings must be int32 and C-ordered for the C++ kernel.
    to_dtype = cp.int32 if convert_dtype else None
    truth, n_rows, _, _ = input_to_cuml_array(
        labels_true, order='C', check_dtype=cp.int32,
        convert_to_dtype=to_dtype)
    preds, _, _, _ = input_to_cuml_array(
        labels_pred, order='C', check_dtype=cp.int32,
        convert_to_dtype=to_dtype)

    return adjusted_rand_index(handle_[0],
                               <int*> <uintptr_t> truth.ptr,
                               <int*> <uintptr_t> preds.ptr,
                               <int> n_rows)
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/__init__.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.metrics.cluster.adjusted_rand_index import adjusted_rand_score
from cuml.metrics.cluster.homogeneity_score import (
cython_homogeneity_score as homogeneity_score,
)
from cuml.metrics.cluster.completeness_score import (
cython_completeness_score as completeness_score,
)
from cuml.metrics.cluster.mutual_info_score import (
cython_mutual_info_score as mutual_info_score,
)
from cuml.metrics.cluster.entropy import cython_entropy as entropy
from cuml.metrics.cluster.silhouette_score import (
cython_silhouette_score as silhouette_score,
)
from cuml.metrics.cluster.silhouette_score import (
cython_silhouette_samples as silhouette_samples,
)
from cuml.metrics.cluster.v_measure import cython_v_measure as v_measure_score
| 0 |
rapidsai_public_repos/cuml/python/cuml/metrics | rapidsai_public_repos/cuml/python/cuml/metrics/cluster/completeness_score.pyx | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from pylibraft.common.handle cimport handle_t
from libc.stdint cimport uintptr_t
from cuml.metrics.cluster.utils import prepare_cluster_metric_inputs
from pylibraft.common.handle import Handle
cdef extern from "cuml/metrics/metrics.hpp" namespace "ML::Metrics":
double completeness_score(const handle_t & handle, const int *y,
const int *y_hat, const int n,
const int lower_class_range,
const int upper_class_range) except +
@cuml.internals.api_return_any()
def cython_completeness_score(labels_true, labels_pred, handle=None) -> float:
    """
    Completeness metric of a cluster labeling given a ground truth.

    A clustering result satisfies completeness if all the data points that are
    members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the score
    value in any way.

    This metric is not symmetric: switching label_true with label_pred will
    return the homogeneity_score which will be different in general.

    The labels in labels_pred and labels_true are assumed to be drawn from a
    contiguous set (Ex: drawn from {2, 3, 4}, but not from {2, 4}). If your
    set of labels looks like {2, 4}, convert them to something like {0, 1}.

    Parameters
    ----------
    labels_pred : array-like (device or host) shape = (n_samples,)
        The labels predicted by the model for the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    labels_true : array-like (device or host) shape = (n_samples,)
        The ground truth labels (ints) of the test dataset.
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    float
        The completeness of the predicted labeling given the ground truth.
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.
    """
    # Create a fresh handle (with its own CUDA stream) when the caller did
    # not supply one.
    handle = Handle() if handle is None else handle
    cdef handle_t *handle_ = <handle_t*> <size_t> handle.getHandle()

    # Validates both label arrays, moves them to device memory and derives
    # the contiguous [lower, upper] class range expected by the C++ metric.
    (y_true, y_pred, n_rows,
     lower_class_range, upper_class_range) = prepare_cluster_metric_inputs(
        labels_true,
        labels_pred
    )

    # Raw device pointers handed to the C++ implementation.
    cdef uintptr_t ground_truth_ptr = y_true.ptr
    cdef uintptr_t preds_ptr = y_pred.ptr

    com = completeness_score(handle_[0],
                             <int*> ground_truth_ptr,
                             <int*> preds_ptr,
                             <int> n_rows,
                             <int> lower_class_range,
                             <int> upper_class_range)

    return com
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/random_projection/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Collect the Cython sources for this sub-package.
set(cython_sources "")
# NOTE(review): add_module_gpu_default presumably appends the .pyx to
# ${cython_sources} unless excluded by ${random_projection_algo} — confirm
# against the helper's definition in the top-level CMake.
add_module_gpu_default("random_projection.pyx" ${random_projection_algo})

# Build one extension module per collected source, linked against the
# single-GPU cuML libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX random_projection_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/random_projection/__init__.py | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.random_projection.random_projection import GaussianRandomProjection
from cuml.random_projection.random_projection import SparseRandomProjection
from cuml.random_projection.random_projection import (
johnson_lindenstrauss_min_dim,
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/random_projection/random_projection.pyx | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
from libcpp cimport bool
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from pylibraft.common.handle cimport *
from cuml.common import input_to_cuml_array
from cuml.internals.mixins import FMajorInputTagMixin
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef extern from "cuml/random_projection/rproj_c.h" namespace "ML":
# Structure holding random projection hyperparameters
cdef struct paramsRPROJ:
int n_samples # number of samples
int n_features # number of features (original dimension)
int n_components # number of components (target dimension)
double eps # error tolerance according to Johnson-Lindenstrauss lemma # noqa E501
bool gaussian_method # toggle Gaussian or Sparse random projection methods # noqa E501
double density # ratio of non-zero component in the random projection matrix (used for sparse random projection) # noqa E501
bool dense_output # toggle random projection's transformation as a dense or sparse matrix # noqa E501
int random_state # seed used by random generator
# Structure describing random matrix
cdef cppclass rand_mat[T]:
rand_mat(cuda_stream_view stream) except + # random matrix structure constructor (set all to nullptr) # noqa E501
T *dense_data # dense random matrix data
int *indices # sparse CSC random matrix indices
int *indptr # sparse CSC random matrix indptr
T *sparse_data # sparse CSC random matrix data
size_t sparse_data_size # sparse CSC random matrix number of non-zero elements # noqa E501
# Function used to fit the model
cdef void RPROJfit[T](const handle_t& handle, rand_mat[T] *random_matrix,
paramsRPROJ* params) except +
# Function used to apply data transformation
cdef void RPROJtransform[T](const handle_t& handle, T *input,
rand_mat[T] *random_matrix, T *output,
paramsRPROJ* params) except +
# Function used to compute the Johnson Lindenstrauss minimal distance
cdef size_t c_johnson_lindenstrauss_min_dim \
"ML::johnson_lindenstrauss_min_dim" (size_t n_samples,
double eps) except +
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
    """
    In mathematics, the Johnson-Lindenstrauss lemma states that
    high-dimensional data can be embedded into lower dimension while
    preserving the distances.

    With p the random projection :
    (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2

    This function finds the minimum number of components to guarantee that
    the embedding is inside the eps error tolerance.

    Parameters
    ----------
    n_samples : int
        Number of samples.
    eps : float in (0,1) (default = 0.1)
        Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.

    Returns
    -------
    n_components : int
        The minimal number of components to guarantee with good probability
        an eps-embedding with n_samples.
    """
    # Thin wrapper: the actual computation lives in the C++ function
    # ML::johnson_lindenstrauss_min_dim declared in the extern block above.
    return c_johnson_lindenstrauss_min_dim(<size_t>n_samples, <double>eps)
cdef class BaseRandomProjection():
    """
    Base class for random projections.
    This class is not intended to be used directly.

    Random projection is a dimensionality reduction technique. Random
    projection methods are powerful methods known for their simplicity,
    computational efficiency and restricted model size.
    This algorithm also has the advantage to preserve distances well between
    any two samples and is thus suitable for methods having this requirement.

    Parameters
    ----------
    n_components : int (default = 'auto')
        Dimensionality of the target projection space. If set to 'auto',
        the parameter is deducted thanks to Johnson-Lindenstrauss lemma.
        The automatic deduction make use of the number of samples and
        the eps parameter.
        The Johnson-Lindenstrauss lemma can produce very conservative
        n_components parameter as it makes no assumption on dataset structure.
    eps : float (default = 0.1)
        Error tolerance during projection. Used by Johnson-Lindenstrauss
        automatic deduction when n_components is set to 'auto'.
    dense_output : boolean (default = True)
        If set to True transformed matrix will be dense otherwise sparse.
    random_state : int (default = None)
        Seed used to initialize random generator

    Attributes
    ----------
    params : Cython structure
        Structure holding model's hyperparameters
    rand_matS/rand_matD : Cython pointers to structures
        Structures holding pointers to data describing random matrix.
        S for single/float and D for double.

    Notes
    ------
    Inspired from sklearn's implementation :
    https://scikit-learn.org/stable/modules/random_projection.html
    """

    # Hyperparameters shared with (and read by) the C++ implementation.
    cdef paramsRPROJ params
    # C++-allocated random matrices owned by this object: S holds the
    # float32 variant, D the float64 one (allocated in __init__, freed in
    # __dealloc__).
    cdef rand_mat[float]* rand_matS
    cdef rand_mat[double]* rand_matD

    def __dealloc__(self):
        # Release the C++ random-matrix structures allocated in __init__.
        del self.rand_matS
        del self.rand_matD

    def __init__(self, *, bool gaussian_method, double density,
                 n_components='auto', eps=0.1, dense_output=True,
                 random_state=None):
        # self.handle is expected to have been set up already by the
        # Base.__init__ of the concrete subclass (see Gaussian/Sparse
        # RandomProjection below).
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef cuda_stream_view stream = handle_.get_stream()
        self.rand_matS = new rand_mat[float](stream)
        self.rand_matD = new rand_mat[double](stream)

        # -1 is the sentinel the C++ side interprets as 'auto'
        # (deduced at fit time from n_samples and eps).
        self.params.n_components = n_components if n_components != 'auto'\
            else -1
        self.params.eps = eps
        self.params.dense_output = dense_output
        # NOTE(review): when random_state is None the struct field is left
        # uninitialized on the Python side — presumably the C++ side seeds
        # it; confirm against rproj_c.h.
        if random_state is not None:
            self.params.random_state = random_state
        self.params.gaussian_method = gaussian_method
        self.params.density = density

    # The properties below simply mirror fields of the C++ paramsRPROJ
    # struct so they can be read/written like ordinary Python attributes.
    @property
    def n_components(self):
        return self.params.n_components

    @n_components.setter
    def n_components(self, value):
        self.params.n_components = value

    @property
    def eps(self):
        return self.params.eps

    @eps.setter
    def eps(self, value):
        self.params.eps = value

    @property
    def dense_output(self):
        return self.params.dense_output

    @dense_output.setter
    def dense_output(self, value):
        self.params.dense_output = value

    @property
    def random_state(self):
        return self.params.random_state

    @random_state.setter
    def random_state(self, value):
        self.params.random_state = value

    @property
    def gaussian_method(self):
        return self.params.gaussian_method

    @gaussian_method.setter
    def gaussian_method(self, value):
        self.params.gaussian_method = value

    @property
    def density(self):
        return self.params.density

    @density.setter
    def density(self, value):
        self.params.density = value

    @cuml.internals.api_base_return_any()
    def fit(self, X, y=None):
        """
        Fit the model. This function generates the random matrix on GPU.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Used to provide shape information.
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy

        Returns
        -------
        The transformer itself with deducted 'auto' parameters and
        generated random matrix as attributes
        """
        # Only the shape and dtype of X matter here; the data itself is not
        # read when generating the random matrix.
        _, n_samples, n_features, self.dtype = \
            input_to_cuml_array(X, check_dtype=[np.float32, np.float64])

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        self.params.n_samples = n_samples
        self.params.n_features = n_features

        # Generate the random matrix in the precision matching the input.
        if self.dtype == np.float32:
            RPROJfit[float](handle_[0], self.rand_matS, &self.params)
        else:
            RPROJfit[double](handle_[0], self.rand_matD, &self.params)

        self.handle.sync()
        return self

    @cuml.internals.api_base_return_array()
    def transform(self, X, convert_dtype=True):
        """
        Apply transformation on provided data. This function outputs
        a multiplication between the input matrix and the generated random
        matrix

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Dense matrix (floats or doubles) of shape (n_samples,
            n_features).
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy
        convert_dtype : bool, optional (default = True)
            When set to True, the fit method will, when necessary, convert
            y to be the same data type as X if they differ. This will
            increase memory used for the method.

        Returns
        -------
        The output projected matrix of shape (n_samples, n_components)
        Result of multiplication between input matrix and random matrix
        """
        # Input must match the dtype seen at fit time (optionally converted).
        X_m, n_samples, n_features, dtype = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None))
        cdef uintptr_t input_ptr = X_m.ptr

        # Pre-allocate the F-major output buffer for the C++ kernel.
        X_new = CumlArray.empty((n_samples, self.params.n_components),
                                dtype=self.dtype,
                                order='F',
                                index=X_m.index)
        cdef uintptr_t output_ptr = X_new.ptr

        if self.params.n_features != n_features:
            raise ValueError("n_features must be same as on fitting: %d" %
                             self.params.n_features)

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        if dtype == np.float32:
            RPROJtransform[float](handle_[0],
                                  <float*> input_ptr,
                                  self.rand_matS,
                                  <float*> output_ptr,
                                  &self.params)
        else:
            RPROJtransform[double](handle_[0],
                                   <double*> input_ptr,
                                   self.rand_matD,
                                   <double*> output_ptr,
                                   &self.params)

        self.handle.sync()
        return X_new

    @cuml.internals.api_base_return_array(get_output_type=False)
    def fit_transform(self, X, convert_dtype=True):
        # Convenience: fit on X, then project X itself.
        return self.fit(X).transform(X, convert_dtype)
class GaussianRandomProjection(Base,
                               BaseRandomProjection,
                               FMajorInputTagMixin):
    """
    Gaussian Random Projection method derivated from BaseRandomProjection
    class.

    Random projection is a dimensionality reduction technique. Random
    projection methods are powerful methods known for their simplicity,
    computational efficiency and restricted model size.
    This algorithm also has the advantage to preserve distances well between
    any two samples and is thus suitable for methods having this requirement.

    The components of the random matrix are drawn from N(0, 1 / n_components).

    Examples
    --------

    .. code-block:: python

        from cuml.random_projection import GaussianRandomProjection
        from sklearn.datasets import make_blobs
        from sklearn.svm import SVC

        # dataset generation
        data, target = make_blobs(n_samples=800, centers=400, n_features=3000,
                                  random_state=42)

        # model fitting
        model = GaussianRandomProjection(n_components=5,
                                         random_state=42).fit(data)

        # dataset transformation
        transformed_data = model.transform(data)

        # classifier training
        classifier = SVC(gamma=0.001).fit(transformed_data, target)

        # classifier scoring
        score = classifier.score(transformed_data, target)

        # measure information preservation
        print("Score: {}".format(score))

    Output:

    .. code-block:: python

        Score: 1.0

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    n_components : int (default = 'auto')
        Dimensionality of the target projection space. If set to 'auto',
        the parameter is deducted thanks to Johnson-Lindenstrauss lemma.
        The automatic deduction make use of the number of samples and
        the eps parameter.
        The Johnson-Lindenstrauss lemma can produce very conservative
        n_components parameter as it makes no assumption on dataset structure.
    eps : float (default = 0.1)
        Error tolerance during projection. Used by Johnson-Lindenstrauss
        automatic deduction when n_components is set to 'auto'.
    random_state : int (default = None)
        Seed used to initialize random generator
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    gaussian_method : boolean
        To be passed to base class in order to determine
        random matrix generation method

    Notes
    -----
    This class is unable to be used with ``sklearn.base.clone()`` and will
    raise an exception when called.

    Inspired by Scikit-learn's implementation :
    https://scikit-learn.org/stable/modules/random_projection.html
    """

    def __init__(self, *, handle=None, n_components='auto', eps=0.1,
                 random_state=None, verbose=False, output_type=None):
        # The two base-class initializers are invoked explicitly, in order:
        # Base.__init__ sets up self.handle, which
        # BaseRandomProjection.__init__ then reads to obtain a CUDA stream.
        Base.__init__(self,
                      handle=handle,
                      verbose=verbose,
                      output_type=output_type)

        # density is unused by the Gaussian method; -1.0 is a placeholder.
        BaseRandomProjection.__init__(
            self,
            gaussian_method=True,
            density=-1.0,
            n_components=n_components,
            eps=eps,
            dense_output=True,
            random_state=random_state)

    def get_param_names(self):
        # Hyperparameters exposed through get_params()/set_params().
        return Base.get_param_names(self) + [
            "n_components",
            "eps",
            "random_state"
        ]
class SparseRandomProjection(Base,
                             BaseRandomProjection,
                             FMajorInputTagMixin):
    """
    Sparse Random Projection method derivated from BaseRandomProjection class.

    Random projection is a dimensionality reduction technique. Random
    projection methods are powerful methods known for their simplicity,
    computational efficiency and restricted model size.
    This algorithm also has the advantage to preserve distances well between
    any two samples and is thus suitable for methods having this requirement.

    Sparse random matrix is an alternative to dense random projection matrix
    (e.g. Gaussian) that guarantees similar embedding quality while being much
    more memory efficient and allowing faster computation of the projected data
    (with sparse enough matrices).
    If we note ``s = 1 / density`` the components of the random matrix are
    drawn from:

    - ``-sqrt(s) / sqrt(n_components)`` - with probability ``1 / 2s``
    - ``0`` - with probability ``1 - 1 / s``
    - ``+sqrt(s) / sqrt(n_components)`` - with probability ``1 / 2s``

    Examples
    --------

    .. code-block:: python

        from cuml.random_projection import SparseRandomProjection
        from sklearn.datasets import make_blobs
        from sklearn.svm import SVC

        # dataset generation
        data, target = make_blobs(n_samples=800, centers=400, n_features=3000,
                                  random_state=42)

        # model fitting
        model = SparseRandomProjection(n_components=5,
                                       random_state=42).fit(data)

        # dataset transformation
        transformed_data = model.transform(data)

        # classifier training
        classifier = SVC(gamma=0.001).fit(transformed_data, target)

        # classifier scoring
        score = classifier.score(transformed_data, target)

        # measure information preservation
        print("Score: {}".format(score))

    Output:

    .. code-block:: python

        Score: 1.0

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    n_components : int (default = 'auto')
        Dimensionality of the target projection space. If set to 'auto',
        the parameter is deducted thanks to Johnson-Lindenstrauss lemma.
        The automatic deduction make use of the number of samples and
        the eps parameter.
        The Johnson-Lindenstrauss lemma can produce very conservative
        n_components parameter as it makes no assumption on dataset structure.
    density : float in range (0, 1] (default = 'auto')
        Ratio of non-zero component in the random projection matrix.
        If density = 'auto', the value is set to the minimum density
        as recommended by Ping Li et al.: 1 / sqrt(n_features).
    eps : float (default = 0.1)
        Error tolerance during projection. Used by Johnson-Lindenstrauss
        automatic deduction when n_components is set to 'auto'.
    dense_output : boolean (default = True)
        If set to True transformed matrix will be dense otherwise sparse.
    random_state : int (default = None)
        Seed used to initialize random generator
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    gaussian_method : boolean
        To be passed to base class in order to determine
        random matrix generation method

    Notes
    -----
    This class is unable to be used with ``sklearn.base.clone()`` and will
    raise an exception when called.

    Inspired by Scikit-learn's `implementation
    <https://scikit-learn.org/stable/modules/random_projection.html>`_.
    """

    def __init__(self, *, handle=None, n_components='auto', density='auto',
                 eps=0.1, dense_output=True, random_state=None,
                 verbose=False, output_type=None):
        # The two base-class initializers are invoked explicitly, in order:
        # Base.__init__ sets up self.handle, which
        # BaseRandomProjection.__init__ then reads to obtain a CUDA stream.
        Base.__init__(self,
                      handle=handle,
                      verbose=verbose,
                      output_type=output_type)

        # -1.0 is the sentinel the C++ side interprets as 'auto' density
        # (minimum density recommended by Ping Li et al.).
        BaseRandomProjection.__init__(
            self,
            gaussian_method=False,
            density=(density if density != 'auto' else -1.0),
            n_components=n_components,
            eps=eps,
            dense_output=dense_output,
            random_state=random_state)

    def get_param_names(self):
        # Hyperparameters exposed through get_params()/set_params().
        return Base.get_param_names(self) + [
            "n_components",
            "density",
            "eps",
            "dense_output",
            "random_state"
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/naive_bayes/naive_bayes.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.kernel_utils import cuda_kernel_factory
from cuml.internals.input_utils import input_to_cuml_array, input_to_cupy_array
from cuml.prims.array import binarize
from cuml.prims.label import invert_labels
from cuml.prims.label import check_labels
from cuml.prims.label import make_monotonic
from cuml.internals.import_utils import has_scipy
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import ClassifierMixin
from cuml.internals.base import Base
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.common import CumlArray
import math
import warnings
from cuml.internals.safe_imports import (
gpu_only_import,
gpu_only_import_from,
null_decorator,
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
def count_features_coo_kernel(float_dtype, int_dtype):
    """
    A simple reduction kernel that takes in a sparse (COO) array
    of features and computes the sum (or sum squared) for each class
    label
    """

    # {0} / {1} are replaced by the float and int dtype names by
    # cuda_kernel_factory below. One thread handles one non-zero entry and
    # atomically accumulates it into `out`, indexed as
    # (col * n_classes + label) — i.e. a (n_cols x n_classes) table with the
    # class index varying fastest.
    kernel_str = r"""({0} *out,
                    int *rows, int *cols,
                    {0} *vals, int nnz,
                    int n_rows, int n_cols,
                    {1} *labels,
                    {0} *weights,
                    bool has_weights,
                    int n_classes,
                    bool square) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if(i >= nnz) return;
      int row = rows[i];
      int col = cols[i];
      {0} val = vals[i];
      {1} label = labels[row];
      unsigned out_idx = (col * n_classes) + label;

      if(has_weights)
        val *= weights[i];

      if(square) val *= val;
      atomicAdd(out + out_idx, val);
    }"""

    return cuda_kernel_factory(
        kernel_str, (float_dtype, int_dtype), "count_features_coo"
    )
def count_classes_kernel(float_dtype, int_dtype):
    """
    Builds a CUDA kernel that histograms class labels: one thread per
    sample atomically increments the output bin for that sample's label.
    """
    kernel_str = r"""
    ({0} *out, int n_rows, {1} *labels) {
      int row = blockIdx.x * blockDim.x + threadIdx.x;
      if(row >= n_rows) return;
      {1} label = labels[row];
      atomicAdd(out + label, ({0})1);
    }"""

    return cuda_kernel_factory(
        kernel_str, (float_dtype, int_dtype), "count_classes"
    )
def count_features_dense_kernel(float_dtype, int_dtype):
    """
    Builds a CUDA kernel that accumulates per-(feature, class) sums (or
    squared sums) from a dense input. With ``categorical=True`` it instead
    counts occurrences of each (feature value, class, feature) triple,
    indexing ``out`` as (val * n_classes * n_cols) + (label * n_cols) + col.
    """
    kernel_str = r"""
    ({0} *out,
    {0} *in,
    int n_rows,
    int n_cols,
    {1} *labels,
    {0} *weights,
    bool has_weights,
    int n_classes,
    bool square,
    bool rowMajor,
    bool categorical) {

      int row = blockIdx.x * blockDim.x + threadIdx.x;
      int col = blockIdx.y * blockDim.y + threadIdx.y;

      if(row >= n_rows || col >= n_cols) return;

      {0} val = !rowMajor ?
            in[col * n_rows + row] : in[row * n_cols + col];
      {1} label = labels[row];
      unsigned out_idx = ((col * n_classes) + label);
      if (categorical)
      {
        out_idx = (val * n_classes * n_cols) + (label * n_cols) + col;
        val = 1;
      }

      if(has_weights)
        val *= weights[row];

      if(val == 0.0) return;
      if(square) val *= val;
      atomicAdd(out + out_idx, val);
    }"""

    return cuda_kernel_factory(
        kernel_str, (float_dtype, int_dtype), "count_features_dense"
    )
def _convert_x_sparse(X):
    """Normalize a sparse matrix (host SciPy or device CuPy) into a CuPy
    COO matrix, validating that its dtype is floating point.
    """
    coo = X.tocoo()

    if coo.dtype not in (cp.float32, cp.float64):
        raise ValueError(
            "Only floating-point dtypes (float32 or "
            "float64) are supported for sparse inputs."
        )

    # Move each component onto the device (a no-op for CuPy inputs),
    # preserving the original index/value dtypes.
    row_idx = cp.asarray(coo.row, dtype=coo.row.dtype)
    col_idx = cp.asarray(coo.col, dtype=coo.col.dtype)
    values = cp.asarray(coo.data, dtype=coo.data.dtype)

    return cupyx.scipy.sparse.coo_matrix(
        (values, (row_idx, col_idx)), shape=coo.shape
    )
class _BaseNB(Base, ClassifierMixin):
    """Shared base for the naive Bayes estimators in this module.

    Provides input preparation plus ``predict``, ``predict_log_proba`` and
    ``predict_proba``, all driven by the subclass-supplied
    ``_joint_log_likelihood``.
    """

    # Fitted attributes stored as CumlArrays with automatic output-type
    # conversion.
    classes_ = CumlArrayDescriptor()
    class_count_ = CumlArrayDescriptor()
    feature_count_ = CumlArrayDescriptor()
    class_log_prior_ = CumlArrayDescriptor()
    feature_log_prob_ = CumlArrayDescriptor()

    def __init__(self, *, verbose=False, handle=None, output_type=None):
        super(_BaseNB, self).__init__(
            verbose=verbose, handle=handle, output_type=output_type
        )

    def _check_X(self, X):
        """To be overridden in subclasses with the actual checks."""
        return X

    def _preprocess_input(self, X):
        """Convert X (host/device, dense/sparse) to a CuPy representation.

        Shared by ``predict`` and ``predict_log_proba`` (previously
        duplicated in both).

        Returns
        -------
        tuple (X, index) where X is a CuPy ndarray or CuPy sparse COO
        matrix already passed through ``_check_X``, and index is the
        original input's index for dense inputs (None for sparse ones).
        """
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.internals.import_utils import (
                dummy_function_always_false as scipy_sparse_isspmatrix,
            )

        # todo: use a sparse CumlArray style approach when ready
        # https://github.com/rapidsai/cuml/issues/2216
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = _convert_x_sparse(X)
            index = None
        else:
            X = input_to_cuml_array(
                X, order="K", check_dtype=[cp.float32, cp.float64, cp.int32]
            )
            index = X.index
            # todo: improve index management for cupy based codebases
            X = X.array.to_output("cupy")

        return self._check_X(X), index

    @generate_docstring(
        X="dense_sparse",
        return_values={
            "name": "y_hat",
            "type": "dense",
            "description": "Predicted values",
            "shape": "(n_rows, 1)",
        },
    )
    def predict(self, X) -> CumlArray:
        """
        Perform classification on an array of test vectors X.

        """
        X, index = self._preprocess_input(X)
        jll = self._joint_log_likelihood(X)

        # The class with the largest joint log-likelihood wins; map argmax
        # positions back to the original class labels.
        indices = cp.argmax(jll, axis=1).astype(self.classes_.dtype)
        y_hat = invert_labels(indices, classes=self.classes_)
        y_hat = CumlArray(data=y_hat, index=index)
        return y_hat

    @generate_docstring(
        X="dense_sparse",
        return_values={
            "name": "C",
            "type": "dense",
            "description": (
                "Returns the log-probability of the samples for each class in "
                "the model. The columns correspond to the classes in sorted "
                "order, as they appear in the attribute `classes_`."
            ),
            "shape": "(n_rows, 1)",
        },
    )
    def predict_log_proba(self, X) -> CumlArray:
        """
        Return log-probability estimates for the test vector X.

        """
        X, index = self._preprocess_input(X)
        jll = self._joint_log_likelihood(X)

        # normalize by P(X) = P(f_1, ..., f_n)
        # Compute log(sum(exp())) with the max subtracted inside the exp to
        # prevent overflow to inf (standard logsumexp trick).
        a_max = cp.amax(jll, axis=1, keepdims=True)

        exp = cp.exp(jll - a_max)
        logsumexp = cp.log(cp.sum(exp, axis=1))

        a_max = cp.squeeze(a_max, axis=1)

        log_prob_x = a_max + logsumexp

        if log_prob_x.ndim < 2:
            log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
        result = jll - log_prob_x.T
        result = CumlArray(data=result, index=index)
        return result

    @generate_docstring(
        X="dense_sparse",
        return_values={
            "name": "C",
            "type": "dense",
            "description": (
                "Returns the probability of the samples for each class in the "
                "model. The columns correspond to the classes in sorted order,"
                " as they appear in the attribute `classes_`."
            ),
            "shape": "(n_rows, 1)",
        },
    )
    def predict_proba(self, X) -> CumlArray:
        """
        Return probability estimates for the test vector X.

        """
        # Probabilities are just the exponentiated normalized log-probs.
        result = cp.exp(self.predict_log_proba(X))
        return result
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the
CUDA stream that will be used for the model's computations, so
users can run different models concurrently in different streams
by creating handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> X = cp.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1],
... [3, 2]], cp.float32)
>>> Y = cp.array([1, 1, 1, 2, 2, 2], cp.float32)
>>> from cuml.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict(cp.array([[-0.8, -1]], cp.float32)))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, cp.unique(Y))
GaussianNB()
>>> print(clf_pf.predict(cp.array([[-0.8, -1]], cp.float32)))
[1]
"""
def __init__(
self,
*,
priors=None,
var_smoothing=1e-9,
output_type=None,
handle=None,
verbose=False,
):
super(GaussianNB, self).__init__(
handle=handle, verbose=verbose, output_type=output_type
)
self.priors = priors
self.var_smoothing = var_smoothing
self.fit_called_ = False
self.classes_ = None
def fit(self, X, y, sample_weight=None) -> "GaussianNB":
"""
Fit Gaussian Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like shape (n_samples) Target values.
sample_weight : array-like of shape (n_samples)
Weights applied to individual samples (1. for unweighted).
Currently sample weight is ignored.
"""
return self._partial_fit(
X,
y,
_classes=cp.unique(y),
_refit=True,
sample_weight=sample_weight,
)
    @nvtx_annotate(
        message="naive_bayes.GaussianNB._partial_fit", domain="cuml_python"
    )
    def _partial_fit(
        self,
        X,
        y,
        _classes=None,
        _refit=False,
        sample_weight=None,
        convert_dtype=True,
    ) -> "GaussianNB":
        """Shared implementation behind ``fit`` and ``partial_fit``.

        Updates the per-class means (``theta_``) and variances
        (``sigma_``) with the batch ``(X, y)``. When ``_refit`` is True,
        previously accumulated state is discarded and the model is fit
        from scratch on this batch.

        Parameters
        ----------
        X : dense array or (scipy/cupy) sparse matrix of shape
            (n_samples, n_features)
        y : array-like of labels; converted to int32/int64 chosen to
            match X's dtype width
        _classes : array of all possible class labels; required on the
            very first call, optional afterwards
        _refit : bool, reset previously learned classes/statistics
        sample_weight : currently ignored
        convert_dtype : bool, allow implicit dtype conversion of y and
            _classes
        """
        # scipy is optional at runtime; fall back to a predicate that is
        # always False so only cupyx sparse inputs are detected.
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.internals.import_utils import (
                dummy_function_always_false as scipy_sparse_isspmatrix,
            )
        if getattr(self, "classes_") is None and _classes is None:
            raise ValueError(
                "classes must be passed on the first call " "to partial_fit."
            )
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = _convert_x_sparse(X)
        else:
            X = input_to_cupy_array(
                X, order="K", check_dtype=[cp.float32, cp.float64, cp.int32]
            ).array
        # Narrower label dtype for 32-bit feature dtypes, wider otherwise.
        expected_y_dtype = (
            cp.int32 if X.dtype in [cp.float32, cp.int32] else cp.int64
        )
        y = input_to_cupy_array(
            y,
            convert_to_dtype=(expected_y_dtype if convert_dtype else False),
            check_dtype=expected_y_dtype,
        ).array
        if _classes is not None:
            _classes, *_ = input_to_cuml_array(
                _classes,
                order="K",
                convert_to_dtype=(
                    expected_y_dtype if convert_dtype else False
                ),
            )
        # Y holds labels remapped to a contiguous 0..n_classes-1 range.
        Y, label_classes = make_monotonic(y, classes=_classes, copy=True)
        if _refit:
            self.classes_ = None
        def var_sparse(X, axis=0):
            # Compute the variance on dense and sparse matrices
            return ((X - X.mean(axis=axis)) ** 2).mean(axis=axis)
        # Variance-smoothing term, scaled by the largest feature variance
        # of this batch; added to sigma_ at the end of every update.
        self.epsilon_ = self.var_smoothing * var_sparse(X).max()
        if not self.fit_called_:
            self.fit_called_ = True
            # Original labels are stored on the instance
            if _classes is not None:
                check_labels(Y, _classes.to_output("cupy"))
                self.classes_ = _classes
            else:
                self.classes_ = label_classes
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.n_classes_ = n_classes
            self.n_features_ = n_features
            self.theta_ = cp.zeros((n_classes, n_features))
            self.sigma_ = cp.zeros((n_classes, n_features))
            self.class_count_ = cp.zeros(n_classes, dtype=X.dtype)
            if self.priors is not None:
                if len(self.priors) != n_classes:
                    raise ValueError(
                        "Number of priors must match number of" " classes."
                    )
                if not cp.isclose(self.priors.sum(), 1):
                    raise ValueError("The sum of the priors should be 1.")
                if (self.priors < 0).any():
                    raise ValueError("Priors must be non-negative.")
                self.class_prior, *_ = input_to_cupy_array(
                    self.priors, check_dtype=[cp.float32, cp.float64]
                )
        else:
            # Subsequent batch: strip the smoothing added by the previous
            # call so raw variances are combined, then validate labels.
            self.sigma_[:, :] -= self.epsilon_
            unique_y = cp.unique(y)
            unique_y_in_classes = cp.in1d(unique_y, cp.array(self.classes_))
            if not cp.all(unique_y_in_classes):
                raise ValueError(
                    "The target label(s) %s in y do not exist "
                    "in the initial classes %s"
                    % (unique_y[~unique_y_in_classes], self.classes_)
                )
        self.theta_, self.sigma_ = self._update_mean_variance(X, Y)
        # Re-apply the smoothing term after the statistics update.
        self.sigma_[:, :] += self.epsilon_
        if self.priors is None:
            self.class_prior = self.class_count_ / self.class_count_.sum()
        return self
def partial_fit(
self, X, y, classes=None, sample_weight=None
) -> "GaussianNB":
"""
Incremental fit on a batch of samples.
This method is expected to be called several times consecutively on
different chunks of a dataset so as to implement out-of-core or online
learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible (as long
as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. A sparse matrix in COO
format is preferred, other formats will go through a conversion
to COO.
y : array-like of shape (n_samples) Target values.
classes : array-like of shape (n_classes)
List of all the classes that can possibly appear in the y
vector. Must be provided at the first call to partial_fit,
can be omitted in subsequent calls.
sample_weight : array-like of shape (n_samples)
Weights applied to individual samples (1. for
unweighted). Currently sample weight is ignored.
Returns
-------
self : object
"""
return self._partial_fit(
X, y, classes, _refit=False, sample_weight=sample_weight
)
    def _update_mean_variance(self, X, Y, sample_weight=None):
        """Return updated per-class feature means and variances.

        Computes the mean/variance of the new batch ``(X, Y)`` with custom
        CUDA kernels (dense or COO paths), then merges them with the
        running statistics (``theta_``, ``sigma_``, ``class_count_``)
        using a pooled-variance combination. Also increments
        ``class_count_`` in place with this batch's per-class counts.

        Parameters
        ----------
        X : cupy dense array or cupyx sparse matrix (n_rows, n_cols)
        Y : cupy array (or CumlArray) of monotonic class labels
        sample_weight : defaults to an empty array, meaning unweighted;
            the kernels accept weights but callers currently pass none
        """
        if sample_weight is None:
            sample_weight = cp.zeros(0)
        labels_dtype = self.classes_.dtype
        mu = self.theta_
        var = self.sigma_
        # True on the very first batch: there is no past state to merge,
        # so the new batch's statistics are returned directly below.
        # (Must be computed before class_count_ is incremented.)
        early_return = self.class_count_.sum() == 0
        n_past = cp.expand_dims(self.class_count_, axis=1).copy()
        tpb = 32
        n_rows = X.shape[0]
        n_cols = X.shape[1]
        if X.shape[0] == 0:
            return mu, var
        # Make sure Y is cp array not CumlArray
        Y = cp.asarray(Y)
        new_mu = cp.zeros(
            (self.n_classes_, self.n_features_), order="F", dtype=X.dtype
        )
        new_var = cp.zeros(
            (self.n_classes_, self.n_features_), order="F", dtype=X.dtype
        )
        class_counts = cp.zeros(self.n_classes_, order="F", dtype=X.dtype)
        if cupyx.scipy.sparse.isspmatrix(X):
            X = X.tocoo()
            count_features_coo = count_features_coo_kernel(
                X.dtype, labels_dtype
            )
            # Run once for averages
            count_features_coo(
                (math.ceil(X.nnz / tpb),),
                (tpb,),
                (
                    new_mu,
                    X.row,
                    X.col,
                    X.data,
                    X.nnz,
                    n_rows,
                    n_cols,
                    Y,
                    sample_weight,
                    sample_weight.shape[0] > 0,
                    self.n_classes_,
                    False,
                ),
            )
            # Run again for variance
            count_features_coo(
                (math.ceil(X.nnz / tpb),),
                (tpb,),
                (
                    new_var,
                    X.row,
                    X.col,
                    X.data,
                    X.nnz,
                    n_rows,
                    n_cols,
                    Y,
                    sample_weight,
                    sample_weight.shape[0] > 0,
                    self.n_classes_,
                    True,  # square the values -> accumulates sum of squares
                ),
            )
        else:
            count_features_dense = count_features_dense_kernel(
                X.dtype, labels_dtype
            )
            # Run once for averages
            count_features_dense(
                (math.ceil(n_rows / tpb), math.ceil(n_cols / tpb), 1),
                (tpb, tpb, 1),
                (
                    new_mu,
                    X,
                    n_rows,
                    n_cols,
                    Y,
                    sample_weight,
                    sample_weight.shape[0] > 0,
                    self.n_classes_,
                    False,
                    X.flags["C_CONTIGUOUS"],
                    False,
                ),
            )
            # Run again for variance
            count_features_dense(
                (math.ceil(n_rows / tpb), math.ceil(n_cols / tpb), 1),
                (tpb, tpb, 1),
                (
                    new_var,
                    X,
                    n_rows,
                    n_cols,
                    Y,
                    sample_weight,
                    sample_weight.shape[0] > 0,
                    self.n_classes_,
                    True,  # square the values -> accumulates sum of squares
                    X.flags["C_CONTIGUOUS"],
                    False,
                ),
            )
        count_classes = count_classes_kernel(X.dtype, labels_dtype)
        count_classes(
            (math.ceil(n_rows / tpb),), (tpb,), (class_counts, n_rows, Y)
        )
        self.class_count_ += class_counts
        # Avoid any division by zero
        class_counts = cp.expand_dims(class_counts, axis=1)
        class_counts += cp.finfo(X.dtype).eps
        new_mu /= class_counts
        # Construct variance from sum squares
        new_var = (new_var / class_counts) - new_mu**2
        if early_return:
            return new_mu, new_var
        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight.shape[0] > 0:
            n_new = float(sample_weight.sum())
        else:
            n_new = class_counts
        # Pooled-mean/variance merge of old and new batch statistics.
        n_total = n_past + n_new
        total_mu = (new_mu * n_new + mu * n_past) / n_total
        old_ssd = var * n_past
        new_ssd = n_new * new_var
        ssd_sum = old_ssd + new_ssd
        combined_feature_counts = n_new * n_past / n_total
        mean_adj = (mu - new_mu) ** 2
        total_ssd = ssd_sum + combined_feature_counts * mean_adj
        total_var = total_ssd / n_total
        return total_mu, total_var
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(len(self.classes_)):
jointi = cp.log(self.class_prior[i])
n_ij = -0.5 * cp.sum(cp.log(2.0 * cp.pi * self.sigma_[i, :]))
centered = (X - self.theta_[i, :]) ** 2
zvals = centered / self.sigma_[i, :]
summed = cp.sum(zvals, axis=1)
n_ij = -(0.5 * summed) + n_ij
joint_log_likelihood.append(jointi + n_ij)
return cp.array(joint_log_likelihood).T
def get_param_names(self):
return super().get_param_names() + ["priors", "var_smoothing"]
class _BaseDiscreteNB(_BaseNB):
    """Shared machinery for naive Bayes models over discrete features.

    Accumulates per-class and per-(class, feature) counts from dense or
    sparse inputs via custom CUDA kernels, maintains the (log) class
    priors, and delegates model-specific smoothing / likelihood math to
    subclasses through ``_update_feature_log_prob`` and
    ``_joint_log_likelihood``.
    """

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_prior=True,
        class_prior=None,
        verbose=False,
        handle=None,
        output_type=None,
    ):
        """Store smoothing/prior hyperparameters and reset fit state.

        Parameters
        ----------
        alpha : float
            Additive (Laplace/Lidstone) smoothing parameter; must be >= 0.
        fit_prior : bool
            Learn class priors from data (otherwise uniform) when
            ``class_prior`` is not given.
        class_prior : array-like of shape (n_classes,) or None
            Fixed prior probabilities; overrides ``fit_prior``.
        """
        super(_BaseDiscreteNB, self).__init__(
            verbose=verbose, handle=handle, output_type=output_type
        )
        if class_prior is not None:
            self.class_prior, *_ = input_to_cuml_array(class_prior)
        else:
            self.class_prior = None
        if alpha < 0:
            raise ValueError("Smoothing parameter alpha should be >= 0.")
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.fit_called_ = False
        self.n_classes_ = 0
        self.n_features_ = None
        # Needed until Base no longer assumed cumlHandle
        self.handle = None

    def _check_X_y(self, X, y):
        # Hook for subclasses to validate/transform a training batch;
        # the base implementation is a pass-through.
        return X, y

    def _update_class_log_prior(self, class_prior=None):
        """Recompute ``class_log_prior_``.

        Precedence: an explicit ``class_prior`` array, then empirical
        priors from ``class_count_`` (when ``fit_prior``), else uniform.
        """
        if class_prior is not None:
            if class_prior.shape[0] != self.n_classes_:
                raise ValueError(
                    "Number of classes must match " "number of priors"
                )
            self.class_log_prior_ = cp.log(class_prior)
        elif self.fit_prior:
            log_class_count = cp.log(self.class_count_)
            self.class_log_prior_ = log_class_count - cp.log(
                self.class_count_.sum()
            )
        else:
            self.class_log_prior_ = cp.full(
                self.n_classes_, -math.log(self.n_classes_)
            )

    def partial_fit(
        self, X, y, classes=None, sample_weight=None
    ) -> "_BaseDiscreteNB":
        """
        Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively on
        different chunks of a dataset so as to implement out-of-core or online
        learning. This is especially useful when the whole dataset is too big
        to fit in memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible (as long
        as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features
        y : array-like of shape (n_samples) Target values.
        classes : array-like of shape (n_classes)
            List of all the classes that can possibly appear in the y
            vector. Must be provided at the first call to partial_fit,
            can be omitted in subsequent calls.
        sample_weight : array-like of shape (n_samples)
            Weights applied to individual samples (1. for
            unweighted). Currently sample weight is ignored.

        Returns
        -------
        self : object
        """
        return self._partial_fit(
            X, y, sample_weight=sample_weight, _classes=classes
        )

    @nvtx_annotate(
        message="naive_bayes._BaseDiscreteNB._partial_fit",
        domain="cuml_python",
    )
    def _partial_fit(
        self, X, y, sample_weight=None, _classes=None, convert_dtype=True
    ) -> "_BaseDiscreteNB":
        """Shared implementation behind ``fit`` and ``partial_fit``.

        Validates and converts inputs, initializes counters on the first
        call, then accumulates class/feature counts and refreshes the
        log probabilities.
        """
        # scipy is optional at runtime; fall back to a predicate that is
        # always False so only cupyx sparse inputs are detected.
        if has_scipy():
            from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
        else:
            from cuml.internals.import_utils import (
                dummy_function_always_false as scipy_sparse_isspmatrix,
            )
        # TODO: use SparseCumlArray
        if scipy_sparse_isspmatrix(X) or cupyx.scipy.sparse.isspmatrix(X):
            X = _convert_x_sparse(X)
        else:
            X = input_to_cupy_array(
                X, order="K", check_dtype=[cp.float32, cp.float64, cp.int32]
            ).array
        # Narrower label dtype for 32-bit feature dtypes, wider otherwise.
        expected_y_dtype = (
            cp.int32 if X.dtype in [cp.float32, cp.int32] else cp.int64
        )
        y = input_to_cupy_array(
            y,
            convert_to_dtype=(expected_y_dtype if convert_dtype else False),
            check_dtype=expected_y_dtype,
        ).array
        if _classes is not None:
            _classes, *_ = input_to_cuml_array(
                _classes,
                order="K",
                convert_to_dtype=(
                    expected_y_dtype if convert_dtype else False
                ),
            )
        # Y holds labels remapped to a contiguous 0..n_classes-1 range.
        Y, label_classes = make_monotonic(y, classes=_classes, copy=True)
        X, Y = self._check_X_y(X, Y)
        if not self.fit_called_:
            self.fit_called_ = True
            if _classes is not None:
                check_labels(Y, _classes.to_output("cupy"))
                self.classes_ = _classes
            else:
                self.classes_ = label_classes
            self.n_classes_ = self.classes_.shape[0]
            self.n_features_ = X.shape[1]
            self._init_counters(self.n_classes_, self.n_features_, X.dtype)
        else:
            check_labels(Y, self.classes_)
        if cupyx.scipy.sparse.isspmatrix(X):
            # X is assumed to be a COO here
            self._count_sparse(X.row, X.col, X.data, X.shape, Y, self.classes_)
        else:
            self._count(X, Y, self.classes_)
        self._update_feature_log_prob(self.alpha)
        self._update_class_log_prior(class_prior=self.class_prior)
        return self

    def fit(self, X, y, sample_weight=None) -> "_BaseDiscreteNB":
        """
        Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like shape (n_samples) Target values.
        sample_weight : array-like of shape (n_samples)
            Weights applied to individual samples (1. for unweighted).
            Currently sample weight is ignored.
        """
        self.fit_called_ = False
        # NOTE: sample_weight must be passed by keyword here; passed
        # positionally it would land in partial_fit's `classes` parameter.
        return self.partial_fit(X, y, sample_weight=sample_weight)

    def _init_counters(self, n_effective_classes, n_features, dtype):
        # Fresh zeroed accumulators for class and (class, feature) counts.
        self.class_count_ = cp.zeros(
            n_effective_classes, order="F", dtype=dtype
        )
        self.feature_count_ = cp.zeros(
            (n_effective_classes, n_features), order="F", dtype=dtype
        )

    def update_log_probs(self):
        """
        Updates the log probabilities. This enables lazy update for
        applications like distributed Naive Bayes, so that the model
        can be updated incrementally without incurring this cost each
        time.
        """
        self._update_feature_log_prob(self.alpha)
        self._update_class_log_prior(class_prior=self.class_prior)

    def _count(self, X, Y, classes):
        """
        Sum feature counts & class prior counts and add to current model.

        Parameters
        ----------
        X : cupy.ndarray or cupyx.scipy.sparse matrix of size
            (n_rows, n_features)
        Y : cupy.array of monotonic class labels
        """
        n_classes = classes.shape[0]
        # Empty weight array signals "unweighted" to the kernels.
        sample_weight = cp.zeros(0)
        if X.ndim != 2:
            raise ValueError("Input samples should be a 2D array")
        if Y.dtype != classes.dtype:
            warnings.warn(
                "Y dtype does not match classes_ dtype. Y will be "
                "converted, which will increase memory consumption"
            )
        # Make sure Y is a cupy array, not CumlArray
        Y = cp.asarray(Y)
        counts = cp.zeros(
            (n_classes, self.n_features_), order="F", dtype=X.dtype
        )
        class_c = cp.zeros(n_classes, order="F", dtype=X.dtype)
        n_rows = X.shape[0]
        n_cols = X.shape[1]
        # 2D launch for the feature-count kernel uses a 32x32 block.
        tpb = 32
        labels_dtype = classes.dtype
        count_features_dense = count_features_dense_kernel(
            X.dtype, labels_dtype
        )
        count_features_dense(
            (math.ceil(n_rows / tpb), math.ceil(n_cols / tpb), 1),
            (tpb, tpb, 1),
            (
                counts,
                X,
                n_rows,
                n_cols,
                Y,
                sample_weight,
                sample_weight.shape[0] > 0,
                n_classes,
                False,
                X.flags["C_CONTIGUOUS"],
                False,
            ),
        )
        # 1D launch for the class-count kernel uses larger blocks.
        tpb = 256
        count_classes = count_classes_kernel(X.dtype, labels_dtype)
        count_classes((math.ceil(n_rows / tpb),), (tpb,), (class_c, n_rows, Y))
        self.feature_count_ += counts
        self.class_count_ += class_c

    def _count_sparse(
        self, x_coo_rows, x_coo_cols, x_coo_data, x_shape, Y, classes
    ):
        """
        Sum feature counts & class prior counts and add to current model.

        Parameters
        ----------
        x_coo_rows : cupy.ndarray of size (nnz)
        x_coo_cols : cupy.ndarray of size (nnz)
        x_coo_data : cupy.ndarray of size (nnz)
        Y : cupy.array of monotonic class labels
        """
        n_classes = classes.shape[0]
        if Y.dtype != classes.dtype:
            warnings.warn(
                "Y dtype does not match classes_ dtype. Y will be "
                "converted, which will increase memory consumption"
            )
        # Empty weight array signals "unweighted" to the kernels.
        sample_weight = cp.zeros(0)
        # Make sure Y is a cupy array, not CumlArray
        Y = cp.asarray(Y)
        counts = cp.zeros(
            (n_classes, self.n_features_), order="F", dtype=x_coo_data.dtype
        )
        class_c = cp.zeros(n_classes, order="F", dtype=x_coo_data.dtype)
        n_rows = x_shape[0]
        n_cols = x_shape[1]
        tpb = 256
        labels_dtype = classes.dtype
        count_features_coo = count_features_coo_kernel(
            x_coo_data.dtype, labels_dtype
        )
        count_features_coo(
            (math.ceil(x_coo_rows.shape[0] / tpb),),
            (tpb,),
            (
                counts,
                x_coo_rows,
                x_coo_cols,
                x_coo_data,
                x_coo_rows.shape[0],
                n_rows,
                n_cols,
                Y,
                sample_weight,
                sample_weight.shape[0] > 0,
                n_classes,
                False,
            ),
        )
        count_classes = count_classes_kernel(x_coo_data.dtype, labels_dtype)
        count_classes((math.ceil(n_rows / tpb),), (tpb,), (class_c, n_rows, Y))
        self.feature_count_ = self.feature_count_ + counts
        self.class_count_ = self.class_count_ + class_c

    def get_param_names(self):
        """Hyperparameter names accepted by this estimator's constructor."""
        return super().get_param_names() + [
            "alpha",
            "fit_prior",
            "class_prior",
        ]
class MultinomialNB(_BaseDiscreteNB):
    # TODO: Make this extend cuml.Base:
    # https://github.com/rapidsai/cuml/issues/1834
    """
    Naive Bayes classifier for multinomial models

    The multinomial Naive Bayes classifier is suitable for classification
    with discrete features (e.g., word counts for text classification).
    The multinomial distribution normally requires integer feature counts.
    However, in practice, fractional counts such as tf-idf may also work.

    Parameters
    ----------
    alpha : float (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter (0 for no
        smoothing).
    fit_prior : boolean (default=True)
        Whether to learn class prior probabilities or no. If false, a
        uniform prior will be used.
    class_prior : array-like, size (n_classes) (default=None)
        Prior probabilities of the classes. If specified, the priors are
        not adjusted according to the data.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes)
        Number of samples encountered for each class during fitting.
    class_log_prior_ : ndarray of shape (n_classes)
        Log probability of each class (smoothed).
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting.
    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical log probability of features given a class, P(x_i|y).
    n_features_ : int
        Number of features of each sample.

    Examples
    --------
    Load the 20 newsgroups dataset from Scikit-learn and train a
    Naive Bayes classifier.

    .. code-block:: python

        >>> import cupy as cp
        >>> import cupyx
        >>> from sklearn.datasets import fetch_20newsgroups
        >>> from sklearn.feature_extraction.text import CountVectorizer
        >>> from cuml.naive_bayes import MultinomialNB

        >>> # Load corpus
        >>> twenty_train = fetch_20newsgroups(subset='train', shuffle=True,
        ...                                   random_state=42)

        >>> # Turn documents into term frequency vectors
        >>> count_vect = CountVectorizer()
        >>> features = count_vect.fit_transform(twenty_train.data)

        >>> # Put feature vectors and labels on the GPU
        >>> X = cupyx.scipy.sparse.csr_matrix(features.tocsr(),
        ...                                   dtype=cp.float32)
        >>> y = cp.asarray(twenty_train.target, dtype=cp.int32)

        >>> # Train model
        >>> model = MultinomialNB()
        >>> model.fit(X, y)
        MultinomialNB()

        >>> # Compute accuracy on training set
        >>> model.score(X, y)
        0.9245...
    """

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_prior=True,
        class_prior=None,
        output_type=None,
        handle=None,
        verbose=False,
    ):
        super(MultinomialNB, self).__init__(
            alpha=alpha,
            fit_prior=fit_prior,
            class_prior=class_prior,
            handle=handle,
            output_type=output_type,
            verbose=verbose,
        )

    def _update_feature_log_prob(self, alpha):
        """
        Apply add-lambda smoothing to raw counts and recompute
        log probabilities

        Parameters
        ----------
        alpha : float amount of smoothing to apply (0. means no smoothing)
        """
        smoothed_fc = self.feature_count_ + alpha
        # Per-class totals, already shaped (n_classes, 1); the previous
        # implementation reshaped this column vector a second time for
        # no effect.
        smoothed_cc = smoothed_fc.sum(axis=1).reshape(-1, 1)
        self.feature_log_prob_ = cp.log(smoothed_fc) - cp.log(smoothed_cc)

    def _joint_log_likelihood(self, X):
        """
        Calculate the posterior log probability of the samples X

        Parameters
        ----------
        X : array-like of size (n_samples, n_features)
        """
        ret = X.dot(self.feature_log_prob_.T)
        ret += self.class_log_prior_
        return ret
class BernoulliNB(_BaseDiscreteNB):
    """
    Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, default=0.0
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes)
        Number of samples encountered for each class during fitting.
    class_log_prior_ : ndarray of shape (n_classes)
        Log probability of each class (smoothed).
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting.
    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical log probability of features given a class, P(x_i|y).
    n_features_ : int
        Number of features of each sample.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> rng = cp.random.RandomState(1)
        >>> X = rng.randint(5, size=(6, 100), dtype=cp.int32)
        >>> Y = cp.array([1, 2, 3, 4, 4, 5])
        >>> from cuml.naive_bayes import BernoulliNB
        >>> clf = BernoulliNB()
        >>> clf.fit(X, Y)
        BernoulliNB()
        >>> print(clf.predict(X[2:3]))
        [3]

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(
        self,
        *,
        alpha=1.0,
        binarize=0.0,
        fit_prior=True,
        class_prior=None,
        output_type=None,
        handle=None,
        verbose=False,
    ):
        super(BernoulliNB, self).__init__(
            alpha=alpha,
            fit_prior=fit_prior,
            class_prior=class_prior,
            handle=handle,
            output_type=output_type,
            verbose=verbose,
        )
        self.binarize = binarize

    def _apply_binarize(self, X):
        """Threshold features at ``self.binarize`` (no-op when None).

        Shared by ``_check_X`` and ``_check_X_y`` so the dense/sparse
        handling lives in a single place.
        """
        if self.binarize is not None:
            if cupyx.scipy.sparse.isspmatrix(X):
                # For sparse inputs only the stored values are thresholded.
                X.data = binarize(X.data, threshold=self.binarize)
            else:
                X = binarize(X, threshold=self.binarize)
        return X

    def _check_X(self, X):
        # Validate a prediction batch, then binarize it.
        X = super()._check_X(X)
        return self._apply_binarize(X)

    def _check_X_y(self, X, y):
        # Validate a training batch, then binarize the features.
        X, y = super()._check_X_y(X, y)
        return self._apply_binarize(X), y

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError(
                "Expected input with %d features, got %d instead"
                % (n_features, n_features_X)
            )
        neg_prob = cp.log(1 - cp.exp(self.feature_log_prob_))
        # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
        jll = X.dot((self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)
        return jll

    def _update_feature_log_prob(self, alpha):
        """
        Apply add-lambda smoothing to raw counts and recompute
        log probabilities

        Parameters
        ----------
        alpha : float amount of smoothing to apply (0. means no smoothing)
        """
        smoothed_fc = self.feature_count_ + alpha
        # Each feature is Bernoulli, hence two outcomes per class: alpha * 2.
        smoothed_cc = self.class_count_ + alpha * 2
        self.feature_log_prob_ = cp.log(smoothed_fc) - cp.log(
            smoothed_cc.reshape(-1, 1)
        )

    def get_param_names(self):
        """Hyperparameter names accepted by this estimator's constructor."""
        return super().get_param_names() + ["binarize"]
class ComplementNB(_BaseDiscreteNB):
    """
    The Complement Naive Bayes classifier described in Rennie et al. (2003).

    The Complement Naive Bayes classifier was designed to correct the "severe
    assumptions" made by the standard Multinomial Naive Bayes classifier. It is
    particularly suited for imbalanced data sets.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    norm : bool, default=False
        Whether or not a second normalization of the weights is performed.
        The default behavior mirrors the implementation found in Mahout and
        Weka, which do not follow the full algorithm described in Table 9 of
        the paper.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    class_count_ : ndarray of shape (n_classes)
        Number of samples encountered for each class during fitting.
    class_log_prior_ : ndarray of shape (n_classes)
        Log probability of each class (smoothed).
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    feature_count_ : ndarray of shape (n_classes, n_features)
        Number of samples encountered for each (class, feature)
        during fitting.
    feature_log_prob_ : ndarray of shape (n_classes, n_features)
        Empirical log probability of features given a class, P(x_i|y).
    n_features_ : int
        Number of features of each sample.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> rng = cp.random.RandomState(1)
        >>> X = rng.randint(5, size=(6, 100), dtype=cp.int32)
        >>> Y = cp.array([1, 2, 3, 4, 4, 5])
        >>> from cuml.naive_bayes import ComplementNB
        >>> clf = ComplementNB()
        >>> clf.fit(X, Y)
        ComplementNB()
        >>> print(clf.predict(X[2:3]))
        [3]

    References
    ----------
    Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
    Tackling the poor assumptions of naive bayes text classifiers. In ICML
    (Vol. 3, pp. 616-623).
    https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
    """

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_prior=True,
        class_prior=None,
        norm=False,
        output_type=None,
        handle=None,
        verbose=False,
    ):
        super(ComplementNB, self).__init__(
            alpha=alpha,
            fit_prior=fit_prior,
            class_prior=class_prior,
            handle=handle,
            output_type=output_type,
            verbose=verbose,
        )
        self.norm = norm

    @staticmethod
    def _ensure_non_negative(X):
        """Raise if X (dense or sparse) contains any negative value.

        Shared by ``_check_X`` and ``_check_X_y`` so the dense/sparse
        handling lives in a single place.
        """
        if cupyx.scipy.sparse.isspmatrix(X):
            X_min = X.data.min()
        else:
            X_min = X.min()
        if X_min < 0:
            raise ValueError("Negative values in data passed to ComplementNB")

    def _check_X(self, X):
        # Validate a prediction batch; counts must be non-negative.
        X = super()._check_X(X)
        self._ensure_non_negative(X)
        return X

    def _check_X_y(self, X, y):
        # Validate a training batch; counts must be non-negative.
        X, y = super()._check_X_y(X, y)
        self._ensure_non_negative(X)
        return X, y

    def _count(self, X, Y, classes):
        super()._count(X, Y, classes)
        # Complement NB also needs the per-feature totals over all classes.
        self.feature_all_ = self.feature_count_.sum(axis=0)

    def _count_sparse(
        self, x_coo_rows, x_coo_cols, x_coo_data, x_shape, Y, classes
    ):
        super()._count_sparse(
            x_coo_rows, x_coo_cols, x_coo_data, x_shape, Y, classes
        )
        # Complement NB also needs the per-feature totals over all classes.
        self.feature_all_ = self.feature_count_.sum(axis=0)

    def _joint_log_likelihood(self, X):
        """Calculate the class scores for the samples in X."""
        jll = X.dot(self.feature_log_prob_.T)
        if len(self.class_count_) == 1:
            # With a single class the prior is the only discriminating term.
            jll += self.class_log_prior_
        return jll

    def _update_feature_log_prob(self, alpha):
        """
        Apply smoothing to raw counts and compute the weights.

        Parameters
        ----------
        alpha : float amount of smoothing to apply (0. means no smoothing)
        """
        # Complement counts: totals of all classes minus this class's counts.
        comp_count = self.feature_all_ + alpha - self.feature_count_
        logged = cp.log(comp_count / comp_count.sum(axis=1, keepdims=True))
        if self.norm:
            summed = logged.sum(axis=1, keepdims=True)
            feature_log_prob = logged / summed
        else:
            feature_log_prob = -logged
        self.feature_log_prob_ = feature_log_prob

    def get_param_names(self):
        """Hyperparameter names accepted by this estimator's constructor."""
        return super().get_param_names() + ["norm"]
class CategoricalNB(_BaseDiscreteNB):
    """
    Naive Bayes classifier for categorical features

    The categorical Naive Bayes classifier is suitable for classification with
    discrete features that are categorically distributed. The categories of
    each feature are drawn from a categorical distribution.

    Parameters
    ----------
    alpha : float, default=1.0
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : bool, default=True
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like of shape (n_classes,), default=None
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the
        CUDA stream that will be used for the model's computations, so
        users can run different models concurrently in different streams
        by creating handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    category_count_ : ndarray of shape (n_features, n_classes, n_categories)
        With n_categories being the highest category of all the features.
        This array provides the number of samples encountered for each feature,
        class and category of the specific feature.
    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting.
    class_log_prior_ : ndarray of shape (n_classes,)
        Smoothed empirical log probability for each class.
    classes_ : ndarray of shape (n_classes,)
        Class labels known to the classifier
    feature_log_prob_ : ndarray of shape (n_features, n_classes, n_categories)
        With n_categories being the highest category of all the features.
        Each array of shape (n_classes, n_categories) provides the empirical
        log probability of categories given the respective feature
        and class, ``P(x_i|y)``.
        This attribute is not available when the model has been trained with
        sparse data.
    n_features_ : int
        Number of features of each sample.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> rng = cp.random.RandomState(1)
        >>> X = rng.randint(5, size=(6, 100), dtype=cp.int32)
        >>> y = cp.array([1, 2, 3, 4, 5, 6])
        >>> from cuml.naive_bayes import CategoricalNB
        >>> clf = CategoricalNB()
        >>> clf.fit(X, y)
        CategoricalNB()
        >>> print(clf.predict(X[2:3]))
        [3]
    """

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_prior=True,
        class_prior=None,
        output_type=None,
        handle=None,
        verbose=False,
    ):
        super(CategoricalNB, self).__init__(
            alpha=alpha,
            fit_prior=fit_prior,
            class_prior=class_prior,
            handle=handle,
            output_type=output_type,
            verbose=verbose,
        )

    def _validate_input(self, X):
        """Validate and convert X for categorical counting.

        Converts X (dense or sparse) to int32, warning only when a
        conversion actually takes place, and rejects negative category
        codes. Returns the (possibly converted) X.
        """
        if cupyx.scipy.sparse.isspmatrix(X):
            # BUG FIX: the warning used to be emitted, and X.data copied,
            # unconditionally -- even when X was already int32. Guard on the
            # actual dtype so the message is truthful and no needless copy
            # is made.
            if X.dtype != cp.int32:
                warnings.warn(
                    "X dtype is not int32. X will be "
                    "converted, which will increase memory consumption"
                )
                X.data = X.data.astype(cp.int32)
            x_min = X.data.min()
        else:
            if X.dtype not in [cp.int32]:
                warnings.warn(
                    "X dtype is not int32. X will be "
                    "converted, which will increase memory "
                    "consumption"
                )
                X = input_to_cupy_array(
                    X, order="K", convert_to_dtype=cp.int32
                ).array
            x_min = X.min()
        # Category codes must be non-negative integers 0..n-1.
        if x_min < 0:
            raise ValueError("Negative values in data passed to CategoricalNB")
        return X

    def _check_X_y(self, X, y):
        # y validation is handled by the base class; only X needs the
        # categorical-specific checks.
        return self._validate_input(X), y

    def _check_X(self, X):
        return self._validate_input(X)

    def fit(self, X, y, sample_weight=None) -> "CategoricalNB":
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features. Here, each feature of X is
            assumed to be from a different categorical distribution.
            It is further assumed that all categories of each feature are
            represented by the numbers 0, ..., n - 1, where n refers to the
            total number of categories for the given feature. This can, for
            instance, be achieved with the help of OrdinalEncoder.
        y : array-like of shape (n_samples,)
            Target values.
        sample_weight : array-like of shape (n_samples), default=None
            Weights applied to individual samples (1. for unweighted).
            Currently sample weight is ignored.

        Returns
        -------
        self : object
        """
        return super().fit(X, y, sample_weight=sample_weight)

    def partial_fit(
        self, X, y, classes=None, sample_weight=None
    ) -> "CategoricalNB":
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features. Here, each feature of X is
            assumed to be from a different categorical distribution.
            It is further assumed that all categories of each feature are
            represented by the numbers 0, ..., n - 1, where n refers to the
            total number of categories for the given feature. This can, for
            instance, be achieved with the help of OrdinalEncoder.
        y : array-like of shape (n_samples)
            Target values.
        classes : array-like of shape (n_classes), default=None
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like of shape (n_samples), default=None
            Weights applied to individual samples (1. for unweighted).
            Currently sample weight is ignored.

        Returns
        -------
        self : object
        """
        return super().partial_fit(X, y, classes, sample_weight=sample_weight)

    def _count_sparse(
        self, x_coo_rows, x_coo_cols, x_coo_data, x_shape, Y, classes
    ):
        """
        Sum feature counts & class prior counts and add to current model.

        Parameters
        ----------
        x_coo_rows : cupy.ndarray of size (nnz)
        x_coo_cols : cupy.ndarray of size (nnz)
        x_coo_data : cupy.ndarray of size (nnz)
        Y : cupy.array of monotonic class labels
        """
        n_classes = classes.shape[0]
        n_rows = x_shape[0]
        n_cols = x_shape[1]
        x_coo_nnz = x_coo_rows.shape[0]
        labels_dtype = classes.dtype
        tpb = 256
        if Y.dtype != classes.dtype:
            warnings.warn(
                "Y dtype does not match classes_ dtype. Y will be "
                "converted, which will increase memory consumption"
            )
            # NOTE(review): the warning promises a dtype conversion, but
            # cp.asarray below does not change Y's dtype while the kernel is
            # compiled for labels_dtype -- confirm Y is converted upstream.
        # Make sure Y is a cupy array, not CumlArray
        Y = cp.asarray(Y)

        # Per-class sample counts, accumulated on device.
        class_c = cp.zeros(n_classes, dtype=self.class_count_.dtype)
        count_classes = count_classes_kernel(
            self.class_count_.dtype, labels_dtype
        )
        count_classes((math.ceil(n_rows / tpb),), (tpb,), (class_c, n_rows, Y))

        highest_feature = int(x_coo_data.max()) + 1
        feature_diff = highest_feature - self.category_count_.shape[1]
        # In case of a partial fit, pad the array to have the highest feature
        if not cupyx.scipy.sparse.issparse(self.category_count_):
            self.category_count_ = cupyx.scipy.sparse.coo_matrix(
                (self.n_features_ * n_classes, highest_feature)
            )
        elif feature_diff > 0:
            self.category_count_ = cupyx.scipy.sparse.coo_matrix(
                self.category_count_,
                shape=(self.n_features_ * n_classes, highest_feature),
            )
        highest_feature = self.category_count_.shape[1]

        # Map each nonzero (row, col, val) of X to an output coordinate in
        # the flattened (feature * class, category) count matrix.
        count_features_coo = cp.ElementwiseKernel(
            "int32 row, int32 col, int32 val, int32 nnz, int32 n_classes, \
            int32 n_cols, raw T labels",
            "int32 out_row, int32 out_col",
            """
            T label = labels[row];
            out_row = col + n_cols * label;
            out_col = val;
            """,
            "count_features_categorical_coo_kernel",
        )
        counts_rows, counts_cols = count_features_coo(
            x_coo_rows, x_coo_cols, x_coo_data, x_coo_nnz, n_classes, n_cols, Y
        )
        # Create the sparse category count matrix from the result of
        # the raw kernel
        counts = cupyx.scipy.sparse.coo_matrix(
            (cp.ones(x_coo_nnz), (counts_rows, counts_cols)),
            shape=(self.n_features_ * n_classes, highest_feature),
        ).tocsr()
        # Adjust with the missing (zeros) data of the sparse matrix:
        # entries absent from the COO input correspond to category 0.
        for i in range(n_classes):
            counts[i * n_cols : (i + 1) * n_cols, 0] = (Y == i).sum() - counts[
                i * n_cols : (i + 1) * n_cols
            ].sum(1)
        self.category_count_ = (self.category_count_ + counts).tocoo()
        self.class_count_ = self.class_count_ + class_c

    def _count(self, X, Y, classes):
        """Accumulate dense category and class counts into the model."""
        Y = cp.asarray(Y)
        tpb = 32
        n_rows = X.shape[0]
        n_cols = X.shape[1]
        n_classes = classes.shape[0]
        labels_dtype = classes.dtype
        # Empty placeholder: sample weights are currently unsupported here.
        sample_weight = cp.zeros(0, dtype=X.dtype)

        highest_feature = int(X.max()) + 1
        feature_diff = highest_feature - self.category_count_.shape[2]
        # In case of a partial fit, pad the array to have the highest feature
        if feature_diff > 0:
            self.category_count_ = cp.pad(
                self.category_count_,
                [(0, 0), (0, 0), (0, feature_diff)],
                "constant",
            )
        highest_feature = self.category_count_.shape[2]

        counts = cp.zeros(
            (self.n_features_, n_classes, highest_feature),
            order="F",
            dtype=X.dtype,
        )
        count_features = count_features_dense_kernel(X.dtype, Y.dtype)
        count_features(
            (math.ceil(n_rows / tpb), math.ceil(n_cols / tpb), 1),
            (tpb, tpb, 1),
            (
                counts,
                X,
                n_rows,
                n_cols,
                Y,
                sample_weight,
                sample_weight.shape[0] > 0,
                self.n_classes_,
                False,
                X.flags["C_CONTIGUOUS"],
                True,
            ),
        )
        self.category_count_ += counts

        class_c = cp.zeros(n_classes, order="F", dtype=self.class_count_.dtype)
        count_classes = count_classes_kernel(class_c.dtype, labels_dtype)
        count_classes((math.ceil(n_rows / tpb),), (tpb,), (class_c, n_rows, Y))
        self.class_count_ += class_c

    def _init_counters(self, n_effective_classes, n_features, dtype):
        # Category axis starts empty and is padded lazily as higher
        # category codes are observed (see _count / _count_sparse).
        self.class_count_ = cp.zeros(
            n_effective_classes, order="F", dtype=cp.float64
        )
        self.category_count_ = cp.zeros(
            (n_features, n_effective_classes, 0), order="F", dtype=dtype
        )

    def _update_feature_log_prob(self, alpha):
        """Recompute smoothed (log) category probabilities from the counts."""
        highest_feature = cp.zeros(self.n_features_, dtype=cp.float64)
        if cupyx.scipy.sparse.issparse(self.category_count_):
            # For sparse data we avoid the creation of the dense matrix
            # feature_log_prob_. This can be created on the fly during
            # the prediction without using as much memory.
            features = self.category_count_.row % self.n_features_
            cupyx.scatter_max(
                highest_feature, features, self.category_count_.col
            )
            highest_feature = (highest_feature + 1) * alpha
            smoothed_class_count = self.category_count_.sum(axis=1)
            smoothed_class_count = smoothed_class_count.reshape(
                (self.n_classes_, self.n_features_)
            ).T
            smoothed_class_count += highest_feature[:, cp.newaxis]
            smoothed_cat_count = cupyx.scipy.sparse.coo_matrix(
                self.category_count_
            )
            smoothed_cat_count.data = cp.log(smoothed_cat_count.data + alpha)
            self.smoothed_cat_count = smoothed_cat_count.tocsr()
            self.smoothed_class_count = cp.log(smoothed_class_count)
        else:
            indices = self.category_count_.nonzero()
            cupyx.scatter_max(highest_feature, indices[0], indices[2])
            highest_feature = (highest_feature + 1) * alpha
            smoothed_class_count = (
                self.category_count_.sum(axis=2)
                + highest_feature[:, cp.newaxis]
            )
            smoothed_cat_count = self.category_count_ + alpha
            self.feature_log_prob_ = cp.log(smoothed_cat_count) - cp.log(
                smoothed_class_count[:, :, cp.newaxis]
            )

    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X,
        shape (n_samples, n_classes)."""
        if not X.shape[1] == self.n_features_:
            raise ValueError(
                "Expected input with %d features, got %d instead"
                % (self.n_features_, X.shape[1])
            )
        n_rows = X.shape[0]
        if cupyx.scipy.sparse.isspmatrix(X):
            # For sparse data we assume that most categories will be zeros,
            # so we first compute the jll for categories 0
            features_zeros = self.smoothed_cat_count[:, 0].todense()
            features_zeros = features_zeros.reshape(
                self.n_classes_, self.n_features_
            ).T
            if self.alpha != 1.0:
                # Counts that were zero have log(0 + alpha) = log(alpha)
                # which the sparse representation stored as 0.
                features_zeros[cp.where(features_zeros == 0)] += cp.log(
                    self.alpha
                )
            features_zeros -= self.smoothed_class_count
            features_zeros = features_zeros.sum(0)
            jll = cp.repeat(features_zeros[cp.newaxis, :], n_rows, axis=0)
            X = X.tocoo()
            col_indices = X.col
            # Adjust with the non-zeros data by adding jll_data (non-zeros)
            # and subtracting jll_zeros which are the zeros
            # that were first computed
            for i in range(self.n_classes_):
                jll_data = self.smoothed_cat_count[
                    col_indices + i * self.n_features_, X.data
                ].ravel()
                jll_zeros = self.smoothed_cat_count[
                    col_indices + i * self.n_features_, 0
                ].todense()[:, 0]
                if self.alpha != 1.0:
                    jll_data[cp.where(jll_data == 0)] += cp.log(self.alpha)
                    jll_zeros[cp.where(jll_zeros == 0)] += cp.log(self.alpha)
                jll_data -= jll_zeros
                cupyx.scatter_add(jll[:, i], X.row, jll_data)
        else:
            # Dense path: gather log P(x_i = category | class) for every
            # (sample, feature) pair, then sum over features.
            col_indices = cp.indices(X.shape)[1].flatten()
            jll = self.feature_log_prob_[col_indices, :, X.ravel()]
            jll = jll.reshape((n_rows, self.n_features_, self.n_classes_))
            jll = jll.sum(1)
        jll += self.class_log_prior_
        return jll
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/naive_bayes/__init__.py | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.naive_bayes.naive_bayes import MultinomialNB
from cuml.naive_bayes.naive_bayes import BernoulliNB
from cuml.naive_bayes.naive_bayes import GaussianNB
from cuml.naive_bayes.naive_bayes import ComplementNB
from cuml.naive_bayes.naive_bayes import CategoricalNB
__all__ = [
"MultinomialNB",
"BernoulliNB",
"GaussianNB",
"ComplementNB",
"CategoricalNB",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/multiclass/multiclass.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.internals.import_utils import has_sklearn
from cuml.internals.mixins import ClassifierMixin
from cuml.common.doc_utils import generate_docstring
from cuml.common import (
input_to_host_array,
input_to_host_array_with_sparse_support,
)
from cuml.internals.input_utils import (
input_to_cupy_array,
determine_array_type_full,
)
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals import _deprecate_pos_args
class MulticlassClassifier(Base, ClassifierMixin):
    """
    Wrapper around scikit-learn multiclass classifiers that allows
    selecting the multiclass strategy at construction time.

    The input can be any kind of cuML compatible array, and the output type
    follows cuML's output type configuration rules.

    Before the data is passed to scikit-learn, it is converted to a host
    (numpy) array. Under the hood the data is partitioned for binary
    classification, and it is transformed back to the device by the cuML
    estimator. These copies back and forth between the device and the host
    have some overhead. For more details see issue
    https://github.com/rapidsai/cuml/issues/2876.

    Examples
    --------
    .. code-block:: python

        >>> from cuml.linear_model import LogisticRegression
        >>> from cuml.multiclass import MulticlassClassifier
        >>> from cuml.datasets.classification import make_classification

        >>> X, y = make_classification(n_samples=10, n_features=6,
        ...                            n_informative=4, n_classes=3,
        ...                            random_state=137)

        >>> cls = MulticlassClassifier(LogisticRegression(), strategy='ovo')
        >>> cls.fit(X,y)
        MulticlassClassifier()
        >>> cls.predict(X)
        array([2, 0, 2, 2, 2, 1, 1, 0, 1, 1])

    Parameters
    ----------
    estimator : cuML estimator
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    strategy: string {'ovr', 'ovo'}, default='ovr'
        Multiclass classification strategy: 'ovr': one vs. rest or 'ovo': one
        vs. one

    Attributes
    ----------
    classes_ : float, shape (`n_classes_`)
        Array of class labels.
    n_classes_ : int
        Number of classes.
    """

    @_deprecate_pos_args(version="21.06")
    def __init__(
        self,
        estimator,
        *,
        handle=None,
        verbose=False,
        output_type=None,
        strategy="ovr",
    ):
        super().__init__(
            handle=handle, verbose=verbose, output_type=output_type
        )
        # The sklearn wrapper around `estimator` is built lazily in fit().
        self.strategy = strategy
        self.estimator = estimator

    @property
    @cuml.internals.api_base_return_array_skipall
    def classes_(self):
        # Delegated to the fitted sklearn multiclass wrapper.
        return self.multiclass_estimator.classes_

    @property
    @cuml.internals.api_base_return_any_skipall
    def n_classes_(self):
        # Delegated to the fitted sklearn multiclass wrapper.
        return self.multiclass_estimator.n_classes_

    @generate_docstring(y="dense_anydtype")
    def fit(self, X, y) -> "MulticlassClassifier":
        """
        Fit a multiclass classifier.
        """
        if not has_sklearn():
            raise ImportError(
                "Scikit-learn is needed to use "
                "MulticlassClassifier derived classes."
            )
        import sklearn.multiclass

        # Pick the sklearn wrapper matching the requested strategy.
        if self.strategy == "ovr":
            wrapper_cls = sklearn.multiclass.OneVsRestClassifier
        elif self.strategy == "ovo":
            wrapper_cls = sklearn.multiclass.OneVsOneClassifier
        else:
            raise ValueError(
                "Invalid multiclass strategy "
                + str(self.strategy)
                + ", must be one of "
                '{"ovr", "ovo"}'
            )
        self.multiclass_estimator = wrapper_cls(self.estimator, n_jobs=None)

        # sklearn operates on host arrays; convert once up front.
        host_X = input_to_host_array_with_sparse_support(X)
        host_y = input_to_host_array(y).array
        with cuml.internals.exit_internal_api():
            self.multiclass_estimator.fit(host_X, host_y)
        return self

    @generate_docstring(
        return_values={
            "name": "preds",
            "type": "dense",
            "description": "Predicted values",
            "shape": "(n_samples, 1)",
        }
    )
    def predict(self, X) -> CumlArray:
        """
        Predict using multi class classifier.
        """
        host_X = input_to_host_array_with_sparse_support(X)
        with cuml.internals.exit_internal_api():
            return self.multiclass_estimator.predict(host_X)

    @generate_docstring(
        return_values={
            "name": "results",
            "type": "dense",
            "description": "Decision function \
                            values",
            "shape": "(n_samples, 1)",
        }
    )
    def decision_function(self, X) -> CumlArray:
        """
        Calculate the decision function.
        """
        host_X = input_to_host_array_with_sparse_support(X)
        with cuml.internals.exit_internal_api():
            return self.multiclass_estimator.decision_function(host_X)

    def get_param_names(self):
        return [*super().get_param_names(), "estimator", "strategy"]
class OneVsRestClassifier(MulticlassClassifier):
    """
    Wrapper around scikit-learn's class with the same name. The input can be
    any kind of cuML compatible array, and the output type follows cuML's
    output type configuration rules.

    Before the data is passed to scikit-learn, it is converted to a host
    (numpy) array. Under the hood the data is partitioned for binary
    classification, and it is transformed back to the device by the cuML
    estimator. These copies back and forth between the device and the host
    have some overhead. For more details see issue
    https://github.com/rapidsai/cuml/issues/2876.

    For documentation see `scikit-learn's OneVsRestClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html>`_.

    Examples
    --------
    .. code-block:: python

        >>> from cuml.linear_model import LogisticRegression
        >>> from cuml.multiclass import OneVsRestClassifier
        >>> from cuml.datasets.classification import make_classification

        >>> X, y = make_classification(n_samples=10, n_features=6,
        ...                            n_informative=4, n_classes=3,
        ...                            random_state=137)

        >>> cls = OneVsRestClassifier(LogisticRegression())
        >>> cls.fit(X,y)
        OneVsRestClassifier()
        >>> cls.predict(X)
        array([2, 0, 2, 2, 2, 1, 1, 0, 1, 1])

    Parameters
    ----------
    estimator : cuML estimator
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    """

    @_deprecate_pos_args(version="21.06")
    def __init__(
        self, estimator, *args, handle=None, verbose=False, output_type=None
    ):
        # Fix the parent's strategy to one-vs-rest.
        super().__init__(
            estimator,
            *args,
            handle=handle,
            verbose=verbose,
            output_type=output_type,
            strategy="ovr",
        )

    def get_param_names(self):
        # "strategy" is hard-wired to 'ovr' here, so it is not tunable.
        return [
            name
            for name in super().get_param_names()
            if name != "strategy"
        ]
class OneVsOneClassifier(MulticlassClassifier):
    """
    Wrapper around scikit-learn's class with the same name. The input can be
    any kind of cuML compatible array, and the output type follows cuML's
    output type configuration rules.

    Before the data is passed to scikit-learn, it is converted to a host
    (numpy) array. Under the hood the data is partitioned for binary
    classification, and it is transformed back to the device by the cuML
    estimator. These copies back and forth between the device and the host
    have some overhead. For more details see issue
    https://github.com/rapidsai/cuml/issues/2876.

    For documentation see `scikit-learn's OneVsOneClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsOneClassifier.html>`_.

    Examples
    --------
    .. code-block:: python

        >>> from cuml.linear_model import LogisticRegression
        >>> from cuml.multiclass import OneVsOneClassifier
        >>> from cuml.datasets.classification import make_classification

        >>> X, y = make_classification(n_samples=10, n_features=6,
        ...                            n_informative=4, n_classes=3,
        ...                            random_state=137)

        >>> cls = OneVsOneClassifier(LogisticRegression())
        >>> cls.fit(X,y)
        OneVsOneClassifier()
        >>> cls.predict(X)
        array([2, 0, 2, 2, 2, 1, 1, 0, 1, 1])

    Parameters
    ----------
    estimator : cuML estimator
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    """

    @_deprecate_pos_args(version="21.06")
    def __init__(
        self, estimator, *args, handle=None, verbose=False, output_type=None
    ):
        # Fix the parent's strategy to one-vs-one.
        super().__init__(
            estimator,
            *args,
            handle=handle,
            verbose=verbose,
            output_type=output_type,
            strategy="ovo",
        )

    def get_param_names(self):
        # "strategy" is hard-wired to 'ovo' here, so it is not tunable.
        return [
            name
            for name in super().get_param_names()
            if name != "strategy"
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/multiclass/__init__.py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.multiclass.multiclass import OneVsOneClassifier
from cuml.multiclass.multiclass import OneVsRestClassifier
from cuml.multiclass.multiclass import MulticlassClassifier
__all__ = [
"OneVsOneClassifier",
"OneVsRestClassifier",
"MulticlassClassifier",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_lars.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sklearn
from sklearn.linear_model import Lars as skLars
from sklearn.datasets import fetch_california_housing
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml.experimental.linear_model import Lars as cuLars
import sys
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# As tests directory is not a module, we need to add it to the path
sys.path.insert(0, ".")
from test_linear_model import make_regression_dataset # noqa: E402
def normalize_data(X, y):
    """Center ``y`` and standardize the columns of ``X``.

    Each column of X is shifted by its mean and divided by
    ``sqrt(n_samples * var)``; constant columns get a scale of 1 to avoid
    division by zero. Returns ``(X, y, x_mean, x_scale, y_mean)`` so that
    the same transformation can be replayed on test data.
    """
    y_mean = np.mean(y)
    centered_y = y - y_mean

    x_mean = np.mean(X, axis=0)
    x_scale = np.sqrt(np.var(X, axis=0) * X.shape[0])
    # Constant columns would otherwise trigger a division by zero.
    x_scale[x_scale == 0] = 1
    standardized_X = (X - x_mean) / x_scale

    return standardized_X, centered_y, x_mean, x_scale, y_mean
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(90000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([1, 1]),
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("precompute", [True, False, "precompute"])
def test_lars_model(datatype, nrows, column_info, precompute, normalize):
    # Compare cuML's LARS accuracy against scikit-learn's on a synthetic
    # regression problem. `precompute` may be True/False or the string
    # "precompute", meaning a user-supplied Gram matrix X^T X.
    ncols, n_info = column_info
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info
    )
    if precompute == "precompute" or not normalize:
        # Apply normalization manually, because the solver expects normalized
        # input data
        X_train, y_train, x_mean, x_scale, y_mean = normalize_data(
            X_train, y_train
        )
        # Apply the training-set statistics to the test split as well.
        y_test = y_test - y_mean
        X_test = (X_test - x_mean) / x_scale
    if precompute == "precompute":
        # Replace the sentinel string with an explicit Gram matrix.
        precompute = np.dot(X_train.T, X_train)
    params = {"precompute": precompute, "normalize": normalize}
    # Initialization of cuML's LARS
    culars = cuLars(**params)
    # fit and predict cuml LARS
    culars.fit(X_train, y_train)
    cu_score_train = culars.score(X_train, y_train)
    cu_score_test = culars.score(X_test, y_test)
    if nrows < 500000:
        # sklearn model initialization, fit and predict
        sklars = skLars(**params)
        sklars.fit(X_train, y_train)
        # Set tolerance to include the 95% confidence interval around
        # scikit-learn accuracy.
        accuracy_target = sklars.score(X_test, y_test)
        tol = 1.96 * np.sqrt(accuracy_target * (1.0 - accuracy_target) / 100.0)
        if tol < 0.001:
            tol = 0.001  # We allow at least 0.1% tolerance
        print(cu_score_train, cu_score_test, accuracy_target, tol)
        # cuML must be at least as accurate as sklearn minus the tolerance.
        assert cu_score_train >= sklars.score(X_train, y_train) - tol
        assert cu_score_test >= accuracy_target - tol
    else:
        # For very large inputs skip the (slow) sklearn reference fit and
        # just require a high absolute score.
        assert cu_score_test > 0.95
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize("precompute", [True, False])
def test_lars_collinear(datatype, nrows, column_info, precompute):
    """LARS should remain accurate when the design matrix contains
    exactly duplicated (perfectly collinear) columns."""
    ncols, n_info = column_info
    # The largest stress configuration needs a >=32 GB GPU; shrink it when
    # adaptation is enabled, otherwise skip.
    if nrows == 500000 and ncols == 1000 and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            nrows = nrows * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )

    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info
    )
    # Duplicate up to 100 leading columns to introduce exact collinearity.
    dup = min(ncols, 100)
    X_train = np.concatenate((X_train, X_train[:, :dup]), axis=1)
    X_test = np.concatenate((X_test, X_test[:, :dup]), axis=1)

    model = cuLars(precompute=precompute, n_nonzero_coefs=ncols + dup)
    model.fit(X_train, y_train)

    assert model.score(X_train, y_train) > 0.85
    assert model.score(X_test, y_test) > 0.85
@pytest.mark.skipif(
    sklearn.__version__ >= "1.0",
    reason="discrepancies on coefficients with sklearn 1.2",
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "params",
    [
        {"precompute": True},
        {"precompute": False},
        {"n_nonzero_coefs": 5},
        {"n_nonzero_coefs": 2},
        {"n_nonzero_coefs": 2, "fit_intercept": False},
    ],
)
def test_lars_attributes(datatype, params):
    """Compare the fitted attributes (alphas_, active_, coef_, intercept_,
    n_iter_) of cuML's LARS against scikit-learn's on a real dataset."""
    X, y = fetch_california_housing(return_X_y=True)
    X = X.astype(datatype)
    y = y.astype(datatype)

    culars = cuLars(**params)
    culars.fit(X, y)

    sklars = skLars(**params)
    sklars.fit(X, y)

    assert culars.score(X, y) >= sklars.score(X, y) - 0.01

    # When the iteration count is capped via n_nonzero_coefs both solvers
    # must stop at exactly the same step; otherwise allow a small slack.
    limit_max_iter = "n_nonzero_coefs" in params
    if limit_max_iter:
        n_iter_tol = 0
    else:
        n_iter_tol = 2
    assert abs(culars.n_iter_ - sklars.n_iter_) <= n_iter_tol

    # BUG FIX: use .get() instead of .pop(). The params dicts in the
    # parametrize list are shared between the datatype parametrizations, so
    # popping "fit_intercept" mutated the fixture and silently changed the
    # configuration tested by later runs.
    tol = 1e-4 if params.get("fit_intercept", True) else 1e-1
    n = min(culars.n_iter_, sklars.n_iter_)
    assert array_equal(
        culars.alphas_[:n], sklars.alphas_[:n], unit_tol=tol, total_tol=1e-4
    )
    assert array_equal(culars.active_[:n], sklars.active_[:n])

    if limit_max_iter:
        assert array_equal(culars.coef_, sklars.coef_)
        if hasattr(sklars, "coef_path_"):
            assert array_equal(
                culars.coef_path_,
                sklars.coef_path_[sklars.active_],
                unit_tol=1e-3,
            )

    intercept_diff = abs(culars.intercept_ - sklars.intercept_)
    if abs(sklars.intercept_) > 1e-6:
        # BUG FIX: normalize by |intercept| -- dividing by a negative
        # intercept made the assertion below trivially true.
        intercept_diff /= abs(sklars.intercept_)
    assert intercept_diff <= 1e-3
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
def test_lars_copy_X(datatype):
    """With copy_X=True, fit() must leave the input matrix untouched."""
    X, y = fetch_california_housing(return_X_y=True)
    X = cp.asarray(X, dtype=datatype, order="F")
    y = cp.asarray(y, dtype=datatype, order="F")
    X_snapshot = cp.copy(X)

    model = cuLars(precompute=False, copy_X=True)
    model.fit(X, y)

    # fit() must not have mutated X.
    assert cp.all(X_snapshot == X)

    # TODO: preprocessing currently copies X even with copy_X=False; once it
    # preprocesses in place to save memory, also assert that copy_X=False
    # mutates X (i.e. no unnecessary copies are made) and that both settings
    # produce the same score.
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_sparsefuncs.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from cuml.common.sparsefuncs import csr_row_normalize_l1
from cuml.common.sparsefuncs import csr_row_normalize_l2
from sklearn.utils.sparsefuncs_fast import inplace_csr_row_normalize_l1
from sklearn.utils.sparsefuncs_fast import inplace_csr_row_normalize_l2
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
sp = cpu_only_import("scipy.sparse")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@pytest.mark.parametrize(
    "norm, ref_norm",
    [
        (csr_row_normalize_l1, inplace_csr_row_normalize_l1),
        (csr_row_normalize_l2, inplace_csr_row_normalize_l2),
    ],
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("seed, shape", [(10, (10, 5)), (123, (500, 12))])
def test_csr_norms(norm, ref_norm, dtype, seed, shape):
    """GPU CSR row normalization matches sklearn's in-place reference."""
    dense = np.random.RandomState(seed).randn(*shape).astype(dtype)
    host_csr = sp.csr_matrix(dense)
    device_csr = cupyx.scipy.sparse.csr_matrix(host_csr)

    norm(device_csr)
    ref_norm(host_csr)

    # Both implementations work in place: the data must actually change.
    assert cp.any(cp.not_equal(device_csr.todense(), cp.array(dense)))
    cp.testing.assert_array_almost_equal(
        device_csr.todense(), host_csr.todense()
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_preprocessing.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
import sklearn
from cuml.preprocessing import (
Binarizer as cuBinarizer,
FunctionTransformer as cuFunctionTransformer,
KBinsDiscretizer as cuKBinsDiscretizer,
KernelCenterer as cuKernelCenterer,
MaxAbsScaler as cuMaxAbsScaler,
MinMaxScaler as cuMinMaxScaler,
MissingIndicator as cuMissingIndicator,
Normalizer as cuNormalizer,
PolynomialFeatures as cuPolynomialFeatures,
PowerTransformer as cuPowerTransformer,
QuantileTransformer as cuQuantileTransformer,
RobustScaler as cuRobustScaler,
SimpleImputer as cuSimpleImputer,
StandardScaler as cuStandardScaler,
)
from cuml.preprocessing import (
add_dummy_feature as cu_add_dummy_feature,
binarize as cu_binarize,
maxabs_scale as cu_maxabs_scale,
minmax_scale as cu_minmax_scale,
normalize as cu_normalize,
power_transform as cu_power_transform,
quantile_transform as cu_quantile_transform,
robust_scale as cu_robust_scale,
scale as cu_scale,
)
from sklearn.preprocessing import (
Binarizer as skBinarizer,
FunctionTransformer as skFunctionTransformer,
KBinsDiscretizer as skKBinsDiscretizer,
KernelCenterer as skKernelCenterer,
MaxAbsScaler as skMaxAbsScaler,
MinMaxScaler as skMinMaxScaler,
Normalizer as skNormalizer,
PolynomialFeatures as skPolynomialFeatures,
PowerTransformer as skPowerTransformer,
QuantileTransformer as skQuantileTransformer,
RobustScaler as skRobustScaler,
StandardScaler as skStandardScaler,
)
from sklearn.preprocessing import (
add_dummy_feature as sk_add_dummy_feature,
binarize as sk_binarize,
maxabs_scale as sk_maxabs_scale,
minmax_scale as sk_minmax_scale,
normalize as sk_normalize,
power_transform as sk_power_transform,
quantile_transform as sk_quantile_transform,
robust_scale as sk_robust_scale,
scale as sk_scale,
)
from sklearn.impute import (
MissingIndicator as skMissingIndicator,
SimpleImputer as skSimpleImputer,
)
from cuml.testing.test_preproc_utils import (
clf_dataset,
int_dataset,
blobs_dataset,
nan_filled_positive,
sparse_nan_filled_positive,
sparse_clf_dataset,
sparse_blobs_dataset,
sparse_int_dataset,
sparse_imputer_dataset,
sparse_dataset_with_coo,
) # noqa: F401
from cuml.testing.test_preproc_utils import assert_allclose
from cuml.metrics import pairwise_kernels
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cpx = gpu_only_import("cupyx")
scipy = cpu_only_import("scipy")
@pytest.mark.parametrize("feature_range", [(0, 1), (0.1, 0.8)])
def test_minmax_scaler(
    failure_logger, clf_dataset, feature_range  # noqa: F811
):
    """cuML MinMaxScaler matches sklearn on transform and inverse."""
    X_np, X = clf_dataset

    cu_scaler = cuMinMaxScaler(feature_range=feature_range, copy=True)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    assert type(out) == type(X)
    assert type(back) == type(out)

    sk_scaler = skMinMaxScaler(feature_range=feature_range, copy=True)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("feature_range", [(0, 1), (0.1, 0.8)])
def test_minmax_scale(
    failure_logger, clf_dataset, axis, feature_range  # noqa: F811
):
    """cu_minmax_scale matches sk_minmax_scale along both axes."""
    X_np, X = clf_dataset

    out = cu_minmax_scale(X, feature_range=feature_range, axis=axis)
    assert type(out) == type(X)

    expected = sk_minmax_scale(X_np, feature_range=feature_range, axis=axis)
    assert_allclose(out, expected)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler(
    failure_logger, clf_dataset, with_mean, with_std  # noqa: F811
):
    """cuML StandardScaler matches sklearn on transform and inverse."""
    X_np, X = clf_dataset

    cu_scaler = cuStandardScaler(
        with_mean=with_mean, with_std=with_std, copy=True
    )
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    assert type(out) == type(X)
    assert type(back) == type(out)

    sk_scaler = skStandardScaler(
        with_mean=with_mean, with_std=with_std, copy=True
    )
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("with_std", [True, False])
def test_standard_scaler_sparse(
    failure_logger, sparse_clf_dataset, with_std  # noqa: F811
):
    """cuML StandardScaler (no centering) matches sklearn on sparse input."""
    X_np, X = sparse_clf_dataset

    def _assert_sparse_kind(src, dst):
        # The result should stay in the same sparse universe (GPU/CPU)
        # as its source; exact container type parity is not guaranteed.
        if cpx.scipy.sparse.issparse(src):
            assert cpx.scipy.sparse.issparse(dst)
        if scipy.sparse.issparse(src):
            assert scipy.sparse.issparse(dst)

    cu_scaler = cuStandardScaler(with_mean=False, with_std=with_std, copy=True)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    _assert_sparse_kind(X, out)
    _assert_sparse_kind(out, back)

    sk_scaler = skStandardScaler(copy=True, with_mean=False, with_std=with_std)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
# The numerical warning is triggered when centering or scaling
# cannot be done as single steps. Its display can be safely disabled.
# For more information see : https://github.com/rapidsai/cuml/issues/4203
@pytest.mark.filterwarnings("ignore:Numerical issues::")
def test_scale(
    failure_logger, clf_dataset, axis, with_mean, with_std  # noqa: F811
):
    """cu_scale matches sk_scale for every axis/centering/scaling combo."""
    X_np, X = clf_dataset

    out = cu_scale(
        X, axis=axis, with_mean=with_mean, with_std=with_std, copy=True
    )
    assert type(out) == type(X)

    expected = sk_scale(
        X_np, axis=axis, with_mean=with_mean, with_std=with_std, copy=True
    )
    assert_allclose(out, expected)
@pytest.mark.parametrize("with_std", [True, False])
def test_scale_sparse(
    failure_logger, sparse_clf_dataset, with_std  # noqa: F811
):
    """cu_scale without centering matches sk_scale on sparse input."""
    X_np, X = sparse_clf_dataset

    out = cu_scale(X, with_mean=False, with_std=with_std, copy=True)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = sk_scale(X_np, with_mean=False, with_std=with_std, copy=True)
    assert_allclose(out, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_maxabs_scale(failure_logger, clf_dataset, axis):  # noqa: F811
    """cu_maxabs_scale matches sk_maxabs_scale along both axes."""
    X_np, X = clf_dataset

    out = cu_maxabs_scale(X, axis=axis)
    assert type(out) == type(X)

    expected = sk_maxabs_scale(X_np, axis=axis)
    assert_allclose(out, expected)
def test_maxabs_scaler(failure_logger, clf_dataset):  # noqa: F811
    """cuML MaxAbsScaler matches sklearn on transform and inverse."""
    X_np, X = clf_dataset

    cu_scaler = cuMaxAbsScaler(copy=True)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    assert type(out) == type(X)
    assert type(back) == type(out)

    sk_scaler = skMaxAbsScaler(copy=True)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
def test_maxabs_scaler_sparse(
    failure_logger, sparse_clf_dataset
):  # noqa: F811
    """cuML MaxAbsScaler matches sklearn on sparse input, incl. inverse."""
    X_np, X = sparse_clf_dataset

    def _assert_sparse_kind(src, dst):
        # The result should stay in the same sparse universe (GPU/CPU)
        # as its source; exact container type parity is not guaranteed.
        if cpx.scipy.sparse.issparse(src):
            assert cpx.scipy.sparse.issparse(dst)
        if scipy.sparse.issparse(src):
            assert scipy.sparse.issparse(dst)

    cu_scaler = cuMaxAbsScaler(copy=True)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    _assert_sparse_kind(X, out)
    _assert_sparse_kind(out, back)

    sk_scaler = skMaxAbsScaler(copy=True)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
def test_normalizer(failure_logger, clf_dataset, norm):  # noqa: F811
    """cuML Normalizer matches sklearn for l1/l2/max norms."""
    X_np, X = clf_dataset

    out = cuNormalizer(norm=norm, copy=True).fit_transform(X)
    assert type(out) == type(X)

    expected = skNormalizer(norm=norm, copy=True).fit_transform(X_np)
    assert_allclose(out, expected)
@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
def test_normalizer_sparse(
    failure_logger, sparse_clf_dataset, norm  # noqa: F811
):
    """cuML Normalizer matches sklearn on sparse (non-CSC) input."""
    X_np, X = sparse_clf_dataset
    if X.format == "csc":
        pytest.skip("Skipping CSC matrices")

    out = cuNormalizer(norm=norm, copy=True).fit_transform(X)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = skNormalizer(norm=norm, copy=True).fit_transform(X_np)
    assert_allclose(out, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
@pytest.mark.parametrize("return_norm", [True, False])
def test_normalize(
    failure_logger, clf_dataset, axis, norm, return_norm  # noqa: F811
):
    """cu_normalize matches sk_normalize, with and without returned norms."""
    X_np, X = clf_dataset

    if return_norm:
        out, norms = cu_normalize(X, axis=axis, norm=norm, return_norm=True)
        sk_out, sk_norms = sk_normalize(
            X_np, axis=axis, norm=norm, return_norm=True
        )
        assert_allclose(norms, sk_norms)
    else:
        out = cu_normalize(X, axis=axis, norm=norm, return_norm=False)
        sk_out = sk_normalize(X_np, axis=axis, norm=norm, return_norm=False)

    assert type(out) == type(X)
    assert_allclose(out, sk_out)
@pytest.mark.parametrize("norm", ["l1", "l2", "max"])
def test_normalize_sparse(
    failure_logger, sparse_clf_dataset, norm  # noqa: F811
):
    """cu_normalize matches sk_normalize on sparse input.

    CSC input is normalized along axis 0, everything else along axis 1.
    """
    X_np, X = sparse_clf_dataset
    axis = 0 if X.format == "csc" else 1

    out = cu_normalize(X, axis=axis, norm=norm)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = sk_normalize(X_np, axis=axis, norm=norm)
    assert_allclose(out, expected)
@pytest.mark.parametrize(
    "strategy", ["mean", "median", "most_frequent", "constant"]
)
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("add_indicator", [False, True])
def test_imputer(
    failure_logger,
    random_seed,
    int_dataset,  # noqa: F811
    strategy,
    missing_values,
    add_indicator,
):
    """cuML SimpleImputer matches sklearn for every strategy/sentinel combo."""
    zero_filled, one_filled, nan_filled = int_dataset
    # Pick the dataset whose "missing" sentinel matches the parameter.
    if missing_values == 0:
        X_np, X = zero_filled
    elif missing_values == 1:
        X_np, X = one_filled
    else:
        X_np, X = nan_filled

    np.random.seed(random_seed)
    fill_value = np.random.randint(10, size=1)[0]
    kwargs = dict(
        copy=True,
        missing_values=missing_values,
        strategy=strategy,
        fill_value=fill_value,
        add_indicator=add_indicator,
    )

    out = cuSimpleImputer(**kwargs).fit_transform(X)
    assert type(out) == type(X)

    expected = skSimpleImputer(**kwargs).fit_transform(X_np)
    assert_allclose(out, expected)
@pytest.mark.parametrize(
    "strategy", ["mean", "median", "most_frequent", "constant"]
)
def test_imputer_sparse(sparse_imputer_dataset, strategy):  # noqa: F811
    """cuML SimpleImputer matches sklearn on sparse (non-CSR) input."""
    missing_values, X_sp, X = sparse_imputer_dataset
    if X.format == "csr":
        pytest.skip("Skipping CSR matrices")

    fill_value = np.random.randint(10, size=1)[0]
    kwargs = dict(
        copy=True,
        missing_values=missing_values,
        strategy=strategy,
        fill_value=fill_value,
    )

    out = cuSimpleImputer(**kwargs).fit_transform(X)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = skSimpleImputer(**kwargs).fit_transform(X_sp)
    assert_allclose(out, expected)
def _sklearn_version_leq_1_0():
    """True when sklearn's (major, minor) version is <= (1, 0).

    The previous check compared version strings lexicographically
    (``sklearn.__version__ <= "1.0"``), which orders e.g. "1.10" before
    "1.2" incorrectly; parse the leading numeric components instead.
    Non-numeric suffixes ("dev0", "rc1") are ignored.
    """
    parts = []
    for chunk in sklearn.__version__.split(".")[:2]:
        digits = ""
        for ch in chunk:
            if not ch.isdigit():
                break
            digits += ch
        parts.append(int(digits or 0))
    return tuple(parts) <= (1, 0)


@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
@pytest.mark.parametrize("order", ["C", "F"])
def test_poly_features(
    failure_logger,
    clf_dataset,
    degree,  # noqa: F811
    interaction_only,
    include_bias,
    order,
):
    """cuML PolynomialFeatures matches sklearn (values, layout, names).

    ``get_feature_names`` is only compared on sklearn <= 1.0, where the
    legacy method still exists.
    """
    X_np, X = clf_dataset

    polyfeatures = cuPolynomialFeatures(
        degree=degree,
        order=order,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    t_X = polyfeatures.fit_transform(X)
    assert type(X) == type(t_X)
    cu_feature_names = polyfeatures.get_feature_names()

    # Requested memory layout must be honored for dense numpy output.
    if isinstance(t_X, np.ndarray):
        if order == "C":
            assert t_X.flags["C_CONTIGUOUS"]
        elif order == "F":
            assert t_X.flags["F_CONTIGUOUS"]

    polyfeatures = skPolynomialFeatures(
        degree=degree,
        order=order,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )
    sk_t_X = polyfeatures.fit_transform(X_np)
    if _sklearn_version_leq_1_0():
        sk_feature_names = polyfeatures.get_feature_names()

    assert_allclose(t_X, sk_t_X, rtol=0.1, atol=0.1)
    if _sklearn_version_leq_1_0():
        assert sk_feature_names == cu_feature_names
@pytest.mark.parametrize("degree", [2, 3])
@pytest.mark.parametrize("interaction_only", [True, False])
@pytest.mark.parametrize("include_bias", [True, False])
def test_poly_features_sparse(
    failure_logger,
    sparse_clf_dataset,  # noqa: F811
    degree,
    interaction_only,
    include_bias,
):
    """cuML PolynomialFeatures matches sklearn on sparse input."""
    X_np, X = sparse_clf_dataset
    kwargs = dict(
        degree=degree,
        interaction_only=interaction_only,
        include_bias=include_bias,
    )

    out = cuPolynomialFeatures(**kwargs).fit_transform(X)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = skPolynomialFeatures(**kwargs).fit_transform(X_np)
    assert_allclose(out, expected, rtol=0.1, atol=0.1)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature(failure_logger, clf_dataset, value):  # noqa: F811
    """cu_add_dummy_feature matches sk_add_dummy_feature on dense input."""
    X_np, X = clf_dataset

    out = cu_add_dummy_feature(X, value=value)
    assert type(out) == type(X)

    expected = sk_add_dummy_feature(X_np, value=value)
    assert_allclose(out, expected)
@pytest.mark.parametrize("value", [1.0, 42])
def test_add_dummy_feature_sparse(
    failure_logger, sparse_dataset_with_coo, value  # noqa: F811
):
    """cu_add_dummy_feature matches sklearn on sparse (incl. COO) input."""
    X_np, X = sparse_dataset_with_coo

    out = cu_add_dummy_feature(X, value=value)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = sk_add_dummy_feature(X_np, value=value)
    assert_allclose(out, expected)
@pytest.mark.parametrize("threshold", [0.0, 1.0])
def test_binarize(failure_logger, clf_dataset, threshold):  # noqa: F811
    """cu_binarize matches sk_binarize for both thresholds."""
    X_np, X = clf_dataset

    out = cu_binarize(X, threshold=threshold, copy=True)
    assert type(out) == type(X)

    expected = sk_binarize(X_np, threshold=threshold, copy=True)
    assert_allclose(out, expected)
@pytest.mark.parametrize("threshold", [0.0, 1.0])
def test_binarize_sparse(
    failure_logger, sparse_clf_dataset, threshold  # noqa: F811
):
    """cu_binarize matches sk_binarize on sparse input."""
    X_np, X = sparse_clf_dataset

    out = cu_binarize(X, threshold=threshold, copy=True)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = sk_binarize(X_np, threshold=threshold, copy=True)
    assert_allclose(out, expected)
@pytest.mark.parametrize("threshold", [0.0, 1.0])
def test_binarizer(failure_logger, clf_dataset, threshold):  # noqa: F811
    """cuML Binarizer matches sklearn for both thresholds."""
    X_np, X = clf_dataset

    out = cuBinarizer(threshold=threshold, copy=True).fit_transform(X)
    assert type(out) == type(X)

    expected = skBinarizer(threshold=threshold, copy=True).fit_transform(X_np)
    assert_allclose(out, expected)
@pytest.mark.parametrize("threshold", [0.0, 1.0])
def test_binarizer_sparse(
    failure_logger, sparse_clf_dataset, threshold  # noqa: F811
):
    """cuML Binarizer matches sklearn on sparse input."""
    X_np, X = sparse_clf_dataset

    out = cuBinarizer(threshold=threshold, copy=True).fit_transform(X)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = skBinarizer(threshold=threshold, copy=True).fit_transform(X_np)
    assert_allclose(out, expected)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25.0, 75.0), (10.0, 90.0)])
def test_robust_scaler(
    failure_logger,
    clf_dataset,  # noqa: F811
    with_centering,
    with_scaling,
    quantile_range,
):
    """cuML RobustScaler matches sklearn on transform and inverse."""
    X_np, X = clf_dataset
    params = dict(
        with_centering=with_centering,
        with_scaling=with_scaling,
        quantile_range=quantile_range,
        copy=True,
    )

    cu_scaler = cuRobustScaler(**params)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    assert type(out) == type(X)
    assert type(back) == type(out)

    sk_scaler = skRobustScaler(**params)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25.0, 75.0), (10.0, 90.0)])
def test_robust_scaler_sparse(
    failure_logger,
    sparse_clf_dataset,  # noqa: F811
    with_scaling,
    quantile_range,
):
    """cuML RobustScaler (no centering) matches sklearn on CSC input."""
    X_np, X = sparse_clf_dataset
    if X.format != "csc":
        X = X.tocsc()

    def _assert_sparse_kind(src, dst):
        # The result should stay in the same sparse universe (GPU/CPU)
        # as its source; exact container type parity is not guaranteed.
        if cpx.scipy.sparse.issparse(src):
            assert cpx.scipy.sparse.issparse(dst)
        if scipy.sparse.issparse(src):
            assert scipy.sparse.issparse(dst)

    params = dict(
        with_centering=False,
        with_scaling=with_scaling,
        quantile_range=quantile_range,
        copy=True,
    )

    cu_scaler = cuRobustScaler(**params)
    out = cu_scaler.fit_transform(X)
    back = cu_scaler.inverse_transform(out)
    _assert_sparse_kind(X, out)
    _assert_sparse_kind(out, back)

    sk_scaler = skRobustScaler(**params)
    sk_out = sk_scaler.fit_transform(X_np)
    sk_back = sk_scaler.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25.0, 75.0), (10.0, 90.0)])
def test_robust_scale(
    failure_logger,
    clf_dataset,  # noqa: F811
    with_centering,
    axis,
    with_scaling,
    quantile_range,
):
    """cu_robust_scale matches sk_robust_scale for every combination."""
    X_np, X = clf_dataset
    params = dict(
        axis=axis,
        with_centering=with_centering,
        with_scaling=with_scaling,
        quantile_range=quantile_range,
        copy=True,
    )

    out = cu_robust_scale(X, **params)
    assert type(out) == type(X)

    expected = sk_robust_scale(X_np, **params)
    assert_allclose(out, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("quantile_range", [(25.0, 75.0), (10.0, 90.0)])
def test_robust_scale_sparse(
    failure_logger,
    sparse_clf_dataset,  # noqa: F811
    axis,
    with_scaling,
    quantile_range,
):
    """cu_robust_scale (no centering) matches sklearn on sparse input.

    Axis 0 requires CSC layout, axis 1 requires CSR, so the input is
    converted accordingly before scaling.
    """
    X_np, X = sparse_clf_dataset
    if X.format != "csc" and axis == 0:
        X = X.tocsc()
    elif X.format != "csr" and axis == 1:
        X = X.tocsr()

    params = dict(
        axis=axis,
        with_centering=False,
        with_scaling=with_scaling,
        quantile_range=quantile_range,
        copy=True,
    )

    out = cu_robust_scale(X, **params)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    expected = sk_robust_scale(X_np, **params)
    assert_allclose(out, expected)
@pytest.mark.parametrize("n_bins", [5, 20])
@pytest.mark.parametrize("encode", ["ordinal", "onehot-dense", "onehot"])
@pytest.mark.parametrize(
    "strategy",
    [
        pytest.param(
            "uniform",
            marks=pytest.mark.xfail(
                strict=False,
                reason="Intermittent mismatch with sklearn"
                " (https://github.com/rapidsai/cuml/issues/3481)",
            ),
        ),
        pytest.param(
            "quantile",
            marks=pytest.mark.xfail(
                strict=False,
                reason="Intermittent mismatch with sklearn"
                " (https://github.com/rapidsai/cuml/issues/2933)",
            ),
        ),
        "kmeans",
    ],
)
def test_kbinsdiscretizer(
    failure_logger, blobs_dataset, n_bins, encode, strategy  # noqa: F811
):
    """cuML KBinsDiscretizer matches sklearn across encodings/strategies."""
    X_np, X = blobs_dataset

    cu_disc = cuKBinsDiscretizer(
        n_bins=n_bins, encode=encode, strategy=strategy
    )
    out = cu_disc.fit_transform(X)
    back = cu_disc.inverse_transform(out)
    if encode != "onehot":
        # "onehot" presumably yields a sparse container, so type parity
        # with the dense input is not checked there — TODO confirm.
        assert type(out) == type(X)
        assert type(back) == type(out)

    sk_disc = skKBinsDiscretizer(
        n_bins=n_bins, encode=encode, strategy=strategy
    )
    sk_out = sk_disc.fit_transform(X_np)
    sk_back = sk_disc.inverse_transform(sk_out)

    if strategy == "kmeans":
        # k-means bin edges can legitimately differ between
        # implementations; tolerate a fraction of mismatching entries.
        assert_allclose(out, sk_out, ratio_tol=0.2)
    else:
        assert_allclose(out, sk_out)
        assert_allclose(back, sk_back)
@pytest.mark.parametrize("missing_values", [0, 1, np.nan])
@pytest.mark.parametrize("features", ["missing-only", "all"])
def test_missing_indicator(
    failure_logger, int_dataset, missing_values, features  # noqa: F811
):
    """cuML MissingIndicator matches sklearn via fit_transform and transform."""
    zero_filled, one_filled, nan_filled = int_dataset
    # Pick the dataset whose "missing" sentinel matches the parameter.
    if missing_values == 0:
        X_np, X = zero_filled
    elif missing_values == 1:
        X_np, X = one_filled
    else:
        X_np, X = nan_filled

    cu_ind = cuMissingIndicator(
        missing_values=missing_values, features=features
    )
    fit_out = cu_ind.fit_transform(X)
    assert type(fit_out) == type(X)
    cu_ind.fit(X)
    out = cu_ind.transform(X)
    assert type(out) == type(X)

    sk_ind = skMissingIndicator(
        missing_values=missing_values, features=features
    )
    sk_fit_out = sk_ind.fit_transform(X_np)
    sk_ind.fit(X_np)
    sk_out = sk_ind.transform(X_np)

    assert_allclose(fit_out, sk_fit_out)
    assert_allclose(out, sk_out)
@pytest.mark.parametrize("features", ["missing-only", "all"])
def test_missing_indicator_sparse(
    failure_logger, sparse_int_dataset, features  # noqa: F811
):
    """cuML MissingIndicator matches sklearn on sparse input (sentinel 1)."""
    X_np, X = sparse_int_dataset

    cu_ind = cuMissingIndicator(features=features, missing_values=1)
    fit_out = cu_ind.fit_transform(X)
    # The result must be sparse (GPU or CPU).
    assert cpx.scipy.sparse.issparse(fit_out) or scipy.sparse.issparse(fit_out)
    cu_ind.fit(X)
    out = cu_ind.transform(X)
    assert cpx.scipy.sparse.issparse(out) or scipy.sparse.issparse(out)

    sk_ind = skMissingIndicator(features=features, missing_values=1)
    sk_fit_out = sk_ind.fit_transform(X_np)
    sk_ind.fit(X_np)
    sk_out = sk_ind.transform(X_np)

    assert_allclose(fit_out, sk_fit_out)
    assert_allclose(out, sk_out)
def test_function_transformer(clf_dataset):  # noqa: F811
    """cuML FunctionTransformer(exp/log) matches sklearn's round trip."""
    X_np, X = clf_dataset

    cu_ft = cuFunctionTransformer(
        func=cp.exp, inverse_func=cp.log, check_inverse=False
    )
    out = cu_ft.fit_transform(X)
    back = cu_ft.inverse_transform(out)
    assert type(out) == type(X)
    assert type(back) == type(out)

    sk_ft = skFunctionTransformer(
        func=np.exp, inverse_func=np.log, check_inverse=False
    )
    sk_out = sk_ft.fit_transform(X_np)
    sk_back = sk_ft.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
def test_function_transformer_sparse(sparse_clf_dataset):  # noqa: F811
    """cuML FunctionTransformer matches sklearn on sparse input."""
    X_np, X = sparse_clf_dataset

    cu_ft = cuFunctionTransformer(
        func=lambda x: x * 2, inverse_func=lambda x: x / 2, accept_sparse=True
    )
    out = cu_ft.fit_transform(X)
    back = cu_ft.inverse_transform(out)
    # Results must stay sparse (GPU or CPU).
    assert cpx.scipy.sparse.issparse(out) or scipy.sparse.issparse(out)
    assert cpx.scipy.sparse.issparse(back) or scipy.sparse.issparse(back)

    sk_ft = skFunctionTransformer(
        func=lambda x: x * 2, inverse_func=lambda x: x / 2, accept_sparse=True
    )
    sk_out = sk_ft.fit_transform(X_np)
    sk_back = sk_ft.inverse_transform(sk_out)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ["uniform", "normal"])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer(
    failure_logger,
    nan_filled_positive,  # noqa: F811
    n_quantiles,
    output_distribution,
    ignore_implicit_zeros,
    subsample,
):
    """cuML QuantileTransformer matches sklearn (outputs and fitted state)."""
    X_np, X = nan_filled_positive
    params = dict(
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        ignore_implicit_zeros=ignore_implicit_zeros,
        subsample=subsample,
        random_state=42,
        copy=True,
    )

    cu_qt = cuQuantileTransformer(**params)
    out = cu_qt.fit_transform(X)
    assert type(out) == type(X)
    back = cu_qt.inverse_transform(out)
    assert type(back) == type(out)

    sk_qt = skQuantileTransformer(**params)
    sk_out = sk_qt.fit_transform(X_np)
    sk_back = sk_qt.inverse_transform(sk_out)

    # Fitted state must agree as well, not just the transformed data.
    assert_allclose(cu_qt.quantiles_, sk_qt.quantiles_)
    assert_allclose(cu_qt.references_, sk_qt.references_)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ["uniform", "normal"])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transformer_sparse(
    failure_logger,
    sparse_nan_filled_positive,  # noqa: F811
    n_quantiles,
    output_distribution,
    ignore_implicit_zeros,
    subsample,
):
    """cuML QuantileTransformer matches sklearn on sparse CSC input."""
    X_np, X = sparse_nan_filled_positive
    # Both inputs are handed over in CSC layout.
    X_np = X_np.tocsc()
    X = X.tocsr().tocsc()
    params = dict(
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        ignore_implicit_zeros=ignore_implicit_zeros,
        subsample=subsample,
        random_state=42,
        copy=True,
    )

    cu_qt = cuQuantileTransformer(**params)
    out = cu_qt.fit_transform(X)
    out = out.tocsc()
    back = cu_qt.inverse_transform(out)
    # The result should stay in the same sparse universe as the input.
    if cpx.scipy.sparse.issparse(X):
        assert cpx.scipy.sparse.issparse(out)
    if scipy.sparse.issparse(X):
        assert scipy.sparse.issparse(out)

    sk_qt = skQuantileTransformer(**params)
    sk_out = sk_qt.fit_transform(X_np)
    sk_back = sk_qt.inverse_transform(sk_out)

    # Fitted state must agree as well, not just the transformed data.
    assert_allclose(cu_qt.quantiles_, sk_qt.quantiles_)
    assert_allclose(cu_qt.references_, sk_qt.references_)
    assert_allclose(out, sk_out)
    assert_allclose(back, sk_back)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("n_quantiles", [30, 100])
@pytest.mark.parametrize("output_distribution", ["uniform", "normal"])
@pytest.mark.parametrize("ignore_implicit_zeros", [False, True])
@pytest.mark.parametrize("subsample", [100])
def test_quantile_transform(
    failure_logger,
    nan_filled_positive,  # noqa: F811
    axis,
    n_quantiles,
    output_distribution,
    ignore_implicit_zeros,
    subsample,
):
    """cu_quantile_transform matches sk_quantile_transform along both axes."""
    X_np, X = nan_filled_positive
    params = dict(
        axis=axis,
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        ignore_implicit_zeros=ignore_implicit_zeros,
        subsample=subsample,
        random_state=42,
        copy=True,
    )

    out = cu_quantile_transform(X, **params)
    assert type(out) == type(X)

    expected = sk_quantile_transform(X_np, **params)
    assert_allclose(out, expected)
@pytest.mark.parametrize("method", ["yeo-johnson", "box-cox"])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transformer(
    failure_logger, nan_filled_positive, method, standardize  # noqa: F811
):
    """cuML PowerTransformer matches sklearn on fit/transform/inverse.

    Bug fix: the sklearn reference inverse used to be computed with the
    *cuML* estimator (``transformer.inverse_transform(sk_t_X)``), so the
    round-trip comparison never exercised sklearn's own inverse. It now
    uses the fitted sklearn estimator, consistent with the sibling tests.
    """
    X_np, X = nan_filled_positive

    cu_transformer = cuPowerTransformer(
        method=method, standardize=standardize, copy=True
    )
    ft_X = cu_transformer.fit_transform(X)
    assert type(ft_X) == type(X)
    t_X = cu_transformer.transform(X)
    assert type(t_X) == type(X)
    r_X = cu_transformer.inverse_transform(t_X)
    assert type(r_X) == type(t_X)

    sk_transformer = skPowerTransformer(
        method=method, standardize=standardize, copy=True
    )
    sk_t_X = sk_transformer.fit_transform(X_np)
    # Use the sklearn estimator for the reference inverse transform.
    sk_r_X = sk_transformer.inverse_transform(sk_t_X)

    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
    assert_allclose(r_X, sk_r_X)
@pytest.mark.parametrize("method", ["yeo-johnson", "box-cox"])
@pytest.mark.parametrize("standardize", [False, True])
def test_power_transform(
    failure_logger, nan_filled_positive, method, standardize  # noqa: F811
):
    """cu_power_transform matches sk_power_transform for both methods."""
    X_np, X = nan_filled_positive

    out = cu_power_transform(X, method=method, standardize=standardize)
    assert type(out) == type(X)

    expected = sk_power_transform(X_np, method=method, standardize=standardize)
    assert_allclose(out, expected)
def test_kernel_centerer():
    """cuML KernelCenterer matches sklearn on a small linear kernel."""
    X = np.array([[1.0, -2.0, 2.0], [-2.0, 1.0, 3.0], [4.0, 1.0, -2.0]])
    K = pairwise_kernels(X, metric="linear")

    cu_model = cuKernelCenterer()
    cu_model.fit(K)
    out = cu_model.transform(K, copy=True)
    assert type(out) == type(X)

    expected = skKernelCenterer().fit_transform(K)
    assert_allclose(expected, out)
def test__repr__():
    """repr of every default-constructed preprocessor is ``ClassName()``."""
    expectations = [
        (cuBinarizer, "Binarizer()"),
        (cuFunctionTransformer, "FunctionTransformer()"),
        (cuKBinsDiscretizer, "KBinsDiscretizer()"),
        (cuKernelCenterer, "KernelCenterer()"),
        (cuMaxAbsScaler, "MaxAbsScaler()"),
        (cuMinMaxScaler, "MinMaxScaler()"),
        (cuMissingIndicator, "MissingIndicator()"),
        (cuNormalizer, "Normalizer()"),
        (cuPolynomialFeatures, "PolynomialFeatures()"),
        (cuQuantileTransformer, "QuantileTransformer()"),
        (cuRobustScaler, "RobustScaler()"),
        (cuSimpleImputer, "SimpleImputer()"),
        (cuStandardScaler, "StandardScaler()"),
    ]
    for estimator_cls, expected in expectations:
        assert estimator_cls().__repr__() == expected
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_no_cuinit.py | # Copyright (c) 2023, NVIDIA CORPORATION.
import os
import subprocess
import sys
from shutil import which
import pytest
# gdb/cuda-gdb script fed on stdin: set a (pending) breakpoint on cuInit,
# run the target, and exit. If cuInit is called, gdb stops at the
# breakpoint and prints "in cuInit ()", which the tests below grep for.
GDB_COMMANDS = """
set confirm off
set breakpoint pending on
break cuInit
run
exit
"""
@pytest.fixture(scope="module")
def cuda_gdb(request):
    """Locate a working ``cuda-gdb`` binary; xfail the test otherwise."""
    gdb = which("cuda-gdb")
    if gdb is None:
        request.applymarker(
            pytest.mark.xfail(reason="No cuda-gdb found, can't detect cuInit"),
        )
        return gdb

    # Found a binary — make sure it actually runs on this platform.
    version_check = subprocess.run(
        [gdb, "--version"], capture_output=True, text=True
    )
    if version_check.returncode != 0:
        request.applymarker(
            pytest.mark.xfail(
                reason=(
                    "cuda-gdb not working on this platform, "
                    f"can't detect cuInit: {version_check.stderr}"
                )
            ),
        )
    return gdb
def test_cuml_import_no_cuinit(cuda_gdb):
    """Importing cuml with RAPIDS_NO_INITIALIZE set must not call cuInit.

    Intercepting the call to cuInit programmatically is tricky because
    cuda-python/numba/cupy resolve it from dynamic libraries in many ways
    (see discussion at https://github.com/rapidsai/cuml/pull/12361, which
    would need hooks overriding dlsym, cuGetProcAddress, and cuInit).
    Instead, run the import under cuda-gdb with a breakpoint on cuInit and
    verify the breakpoint is never hit.
    """
    child_env = dict(os.environ, RAPIDS_NO_INITIALIZE="1")
    result = subprocess.run(
        [
            cuda_gdb,
            "-x",
            "-",
            "--args",
            sys.executable,
            "-c",
            "import cuml",
        ],
        input=GDB_COMMANDS,
        env=child_env,
        capture_output=True,
        text=True,
    )
    hit_offset = result.stdout.find("in cuInit ()")
    print("Command output:\n")
    print("*** STDOUT ***")
    print(result.stdout)
    print("*** STDERR ***")
    print(result.stderr)
    assert result.returncode == 0
    # find() returns -1 when the breakpoint banner never appeared.
    assert hit_offset < 0
def test_cuml_create_estimator_cuinit(cuda_gdb):
    """Positive control: the gdb scripting must detect cuInit when a CUDA
    context is definitely created (allocating a cupy array)."""
    child_env = dict(os.environ, RAPIDS_NO_INITIALIZE="1")
    result = subprocess.run(
        [
            cuda_gdb,
            "-x",
            "-",
            "--args",
            sys.executable,
            "-c",
            "import cupy as cp; a = cp.ones(10)",
        ],
        input=GDB_COMMANDS,
        env=child_env,
        capture_output=True,
        text=True,
    )
    hit_offset = result.stdout.find("in cuInit ()")
    print("Command output:\n")
    print("*** STDOUT ***")
    print(result.stdout)
    print("*** STDERR ***")
    print(result.stderr)
    assert result.returncode == 0
    # This time the breakpoint banner must be present.
    assert hit_offset >= 0
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_thirdparty.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.test_preproc_utils import assert_allclose
from sklearn.utils.sparsefuncs import (
inplace_csr_column_scale as sk_inplace_csr_column_scale,
inplace_csr_row_scale as sk_inplace_csr_row_scale,
inplace_column_scale as sk_inplace_column_scale,
mean_variance_axis as sk_mean_variance_axis,
min_max_axis as sk_min_max_axis,
)
from cuml._thirdparty.sklearn.utils.sparsefuncs import (
inplace_csr_column_scale as cu_inplace_csr_column_scale,
inplace_csr_row_scale as cu_inplace_csr_row_scale,
inplace_column_scale as cu_inplace_column_scale,
mean_variance_axis as cu_mean_variance_axis,
min_max_axis as cu_min_max_axis,
)
from sklearn.utils.extmath import (
row_norms as sk_row_norms,
_incremental_mean_and_var as sk_incremental_mean_and_var,
)
from cuml._thirdparty.sklearn.utils.extmath import (
row_norms as cu_row_norms,
_incremental_mean_and_var as cu_incremental_mean_and_var,
)
from cuml._thirdparty.sklearn.utils.validation import check_X_y
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cpx = gpu_only_import("cupyx")
@pytest.fixture(scope="session")
def random_dataset(request, random_seed):
    """Dense 100x10 uniform random matrix as a (numpy, cupy) pair."""
    cp.random.seed(random_seed)
    data = cp.random.rand(100, 10)
    return data.get(), data
@pytest.fixture(scope="session", params=["cupy-csr", "cupy-csc"])
def sparse_random_dataset(request, random_seed):
    """100x10 uniform matrix with ~30% zeros, returned as numpy/cupy dense
    plus scipy/cupyx sparse copies (CSR or CSC per fixture param)."""
    cp.random.seed(random_seed)
    dense = cp.random.rand(100, 10)
    # Zero out a random 30% of the entries so the sparse copies are sparse.
    zero_idx = cp.random.choice(dense.size, int(dense.size * 0.3), replace=False)
    dense.ravel()[zero_idx] = 0
    if request.param == "cupy-csr":
        device_sparse = cpx.scipy.sparse.csr_matrix(dense)
    elif request.param == "cupy-csc":
        device_sparse = cpx.scipy.sparse.csc_matrix(dense)
    return dense.get(), dense, device_sparse.get(), device_sparse
def test_check_X_y():
    """check_X_y accepts 1d / single-column targets of matching length and
    rejects multi-column or length-mismatched ones."""
    X = np.ones((100, 10))
    y_flat = np.ones((100,))
    y_col = np.ones((100, 1))
    y_wide = np.ones((100, 2))
    y_long = np.ones((101,))
    check_X_y(X, y_flat, multi_output=False)
    check_X_y(X, y_col, multi_output=False)
    # Invalid target shapes must raise regardless of multi_output.
    for bad_y, multi in ((y_wide, False), (y_long, False), (y_long, True)):
        with pytest.raises(Exception):
            check_X_y(X, bad_y, multi_output=multi)
@pytest.mark.parametrize("square", [False, True])
def test_row_norms(failure_logger, sparse_random_dataset, square):
    """cuML row_norms matches sklearn on dense and sparse inputs."""
    X_np, X, X_sparse_np, X_sparse = sparse_random_dataset
    assert_allclose(
        cu_row_norms(X_np, squared=square), sk_row_norms(X, squared=square)
    )
    assert_allclose(
        cu_row_norms(X_sparse, squared=square),
        sk_row_norms(X_sparse_np, squared=square),
    )
def test_incremental_mean_and_var(failure_logger, random_seed, random_dataset):
    """cuML's incremental mean/variance update matches sklearn's."""
    X_np, X = random_dataset
    cp.random.seed(random_seed)
    prev_mean = cp.random.rand(10)
    prev_var = cp.random.rand(10)
    prev_count = cp.random.rand(10)
    cu_res = cu_incremental_mean_and_var(X, prev_mean, prev_var, prev_count)
    sk_res = sk_incremental_mean_and_var(
        X_np, prev_mean.get(), prev_var.get(), prev_count.get()
    )
    # Compare (mean, variance, sample_count) triples element-wise.
    for cu_val, sk_val in zip(cu_res, sk_res):
        assert_allclose(cu_val, sk_val)
def test_inplace_csr_column_scale(
    failure_logger, random_seed, sparse_random_dataset
):
    """In-place CSR column scaling matches sklearn (CSR inputs only)."""
    _, _, X_sparse_np, X_sparse = sparse_random_dataset
    if X_sparse.format != "csr":
        pytest.skip()
    cp.random.seed(random_seed)
    col_factors = cp.random.rand(10)
    cu_inplace_csr_column_scale(X_sparse, col_factors)
    sk_inplace_csr_column_scale(X_sparse_np, col_factors.get())
    assert_allclose(X_sparse, X_sparse_np)
def test_inplace_csr_row_scale(
    failure_logger, random_seed, sparse_random_dataset
):
    """In-place CSR row scaling matches sklearn (CSR inputs only)."""
    _, _, X_sparse_np, X_sparse = sparse_random_dataset
    if X_sparse.format != "csr":
        pytest.skip()
    cp.random.seed(random_seed)
    row_factors = cp.random.rand(100)
    cu_inplace_csr_row_scale(X_sparse, row_factors)
    sk_inplace_csr_row_scale(X_sparse_np, row_factors.get())
    assert_allclose(X_sparse, X_sparse_np)
def test_inplace_column_scale(
    failure_logger, random_seed, sparse_random_dataset
):
    """In-place column scaling matches sklearn on sparse input; dense input
    must raise."""
    _, X, X_sparse_np, X_sparse = sparse_random_dataset
    cp.random.seed(random_seed)
    factors = cp.random.rand(10)
    cu_inplace_column_scale(X_sparse, factors)
    sk_inplace_column_scale(X_sparse_np, factors.get())
    assert_allclose(X_sparse, X_sparse_np)
    # Dense arrays are not supported by this helper.
    with pytest.raises(Exception):
        cu_inplace_column_scale(X, factors)
@pytest.mark.parametrize("axis", [0, 1])
def test_mean_variance_axis(failure_logger, sparse_random_dataset, axis):
    """Per-axis mean/variance of a sparse matrix matches sklearn."""
    _, _, X_sparse_np, X_sparse = sparse_random_dataset
    got = cu_mean_variance_axis(X_sparse, axis=axis)
    expected = sk_mean_variance_axis(X_sparse_np, axis=axis)
    for cu_val, sk_val in zip(got, expected):
        assert_allclose(cu_val, sk_val)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("ignore_nan", [False, True])
# ignore warning about changing sparsity in both cupy and scipy
@pytest.mark.filterwarnings("ignore:(.*)expensive(.*)::")
def test_min_max_axis(failure_logger, sparse_random_dataset, axis, ignore_nan):
    """Per-axis (or global) sparse min/max matches sklearn, with and without
    NaN handling; dense input must raise."""
    _, X, X_sparse_np, X_sparse = sparse_random_dataset
    # Inject one NaN so the ignore_nan code path is actually exercised.
    X_sparse[0, 0] = np.nan
    X_sparse_np[0, 0] = np.nan
    cu_res = cu_min_max_axis(X_sparse, axis=axis, ignore_nan=ignore_nan)
    sk_res = sk_min_max_axis(X_sparse_np, axis=axis, ignore_nan=ignore_nan)
    if axis is None:
        # Scalar results: values must be equal, or both NaN.
        for cu_v, sk_v in zip(cu_res, sk_res):
            assert cu_v == sk_v or (cp.isnan(cu_v) and np.isnan(sk_v))
    else:
        for cu_v, sk_v in zip(cu_res, sk_res):
            assert_allclose(cu_v, sk_v)
    with pytest.raises(Exception):
        cu_min_max_axis(X, axis=axis, ignore_nan=ignore_nan)
@pytest.fixture(scope="session", params=["cupy-csr", "cupy-csc"])
def sparse_extremes(request, random_seed):
    """Small hand-crafted matrix mixing NaNs, explicit zeros and all-NaN
    rows, returned as a (scipy, cupyx) sparse pair in CSR or CSC format."""
    dense = cp.array(
        [
            [-0.9933658, 0.871748, 0.44418066],
            [0.87808335, cp.nan, 0.18183318],
            [cp.nan, 0.25030251, -0.7269053],
            [cp.nan, 0.17725405, cp.nan],
            [cp.nan, cp.nan, cp.nan],
            [0.0, 0.0, 0.44418066],
            [0.0, 0.0, 0.0],
            [0.0, 0.0, cp.nan],
            [0.0, cp.nan, cp.nan],
        ]
    )
    if request.param == "cupy-csr":
        device_sparse = cpx.scipy.sparse.csr_matrix(dense)
    elif request.param == "cupy-csc":
        device_sparse = cpx.scipy.sparse.csc_matrix(dense)
    return device_sparse.get(), device_sparse
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("ignore_nan", [False, True])
# ignore warning about changing sparsity in both cupy and scipy
@pytest.mark.filterwarnings("ignore:(.*)expensive(.*)::")
# ignore warning about all nan row in sparse_extremes
@pytest.mark.filterwarnings("ignore:All-NaN(.*)::")
def test_min_max_axis_extremes(sparse_extremes, axis, ignore_nan):
    """min/max on matrices with all-NaN rows and explicit zeros matches
    sklearn for every axis and NaN-handling mode."""
    X_sparse_np, X_sparse = sparse_extremes
    cu_res = cu_min_max_axis(X_sparse, axis=axis, ignore_nan=ignore_nan)
    sk_res = sk_min_max_axis(X_sparse_np, axis=axis, ignore_nan=ignore_nan)
    if axis is None:
        # Scalar results: values must be equal, or both NaN.
        for cu_v, sk_v in zip(cu_res, sk_res):
            assert cu_v == sk_v or (cp.isnan(cu_v) and np.isnan(sk_v))
    else:
        for cu_v, sk_v in zip(cu_res, sk_res):
            assert_allclose(cu_v, sk_v)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_make_blobs.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
# Testing parameters for scalar parameter tests
dtype = ["single", "double"]
n_samples = [100, 1000]
n_features = [2, 10, 100]
# None exercises make_blobs' default center count (asserted to be 3 below).
centers = [
    None,
    2,
    5,
]
cluster_std = [0.01, 0.1]
# Both tuple and list forms of the bounding box are exercised.
center_box = [
    (-10.0, 10.0),
    [-20.0, 20.0],
]
shuffle = [True, False]
random_state = [None, 9]
@pytest.mark.parametrize("dtype", dtype)
@pytest.mark.parametrize("n_samples", n_samples)
@pytest.mark.parametrize("n_features", n_features)
@pytest.mark.parametrize("centers", centers)
@pytest.mark.parametrize("cluster_std", cluster_std)
@pytest.mark.parametrize("center_box", center_box)
@pytest.mark.parametrize("shuffle", shuffle)
@pytest.mark.parametrize("random_state", random_state)
@pytest.mark.parametrize("order", ["F", "C"])
def test_make_blobs_scalar_parameters(
    dtype,
    n_samples,
    n_features,
    centers,
    cluster_std,
    center_box,
    shuffle,
    random_state,
    order,
):
    """make_blobs honors shapes, memory order and cluster counts for every
    scalar-parameter combination."""
    out, labels = cuml.make_blobs(
        dtype=dtype,
        n_samples=n_samples,
        n_features=n_features,
        centers=centers,
        # Bug fix: the parametrized cluster_std was previously ignored;
        # a hard-coded 0.001 was always passed instead.
        cluster_std=cluster_std,
        center_box=center_box,
        shuffle=shuffle,
        random_state=random_state,
        order=order,
    )
    assert out.shape == (n_samples, n_features), "out shape mismatch"
    assert labels.shape == (n_samples,), "labels shape mismatch"
    # The requested memory layout must be honored.
    if order == "F":
        assert out.flags["F_CONTIGUOUS"]
    elif order == "C":
        assert out.flags["C_CONTIGUOUS"]
    # centers=None defaults to 3 blobs; otherwise each requested center
    # must have produced at least one label (centers <= n_samples here).
    if centers is None:
        assert cp.unique(labels).shape == (3,), "unexpected number of clusters"
    elif centers <= n_samples:
        assert cp.unique(labels).shape == (
            centers,
        ), "unexpected number of clusters"
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_svm.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_classification, make_gaussian_quantiles
from sklearn.datasets import make_regression, make_friedman1
from sklearn.datasets import load_iris, make_blobs
from sklearn import svm
from cuml.testing.utils import (
unit_param,
quality_param,
stress_param,
compare_svm,
compare_probabilistic_svm,
svm_array_equal,
)
from cuml.common import input_to_cuml_array
import cuml.svm as cu_svm
import cuml
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
cudf = gpu_only_import("cudf")
scipy_sparse = cpu_only_import("scipy.sparse")
# Some SVM tests are skipped on aarch64; see
# github.com/rapidsai/cuml/issues/5100.
IS_ARM = platform.processor() == "aarch64"
def make_dataset(dataset, n_rows, n_cols, n_classes=2, n_informative=2):
    """Generate a train/test classification split for the SVM tests.

    Parameters
    ----------
    dataset : str
        One of 'classification1', 'classification2', 'gaussian', 'blobs'.
    n_rows, n_cols : int
        Requested number of samples / features.
    n_classes, n_informative : int
        Forwarded to the sklearn generators.

    Returns
    -------
    X_train, X_test, y_train, y_test
        Split arrays; y_train is patched so every class appears at least
        once in the training set.
    """
    np.random.seed(137)
    if n_rows * 0.25 < 4000:
        # Use at least 4000 test samples
        n_test = 4000
        if n_rows > 1000:
            # To avoid a large increase in test time (which is between
            # O(n_rows^2) and O(n_rows^3)).
            n_rows = int(n_rows * 0.75)
        n_rows += n_test
    else:
        # Bug fix: this used to be `n_rows * 0.25`, a float > 1, which
        # train_test_split rejects as a test_size. Use the integer count.
        n_test = n_rows // 4
    if dataset == "classification1":
        X, y = make_classification(
            n_rows,
            n_cols,
            n_informative=n_informative,
            n_redundant=0,
            n_classes=n_classes,
            n_clusters_per_class=1,
        )
    elif dataset == "classification2":
        X, y = make_classification(
            n_rows,
            n_cols,
            n_informative=n_informative,
            n_redundant=0,
            n_classes=n_classes,
            n_clusters_per_class=2,
        )
    elif dataset == "gaussian":
        X, y = make_gaussian_quantiles(
            n_samples=n_rows, n_features=n_cols, n_classes=n_classes
        )
    elif dataset == "blobs":
        X, y = make_blobs(
            n_samples=n_rows, n_features=n_cols, centers=n_classes
        )
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=n_test)
    # correct case when not all classes made it into the training set
    if np.unique(y_train).size < n_classes:
        for i in range(n_classes):
            y_train[i] = i
    return X_train, X_test, y_train, y_test
def get_binary_iris_dataset():
    """Standardized iris features with a binarized target (class 0 vs rest)."""
    iris = load_iris()
    features = iris.data
    # Collapse the three species into a binary label before scaling.
    labels = (iris.target > 0).astype(features.dtype)
    features = StandardScaler().fit_transform(features)
    return features, labels
@pytest.mark.parametrize(
    "params",
    [
        {"kernel": "linear", "C": 1},
        {"kernel": "linear", "C": 1, "tol": 1e-6},
        {"kernel": "linear", "C": 10},
        {"kernel": "rbf", "C": 1, "gamma": 1},
        {"kernel": "rbf", "C": 1, "gamma": "auto"},
        {"kernel": "rbf", "C": 0.1, "gamma": "auto"},
        {"kernel": "rbf", "C": 10, "gamma": "auto"},
        {"kernel": "rbf", "C": 1, "gamma": "scale"},
        {"kernel": "poly", "C": 1, "gamma": 1},
        {"kernel": "poly", "C": 1, "gamma": "auto"},
        {"kernel": "poly", "C": 1, "gamma": "scale"},
        {"kernel": "poly", "C": 1, "gamma": "auto", "degree": 2},
        {"kernel": "poly", "C": 1, "gamma": "auto", "coef0": 1.37},
        {"kernel": "sigmoid", "C": 1, "gamma": "auto"},
        {"kernel": "sigmoid", "C": 1, "gamma": "scale", "coef0": 0.42},
    ],
)
def test_svm_skl_cmp_kernels(params):
    """cuML SVC agrees with sklearn for every kernel/parameter combination
    on the binarized iris dataset (including decision-function values)."""
    # X_train, X_test, y_train, y_test = make_dataset('gaussian', 1000, 4)
    X, y = get_binary_iris_dataset()
    cu_model = cu_svm.SVC(**params)
    cu_model.fit(X, y)
    sk_model = svm.SVC(**params)
    sk_model.fit(X, y)
    compare_svm(cu_model, sk_model, X, y, cmp_decision_func=True)
@pytest.mark.parametrize(
    "params",
    [
        {"kernel": "linear", "C": 1},
        {"kernel": "rbf", "C": 1, "gamma": 1},
        {"kernel": "poly", "C": 1, "gamma": 1},
    ],
)
@pytest.mark.parametrize("dataset", ["classification2", "gaussian", "blobs"])
@pytest.mark.parametrize(
    "n_rows", [3, unit_param(100), quality_param(1000), stress_param(5000)]
)
@pytest.mark.parametrize(
    "n_cols", [2, unit_param(100), quality_param(1000), stress_param(1000)]
)
def test_svm_skl_cmp_datasets(params, dataset, n_rows, n_cols):
    """cuML SVC agrees with sklearn across several synthetic datasets."""
    skip_combo = (
        params["kernel"] == "linear"
        and dataset in ["gaussian", "classification2"]
        and n_rows > 1000
        and n_cols >= 1000
    )
    if skip_combo:
        # linear kernel will not fit the gaussian dataset, but takes very long
        return
    X_train, X_test, y_train, y_test = make_dataset(dataset, n_rows, n_cols)
    # Default to numpy for testing
    with cuml.using_output_type("numpy"):
        cu_model = cu_svm.SVC(**params)
        cu_model.fit(X_train, y_train)
        sk_model = svm.SVC(**params)
        sk_model.fit(X_train, y_train)
        compare_svm(
            cu_model,
            sk_model,
            X_test,
            y_test,
            coef_tol=1e-5,
            report_summary=True,
        )
@pytest.mark.parametrize("params", [{"kernel": "rbf", "C": 1, "gamma": 1}])
@pytest.mark.parametrize("sparse", [True, False])
def test_svm_skl_cmp_multiclass(
    params, sparse, dataset="classification2", n_rows=100, n_cols=6
):
    """Three-class SVC agrees with sklearn, on dense and CSR input."""
    X_train, X_test, y_train, y_test = make_dataset(
        dataset, n_rows, n_cols, n_classes=3, n_informative=6
    )
    if sparse:
        X_train = scipy_sparse.csr_matrix(X_train)
        X_test = scipy_sparse.csr_matrix(X_test)
    # Default to numpy for testing
    with cuml.using_output_type("numpy"):
        cu_model = cu_svm.SVC(**params)
        cu_model.fit(X_train, y_train)
        sk_model = svm.SVC(**params)
        sk_model.fit(X_train, y_train)
        compare_svm(
            cu_model,
            sk_model,
            X_test,
            y_test,
            coef_tol=1e-5,
            report_summary=True,
        )
@pytest.mark.parametrize(
    "params",
    [
        {"kernel": "rbf", "C": 5, "gamma": 0.005, "probability": False},
        {"kernel": "rbf", "C": 5, "gamma": 0.005, "probability": True},
    ],
)
def test_svm_skl_cmp_decision_function(params, n_rows=4000, n_cols=20):
    """decision_function values of cuML SVC stay close to sklearn's."""
    X_train, X_test, y_train, y_test = make_dataset(
        "classification1", n_rows, n_cols
    )
    y_train = y_train.astype(np.int32)
    y_test = y_test.astype(np.int32)
    cu_model = cu_svm.SVC(**params)
    cu_model.fit(X_train, y_train)
    # Predictions keep the label dtype; decision values keep the feature dtype.
    assert cu_model.predict(X_test).dtype == y_train.dtype
    cu_df = cu_model.decision_function(X_test)
    assert cu_df.dtype == X_train.dtype
    sk_model = svm.SVC(**params)
    sk_model.fit(X_train, y_train)
    sk_df = sk_model.decision_function(X_test)
    # Larger slack for probabilistic mode; see comments in the SVC
    # decision_function method.
    tol = 2e-2 if params["probability"] else 1e-5
    assert mean_squared_error(cu_df, sk_df) < tol
@pytest.mark.parametrize(
    "params",
    [
        {"kernel": "linear", "C": 1},
        {"kernel": "rbf", "C": 1, "gamma": 1},
        {"kernel": "poly", "C": 1, "gamma": 1},
        {"kernel": "sigmoid", "C": 1, "gamma": 1},
    ],
)
@pytest.mark.parametrize(
    "n_pred", [unit_param(5000), quality_param(100000), stress_param(1000000)]
)
def test_svm_predict(params, n_pred):
    """Accuracy on two well-separated blobs must be essentially perfect."""
    n_train, n_dims = 500, 2
    X, y = make_blobs(
        n_samples=n_train + n_pred,
        n_features=n_dims,
        centers=[[-5, -5], [5, 5]],
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=n_train
    )
    model = cu_svm.SVC(**params)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    accuracy = np.sum(y_test == predictions) * 100 / n_pred
    assert accuracy > 99
# Probabilisic SVM uses scikit-learn's CalibratedClassifierCV, and therefore
# the input array is converted to numpy under the hood. We explicitly test for
# all supported input types, to avoid errors like
# https://github.com/rapidsai/cuml/issues/3090
@pytest.mark.parametrize(
    "in_type", ["numpy", "numba", "cudf", "cupy", "pandas", "cuml"]
)
def test_svm_skl_cmp_predict_proba(in_type, n_rows=10000, n_cols=20):
    """Probability outputs of cuML SVC track sklearn for every input type."""
    params = {
        "kernel": "rbf",
        "C": 1,
        "tol": 1e-3,
        "gamma": "scale",
        "probability": True,
    }
    X, y = make_classification(
        n_samples=n_rows,
        n_features=n_cols,
        n_informative=2,
        n_redundant=10,
        random_state=137,
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.8, random_state=42
    )
    # Route the training data through CumlArray to obtain the requested
    # container type.
    train_X = input_to_cuml_array(X_train).array
    train_y = input_to_cuml_array(y_train).array
    cu_model = cu_svm.SVC(**params)
    cu_model.fit(train_X.to_output(in_type), train_y.to_output(in_type))
    sk_model = svm.SVC(**params)
    sk_model.fit(X_train, y_train)
    compare_probabilistic_svm(cu_model, sk_model, X_test, y_test, 1e-3, 1e-2)
@pytest.mark.parametrize("class_weight", [None, {1: 10}, "balanced"])
@pytest.mark.parametrize("sample_weight", [None, True])
def test_svc_weights(class_weight, sample_weight):
    """Class/sample weights shift the decision boundary as in sklearn."""
    # We are using the following example as a test case
    # https://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane_unbalanced.html
    X, y = make_blobs(
        n_samples=[1000, 100],
        centers=[[0.0, 0.0], [2.0, 2.0]],
        cluster_std=[1.5, 0.5],
        random_state=137,
        shuffle=False,
    )
    if sample_weight:
        # Put large weight on class 1
        sample_weight = y * 9 + 1
    params = {
        "kernel": "linear",
        "C": 1,
        "gamma": "scale",
        "class_weight": class_weight,
    }
    cu_model = cu_svm.SVC(**params)
    cu_model.fit(X, y, sample_weight)
    if class_weight is not None or sample_weight is not None:
        # Standalone test: check if smaller blob is correctly classified in
        # the presence of class weights
        minority_X = X[y == 1, :]
        minority_y = np.ones(minority_X.shape[0])
        assert cu_model.score(minority_X, minority_y) > 0.9
    sk_model = svm.SVC(**params)
    sk_model.fit(X, y, sample_weight)
    compare_svm(cu_model, sk_model, X, y, coef_tol=1e-5, report_summary=True)
@pytest.mark.parametrize(
    "params",
    [
        pytest.param(
            {"kernel": "poly", "degree": 40, "C": 1, "gamma": "auto"},
            marks=pytest.mark.xfail(
                reason="fp overflow in kernel "
                "function due to non scaled input "
                "features"
            ),
        ),
        pytest.param(
            {
                "kernel": "poly",
                "degree": 40,
                "C": 1,
                "gamma": "scale",
                "x_arraytype": "numpy",
            }
        ),
        pytest.param(
            {
                "kernel": "poly",
                "degree": 40,
                "C": 1,
                "gamma": "scale",
                "x_arraytype": "dataframe",
            }
        ),
        pytest.param(
            {
                "kernel": "poly",
                "degree": 40,
                "C": 1,
                "gamma": "scale",
                "x_arraytype": "numba",
            }
        ),
    ],
)
def test_svm_gamma(params):
    """gamma='scale' must be computed from X.var() for every array type."""
    # Note: we test different array types to make sure that the X.var() is
    # calculated correctly for gamma == 'scale' option.
    x_arraytype = params.pop("x_arraytype", "numpy")
    n_rows, n_cols = 500, 380
    centers = [10 * np.ones(380), -10 * np.ones(380)]
    X, y = make_blobs(
        n_samples=n_rows, n_features=n_cols, random_state=137, centers=centers
    )
    X = X.astype(np.float32)
    if x_arraytype == "dataframe":
        y = cudf.Series(y)
    elif x_arraytype == "numba":
        X = cuda.to_device(X)
    # Using degree 40 polynomials and fp32 training would fail with
    # gamma = 1/(n_cols*X.std()), but it works with the correct implementation:
    # gamma = 1/(n_cols*X.var())
    model = cu_svm.SVC(**params)
    model.fit(X, y)
    assert model.score(X, y) * 100 > 70
@pytest.mark.parametrize("x_dtype", [np.float32, np.float64])
@pytest.mark.parametrize("y_dtype", [np.float32, np.float64, np.int32])
def test_svm_numeric_arraytype(x_dtype, y_dtype):
    """Fitting must reproduce the same model regardless of input dtypes."""
    X, y = get_binary_iris_dataset()
    X = X.astype(x_dtype, order="F")
    y = y.astype(y_dtype)
    model = cu_svm.SVC(**{"kernel": "rbf", "C": 1, "gamma": 0.25})
    model.fit(X, y)
    # Reference values for this fixed dataset / parameter combination.
    expected_intercept = 0.23468959692060373
    assert abs(model.intercept_ - expected_intercept) / expected_intercept < 1e-3
    assert model.n_support_ == 15
    assert np.sum(model.predict(X) - y) == 0
def get_memsize(svc):
    """Calculates the memory occupied by the parameters of an SVC object

    Parameters
    ----------
    svc : cuML SVC classifier object

    Return
    ------
    The GPU memory usage in bytes.
    """
    total = 0
    for attr_name in ("dual_coef_", "support_", "support_vectors_"):
        head = getattr(svc, attr_name)[0]
        total += np.prod(head.shape) * head.dtype.itemsize
    return total
@pytest.mark.xfail(reason="Need rapidsai/rmm#415 to detect memleak robustly")
@pytest.mark.memleak
@pytest.mark.parametrize("params", [{"kernel": "rbf", "C": 1, "gamma": 1}])
@pytest.mark.parametrize(
    "n_rows", [unit_param(500), quality_param(1000), stress_param(1000)]
)
@pytest.mark.parametrize(
    "n_iter", [unit_param(10), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize("n_cols", [1000])
@pytest.mark.parametrize("use_handle", [True, False])
def test_svm_memleak(
    params, n_rows, n_iter, n_cols, use_handle, dataset="blobs"
):
    """
    Test whether there is any memory leak: repeatedly fit and discard SVC
    models on the same handle and check that free device memory returns to
    its pre-loop level.
    .. note:: small `n_rows`, and `n_cols` values will result in small model
    size, that will not be measured by get_memory_info.
    """
    X_train, X_test, y_train, y_test = make_dataset(dataset, n_rows, n_cols)
    stream = cuml.cuda.Stream()
    handle = cuml.Handle(stream=stream)
    # Warmup. Some modules that are used in SVC allocate space on the device
    # and consume memory. Here we make sure that this allocation is done
    # before the first call to get_memory_info.
    tmp = cu_svm.SVC(handle=handle, **params)
    tmp.fit(X_train, y_train)
    ms = get_memsize(tmp)
    print(
        "Memory consumption of SVC object is {} MiB".format(
            ms / (1024 * 1024.0)
        )
    )
    # Baseline free device memory after warmup; the loop below must not
    # permanently change it.
    free_mem = cuda.current_context().get_memory_info()[0]
    # Check first whether the get_memory_info gives us the correct memory
    # footprint
    cuSVC = cu_svm.SVC(handle=handle, **params)
    cuSVC.fit(X_train, y_train)
    delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
    assert delta_mem >= ms
    # Main test loop
    b_sum = 0
    for i in range(n_iter):
        cuSVC = cu_svm.SVC(handle=handle, **params)
        cuSVC.fit(X_train, y_train)
        # Accumulate intercepts so each fit's result is consumed (b_sum is
        # otherwise unused); predict exercises the inference path too.
        b_sum += cuSVC.intercept_
        cuSVC.predict(X_train)
        del cuSVC
    # Sync the stream so deallocations triggered by `del` complete before
    # the final measurement.
    handle.sync()
    delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
    print("Delta GPU mem: {} bytes".format(delta_mem))
    assert delta_mem == 0
@pytest.mark.xfail(reason="Need rapidsai/rmm#415 to detect memleak robustly")
@pytest.mark.memleak
@pytest.mark.parametrize(
    "params", [{"kernel": "poly", "degree": 30, "C": 1, "gamma": 1}]
)
def test_svm_memleak_on_exception(
    params, n_rows=1000, n_iter=10, n_cols=1000, dataset="blobs"
):
    """
    Test whether there is any mem leak when we exit training with an exception.
    The poly kernel with degree=30 will overflow, and triggers the
    'SMO error: NaN found...' exception.
    """
    X_train, y_train = make_blobs(
        n_samples=n_rows, n_features=n_cols, random_state=137, centers=2
    )
    X_train = X_train.astype(np.float32)
    stream = cuml.cuda.Stream()
    handle = cuml.Handle(stream=stream)
    # Warmup. Some modules that are used in SVC allocate space on the device
    # and consume memory. Here we make sure that this allocation is done
    # before the first call to get_memory_info.
    tmp = cu_svm.SVC(handle=handle, **params)
    with pytest.raises(RuntimeError):
        tmp.fit(X_train, y_train)
        # SMO error: NaN found during fitting.
    # Baseline free device memory; the failing fits below must not leak.
    free_mem = cuda.current_context().get_memory_info()[0]
    # Main test loop
    for i in range(n_iter):
        cuSVC = cu_svm.SVC(handle=handle, **params)
        with pytest.raises(RuntimeError):
            cuSVC.fit(X_train, y_train)
            # SMO error: NaN found during fitting.
        del cuSVC
    # Sync so deallocations triggered by `del` complete before measuring.
    handle.sync()
    delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
    print("Delta GPU mem: {} bytes".format(delta_mem))
    assert delta_mem == 0
def make_regression_dataset(dataset, n_rows, n_cols):
    """Generate a standardized float32 regression train/test split.

    Parameters
    ----------
    dataset : str
        One of 'reg1' (noise-free), 'reg2' (noise=10) or 'Friedman'.
    n_rows, n_cols : int
        Number of samples / features requested from the generator.

    Returns
    -------
    X_train, X_test, y_train, y_test : float32 arrays

    Raises
    ------
    ValueError
        If `dataset` is not one of the recognized names.
    """
    np.random.seed(137)
    if dataset == "reg1":
        X, y = make_regression(
            n_rows, n_cols, n_informative=2, n_targets=1, random_state=137
        )
    elif dataset == "reg2":
        X, y = make_regression(
            n_rows,
            n_cols,
            n_informative=2,
            n_targets=1,
            random_state=137,
            noise=10,
        )
    elif dataset == "Friedman":
        X, y = make_friedman1(
            n_samples=n_rows, n_features=n_cols, noise=0.0, random_state=137
        )
    else:
        # Bug fix: fixed the "dataste" typo and replaced the two-argument
        # ValueError (which rendered as a tuple) with a formatted message.
        raise ValueError(f"Wrong option for dataset: {dataset}")
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    dtype = np.float32
    X = X.astype(dtype)
    y = y.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    return X_train, X_test, y_train, y_test
def compare_svr(svr1, svr2, X_test, y_test, tol=1e-3):
    """Assert that two fitted regressors perform equally well on X_test."""
    if X_test.shape[0] > 1:
        # Enough samples for a meaningful score: R^2 must agree within tol.
        delta = svr1.score(X_test, y_test) - svr2.score(X_test, y_test)
        assert abs(delta) < tol
    else:
        # Single sample: compare mean squared errors relatively instead.
        err1 = mean_squared_error(y_test, svr1.predict(X_test))
        err2 = mean_squared_error(y_test, svr2.predict(X_test))
        assert (err1 - err2) / err2 < tol
@pytest.mark.parametrize(
    "params",
    [
        {"kernel": "linear", "C": 1, "gamma": 1},
        {"kernel": "rbf", "C": 1, "gamma": 1},
        {"kernel": "poly", "C": 1, "gamma": 1},
    ],
)
@pytest.mark.parametrize("dataset", ["reg1", "reg2", "Friedman"])
@pytest.mark.parametrize(
    "n_rows",
    [unit_param(3), unit_param(100), quality_param(1000), stress_param(5000)],
)
@pytest.mark.parametrize(
    "n_cols",
    [unit_param(5), unit_param(100), quality_param(1000), stress_param(1000)],
)
def test_svr_skl_cmp(params, dataset, n_rows, n_cols):
    """Compare to Sklearn SVR"""
    if dataset == "Friedman" and n_cols < 5:
        # We need at least 5 feature columns for the Friedman dataset
        return
    X_train, X_test, y_train, y_test = make_regression_dataset(
        dataset, n_rows, n_cols
    )
    cu_model = cu_svm.SVR(**params)
    cu_model.fit(X_train, y_train)
    sk_model = svm.SVR(**params)
    sk_model.fit(X_train, y_train)
    compare_svr(cu_model, sk_model, X_test, y_test)
def test_svr_skl_cmp_weighted():
    """Compare to Sklearn SVR, use sample weights"""
    X, y = make_regression(
        n_samples=100,
        n_features=5,
        n_informative=2,
        n_targets=1,
        random_state=137,
        noise=10,
    )
    # Smoothly varying, strictly positive per-sample weights.
    weights = 10 * np.sin(np.linspace(0, 2 * np.pi, len(y))) + 10.1
    params = {"kernel": "linear", "C": 10, "gamma": 1}
    cu_model = cu_svm.SVR(**params)
    cu_model.fit(X, y, weights)
    sk_model = svm.SVR(**params)
    sk_model.fit(X, y, weights)
    compare_svr(cu_model, sk_model, X, y)
@pytest.mark.parametrize("classifier", [True, False])
@pytest.mark.parametrize("train_dtype", [np.float32, np.float64])
@pytest.mark.parametrize("test_dtype", [np.float64, np.float32])
def test_svm_predict_convert_dtype(train_dtype, test_dtype, classifier):
    """predict() accepts test data whose dtype differs from the training
    dtype for both SVC and SVR."""
    X, y = make_classification(n_samples=50, random_state=0)
    X = X.astype(train_dtype)
    y = y.astype(train_dtype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    model = cu_svm.SVC() if classifier else cu_svm.SVR()
    model.fit(X_train, y_train)
    model.predict(X_test.astype(test_dtype))
@pytest.mark.skipif(
    IS_ARM,
    reason="Test fails unexpectedly on ARM. "
    "github.com/rapidsai/cuml/issues/5100",
)
def test_svm_no_support_vectors():
    """A constant target yields a model with zero support vectors whose
    prediction is the intercept alone."""
    n_rows, n_cols = 10, 3
    X = cp.random.uniform(size=(n_rows, n_cols), dtype=cp.float64)
    y = cp.ones((n_rows, 1))
    model = cuml.svm.SVR(kernel="linear", C=10)
    model.fit(X, y)
    assert svm_array_equal(model.predict(X), y, 0)
    assert model.n_support_ == 0
    assert abs(model.intercept_ - 1) <= 1e-6
    assert svm_array_equal(model.coef_, cp.zeros((1, n_cols)))
    assert model.dual_coef_.shape == (1, 0)
    assert model.support_.shape == (0,)
    assert model.support_vectors_.shape[0] == 0
    # Check disabled due to https://github.com/rapidsai/cuml/issues/4095
    # assert model.support_vectors_.shape[1] == n_cols
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_hdbscan.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from sklearn.model_selection import train_test_split
from sklearn import datasets
from hdbscan.plots import CondensedTree
import hdbscan
from cuml.internals import logger
import pytest
from cuml.cluster.hdbscan import HDBSCAN, condense_hierarchy
from cuml.cluster.hdbscan.prediction import (
all_points_membership_vectors,
approximate_predict,
membership_vector,
)
from sklearn.datasets import make_blobs
from cuml.metrics import adjusted_rand_score
from cuml.testing.utils import get_pattern, array_equal
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
# Synthetic dataset names — presumably consumed via get_pattern by tests
# further down the file (not visible in this chunk); verify against usage.
dataset_names = ["noisy_circles", "noisy_moons", "varied"]
def assert_cluster_counts(sk_agg, cuml_agg, digits=25):
    """Compare the sorted cluster sizes of fitted sklearn and cuML models.

    NOTE(review): with the default digits=25, assert_almost_equal runs with
    decimal=-25, i.e. an effectively unlimited tolerance — confirm that this
    slack is intended for callers relying on the default.
    """
    _, sk_sizes = np.unique(sk_agg.labels_, return_counts=True)
    _, cu_sizes = cp.unique(cuml_agg.labels_, return_counts=True)
    np.testing.assert_almost_equal(
        np.sort(sk_sizes), cp.sort(cu_sizes).get(), decimal=-1 * digits
    )
def get_children(roots, parents, arr):
    """Gather the ``arr`` entries whose parent is listed in ``roots``.

    Entries are collected root by root, preserving the order of ``roots``,
    and returned as a flat numpy array.
    """
    collected = []
    for node in roots:
        collected.extend(arr[parents == node])
    return np.array(collected).ravel()
def get_bfs_level(n, roots, parents, children, arr):
    """Return the ``arr`` values found ``n`` levels below ``roots``.

    The BFS frontier is advanced ``n - 1`` times through the ``children``
    array; the final hop reads values out of ``arr`` instead.
    """
    frontier = roots
    remaining = n - 1
    while remaining > 0:
        frontier = get_children(frontier, parents, children)
        remaining -= 1
    return get_children(frontier, parents, arr)
def assert_condensed_trees(sk_agg, min_cluster_size):
    """
    Because of differences in the renumbering and sort ordering,
    the condensed tree arrays from cuml and scikit-learn cannot
    be compared directly. This function performs a BFS through
    the condensed trees, comparing the cluster sizes and lambda
    values at each level of the trees.
    """
    # Condense scikit-learn's single-linkage hierarchy with cuML's
    # condense_hierarchy so both condensed trees derive from the exact
    # same input hierarchy.
    slt = sk_agg.single_linkage_tree_._linkage
    condensed_tree = condense_hierarchy(slt, min_cluster_size)
    cu_parents = condensed_tree._raw_tree["parent"]
    sk_parents = sk_agg.condensed_tree_._raw_tree["parent"]
    cu_children = condensed_tree._raw_tree["child"]
    sk_children = sk_agg.condensed_tree_._raw_tree["child"]
    cu_lambda = condensed_tree._raw_tree["lambda_val"]
    sk_lambda = sk_agg.condensed_tree_._raw_tree["lambda_val"]
    cu_child_size = condensed_tree._raw_tree["child_size"]
    sk_child_size = sk_agg.condensed_tree_._raw_tree["child_size"]
    # Start at the root, perform bfs
    # NOTE(review): the hard-coded root id 1000 assumes 1000-sample data
    # (the condensed-tree root id equals n_samples). For other sample
    # counts the frontier is empty after the first step and the loop
    # exits with only vacuous comparisons — confirm callers always pass
    # 1000-point datasets.
    l2_cu = [1000]
    l2_sk = [1000]
    lev = 1
    # Walk both trees level by level until both frontiers are exhausted,
    # comparing sorted lambda values and child sizes at each depth.
    while len(l2_cu) != 0 or len(l2_sk) != 0:
        l2_cu = get_bfs_level(lev, [1000], cu_parents, cu_children, cu_lambda)
        l2_sk = get_bfs_level(lev, [1000], sk_parents, sk_children, sk_lambda)
        s2_cu = get_bfs_level(
            lev, [1000], cu_parents, cu_children, cu_child_size
        )
        s2_sk = get_bfs_level(
            lev, [1000], sk_parents, sk_children, sk_child_size
        )
        # Sort in place: node ordering differs between implementations.
        s2_cu.sort()
        s2_sk.sort()
        l2_cu.sort()
        l2_sk.sort()
        lev += 1
        assert np.allclose(l2_cu, l2_sk, atol=1e-5, rtol=1e-6)
        assert np.allclose(s2_cu, s2_sk, atol=1e-5, rtol=1e-6)
    # At least one level beyond the root must have been compared.
    assert lev > 1
def assert_membership_vectors(cu_vecs, sk_vecs):
    """
    Assert the membership vectors by taking the adjusted rand score
    of the argsorted membership vectors.
    """
    # Only comparable when both implementations produced the same number
    # of clusters; otherwise the check is skipped entirely.
    if sk_vecs.shape == cu_vecs.shape:
        # NOTE(review): ``[::-1]`` reverses the *row* order of the argsort
        # result, not the per-row sort order (descending argsort would be
        # ``[:, ::-1]``). Since both arrays are reversed identically, the
        # ARI comparison is unaffected, but column ``i`` below is the i-th
        # *smallest* membership rather than the largest — confirm intent.
        cu_labels_sorted = np.argsort(cu_vecs)[::-1]
        sk_labels_sorted = np.argsort(sk_vecs)[::-1]
        # Compare up to the first 10 ranked columns.
        k = min(sk_vecs.shape[1], 10)
        for i in range(k):
            assert (
                adjusted_rand_score(
                    cu_labels_sorted[:, i], sk_labels_sorted[:, i]
                )
                >= 0.90
            )
@pytest.mark.parametrize("nrows", [500])
@pytest.mark.parametrize("ncols", [25])
@pytest.mark.parametrize("nclusters", [2, 5])
@pytest.mark.parametrize("min_samples", [25, 60])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("min_cluster_size", [30, 50])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_hdbscan_blobs(
    nrows,
    ncols,
    nclusters,
    connectivity,
    cluster_selection_epsilon,
    cluster_selection_method,
    allow_single_cluster,
    min_cluster_size,
    max_cluster_size,
    min_samples,
):
    """cuML HDBSCAN should agree with the reference hdbscan library on
    well-separated Gaussian blobs across a grid of hyperparameters."""
    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=False,
        random_state=42,
    )
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
    )
    cuml_agg.fit(X)
    # Reference implementation: exact (non-approximate) MST with the
    # "generic" algorithm — the closest analogue to cuML's computation.
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
    )
    sk_agg.fit(cp.asnumpy(X))
    assert_condensed_trees(sk_agg, min_cluster_size)
    assert_cluster_counts(sk_agg, cuml_agg)
    # Label ids may be permuted between libraries, so compare label
    # agreement (ARI), cluster counts, and sorted persistences.
    assert adjusted_rand_score(cuml_agg.labels_, sk_agg.labels_) >= 0.95
    assert len(np.unique(sk_agg.labels_)) == len(cp.unique(cuml_agg.labels_))
    assert np.allclose(
        np.sort(sk_agg.cluster_persistence_),
        np.sort(cuml_agg.cluster_persistence_),
        rtol=0.01,
        atol=0.01,
    )
@pytest.mark.skipif(
    cp.cuda.driver.get_build_version() <= 11020,
    reason="Test failing on driver 11.2",
)
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 50.0, 150.0])
@pytest.mark.parametrize(
    "min_samples_cluster_size_bounds", [(150, 150, 0), (50, 25, 0)]
)
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_hdbscan_sklearn_datasets(
    test_datasets,
    connectivity,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_samples_cluster_size_bounds,
    allow_single_cluster,
):
    """Compare cuML and reference HDBSCAN on the ``test_datasets``
    fixture (defined in conftest) across hyperparameter combinations."""
    # The bounds fixture packs three related parameters as one tuple.
    (
        min_samples,
        min_cluster_size,
        max_cluster_size,
    ) = min_samples_cluster_size_bounds
    X = test_datasets.data
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        gen_min_span_tree=True,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
    )
    cuml_agg.fit(X)
    # Reference implementation with an exact MST for comparability.
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
    )
    sk_agg.fit(cp.asnumpy(X))
    assert_condensed_trees(sk_agg, min_cluster_size)
    assert_cluster_counts(sk_agg, cuml_agg)
    # Compare label agreement and sorted persistences; label ids may be
    # permuted between libraries.
    assert len(np.unique(sk_agg.labels_)) == len(cp.unique(cuml_agg.labels_))
    assert adjusted_rand_score(cuml_agg.labels_, sk_agg.labels_) > 0.85
    assert np.allclose(
        np.sort(sk_agg.cluster_persistence_),
        np.sort(cuml_agg.cluster_persistence_),
        rtol=0.1,
        atol=0.1,
    )
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 50.0, 150.0])
@pytest.mark.parametrize("min_samples", [150, 50, 5, 400])
@pytest.mark.parametrize("min_cluster_size", [150, 25, 5, 250])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_hdbscan_sklearn_extract_clusters(
    test_datasets,
    connectivity,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_samples,
    min_cluster_size,
    max_cluster_size,
    allow_single_cluster,
):
    """Feeding scikit-learn's condensed tree into cuML's cluster
    extraction must reproduce the reference labels/probabilities exactly.

    Note the cuML estimator is deliberately *not* fit here — only its
    ``_extract_clusters`` step is exercised, on the reference tree.
    """
    X = test_datasets.data
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        gen_min_span_tree=True,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
    )
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
    )
    sk_agg.fit(cp.asnumpy(X))
    cuml_agg._extract_clusters(sk_agg.condensed_tree_)
    # Same tree in, so extraction must match the reference exactly.
    assert adjusted_rand_score(cuml_agg.labels_test, sk_agg.labels_) == 1.0
    assert np.allclose(
        cp.asnumpy(cuml_agg.probabilities_test), sk_agg.probabilities_
    )
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("dataset", dataset_names)
@pytest.mark.parametrize("min_samples", [15])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0])
@pytest.mark.parametrize("min_cluster_size", [25])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_hdbscan_cluster_patterns(
    dataset,
    nrows,
    connectivity,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    min_samples,
):
    """Compare cuML and reference HDBSCAN on non-convex synthetic
    patterns (circles, moons, varied blobs)."""
    # This also tests duplicate data points
    X, y = get_pattern(dataset, nrows)[0]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
    )
    cuml_agg.fit(X)
    # Reference implementation with an exact MST for comparability.
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
    )
    sk_agg.fit(cp.asnumpy(X))
    assert_condensed_trees(sk_agg, min_cluster_size)
    assert_cluster_counts(sk_agg, cuml_agg)
    # Label ids may be permuted between libraries; compare via ARI,
    # cluster counts, and sorted persistences.
    assert len(np.unique(sk_agg.labels_)) == len(cp.unique(cuml_agg.labels_))
    assert adjusted_rand_score(cuml_agg.labels_, sk_agg.labels_) > 0.95
    assert np.allclose(
        np.sort(sk_agg.cluster_persistence_),
        np.sort(cuml_agg.cluster_persistence_),
        rtol=0.1,
        atol=0.1,
    )
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("dataset", dataset_names)
@pytest.mark.parametrize("min_samples", [5, 50, 400, 800])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 50.0, 150.0])
@pytest.mark.parametrize("min_cluster_size", [10, 25, 100, 350])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_hdbscan_cluster_patterns_extract_clusters(
    dataset,
    nrows,
    connectivity,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    min_samples,
):
    """On synthetic patterns, cuML's cluster extraction applied to the
    reference condensed tree must match the reference output exactly.

    The cuML estimator is deliberately *not* fit — only
    ``_extract_clusters`` is exercised.
    """
    # This also tests duplicate data points
    X, y = get_pattern(dataset, nrows)[0]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
    )
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
    )
    sk_agg.fit(cp.asnumpy(X))
    cuml_agg._extract_clusters(sk_agg.condensed_tree_)
    # Same tree in, so extraction must match the reference exactly.
    assert adjusted_rand_score(cuml_agg.labels_test, sk_agg.labels_) == 1.0
    assert np.allclose(
        cp.asnumpy(cuml_agg.probabilities_test), sk_agg.probabilities_
    )
def test_hdbscan_core_dists_bug_4054():
    """
    This test explicitly verifies that the MRE from
    https://github.com/rapidsai/cuml/issues/4054
    matches the reference impl
    """
    X, y = datasets.make_moons(n_samples=10000, noise=0.12, random_state=0)
    cu_labels_ = HDBSCAN(min_samples=25, min_cluster_size=25).fit_predict(X)
    # Reference run with an exact MST for a fair comparison.
    sk_labels_ = hdbscan.HDBSCAN(
        min_samples=25, min_cluster_size=25, approx_min_span_tree=False
    ).fit_predict(X)
    # Labelings may be permuted, so compare via adjusted Rand index.
    assert adjusted_rand_score(cu_labels_, sk_labels_) > 0.99
@pytest.mark.parametrize(
    "metric, supported",
    [("euclidean", True), ("l1", False), ("l2", True), ("abc", False)],
)
def test_hdbscan_metric_parameter_input(metric, supported):
    """
    tests how valid and invalid arguments to the metric
    parameter are handled
    """
    X, y = make_blobs(n_samples=10000, n_features=15, random_state=12)
    clf = HDBSCAN(metric=metric)
    if supported:
        clf.fit(X)
    else:
        # Unsupported or unknown metrics must raise ValueError at fit time.
        with pytest.raises(ValueError):
            clf.fit(X)
def test_hdbscan_empty_cluster_tree():
    """Extracting clusters from a condensed tree that contains no cluster
    nodes (every child is a single point hanging directly off the root)
    must assign every point to the root cluster (label 0).
    """
    # Five leaf points, all direct children of root node 5, each with
    # identical lambda and unit size.
    raw_tree = np.recarray(
        shape=(5,),
        formats=[np.intp, np.intp, float, np.intp],
        names=("parent", "child", "lambda_val", "child_size"),
    )
    raw_tree["parent"] = np.asarray([5, 5, 5, 5, 5])
    raw_tree["child"] = [0, 1, 2, 3, 4]
    raw_tree["lambda_val"] = [1.0, 1.0, 1.0, 1.0, 1.0]
    raw_tree["child_size"] = [1, 1, 1, 1, 1]
    condensed_tree = CondensedTree(raw_tree, 0.0, True)
    cuml_agg = HDBSCAN(
        allow_single_cluster=True, cluster_selection_method="eom"
    )
    cuml_agg._extract_clusters(condensed_tree)
    # All points must land in the root cluster. Asserting every label is
    # exactly 0 is strictly stronger than the previous ``sum(...) == 0``
    # check, which a mixture of -1 (noise) and +1 labels could also pass.
    labels = cuml_agg.labels_test.to_output("numpy")
    assert np.all(labels == 0)
def test_hdbscan_plots():
    """``gen_min_span_tree`` controls whether ``minimum_spanning_tree_``
    is populated; the condensed and single-linkage trees are always
    available after a fit."""
    X, y = make_blobs(
        n_samples=int(100),
        n_features=100,
        centers=10,
        cluster_std=0.7,
        shuffle=False,
        random_state=42,
    )
    # With gen_min_span_tree=True all three plotting trees must exist.
    cuml_agg = HDBSCAN(gen_min_span_tree=True)
    cuml_agg.fit(X)
    assert cuml_agg.condensed_tree_ is not None
    assert cuml_agg.minimum_spanning_tree_ is not None
    assert cuml_agg.single_linkage_tree_ is not None
    # With gen_min_span_tree=False only the MST is skipped.
    cuml_agg = HDBSCAN(gen_min_span_tree=False)
    cuml_agg.fit(X)
    assert cuml_agg.minimum_spanning_tree_ is None
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [10, 25])
@pytest.mark.parametrize("nclusters", [10, 15])
@pytest.mark.parametrize("allow_single_cluster", [False, True])
@pytest.mark.parametrize("min_cluster_size", [30, 60])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("batch_size", [128, 1000])
def test_all_points_membership_vectors_blobs(
    nrows,
    ncols,
    nclusters,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    batch_size,
):
    """all_points_membership_vectors on blobs should rank clusters the
    same way as the reference implementation (compared after sorting)."""
    X, y = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=True,
        random_state=42,
    )
    # prediction_data=True is required for membership-vector queries.
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(cp.asnumpy(X))
    cu_membership_vectors = all_points_membership_vectors(cuml_agg, batch_size)
    # Cluster column ordering may differ; sort each row before comparing.
    cu_membership_vectors.sort(axis=1)
    sk_membership_vectors = hdbscan.all_points_membership_vectors(
        sk_agg
    ).astype("float32")
    sk_membership_vectors.sort(axis=1)
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("min_samples", [5, 15])
@pytest.mark.parametrize("min_cluster_size", [300, 500])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
@pytest.mark.parametrize("batch_size", [128, 1000])
def test_all_points_membership_vectors_moons(
    nrows,
    min_samples,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    connectivity,
    batch_size,
):
    """all_points_membership_vectors on the two-moons dataset should
    match the reference implementation's cluster ranking."""
    X, y = datasets.make_moons(n_samples=nrows, noise=0.05, random_state=42)
    # prediction_data=True is required for membership-vector queries.
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X)
    sk_agg = hdbscan.HDBSCAN(
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(X)
    cu_membership_vectors = all_points_membership_vectors(cuml_agg, batch_size)
    sk_membership_vectors = hdbscan.all_points_membership_vectors(
        sk_agg
    ).astype("float32")
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("min_samples", [5, 15])
@pytest.mark.parametrize("min_cluster_size", [300, 500])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
@pytest.mark.parametrize("batch_size", [128, 1000])
def test_all_points_membership_vectors_circles(
    nrows,
    min_samples,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    connectivity,
    batch_size,
):
    """all_points_membership_vectors on concentric circles should match
    the reference implementation's cluster ranking."""
    X, y = datasets.make_circles(
        n_samples=nrows, factor=0.5, noise=0.05, random_state=42
    )
    # prediction_data=True is required for membership-vector queries.
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X)
    sk_agg = hdbscan.HDBSCAN(
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(X)
    cu_membership_vectors = all_points_membership_vectors(cuml_agg, batch_size)
    sk_membership_vectors = hdbscan.all_points_membership_vectors(
        sk_agg
    ).astype("float32")
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
@pytest.mark.skipif(
    cp.cuda.driver.get_build_version() <= 11020,
    reason="Test failing on driver 11.2",
)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [200, 500])
@pytest.mark.parametrize("ncols", [10, 25])
@pytest.mark.parametrize("nclusters", [15])
@pytest.mark.parametrize("min_cluster_size", [30, 60])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
def test_approximate_predict_blobs(
    nrows,
    n_points_to_predict,
    ncols,
    nclusters,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    max_cluster_size,
    allow_single_cluster,
):
    """approximate_predict on unseen blob points should produce labels and
    probabilities matching the reference implementation."""
    X, y = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=True,
        random_state=42,
    )
    # Query points are drawn from the same generative process (same seed
    # and centers) so they fall near the training clusters.
    points_to_predict, _ = make_blobs(
        n_samples=n_points_to_predict,
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=True,
        random_state=42,
    )
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(cp.asnumpy(X))
    cu_labels, cu_probs = approximate_predict(cuml_agg, points_to_predict)
    sk_labels, sk_probs = hdbscan.approximate_predict(
        sk_agg, points_to_predict
    )
    # Label ids may be permuted; compare via ARI and probability closeness.
    assert adjusted_rand_score(cu_labels, sk_labels) >= 0.95
    assert np.allclose(cu_probs, sk_probs, atol=0.05)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [50])
@pytest.mark.parametrize("min_samples", [15, 30])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("min_cluster_size", [5, 15])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_approximate_predict_moons(
    nrows,
    n_points_to_predict,
    min_samples,
    cluster_selection_epsilon,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    cluster_selection_method,
    connectivity,
):
    """approximate_predict on held-out two-moons points should match the
    reference implementation's labels and probabilities."""
    # Generate one dataset and hold out the tail as prediction queries.
    X, y = datasets.make_moons(
        n_samples=nrows + n_points_to_predict, noise=0.05, random_state=42
    )
    X_train = X[:nrows]
    X_test = X[nrows:]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X_train)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(cp.asnumpy(X_train))
    cu_labels, cu_probs = approximate_predict(cuml_agg, X_test)
    sk_labels, sk_probs = hdbscan.approximate_predict(sk_agg, X_test)
    # Only compare label agreement when both found the same number of
    # clusters; probabilities are always compared.
    sk_unique = np.unique(sk_labels)
    cu_unique = np.unique(cu_labels)
    if len(sk_unique) == len(cu_unique):
        assert adjusted_rand_score(cu_labels, sk_labels) >= 0.99
        assert array_equal(cu_probs, sk_probs, unit_tol=0.05, total_tol=0.005)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [50])
@pytest.mark.parametrize("min_samples", [5, 15])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("min_cluster_size", [50, 100])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_approximate_predict_circles(
    nrows,
    n_points_to_predict,
    min_samples,
    cluster_selection_epsilon,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    cluster_selection_method,
    connectivity,
):
    """approximate_predict on held-out concentric-circle points should
    match the reference implementation's labels and probabilities."""
    # Generate one dataset and hold out the tail as prediction queries.
    X, y = datasets.make_circles(
        n_samples=nrows + n_points_to_predict,
        factor=0.8,
        noise=0.05,
        random_state=42,
    )
    X_train = X[:nrows]
    X_test = X[nrows:]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X_train)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(cp.asnumpy(X_train))
    cu_labels, cu_probs = approximate_predict(cuml_agg, X_test)
    sk_labels, sk_probs = hdbscan.approximate_predict(sk_agg, X_test)
    # Only compare label agreement when both found the same number of
    # clusters; probabilities are always compared.
    sk_unique = np.unique(sk_labels)
    cu_unique = np.unique(cu_labels)
    if len(sk_unique) == len(cu_unique):
        assert adjusted_rand_score(cu_labels, sk_labels) >= 0.99
        assert array_equal(cu_probs, sk_probs, unit_tol=0.05, total_tol=0.005)
@pytest.mark.parametrize("n_points_to_predict", [200])
@pytest.mark.parametrize("min_samples", [15])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.5])
@pytest.mark.parametrize("min_cluster_size", [100])
@pytest.mark.parametrize("allow_single_cluster", [False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom"])
@pytest.mark.parametrize("connectivity", ["knn"])
def test_approximate_predict_digits(
    n_points_to_predict,
    min_samples,
    cluster_selection_epsilon,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    cluster_selection_method,
    connectivity,
):
    """approximate_predict on a real (digits) dataset should match the
    reference implementation's labels and probabilities."""
    digits = datasets.load_digits()
    X, y = digits.data, digits.target
    # Stratified split keeps the class balance in the held-out queries.
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=n_points_to_predict,
        train_size=X.shape[0] - n_points_to_predict,
        random_state=42,
        shuffle=True,
        stratify=y,
    )
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        min_samples=min_samples,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X_train)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_samples=min_samples,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(X_train)
    cu_labels, cu_probs = approximate_predict(cuml_agg, X_test)
    sk_labels, sk_probs = hdbscan.approximate_predict(sk_agg, X_test)
    # Label ids may be permuted; compare via ARI and probability closeness.
    assert adjusted_rand_score(cu_labels, sk_labels) >= 0.98
    assert array_equal(cu_probs, sk_probs, unit_tol=0.001, total_tol=0.006)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [200, 500])
@pytest.mark.parametrize("ncols", [10, 25])
@pytest.mark.parametrize("nclusters", [15])
@pytest.mark.parametrize("min_cluster_size", [30, 60])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("batch_size", [128])
def test_membership_vector_blobs(
    nrows,
    n_points_to_predict,
    ncols,
    nclusters,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    batch_size,
):
    """membership_vector for unseen blob points should match the
    reference implementation (compared after per-row sorting)."""
    X, y = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=True,
        random_state=42,
    )
    # Query points come from the same generative process (same seed and
    # centers) so they fall near the training clusters.
    points_to_predict, _ = make_blobs(
        n_samples=n_points_to_predict,
        n_features=ncols,
        centers=nclusters,
        cluster_std=0.7,
        shuffle=True,
        random_state=42,
    )
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X)
    sk_agg = hdbscan.HDBSCAN(
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(cp.asnumpy(X))
    cu_membership_vectors = membership_vector(
        cuml_agg, points_to_predict, batch_size
    )
    # Cluster column ordering may differ; sort each row before comparing.
    cu_membership_vectors.sort(axis=1)
    sk_membership_vectors = hdbscan.membership_vector(
        sk_agg,
        points_to_predict,
    ).astype("float32")
    sk_membership_vectors.sort(axis=1)
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [50])
@pytest.mark.parametrize("min_samples", [5, 15])
@pytest.mark.parametrize("min_cluster_size", [300, 500])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
@pytest.mark.parametrize("batch_size", [16])
def test_membership_vector_moons(
    nrows,
    n_points_to_predict,
    min_samples,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    connectivity,
    batch_size,
):
    """membership_vector for held-out two-moons points should match the
    reference implementation."""
    # Generate one dataset and hold out the tail as prediction queries.
    X, y = datasets.make_moons(
        n_samples=nrows + n_points_to_predict, noise=0.05, random_state=42
    )
    X_train = X[:nrows]
    X_test = X[nrows:]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X_train)
    sk_agg = hdbscan.HDBSCAN(
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(X_train)
    cu_membership_vectors = membership_vector(cuml_agg, X_test, batch_size)
    sk_membership_vectors = hdbscan.membership_vector(sk_agg, X_test).astype(
        "float32"
    )
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("n_points_to_predict", [50])
@pytest.mark.parametrize("min_samples", [20, 30])
@pytest.mark.parametrize("min_cluster_size", [100, 150])
@pytest.mark.parametrize("cluster_selection_epsilon", [0.0, 0.5])
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("max_cluster_size", [0])
@pytest.mark.parametrize("cluster_selection_method", ["eom", "leaf"])
@pytest.mark.parametrize("connectivity", ["knn"])
@pytest.mark.parametrize("batch_size", [16])
def test_membership_vector_circles(
    nrows,
    n_points_to_predict,
    min_samples,
    cluster_selection_epsilon,
    cluster_selection_method,
    min_cluster_size,
    allow_single_cluster,
    max_cluster_size,
    connectivity,
    batch_size,
):
    """membership_vector for held-out concentric-circle points should
    match the reference implementation."""
    # Generate one dataset and hold out the tail as prediction queries.
    X, y = datasets.make_circles(
        n_samples=nrows + n_points_to_predict,
        factor=0.8,
        noise=0.05,
        random_state=42,
    )
    X_train = X[:nrows]
    X_test = X[nrows:]
    cuml_agg = HDBSCAN(
        verbose=logger.level_info,
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        max_cluster_size=max_cluster_size,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        prediction_data=True,
    )
    cuml_agg.fit(X_train)
    sk_agg = hdbscan.HDBSCAN(
        min_samples=min_samples,
        allow_single_cluster=allow_single_cluster,
        approx_min_span_tree=False,
        gen_min_span_tree=True,
        min_cluster_size=min_cluster_size,
        cluster_selection_epsilon=cluster_selection_epsilon,
        cluster_selection_method=cluster_selection_method,
        algorithm="generic",
        prediction_data=True,
    )
    sk_agg.fit(X_train)
    cu_membership_vectors = membership_vector(cuml_agg, X_test, batch_size)
    sk_membership_vectors = hdbscan.membership_vector(sk_agg, X_test).astype(
        "float32"
    )
    assert_membership_vectors(cu_membership_vectors, sk_membership_vectors)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_batched_lbfgs.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.tsa.batched_lbfgs import batched_fmin_lbfgs_b
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
def rosenbrock(x, a=1, b=100):
    """Evaluate the classic 2-D Rosenbrock function at point ``x``.

    The global minimum is 0, attained at ``(a, a**2)``.
    """
    x0, x1 = x[0], x[1]
    return (a - x0) ** 2 + b * (x1 - x0 ** 2) ** 2
def g_rosenbrock(x, a=1, b=100):
    """Gradient of the Rosenbrock function.

    Parameters
    ----------
    x : array-like of shape (2,)
        Point at which to evaluate the gradient.
    a, b : float
        Rosenbrock shape parameters; the minimum is at ``(a, a**2)``.

    Returns
    -------
    np.ndarray of shape (2,)
        Partial derivatives ``(df/dx0, df/dx1)``.
    """
    g = np.array(
        [
            -2 * a - 4 * b * x[0] * (-x[0] ** 2 + x[1]) + 2 * x[0],
            b * (-2 * x[0] ** 2 + 2 * x[1]),
        ]
    )
    return g
def batched_rosenbrock(
    x: np.ndarray, num_batches: int, a: np.ndarray, b: np.ndarray
) -> np.ndarray:
    """Evaluate the Rosenbrock function independently for each batch member.

    ``x`` is a flat vector of length ``2 * num_batches``; the result has one
    objective value per batch member.
    """
    return np.array(
        [
            rosenbrock(x[2 * ib : 2 * ib + 2], a[ib], b[ib])
            for ib in range(num_batches)
        ]
    )
def g_batched_rosenbrock(
    x: np.ndarray, num_batches: int, a: np.ndarray, b: np.ndarray
) -> np.ndarray:
    """Gradient of the batched Rosenbrock function.

    Returns a flat vector of length ``2 * num_batches`` holding the per-batch
    gradients back to back.
    """
    gall = np.zeros(2 * num_batches)
    for ib in range(num_batches):
        lo, hi = 2 * ib, 2 * ib + 2
        gall[lo:hi] = g_rosenbrock(x[lo:hi], a[ib], b[ib])
    return gall
def test_batched_lbfgs_rosenbrock():
    """Test the batch-aware L-BFGS-B implementation on batched Rosenbrock.

    Each batch member is a Rosenbrock problem with slightly perturbed
    (a, b) parameters; the optimizer must drive every member to its
    analytical minimum ``(a, a**2)``.
    """
    num_batches = 5
    np.random.seed(42)
    a = np.random.normal(1, scale=0.1, size=num_batches)
    b = np.random.normal(100, scale=10, size=num_batches)

    def f(x, n=None):
        # Objective: a single batch member when n is given, else the batch.
        if n is not None:
            return rosenbrock(x, a[n], b[n])
        return batched_rosenbrock(x, num_batches, a, b)

    def gf(x, n=None):
        # Gradient: a single batch member when n is given, else the batch.
        if n is not None:
            return g_rosenbrock(x, a[n], b[n])
        return g_batched_rosenbrock(x, num_batches, a, b)

    # Start every batch member at the origin.
    x0 = np.zeros(2 * num_batches)

    # Analytical minimum of each batch member.
    res_true = np.zeros(num_batches * 2)
    for i in range(num_batches):
        res_true[i * 2 : (i + 1) * 2] = np.array([a[i], a[i] ** 2])

    # Our new batch-aware L-BFGS optimizer.
    res_xk, _, _ = batched_fmin_lbfgs_b(
        f, x0, num_batches, gf, iprint=-1, factr=100
    )

    np.testing.assert_allclose(res_xk, res_true, rtol=1e-5)
# Allow running this test file directly as a script (outside pytest).
if __name__ == "__main__":
    test_batched_lbfgs_rosenbrock()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_target_encoder.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from cuml.testing.utils import array_equal
from cuml.internals.safe_imports import cpu_only_import
from cuml.preprocessing.TargetEncoder import TargetEncoder
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
pandas = cpu_only_import("pandas")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
def test_targetencoder_fit_transform():
    """fit_transform must match a separate fit followed by transform."""
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    answer = np.array([1.0, 1.0, 0.0, 1.0])

    # One-shot API
    enc = TargetEncoder()
    assert array_equal(enc.fit_transform(train.category, train.label), answer)

    # Two-step API
    enc = TargetEncoder()
    enc.fit(train.category, train.label)
    assert array_equal(enc.transform(train.category), answer)
def test_targetencoder_transform():
    """Unseen rows are encoded with the per-category means learned in fit."""
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    test = cudf.DataFrame({"category": ["b", "b", "a", "b"]})
    answer = np.array([0.5, 0.5, 1.0, 0.5])

    # Either fitting entry point must produce the same transform result.
    for use_fit_transform in (True, False):
        enc = TargetEncoder()
        if use_fit_transform:
            enc.fit_transform(train.category, train.label)
        else:
            enc.fit(train.category, train.label)
        assert array_equal(enc.transform(test.category), answer)
@pytest.mark.parametrize("n_samples", [5000, 500000])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("stat", ["mean", "var", "median"])
def test_targetencoder_random(n_samples, dtype, stat):
    """Compare TargetEncoder against a groupby-based reference on random data.

    Categories unseen at fit time must fall back to the global statistic
    of ``y``.
    """
    x = cp.random.randint(0, 1000, n_samples).astype(dtype)
    y = cp.random.randint(0, 2, n_samples).astype(dtype)
    xt = cp.random.randint(0, 1000, n_samples).astype(dtype)
    encoder = TargetEncoder(stat=stat)
    encoder.fit_transform(x, y)
    test_encoded = encoder.transform(xt)

    # Reference: per-category statistic computed with a cuDF groupby,
    # merged back onto the test rows in their original order.
    df_train = cudf.DataFrame({"x": x, "y": y})
    dg = df_train.groupby("x", as_index=False).agg({"y": stat})
    df_test = cudf.DataFrame({"x": xt})
    df_test["row_id"] = cp.arange(len(df_test))
    df_test = df_test.merge(dg, on="x", how="left")
    df_test = df_test.sort_values("row_id")
    # getattr instead of eval(); unseen categories get the global statistic.
    answer = df_test["y"].fillna(getattr(cp, stat)(y).item()).values
    assert array_equal(test_encoded, answer)
def test_targetencoder_multi_column():
    """
    Test jointly encoding multiple columns (one composite key).
    """
    train = cudf.DataFrame(
        {
            "cat_1": ["a", "b", "b", "a", "a", "b"],
            "cat_2": [1, 1, 2, 2, 1, 2],
            "label": [1, 0, 1, 1, 0, 1],
        }
    )
    test = cudf.DataFrame(
        {"cat_1": ["b", "b", "a", "b"], "cat_2": [1, 2, 1, 2]}
    )
    cols = ["cat_1", "cat_2"]
    train_answer = np.array([2.0 / 3, 2.0 / 3, 1.0, 2.0 / 3, 2.0 / 3, 1.0])
    test_answer = np.array([0.0, 1.0, 0.5, 1.0])

    # fit_transform path
    enc = TargetEncoder()
    encoded = enc.fit_transform(train[cols], train.label)
    assert array_equal(encoded, train_answer)
    assert array_equal(enc.transform(test[cols]), test_answer)

    # fit + transform path
    enc = TargetEncoder()
    enc.fit(train[cols], train.label)
    assert array_equal(enc.transform(train[cols]), train_answer)
    assert array_equal(enc.transform(test[cols]), test_answer)
def test_targetencoder_newly_encountered():
    """
    Categories newly encountered in test ('c' and 'd') must be encoded
    with the global label mean (0.75 here).
    """
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    test = cudf.DataFrame({"category": ["c", "b", "a", "d"]})
    answer = np.array([0.75, 0.5, 1.0, 0.75])

    for use_fit_transform in (True, False):
        enc = TargetEncoder()
        if use_fit_transform:
            enc.fit_transform(train.category, train.label)
        else:
            enc.fit(train.category, train.label)
        assert array_equal(enc.transform(test.category), answer)
def test_one_category():
    """Single training category: train rows are encoded out-of-fold
    (values show the leave-out means), test rows get the global mean."""
    train = cudf.DataFrame(
        {"category": ["a", "a", "a", "a"], "label": [3, 0, 0, 3]}
    )
    test = cudf.DataFrame({"category": ["c", "b", "a", "d"]})

    enc = TargetEncoder()
    encoded_train = enc.fit_transform(train.category, train.label)
    assert array_equal(encoded_train, np.array([1.0, 2.0, 2.0, 1.0]))

    encoded_test = enc.transform(test.category)
    assert array_equal(encoded_test, np.array([1.5, 1.5, 1.5, 1.5]))
def test_targetencoder_pandas():
    """pandas input must be supported and yield a numpy result.

    Note that there are newly-encountered values in test,
    namely, 'c' and 'd' (they fall back to the global mean).
    """
    train = pandas.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    test = pandas.DataFrame({"category": ["c", "b", "a", "d"]})
    encoder = TargetEncoder()
    encoder.fit_transform(train.category, train.label)
    test_encoded = encoder.transform(test.category)
    answer = np.array([0.75, 0.5, 1.0, 0.75])
    assert array_equal(test_encoded, answer)
    # Output container must mirror the input library (pandas -> numpy).
    assert isinstance(test_encoded, np.ndarray)
def test_targetencoder_numpy():
    """numpy input must be supported and yield a numpy result.

    Note that there are newly-encountered values in x_test,
    namely, 3 and 4 (they fall back to the global mean).
    """
    x_train = np.array([1, 2, 2, 1])
    y_train = np.array([1, 0, 1, 1])
    x_test = np.array([1, 2, 3, 4])
    encoder = TargetEncoder()
    encoder.fit_transform(x_train, y_train)
    test_encoded = encoder.transform(x_test)
    answer = np.array([1.0, 0.5, 0.75, 0.75])
    assert array_equal(test_encoded, answer)
    # Output container must mirror the input library (numpy -> numpy).
    assert isinstance(test_encoded, np.ndarray)
def test_targetencoder_cupy():
    """cupy input must be supported and yield a cupy result.

    Note that there are newly-encountered values in x_test,
    namely, 3 and 4 (they fall back to the global mean).
    """
    x_train = cp.array([1, 2, 2, 1])
    y_train = cp.array([1, 0, 1, 1])
    x_test = cp.array([1, 2, 3, 4])
    encoder = TargetEncoder()
    encoder.fit_transform(x_train, y_train)
    test_encoded = encoder.transform(x_test)
    answer = np.array([1.0, 0.5, 0.75, 0.75])
    assert array_equal(test_encoded, answer)
    # Output container must mirror the input library (cupy -> cupy).
    assert isinstance(test_encoded, cp.ndarray)
def test_targetencoder_smooth():
    """Larger `smooth` pulls encodings toward the global mean (0.75)."""
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    expected = {
        0: np.array([1.0, 1.0, 0.0, 1.0]),
        1: np.array([0.875, 0.875, 0.375, 0.875]),
        2: np.array([0.8333, 0.8333, 0.5, 0.8333]),
        10000: np.array([0.75, 0.75, 0.75, 0.75]),
    }
    for smooth, answer in expected.items():
        # One-shot API
        enc = TargetEncoder(smooth=smooth)
        encoded = enc.fit_transform(train.category, train.label)
        assert array_equal(encoded, answer)
        # Two-step API
        enc = TargetEncoder(smooth=smooth)
        enc.fit(train.category, train.label)
        assert array_equal(enc.transform(train.category), answer)
def test_targetencoder_customized_fold_id():
    """
    use customized `fold_ids` array to split data.
    in this example, the 1st sample belongs to `fold 0`
    the 2nd and 3rd sample belongs to `fold 1`
    and the 4th sample belongs to `fold 2`
    """
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "a"], "label": [1, 0, 1, 1]}
    )
    fold_ids = [0, 1, 1, 2]
    answer = np.array([1.0, 0.75, 0.75, 1.0])

    enc = TargetEncoder(split_method="customize")
    encoded = enc.fit_transform(train.category, train.label, fold_ids=fold_ids)
    assert array_equal(encoded, answer)

    enc = TargetEncoder(split_method="customize")
    enc.fit(train.category, train.label, fold_ids=fold_ids)
    assert array_equal(enc.transform(train.category), answer)
def test_targetencoder_var():
    """stat='var' encodes each row with its category's label variance."""
    train = cudf.DataFrame(
        {"category": ["a", "b", "b", "b"], "label": [1, 0, 1, 1]}
    )
    answer = np.array([0.25, 0.0, 0.5, 0.5])

    enc = TargetEncoder(stat="var")
    assert array_equal(enc.fit_transform(train.category, train.label), answer)

    enc = TargetEncoder(stat="var")
    enc.fit(train.category, train.label)
    assert array_equal(enc.transform(train.category), answer)
def test_transform_with_index():
    """A non-default dataframe index must not scramble the output order."""
    df = cudf.DataFrame(
        {"a": [1, 1, 2, 3], "b": [True, False, False, True]},
        index=[9, 4, 5, 3],
    )
    enc = TargetEncoder()
    enc.fit(df.a, y=df.b)
    expected = cp.asarray([0, 1, 0.5, 0.5])
    # Series input and single-column DataFrame input must agree.
    assert array_equal(enc.transform(df.a), expected)
    assert array_equal(enc.transform(df[["a"]]), expected)
def test_get_params():
    """get_params must round-trip the constructor arguments."""
    params = {
        "n_folds": 5,
        "smooth": 1,
        "seed": 49,
        "split_method": "customize",
    }
    reported = TargetEncoder(**params).get_params()
    for key, val in params.items():
        assert reported[key] == val
def test_targetencoder_median():
    """stat='median' encodes each row with a per-category median."""
    train = cudf.DataFrame(
        {
            "category": ["a", "a", "a", "a", "b", "b", "b", "b"],
            "label": [1, 22, 15, 17, 70, 9, 99, 56],
        }
    )
    answer = np.array([17.0, 15.0, 17.0, 15.0, 56.0, 70.0, 56.0, 70.0])

    enc = TargetEncoder(stat="median")
    assert array_equal(enc.fit_transform(train.category, train.label), answer)

    enc = TargetEncoder(stat="median")
    enc.fit(train.category, train.label)
    assert array_equal(enc.transform(train.category), answer)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_module_config.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numba.cuda import is_cuda_array, as_cuda_array
from cuml.internals.safe_imports import cpu_only_import
import cuml
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
###############################################################################
# Parameters #
###############################################################################
# Output-type names accepted by cuml.set_global_output_type / using_output_type.
global_input_configs = ["numpy", "numba", "cupy", "cudf"]
# Input container types exercised by the tests below.
global_input_types = ["numpy", "numba", "cupy", "cudf", "pandas"]
# Expected result container per type name. "numba" is absent because it is
# checked separately via numba.cuda.is_cuda_array.
test_output_types = {
    "numpy": np.ndarray,
    "cupy": cp.ndarray,
    "cudf": cudf.Series,
    "pandas": pd.Series,
}
@pytest.fixture(scope="function", params=global_input_configs)
def global_output_type(request):
    """Yield each output-type name and reset the global state afterwards."""
    yield request.param
    # Always restore the default so later tests are unaffected.
    cuml.set_global_output_type(None)
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize("input_type", global_input_types)
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_default_global_output_type(input_type):
    """By default the output container mirrors the input container."""
    model = cuml.DBSCAN(eps=1.0, min_samples=1)
    model.fit(get_small_dataset(input_type))
    labels = model.labels_
    if input_type == "numba":
        assert is_cuda_array(labels)
    else:
        assert isinstance(labels, test_output_types[input_type])
@pytest.mark.parametrize("input_type", global_input_types)
def test_global_output_type(global_output_type, input_type):
    """set_global_output_type overrides the mirrored-input default."""
    dataset = get_small_dataset(input_type)
    cuml.set_global_output_type(global_output_type)
    model = cuml.DBSCAN(eps=1.0, min_samples=1)
    model.fit(dataset)
    labels = model.labels_
    if global_output_type == "numba":
        assert is_cuda_array(labels)
    else:
        assert isinstance(labels, test_output_types[global_output_type])
@pytest.mark.parametrize("context_type", global_input_configs)
def test_output_type_context_mgr(global_output_type, context_type):
    """using_output_type applies inside the `with` block only."""
    dataset = get_small_dataset("numba")

    # Pick a global type guaranteed to differ from "cupy"/"numpy" collisions.
    test_type = "cupy" if global_output_type != "cupy" else "numpy"
    cuml.set_global_output_type(test_type)

    # Inside the context manager the context type wins.
    with cuml.using_output_type(context_type):
        model = cuml.DBSCAN(eps=1.0, min_samples=1)
        model.fit(dataset)
        labels = model.labels_
        if context_type == "numba":
            assert is_cuda_array(labels)
        else:
            assert isinstance(labels, test_output_types[context_type])

    # Outside the context manager the global type is back in force.
    model = cuml.DBSCAN(eps=1.0, min_samples=1)
    model.fit(dataset)
    assert isinstance(model.labels_, test_output_types[test_type])
###############################################################################
# Utility Functions #
###############################################################################
def get_small_dataset(output_type):
    """Build a tiny 3x3 float dataset in the requested container type."""
    ary = cp.asarray(
        [[1.0, 4.0, 4.0], [2.0, 2.0, 2.0], [5.0, 1.0, 1.0]]
    )
    if output_type == "numba":
        return as_cuda_array(ary)
    if output_type == "cupy":
        return ary
    if output_type == "numpy":
        return cp.asnumpy(ary)
    if output_type == "pandas":
        return cudf.DataFrame(ary).to_pandas()
    # Anything else (i.e. "cudf") gets a cuDF DataFrame.
    return cudf.DataFrame(ary)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_arima.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# How these tests work #
###############################################################################
#
# This test file contains some unit tests and an integration test.
#
# The units tests use the same parameters with cuML and the reference
# implementation to compare strict parity of specific components.
#
# The integration tests compare that, when fitting and forecasting separately,
# our implementation performs better or approximately as good as the reference
# (it mostly serves to test that we don't have any regression)
#
# Note that there are significant differences between our implementation and
# the reference, and perfect parity cannot be expected for integration tests.
from cuml.testing.utils import stress_param
from cuml.internals.input_utils import input_to_host_array
import cuml.tsa.arima as arima
from cuml.internals.safe_imports import gpu_only_import
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from cuml.internals.safe_imports import cpu_only_import_from
import warnings
import os
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
approx_fprime = cpu_only_import_from("scipy.optimize", "approx_fprime")
cudf = gpu_only_import("cudf")
###############################################################################
# Test data #
###############################################################################
class ARIMAData:
    """Metadata describing one reference time-series dataset.

    Attributes mirror the constructor arguments; ``n_train`` is derived
    as ``n_obs - n_test``.
    """

    def __init__(
        self,
        batch_size,
        n_obs,
        n_test,
        dataset,
        tolerance_integration,
        n_exog=0,
        dataset_exog=None,
    ):
        # Number of series in the batch and their lengths
        self.batch_size = batch_size
        self.n_obs = n_obs
        self.n_test = n_test
        # CSV base name (and optional exogenous-variables CSV)
        self.dataset = dataset
        self.dataset_exog = dataset_exog
        self.n_exog = n_exog
        # Allowed relative degradation in the integration test
        self.tolerance_integration = tolerance_integration
        # Derived: number of in-sample observations
        self.n_train = n_obs - n_test
# --- Reference datasets ------------------------------------------------------
# Each ARIMAData below describes one CSV in `data_path`; the comment above
# each instance names the model fitted on it in `test_data`.

# ARIMA(1,0,1) with intercept
test_101c = ARIMAData(
    batch_size=8,
    n_obs=15,
    n_test=2,
    dataset="long_term_arrivals_by_citizenship",
    tolerance_integration=0.01,
)

# ARIMA(0,0,2) with intercept
test_002c = ARIMAData(
    batch_size=7,
    n_obs=20,
    n_test=2,
    dataset="net_migrations_auckland_by_age",
    tolerance_integration=0.05,
)

# ARIMA(0,1,0) with intercept
test_010c = ARIMAData(
    batch_size=4,
    n_obs=17,
    n_test=2,
    dataset="cattle",
    tolerance_integration=0.01,
)

# ARIMA(1,1,0)
test_110 = ARIMAData(
    batch_size=1,
    n_obs=137,
    n_test=5,
    dataset="police_recorded_crime",
    tolerance_integration=0.01,
)

# ARIMA(0,1,1) with intercept
test_011c = ARIMAData(
    batch_size=16,
    n_obs=28,
    n_test=2,
    dataset="deaths_by_region",
    tolerance_integration=0.05,
)

# ARIMA(0,1,1) with intercept (exogenous variables)
test_011c_exog = ARIMAData(
    batch_size=16,
    n_obs=28,
    n_test=2,
    dataset="endog_deaths_by_region_exog",
    tolerance_integration=0.05,
    n_exog=2,
    dataset_exog="exog_deaths_by_region_exog",
)

# ARIMA(1,2,1) with intercept
test_121c = ARIMAData(
    batch_size=2,
    n_obs=137,
    n_test=10,
    dataset="population_estimate",
    tolerance_integration=0.01,
)

# ARIMA(1,1,1) with intercept (missing observations)
test_111c_missing = ARIMAData(
    batch_size=2,
    n_obs=137,
    n_test=10,
    dataset="population_estimate_missing",
    tolerance_integration=0.01,
)

# ARIMA(1,0,1)(1,1,1)_4
test_101_111_4 = ARIMAData(
    batch_size=3,
    n_obs=101,
    n_test=10,
    dataset="alcohol",
    tolerance_integration=0.01,
)

# ARIMA(5,1,0)
test_510 = ARIMAData(
    batch_size=3,
    n_obs=101,
    n_test=10,
    dataset="alcohol",
    tolerance_integration=0.02,
)

# ARIMA(1,1,1)(2,0,0)_4 with intercept
test_111_200_4c = ARIMAData(
    batch_size=14,
    n_obs=123,
    n_test=10,
    dataset="hourly_earnings_by_industry",
    tolerance_integration=0.01,
)

# ARIMA(1,1,1)(2,0,0)_4 with intercept (missing observations)
test_111_200_4c_missing = ARIMAData(
    batch_size=14,
    n_obs=123,
    n_test=10,
    dataset="hourly_earnings_by_industry_missing",
    tolerance_integration=0.01,
)

# ARIMA(1,1,1)(2,0,0)_4 with intercept
# (missing observations and exogenous variables)
test_111_200_4c_missing_exog = ARIMAData(
    batch_size=14,
    n_obs=123,
    n_test=10,
    dataset="endog_hourly_earnings_by_industry_missing_exog",
    tolerance_integration=0.01,
    n_exog=2,
    dataset_exog="exog_hourly_earnings_by_industry_missing_exog",
)

# ARIMA(1,1,2)(0,1,2)_4
test_112_012_4 = ARIMAData(
    batch_size=2,
    n_obs=179,
    n_test=10,
    dataset="passenger_movements",
    tolerance_integration=0.001,
)

# ARIMA(1,1,1)(1,1,1)_12
test_111_111_12 = ARIMAData(
    batch_size=12,
    n_obs=279,
    n_test=20,
    dataset="guest_nights_by_region",
    tolerance_integration=0.001,
)

# ARIMA(1,1,1)(1,1,1)_12 (missing observations)
test_111_111_12_missing = ARIMAData(
    batch_size=12,
    n_obs=279,
    n_test=20,
    dataset="guest_nights_by_region_missing",
    tolerance_integration=0.03,
)

# ARIMA(1,1,1)(1,1,1)_12 (missing obs, exogenous variables, intercept)
test_111_111_12c_missing_exog = ARIMAData(
    batch_size=12,
    n_obs=279,
    n_test=20,
    dataset="endog_guest_nights_by_region_missing_exog",
    tolerance_integration=0.001,
    n_exog=2,
    dataset_exog="exog_guest_nights_by_region_missing_exog",
)

# Dictionary matching a test case to a tuple of model parameters
# (a test case could be used with different models)
# (p, d, q, P, D, Q, s, k) -> ARIMAData
test_data = [
    # ((1, 0, 1, 0, 0, 0, 0, 1), test_101c),
    ((0, 0, 2, 0, 0, 0, 0, 1), test_002c),
    ((0, 1, 0, 0, 0, 0, 0, 1), test_010c),
    ((1, 1, 0, 0, 0, 0, 0, 0), test_110),
    ((0, 1, 1, 0, 0, 0, 0, 1), test_011c),
    ((0, 1, 1, 0, 0, 0, 0, 1), test_011c_exog),
    ((1, 2, 1, 0, 0, 0, 0, 1), test_121c),
    ((1, 1, 1, 0, 0, 0, 0, 1), test_111c_missing),
    ((1, 0, 1, 1, 1, 1, 4, 0), test_101_111_4),
    ((5, 1, 0, 0, 0, 0, 0, 0), test_510),
    ((1, 1, 1, 2, 0, 0, 4, 1), test_111_200_4c),
    ((1, 1, 1, 2, 0, 0, 4, 1), test_111_200_4c_missing),
    ((1, 1, 1, 2, 0, 0, 4, 1), test_111_200_4c_missing_exog),
    ((1, 1, 2, 0, 1, 2, 4, 0), test_112_012_4),
    stress_param((1, 1, 1, 1, 1, 1, 12, 0), test_111_111_12),
    stress_param((1, 1, 1, 1, 1, 1, 12, 0), test_111_111_12_missing),
    stress_param((1, 0, 1, 1, 1, 1, 12, 1), test_111_111_12c_missing_exog),
]

# Dictionary for lazy-loading of datasets
# (name, dtype) -> (pandas dataframe, cuDF dataframe)
lazy_data = {}

# Dictionary for lazy-evaluation of reference fits
# (p, d, q, P, D, Q, s, k, name, dtype) -> SARIMAXResults
lazy_ref_fit = {}
def extract_order(tup):
    """Split a flat (p, d, q, P, D, Q, s, k) tuple into the non-seasonal
    order, the seasonal order and the intercept flag."""
    order = tuple(tup[:3])
    seasonal_order = tuple(tup[3:7])
    intercept = tup[7]
    return order, seasonal_order, intercept
# Directory holding the reference CSV time-series datasets, resolved
# relative to this test file.
data_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "ts_datasets"
)
def get_dataset(data, dtype):
    """Load a dataset with a given dtype or return a previously loaded dataset

    Returns the memoized 8-tuple
    ``(y_train, y_train_cudf, y_test, y_test_cudf,
       exog_past, exog_past_cudf, exog_fut, exog_fut_cudf)``
    where the four exog entries are None when the dataset has no exogenous
    variables. Results are cached in `lazy_data` keyed by (name, dtype).
    """
    key = (data.dataset, np.dtype(dtype).name)
    if key not in lazy_data:
        # Column 0 of the CSV is an index column -> skip it
        y = pd.read_csv(
            os.path.join(data_path, "{}.csv".format(data.dataset)),
            usecols=range(1, data.batch_size + 1),
            dtype=dtype,
        )
        # Chronological split: the last n_test rows are the hold-out
        y_train, y_test = train_test_split(
            y, test_size=data.n_test, shuffle=False
        )
        # fillna normalizes pandas missing values to NaN for cuDF
        y_train_cudf = cudf.from_pandas(y_train).fillna(np.nan)
        y_test_cudf = cudf.from_pandas(y_test)
        if data.dataset_exog is not None:
            # n_exog exogenous columns per batch member, stored side by side
            exog = pd.read_csv(
                os.path.join(data_path, "{}.csv".format(data.dataset_exog)),
                usecols=range(1, data.n_exog * data.batch_size + 1),
                dtype=dtype,
            )
            exog_past, exog_fut = train_test_split(
                exog, test_size=data.n_test, shuffle=False
            )
            exog_past_cudf = cudf.from_pandas(exog_past).fillna(np.nan)
            exog_fut_cudf = cudf.from_pandas(exog_fut)
        else:
            exog_past, exog_past_cudf, exog_fut, exog_fut_cudf = [None] * 4
        # NOTE: the tuple order below is relied upon by every caller
        lazy_data[key] = (
            y_train,
            y_train_cudf,
            y_test,
            y_test_cudf,
            exog_past,
            exog_past_cudf,
            exog_fut,
            exog_fut_cudf,
        )
    return lazy_data[key]
def get_ref_fit(data, order, seasonal_order, intercept, dtype):
    """Compute a reference fit of a dataset with the given parameters and dtype
    or return a previously computed fit

    Returns a list of statsmodels SARIMAXResults, one per batch member,
    memoized in `lazy_ref_fit`.
    """
    y_train, _, _, _, exog_past, *_ = get_dataset(data, dtype)
    key = (
        order
        + seasonal_order
        + (intercept, data.dataset, np.dtype(dtype).name)
    )
    batch_size = y_train.shape[1]
    if key not in lazy_ref_fit:
        # One independent SARIMAX model per series; each series takes its
        # own slice of n_exog exogenous columns.
        ref_model = [
            sm.tsa.SARIMAX(
                endog=y_train[y_train.columns[i]],
                exog=exog_past[
                    exog_past.columns[data.n_exog * i : data.n_exog * (i + 1)]
                ]
                if exog_past is not None
                else None,
                order=order,
                seasonal_order=seasonal_order,
                trend="c" if intercept else "n",
            )
            for i in range(batch_size)
        ]
        # statsmodels emits convergence warnings on some of these fits
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            lazy_ref_fit[key] = [model.fit(disp=0) for model in ref_model]
    return lazy_ref_fit[key]
###############################################################################
# Utility functions #
###############################################################################
def mase(y_train, y_test, y_fc, s):
    """Mean absolute scaled error of forecast ``y_fc``, batch-averaged.

    The scale of each series is the mean absolute seasonal (lag-``s``)
    difference of its training data; NaNs in the training data are ignored.
    """
    train = input_to_host_array(y_train).array
    test = input_to_host_array(y_test).array
    fc = input_to_host_array(y_fc).array

    scale = np.nanmean(np.abs(train[s:] - train[:-s]), axis=0)
    error = np.abs(fc - test).mean(axis=0)
    return np.mean(error / scale)
def fill_interpolation(df_in):
    """Fill missing values in every column of ``df_in``.

    Interior NaN runs are replaced by linear interpolation between the
    nearest valid neighbors; leading/trailing NaNs are filled with the
    nearest valid value. A column with no valid value at all is left
    unchanged (the previous hand-rolled loop raised an IndexError in
    that case).

    Parameters
    ----------
    df_in : pandas.DataFrame
        Numeric dataframe, possibly containing NaNs.

    Returns
    -------
    pandas.DataFrame
        New dataframe with the same columns (fresh RangeIndex) and the
        NaNs filled.
    """
    # Copy so the caller's dataframe is never mutated.
    np_arr = df_in.to_numpy(copy=True)
    n = np_arr.shape[0]
    idx = np.arange(n)
    for ib in range(np_arr.shape[1]):
        valid = ~np.isnan(np_arr[:, ib])
        if not valid.any() or valid.all():
            # Nothing to fill, or nothing to fill from.
            continue
        # np.interp linearly interpolates between surrounding valid samples
        # and clamps to the first/last valid value at the edges, matching
        # the original element-wise loop.
        np_arr[:, ib] = np.interp(idx, idx[valid], np_arr[valid, ib])
    return pd.DataFrame(np_arr, columns=df_in.columns)
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
def test_integration(key, data, dtype):
    """Full integration test: estimate, fit, forecast.

    cuML fits its own parameters (unlike the unit tests below, which copy
    the statsmodels parameters) and its forecast MASE must be no worse
    than the statsmodels MASE times (1 + tolerance).
    """
    order, seasonal_order, intercept = extract_order(key)
    # Seasonal period used for the MASE scale (1 for non-seasonal models)
    s = max(1, seasonal_order[3])
    (
        y_train,
        y_train_cudf,
        y_test,
        _,
        _,
        exog_past_cudf,
        exog_fut,
        exog_fut_cudf,
    ) = get_dataset(data, dtype)
    # Get fit reference model
    ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)
    # Create and fit cuML model
    cuml_model = arima.ARIMA(
        endog=y_train_cudf,
        exog=exog_past_cudf,
        order=order,
        seasonal_order=seasonal_order,
        fit_intercept=intercept,
        output_type="numpy",
    )
    cuml_model.fit()
    # Predict
    y_fc_cuml = cuml_model.forecast(data.n_test, exog=exog_fut)
    y_fc_ref = np.zeros((data.n_test, data.batch_size))
    for i in range(data.batch_size):
        # Each reference model forecasts its own series, with its own
        # slice of n_exog exogenous columns when applicable.
        y_fc_ref[:, i] = (
            ref_fits[i]
            .get_prediction(
                data.n_train,
                data.n_obs - 1,
                exog=None
                if data.n_exog == 0
                else exog_fut[
                    exog_fut.columns[data.n_exog * i : data.n_exog * (i + 1)]
                ],
            )
            .predicted_mean
        )
    # Compare results: MASE must be better or within the tolerance margin
    mase_ref = mase(y_train, y_test, y_fc_ref, s)
    mase_cuml = mase(y_train, y_test, y_fc_cuml, s)
    assert mase_cuml < mase_ref * (1.0 + data.tolerance_integration)
def _statsmodels_to_cuml(
    ref_fits, cuml_model, order, seasonal_order, intercept, dtype
):
    """Copy fitted parameters from statsmodels results into a cuML model.

    .. note:: be cautious with the intercept, it is not always equivalent
        in statsmodels and cuML models (it depends on the order).
    """
    nb = cuml_model.batch_size
    N = cuml_model.complexity
    # Pack the first N statsmodels parameters of each batch member
    # back to back into one flat vector.
    x = np.zeros(nb * N, dtype=np.float64)
    for ib, offset in enumerate(range(0, nb * N, N)):
        x[offset : offset + N] = ref_fits[ib].params[:N]
    cuml_model.unpack(x)
def _predict_common(
    key,
    data,
    dtype,
    start,
    end,
    num_steps=None,
    level=None,
    simple_differencing=True,
):
    """Utility function used by test_predict and test_forecast to avoid
    code duplication.

    Parameters
    ----------
    key : tuple
        Flat (p, d, q, P, D, Q, s, k) model specification.
    data : ARIMAData
        Dataset metadata.
    dtype : numpy dtype
        Data type to load the dataset with.
    start, end : int
        Prediction window (end exclusive) used for `predict`.
    num_steps : int or None
        When given, use out-of-sample `forecast(num_steps)` instead of
        `predict(start, end)`.
    level : float or None
        Confidence level; when given, forecast intervals are compared too.
    simple_differencing : bool
        Passed through to the cuML ARIMA constructor.
    """
    order, seasonal_order, intercept = extract_order(key)
    _, y_train_cudf, _, _, _, exog_cudf, exog_fut, exog_fut_cudf = get_dataset(
        data, dtype
    )
    # Get fit reference model
    ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)
    # Create cuML model (not fitted: it receives statsmodels' parameters)
    cuml_model = arima.ARIMA(
        endog=y_train_cudf,
        exog=exog_cudf,
        order=order,
        seasonal_order=seasonal_order,
        fit_intercept=intercept,
        output_type="numpy",
        simple_differencing=simple_differencing,
    )
    # Feed the parameters to the cuML model
    _statsmodels_to_cuml(
        ref_fits, cuml_model, order, seasonal_order, intercept, dtype
    )
    # Predict or forecast
    # Reference (statsmodels); exog only needed when the window reaches
    # past the training data
    ref_preds = np.zeros((end - start, data.batch_size))
    for i in range(data.batch_size):
        ref_preds[:, i] = (
            ref_fits[i]
            .get_prediction(
                start,
                end - 1,
                exog=(
                    None
                    if data.n_exog == 0 or end <= data.n_train
                    else exog_fut[
                        exog_fut.columns[
                            data.n_exog * i : data.n_exog * (i + 1)
                        ]
                    ]
                ),
            )
            .predicted_mean
        )
    if level is not None:
        # Reference confidence intervals from a separate forecast call
        ref_lower = np.zeros((end - start, data.batch_size))
        ref_upper = np.zeros((end - start, data.batch_size))
        for i in range(data.batch_size):
            temp_pred = ref_fits[i].get_forecast(
                num_steps,
                exog=(
                    None
                    if data.n_exog == 0
                    else exog_fut[
                        exog_fut.columns[
                            data.n_exog * i : data.n_exog * (i + 1)
                        ]
                    ]
                ),
            )
            ci = temp_pred.summary_frame(alpha=1 - level)
            ref_lower[:, i] = ci["mean_ci_lower"].to_numpy()
            ref_upper[:, i] = ci["mean_ci_upper"].to_numpy()
    # cuML: three call shapes depending on num_steps / level
    if num_steps is None:
        cuml_pred = cuml_model.predict(
            start,
            end,
            exog=None
            if data.n_exog == 0 or end <= data.n_train
            else exog_fut_cudf,
        )
    elif level is not None:
        cuml_pred, cuml_lower, cuml_upper = cuml_model.forecast(
            num_steps, level, exog=exog_fut_cudf
        )
    else:
        cuml_pred = cuml_model.forecast(num_steps, exog=exog_fut_cudf)
    # Compare results
    np.testing.assert_allclose(cuml_pred, ref_preds, rtol=0.002, atol=0.01)
    if level is not None:
        np.testing.assert_allclose(
            cuml_lower, ref_lower, rtol=0.005, atol=0.01
        )
        np.testing.assert_allclose(
            cuml_upper, ref_upper, rtol=0.005, atol=0.01
        )
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("simple_differencing", [True, False])
def test_predict_in(key, data, dtype, simple_differencing):
    """In-sample prediction must match statsmodels when both models use
    the same parameter values."""
    start, end = data.n_train // 2, data.n_obs
    _predict_common(
        key,
        data,
        dtype,
        start,
        end,
        simple_differencing=simple_differencing,
    )
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("simple_differencing", [True, False])
def test_predict_inout(key, data, dtype, simple_differencing):
    """In- and out-of-sample prediction must match statsmodels when both
    models use the same parameter values."""
    start, end = data.n_train // 2, data.n_train
    _predict_common(
        key,
        data,
        dtype,
        start,
        end,
        simple_differencing=simple_differencing,
    )
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("simple_differencing", [True, False])
def test_forecast(key, data, dtype, simple_differencing):
    """Out-of-sample forecasting must match statsmodels when both models
    use the same parameter values."""
    _predict_common(
        key,
        data,
        dtype,
        data.n_train,
        data.n_obs,
        num_steps=data.n_test,
        simple_differencing=simple_differencing,
    )
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("level", [0.5, 0.95])
def test_intervals(key, data, dtype, level):
    """Forecast confidence intervals must match statsmodels when both
    models use the same parameter values."""
    _predict_common(
        key, data, dtype, data.n_train, data.n_obs,
        num_steps=data.n_test, level=level,
    )
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("simple_differencing", [True, False])
def test_loglikelihood(key, data, dtype, simple_differencing):
    """Log-likelihood must match statsmodels when both models use the
    same parameter values."""
    order, seasonal_order, intercept = extract_order(key)
    _, y_train_cudf, _, _, _, exog_past_cudf, *_ = get_dataset(data, dtype)

    # Reference fit supplies the parameter values.
    ref_fits = get_ref_fit(data, order, seasonal_order, intercept, dtype)

    cuml_model = arima.ARIMA(
        endog=y_train_cudf,
        exog=exog_past_cudf,
        order=order,
        seasonal_order=seasonal_order,
        fit_intercept=intercept,
        simple_differencing=simple_differencing,
    )
    _statsmodels_to_cuml(
        ref_fits, cuml_model, order, seasonal_order, intercept, dtype
    )

    # Compare the batched log-likelihood against the per-series references.
    ref_llf = np.array([fit.llf for fit in ref_fits])
    np.testing.assert_allclose(cuml_model.llf, ref_llf, rtol=0.01, atol=0.01)
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
def test_gradient(key, data, dtype):
    """
    Test batched gradient implementation against scipy non-batched
    gradient.
    .. note:: it doesn't test that the loglikelihood is correct!
    """
    order, seasonal_order, intercept = extract_order(key)
    # Finite-difference step shared by cuML and the scipy reference.
    # (The previous unpacking of p/q/P/Q from the orders was unused and
    # has been removed.)
    h = 1e-8
    _, y_train_cudf, _, _, _, exog_past_cudf, *_ = get_dataset(data, dtype)
    # Create cuML model
    cuml_model = arima.ARIMA(
        endog=y_train_cudf,
        exog=exog_past_cudf,
        order=order,
        seasonal_order=seasonal_order,
        fit_intercept=intercept,
    )
    # Number of parameters per batch member
    N = cuml_model.complexity
    # Get an estimate of the parameters and pack them into a vector
    cuml_model._estimate_x0()
    x = cuml_model.pack()
    # Compute the batched loglikelihood gradient
    batched_grad = cuml_model._loglike_grad(x, h)
    # Iterate over the batch to compute a reference gradient
    scipy_grad = np.zeros(N * data.batch_size)
    for i in range(data.batch_size):
        # Create a model with only the current series (and its slice of
        # the exogenous columns, when present)
        model_i = arima.ARIMA(
            endog=y_train_cudf[y_train_cudf.columns[i]],
            exog=None
            if exog_past_cudf is None
            else exog_past_cudf[
                exog_past_cudf.columns[data.n_exog * i : data.n_exog * (i + 1)]
            ],
            order=order,
            seasonal_order=seasonal_order,
            fit_intercept=intercept,
        )
        # Reference gradient for this series' slice of the parameter
        # vector; pass the bound method directly instead of a throwaway
        # per-iteration wrapper function.
        scipy_grad[N * i : N * (i + 1)] = approx_fprime(
            x[N * i : N * (i + 1)], model_i._loglike, h
        )
    # Compare
    np.testing.assert_allclose(batched_grad, scipy_grad, rtol=0.001, atol=0.01)
@pytest.mark.parametrize("key, data", test_data)
@pytest.mark.parametrize("dtype", [np.float64])
def test_start_params(key, data, dtype):
    """Test starting parameters against statsmodels"""
    order, seasonal_order, intercept = extract_order(key)
    y_train, y_train_cudf, _, _, exog_past, exog_past_cudf, *_ = get_dataset(
        data, dtype
    )
    # fillna for reference to match cuML initial estimation strategy
    y_train_nona = fill_interpolation(y_train)
    # Convert to numpy to avoid misaligned indices
    if exog_past is not None:
        exog_past_np = exog_past.to_numpy()
    # NOTE(review): exog_past_np is only bound when exog_past is not None,
    # but the comprehension below evaluates it whenever data.n_exog is
    # truthy. A dataset with n_exog > 0 and exog_past None would raise
    # NameError -- presumably the fixtures keep these consistent; confirm.
    # Create models
    cuml_model = arima.ARIMA(
        endog=y_train_cudf,
        exog=exog_past_cudf,
        order=order,
        seasonal_order=seasonal_order,
        fit_intercept=intercept,
    )
    # One reference SARIMAX per batch member, each fed its own slice of
    # the exogenous columns.
    ref_model = [
        sm.tsa.SARIMAX(
            endog=y_train_nona[y_train_nona.columns[i]],
            exog=exog_past_np[:, i * data.n_exog : (i + 1) * data.n_exog]
            if data.n_exog
            else None,
            order=order,
            seasonal_order=seasonal_order,
            trend="c" if intercept else "n",
        )
        for i in range(data.batch_size)
    ]
    # Estimate reference starting parameters
    N = cuml_model.complexity
    nb = data.batch_size
    x_ref = np.zeros(N * nb, dtype=dtype)
    for ib in range(nb):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            x_ref[ib * N : (ib + 1) * N] = ref_model[ib].start_params[:N]
    # Estimate cuML starting parameters
    cuml_model._estimate_x0()
    x_cuml = cuml_model.pack()
    # Compare results
    np.testing.assert_allclose(x_cuml, x_ref, rtol=0.001, atol=0.01)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_kmeans.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import adjusted_rand_score
from sklearn import cluster
from cuml.testing.utils import (
get_pattern,
unit_param,
quality_param,
stress_param,
array_equal,
)
from cuml.datasets import make_blobs
import pytest
import random
import cuml
import cuml.internals.logger as logger
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
# Synthetic pattern datasets exercised by the sklearn-comparison tests below.
dataset_names = ["blobs", "noisy_circles", "noisy_moons", "varied", "aniso"]
@pytest.fixture
def get_data_consistency_test():
    """Fixed blob dataset (1000x50, 8 clusters) for consistency tests."""
    features, labels = make_blobs(
        1000,
        50,
        8,
        cluster_std=1.0,
        shuffle=False,
        random_state=0,
    )
    return features, labels
@pytest.fixture
def random_state():
    """Fresh random seed per test, logged for reproducibility."""
    # random.randint requires integer bounds: the previous float literal
    # 1e6 relied on randrange's float acceptance, which was deprecated in
    # Python 3.10 and removed in 3.12.
    random_state = random.randint(0, 10**6)
    with logger.set_level(logger.level_debug):
        logger.debug("Random seed: {}".format(random_state))
    return random_state
@pytest.mark.xfail
def test_n_init_cluster_consistency(random_state, get_data_consistency_test):
    """Refitting KMeans with the same seed should give identical centers.

    Marked xfail: center consistency across refits is not currently
    guaranteed.
    """
    nclusters = 8
    # Request the dataset via the fixture system: calling a
    # @pytest.fixture-decorated function directly raises an error in
    # modern pytest ("Fixtures are not meant to be called directly").
    X, y = get_data_consistency_test

    cuml_kmeans = cuml.KMeans(
        init="k-means++",
        n_clusters=nclusters,
        n_init=10,
        random_state=random_state,
        output_type="numpy",
    )
    cuml_kmeans.fit(X)
    initial_clusters = cuml_kmeans.cluster_centers_

    cuml_kmeans = cuml.KMeans(
        init="k-means++",
        n_clusters=nclusters,
        n_init=10,
        random_state=random_state,
        output_type="numpy",
    )
    cuml_kmeans.fit(X)

    assert array_equal(initial_clusters, cuml_kmeans.cluster_centers_)
@pytest.mark.parametrize("nrows", [1000, 10000])
@pytest.mark.parametrize("ncols", [25])
@pytest.mark.parametrize("nclusters", [2, 5])
def test_traditional_kmeans_plus_plus_init(
    nrows, ncols, nclusters, random_state
):
    """cuML k-means++ score should match scikit-learn on blob data."""
    # Using fairly high variance between points in clusters
    X, y = make_blobs(
        int(nrows),
        ncols,
        nclusters,
        cluster_std=1.0,
        shuffle=False,
        random_state=0,
    )

    model = cuml.KMeans(
        init="k-means++",
        n_clusters=nclusters,
        n_init=10,
        random_state=random_state,
        output_type="numpy",
    )
    model.fit(X)
    cu_score = model.score(X)

    X_host = cp.asnumpy(X)
    reference = cluster.KMeans(
        random_state=random_state, n_clusters=nclusters
    )
    reference.fit(X_host)
    sk_score = reference.score(X_host)

    cp.testing.assert_allclose(cu_score, sk_score, atol=0.1, rtol=1e-4)
@pytest.mark.parametrize("nrows", [100, 500])
@pytest.mark.parametrize("ncols", [25])
@pytest.mark.parametrize("nclusters", [5, 10])
@pytest.mark.parametrize("max_weight", [10])
def test_weighted_kmeans(nrows, ncols, nclusters, max_weight, random_state):
    """Compare weighted cuML k-means against weighted scikit-learn.

    Both models are fit with identical integer sample weights; the inertia
    scores must agree within a tolerance tied to cluster_std.
    """
    # Using fairly high variance between points in clusters
    cluster_std = 1.0
    np.random.seed(random_state)
    # set weight per sample to be from 1 to max_weight
    wt = np.random.randint(1, high=max_weight, size=nrows)
    X, y = make_blobs(
        nrows,
        ncols,
        nclusters,
        cluster_std=cluster_std,
        shuffle=False,
        random_state=0,
    )
    cuml_kmeans = cuml.KMeans(
        init="k-means++",
        n_clusters=nclusters,
        n_init=10,
        random_state=random_state,
        output_type="numpy",
    )

    cuml_kmeans.fit(X, sample_weight=wt)
    cu_score = cuml_kmeans.score(X)

    sk_kmeans = cluster.KMeans(random_state=random_state, n_clusters=nclusters)
    sk_kmeans.fit(cp.asnumpy(X), sample_weight=wt)
    sk_score = sk_kmeans.score(cp.asnumpy(X))

    # Loose bound: weighted inertias of the two fits must be close.
    assert abs(cu_score - sk_score) <= cluster_std * 1.5
@pytest.mark.parametrize("nrows", [1000, 10000])
@pytest.mark.parametrize("ncols", [25])
@pytest.mark.parametrize("nclusters", [2, 5])
@pytest.mark.parametrize("cluster_std", [1.0, 0.1, 0.01])
def test_kmeans_clusters_blobs(
    nrows, ncols, nclusters, random_state, cluster_std
):
    """k-means|| should almost perfectly recover well-separated blobs."""
    X, y = make_blobs(
        int(nrows),
        ncols,
        nclusters,
        cluster_std=cluster_std,
        shuffle=False,
        random_state=0,
    )

    labels = cuml.KMeans(
        init="k-means||",
        n_clusters=nclusters,
        random_state=random_state,
        output_type="numpy",
    ).fit_predict(X)

    # ARI is permutation invariant, so cluster renumbering does not matter.
    assert adjusted_rand_score(cp.asnumpy(labels), cp.asnumpy(y)) >= 0.99
@pytest.mark.parametrize("name", dataset_names)
@pytest.mark.parametrize("nrows", [unit_param(1000), quality_param(5000)])
def test_kmeans_sklearn_comparison(name, nrows, random_state):
    """Compare cuML k-means++ ARI against scikit-learn on pattern datasets.

    Both libraries cluster the same standardized data with the same seed;
    the adjusted Rand scores must agree within 1e-2.
    """
    # Default hyper-parameters shared by the pattern datasets; per-pattern
    # overrides from get_pattern are applied below.
    default_base = {
        "quantile": 0.3,
        "eps": 0.3,
        "damping": 0.9,
        "preference": -200,
        "n_neighbors": 10,
        "n_clusters": 3,
    }
    pat = get_pattern(name, nrows)
    params = default_base.copy()
    params.update(pat[1])
    cuml_kmeans = cuml.KMeans(
        n_clusters=params["n_clusters"],
        output_type="numpy",
        init="k-means++",
        random_state=random_state,
        n_init=10,
    )

    X, y = pat[0]
    # Standardize so both implementations see identically scaled features.
    X = StandardScaler().fit_transform(X)
    cu_y_pred = cuml_kmeans.fit_predict(X)
    cu_score = adjusted_rand_score(cu_y_pred, y)
    kmeans = cluster.KMeans(
        random_state=random_state, n_clusters=params["n_clusters"]
    )
    sk_y_pred = kmeans.fit_predict(X)
    sk_score = adjusted_rand_score(sk_y_pred, y)

    assert sk_score - 1e-2 <= cu_score <= sk_score + 1e-2
@pytest.mark.parametrize("name", dataset_names)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
def test_kmeans_sklearn_comparison_default(name, nrows, random_state):
    """Same comparison as test_kmeans_sklearn_comparison but with cuML's
    default init (rather than explicit k-means++)."""
    # Default hyper-parameters shared by the pattern datasets; per-pattern
    # overrides from get_pattern are applied below.
    default_base = {
        "quantile": 0.3,
        "eps": 0.3,
        "damping": 0.9,
        "preference": -200,
        "n_neighbors": 10,
        "n_clusters": 3,
    }
    pat = get_pattern(name, nrows)
    params = default_base.copy()
    params.update(pat[1])
    cuml_kmeans = cuml.KMeans(
        n_clusters=params["n_clusters"],
        random_state=random_state,
        n_init=10,
        output_type="numpy",
    )

    X, y = pat[0]
    # Standardize so both implementations see identically scaled features.
    X = StandardScaler().fit_transform(X)
    cu_y_pred = cuml_kmeans.fit_predict(X)
    cu_score = adjusted_rand_score(cu_y_pred, y)
    kmeans = cluster.KMeans(
        random_state=random_state, n_clusters=params["n_clusters"]
    )
    sk_y_pred = kmeans.fit_predict(X)
    sk_score = adjusted_rand_score(sk_y_pred, y)

    assert sk_score - 1e-2 <= cu_score <= sk_score + 1e-2
@pytest.mark.parametrize(
    "max_iter, oversampling_factor, max_samples_per_batch, init",
    [
        (100, 0.5, 1 << 10, "preset"),
        (1000, 1.0, 1 << 15, "preset"),
        (500, 1.5, 1 << 5, "k-means||"),
        (1000, 1.0, 1 << 10, "random"),
        # Redundant case to better exercise 'k-means||'
        (1000, 1.0, 1 << 15, "k-means||"),
    ],
)
@pytest.mark.parametrize(
    "n_clusters", [unit_param(10), unit_param(100), stress_param(1000)]
)
def test_all_kmeans_params(
    n_clusters,
    max_iter,
    init,
    oversampling_factor,
    max_samples_per_batch,
    random_state,
):
    """Smoke-test fit_predict across the KMeans parameter surface.

    No result check: this only verifies that every parameter combination
    runs without raising.
    """
    np.random.seed(0)
    X = np.random.rand(1000, 10)
    # "preset" exercises user-supplied initial centers.
    if init == "preset":
        init = np.random.rand(n_clusters, 10)
    cuml_kmeans = cuml.KMeans(
        n_clusters=n_clusters,
        max_iter=max_iter,
        init=init,
        random_state=random_state,
        oversampling_factor=oversampling_factor,
        max_samples_per_batch=max_samples_per_batch,
        output_type="cupy",
    )
    cuml_kmeans.fit_predict(X)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
    "nclusters", [unit_param(5), quality_param(10), stress_param(50)]
)
def test_score(nrows, ncols, nclusters, random_state):
    """score() must equal minus the sum of squared distances from each
    sample to its assigned center (negative inertia)."""
    X, y = make_blobs(
        int(nrows),
        ncols,
        nclusters,
        cluster_std=1.0,
        shuffle=False,
        random_state=0,
    )

    model = cuml.KMeans(
        init="k-means||",
        n_clusters=nclusters,
        random_state=random_state,
        output_type="numpy",
    )
    model.fit(X)
    actual_score = model.score(X)

    labels = model.predict(X)
    centers = model.cluster_centers_

    # Recompute the (negative) inertia by hand, one sample at a time.
    # Loop variable renamed from the original "y" to avoid shadowing the
    # blob labels.
    expected_score = 0.0
    for idx, label in enumerate(labels):
        center = cp.array(centers[label, :], dtype=cp.float32)
        expected_score += cp.sum(cp.square(X[idx, :] - center))
    expected_score = -expected_score

    cp.testing.assert_allclose(
        actual_score, expected_score, atol=0.1, rtol=1e-4
    )
@pytest.mark.parametrize("nrows", [100])
@pytest.mark.parametrize("ncols", [25])
@pytest.mark.parametrize("nclusters", [5])
@pytest.mark.parametrize("max_weight", [10])
def test_fit_transform_weighted_kmeans(
    nrows, ncols, nclusters, max_weight, random_state
):
    """fit_transform with sample weights: scores must be close to
    scikit-learn and the transformed shapes must match."""
    # Using fairly high variance between points in clusters
    cluster_std = 1.0
    np.random.seed(random_state)
    # set weight per sample to be from 1 to max_weight
    wt = np.random.randint(1, high=max_weight, size=nrows)
    X, y = make_blobs(
        nrows,
        ncols,
        nclusters,
        cluster_std=cluster_std,
        shuffle=False,
        random_state=0,
    )
    cuml_kmeans = cuml.KMeans(
        init="k-means++",
        n_clusters=nclusters,
        n_init=10,
        random_state=random_state,
        output_type="numpy",
    )

    cuml_transf = cuml_kmeans.fit_transform(X, sample_weight=wt)
    cu_score = cuml_kmeans.score(X)

    sk_kmeans = cluster.KMeans(random_state=random_state, n_clusters=nclusters)
    sk_transf = sk_kmeans.fit_transform(cp.asnumpy(X), sample_weight=wt)
    sk_score = sk_kmeans.score(cp.asnumpy(X))

    # Loose bound on the weighted inertia, plus a shape check on the
    # (n_samples, n_clusters) distance matrix returned by fit_transform.
    assert abs(cu_score - sk_score) <= cluster_std * 1.5
    assert sk_transf.shape == cuml_transf.shape
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_array.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import operator
import pickle
import pytest
from copy import deepcopy
from cuml.internals.array import (
CumlArray,
_order_to_strides,
array_to_memory_order,
)
from cuml import global_settings
from cuml.internals.mem_type import MemoryType
from cuml.internals.memory_utils import (
_get_size_from_shape,
determine_array_memtype,
using_memory_type,
)
# Temporarily disabled due to CUDA 11.0 issue
# https://github.com/rapidsai/cuml/issues/4332
# from rmm import DeviceBuffer
from cuml.internals.safe_imports import (
cpu_only_import,
cpu_only_import_from,
gpu_only_import,
gpu_only_import_from,
)
from cuml.testing.strategies import (
UNSUPPORTED_CUDF_DTYPES,
create_cuml_array_input,
cuml_array_dtypes,
cuml_array_input_types,
cuml_array_inputs,
cuml_array_orders,
cuml_array_output_types,
cuml_array_shapes,
cuml_array_mem_types,
)
from cuml.testing.utils import (
normalized_shape,
series_squeezed_shape,
squeezed_shape,
to_nparray,
)
from hypothesis import assume, given, settings
from hypothesis import strategies as st
cp = gpu_only_import("cupy")
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cuda = gpu_only_import_from("numba", "cuda")
CudfDataFrame = gpu_only_import_from("cudf", "DataFrame")
CudfSeries = gpu_only_import_from("cudf", "Series")
PandasSeries = cpu_only_import_from("pandas", "Series")
PandasDataFrame = cpu_only_import_from("pandas", "DataFrame")
cp_array = gpu_only_import_from("cupy", "ndarray")
np_array = gpu_only_import_from("numpy", "ndarray")
numba_array = gpu_only_import_from(
"numba.cuda.cudadrv.devicearray", "DeviceNDArray"
)
# Input container kinds exercised by the array tests (None = raw buffer).
test_input_types = ["numpy", "numba", "cupy", "series", None]

# Every output name accepted by CumlArray.to_output.
test_output_types = (
    "cupy",
    "numpy",
    "cudf",
    "pandas",
    "array",
    "numba",
    "dataframe",
    "series",
    "df_obj",
)

# Expected concrete container class(es) for each to_output name.
_OUTPUT_TYPES_MAPPING = {
    "cupy": cp.ndarray,
    "numpy": np.ndarray,
    "cudf": (CudfDataFrame, CudfSeries),
    "pandas": (PandasDataFrame, PandasSeries),
    "dataframe": (CudfDataFrame, PandasDataFrame),
    "series": (CudfSeries, PandasSeries),
}
def _multidimensional(shape):
    """True when the squeezed, normalized shape has more than one axis."""
    squeezed = squeezed_shape(normalized_shape(shape))
    return len(squeezed) > 1
def _get_owner(curr):
    """Return the memory owner of a CumlArray or cupy ndarray, else None."""
    if isinstance(curr, CumlArray):
        return curr._owner
    if isinstance(curr, cp.ndarray):
        return curr.data.mem._owner
    return None
def _assert_equal(array_like, cuml_array):
    """Check whether array-like data and cuml array data are equal."""
    # Compare through cupy; equal_nan so NaN entries compare equal.
    assert cp.array_equal(
        cp.asarray(array_like),
        cuml_array.to_output("cupy"),
        equal_nan=True,
    )
@given(
    input_type=cuml_array_input_types(),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
    force_gc=st.booleans(),
)
@settings(deadline=None)
def test_array_init(input_type, dtype, shape, order, mem_type, force_gc):
    """CumlArray construction preserves dtype, shape, order and data, and
    keeps the data alive after the input reference is dropped."""
    input_array = create_cuml_array_input(input_type, dtype, shape, order)
    with using_memory_type(mem_type):
        cuml_array = CumlArray(data=input_array)

    # Test basic array properties
    assert cuml_array.dtype == dtype

    if input_type == "series":
        assert cuml_array.shape == series_squeezed_shape(shape)
    else:
        assert cuml_array.shape == normalized_shape(shape)

    # Order is only well-defined (and preserved) for multidimensional arrays.
    md = isinstance(shape, tuple) and len([d for d in shape if d != 1]) > 1
    # Parentheses are required here: without them the expression parsed as
    # `(assert-condition) if md else "C"`, which asserted the truthy string
    # "C" whenever md was False -- i.e. no check at all.
    assert cuml_array.order == (order if md else "C")

    # Check input array and array equality.
    _assert_equal(input_array, cuml_array)

    # Check that data is kept in memory even when the input_array reference
    # is deleted.
    input_array_copy = deepcopy(cp.asarray(input_array))
    del input_array
    if force_gc:
        gc.collect()
    _assert_equal(input_array_copy, cuml_array)
@given(
    data_type=st.sampled_from([bytes, bytearray, memoryview]),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_array_init_from_bytes(data_type, dtype, shape, order, mem_type):
    """CumlArray built from a raw zeroed buffer reports the requested
    metadata and contains all zeros."""
    dtype = np.dtype(dtype)
    values = bytes(_get_size_from_shape(shape, dtype)[0])

    # Convert to data_type to be tested if needed.
    if data_type != bytes:
        values = data_type(values)

    array = CumlArray(
        values, dtype=dtype, shape=shape, order=order, mem_type=mem_type
    )

    assert array.order == order
    assert array.shape in (shape, (shape,))
    assert array.dtype == dtype

    # The input buffer is all zero bytes, so the resulting array must equal
    # an all-zeros array of the same shape/dtype.  (The original compared
    # `array_copy` with itself -- a tautology that checked nothing.)
    expected = cp.zeros(shape, dtype=dtype)
    assert cp.all(cp.asarray(array) == expected)
@given(
    input_type=cuml_array_input_types(),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_array_mem_type(input_type, dtype, shape, order, mem_type):
    """
    Test whether we can create CumlArray from all supported types and array
    shapes on all supported mem types.
    """
    mem_type = MemoryType.from_str(mem_type)

    with using_memory_type(mem_type):
        input_array = create_cuml_array_input(input_type, dtype, shape, order)

        # Ensure the array is creatable
        array = CumlArray(input_array)

        # Compare in whichever memory space can see both buffers: prefer
        # the input's space when it is device accessible.
        input_mem_type = determine_array_memtype(input_array)
        if input_mem_type.is_device_accessible:
            joint_mem_type = input_mem_type
        else:
            joint_mem_type = mem_type

        assert joint_mem_type.xpy.all(
            joint_mem_type.xpy.asarray(input_array)
            == joint_mem_type.xpy.asarray(array)
        )
@given(
    inp=cuml_array_inputs(),
    indices=st.slices(10),  # TODO: should be basic_indices() as shown below
    # indices=basic_indices((10, 10)),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_get_set_item(inp, indices, mem_type):
    """__getitem__ views and __setitem__ writes must match the same
    operations applied to the raw input."""
    mem_type = MemoryType.from_str(mem_type)
    with using_memory_type(mem_type):
        ary = CumlArray(data=inp)

        # Assumption required due to limitation on step size for F-order.
        assume(ary.order != "F" or (indices.step in (None, 1)))

        # Check equality of array views.
        inp_view = inp[indices]

        # Must assume that resulting view must have at least one element to
        # not trigger UnownedMemory exception.
        assume(mem_type.xpy.isscalar(inp_view) or inp_view.size > 0)
        _assert_equal(inp_view, ary[indices])

        # Check equality after assigning to array slice.
        ary[indices] = 1.0
        inp[indices] = 1.0

        # We need to assume that inp is not a cudf.Series here, otherwise
        # ary.to_output("cupy") called by equal() will trigger a
        # CUDARuntimeError: cudaErrorInvalidDevice: invalid device ordinal
        # error.
        assume(not isinstance(inp, cudf.Series))

        _assert_equal(inp, ary)
@given(
    shape=cuml_array_shapes(),
    dtype=cuml_array_dtypes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_empty(shape, dtype, order, mem_type):
    """CumlArray.empty allocates with the requested shape and dtype."""
    with using_memory_type(mem_type):
        created = CumlArray.empty(shape=shape, dtype=dtype, order=order)
        # Contents are uninitialized, so only the metadata is checked.
        assert isinstance(created.ptr, int)
        assert created.shape == normalized_shape(shape)
        assert created.dtype == np.dtype(dtype)
@given(
    shape=cuml_array_shapes(),
    dtype=cuml_array_dtypes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_zeros(shape, dtype, order, mem_type):
    """CumlArray.zeros matches xpy.zeros in every supported memory type."""
    mem_type = MemoryType.from_str(mem_type)
    with using_memory_type(mem_type):
        created = CumlArray.zeros(shape=shape, dtype=dtype, order=order)
        reference = mem_type.xpy.zeros(shape).astype(dtype)
        assert mem_type.xpy.all(reference == mem_type.xpy.asarray(created))
@given(
    shape=cuml_array_shapes(),
    dtype=cuml_array_dtypes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_ones(shape, dtype, order, mem_type):
    """CumlArray.ones matches xpy.ones in every supported memory type."""
    mem_type = MemoryType.from_str(mem_type)
    with using_memory_type(mem_type):
        created = CumlArray.ones(shape=shape, dtype=dtype, order=order)
        reference = mem_type.xpy.ones(shape).astype(dtype)
        assert mem_type.xpy.all(reference == mem_type.xpy.asarray(created))
@given(
    shape=cuml_array_shapes(),
    dtype=cuml_array_dtypes(),
    order=cuml_array_orders(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_full(shape, dtype, order, mem_type):
    """CumlArray.full fills every element with the requested value."""
    mem_type = MemoryType.from_str(mem_type)
    with using_memory_type(mem_type):
        # Random fill value, routed through a one-element array so it has
        # the target dtype before being passed as a scalar.
        value = mem_type.xpy.array([mem_type.xpy.random.randint(100)]).astype(
            dtype
        )
        ary = CumlArray.full(
            value=value[0], shape=shape, dtype=dtype, order=order
        )
        test = mem_type.xpy.zeros(shape).astype(dtype) + value[0]
        assert mem_type.xpy.all(test == mem_type.xpy.asarray(ary))
def cudf_compatible_dtypes(dtype):
    """Return True when cuDF can represent the given dtype."""
    return not (dtype in UNSUPPORTED_CUDF_DTYPES)
@given(
    inp=cuml_array_inputs(),
    input_mem_type=cuml_array_mem_types(),
    output_type=cuml_array_output_types(),
)
@settings(deadline=None)
def test_output(inp, input_mem_type, output_type):
    """to_output returns the right container type and preserves the data."""
    # Required assumptions for cudf outputs:
    if output_type in ("cudf", "dataframe", "series"):
        assume(inp.dtype not in UNSUPPORTED_CUDF_DTYPES)
    if output_type == "series":
        assume(not _multidimensional(inp.shape))

    # Generate CumlArray from input and perform conversion.
    with using_memory_type(input_mem_type):
        arr = CumlArray(inp)
        res = arr.to_output(output_type)

    # Check output type
    if output_type == "numba":  # TODO: is this still needed?
        # using correct numba ndarray check
        assert cuda.devicearray.is_cuda_ndarray(res)
    elif output_type == "cudf":
        assert isinstance(
            res, CudfDataFrame if _multidimensional(inp.shape) else CudfSeries
        )
    elif output_type == "pandas":
        assert isinstance(
            res,
            PandasDataFrame if _multidimensional(inp.shape) else PandasSeries,
        )
    else:
        assert isinstance(res, _OUTPUT_TYPES_MAPPING[output_type])

    def assert_data_equal_(res):
        # Check output data equality
        if isinstance(res, CudfSeries):
            # A simple equality check `assert cudf.Series(inp).equals(res)`
            # does not work with multi-dimensional data.
            assert CudfSeries(np.ravel(inp)).equals(res)
        elif isinstance(res, PandasSeries):
            assert PandasSeries(np.ravel(inp)).equals(res)
        elif isinstance(res, CudfDataFrame):
            # Assumption required because of:
            # https://github.com/rapidsai/cudf/issues/12266
            assume(not np.isnan(res.to_numpy()).any())
            assert CudfDataFrame(inp).equals(res)
        elif isinstance(res, PandasDataFrame):
            assert PandasDataFrame(inp).equals(res)
        else:
            assert np.array_equal(
                to_nparray(inp), to_nparray(res), equal_nan=True
            )

    assert_data_equal_(res)
@given(
    inp=cuml_array_inputs(),
    output_type=cuml_array_output_types(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_end_to_end_conversion_via_intermediate(inp, output_type, mem_type):
    """Round-trip: input -> CumlArray -> output container -> CumlArray
    must preserve the data."""
    mem_type = MemoryType.from_str(mem_type)
    # This test requires a lot of assumptions in combination with cuDF
    # intermediates.

    # Assumptions required for cuDF limitations:
    assume(
        # Not all dtypes are supported by cuDF.
        not (
            output_type in ("cudf", "pandas", "dataframe", "series")
            and inp.dtype in UNSUPPORTED_CUDF_DTYPES
        )
    )
    assume(
        # Can't convert multidimensional arrays to a Series.
        not (output_type == "series" and len(inp.shape) > 1)
    )
    assume(
        # Cannot convert from DataFrame to CumlArray without explicitly
        # specifying shape, dtype, and order.
        not (
            output_type == "dataframe"
            or (output_type == "cudf" and len(inp.shape) > 1)
            or (output_type == "pandas" and len(inp.shape) > 1)
        )
    )

    with using_memory_type(mem_type):
        # First conversion:
        array = CumlArray(data=inp)
        _assert_equal(inp, array)

        # Second conversion via intermediate
        intermediate = array.to_output(output_type)

        # Cupy does not support masked arrays.
        cai = getattr(intermediate, "__cuda_array_interface__", dict())
        assume(cai.get("mask") is None)

        array2 = CumlArray(data=intermediate)
        _assert_equal(inp, array2)
@given(
    output_type=cuml_array_output_types(),
    shape=cuml_array_shapes(),
    dtype=cuml_array_dtypes(),
    order=cuml_array_orders(),
    out_dtype=cuml_array_dtypes(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_output_dtype(output_type, shape, dtype, order, out_dtype, mem_type):
    """to_output(output_dtype=...) must cast the result to the requested
    dtype for every output container."""
    with using_memory_type(mem_type):
        # Required assumptions for cudf outputs:
        if output_type in ("cudf", "dataframe", "series"):
            assume(dtype not in UNSUPPORTED_CUDF_DTYPES)
            assume(out_dtype not in UNSUPPORTED_CUDF_DTYPES)
        if output_type == "series":
            assume(not _multidimensional(shape))

        # Perform conversion
        inp = create_cuml_array_input("numpy", dtype, shape, order)
        ary = CumlArray(inp)
        res = ary.to_output(output_type=output_type, output_dtype=out_dtype)

        # Check output dtype.  The original expressions were missing the
        # `assert` keyword (bare expressions never fail) and compared
        # dtypes with `is`, which is unreliable for dtype objects; compare
        # with `==` instead.
        if isinstance(res, (CudfDataFrame, PandasDataFrame)):
            assert res.values.dtype == out_dtype
        else:
            assert res.dtype == out_dtype
@given(inp=cuml_array_inputs(), mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_array_interface(inp, mem_type):
    """CumlArray's array interface must mirror the input's interface and
    expose the same data."""
    mem_type = MemoryType.from_str(mem_type)
    with using_memory_type(mem_type):
        ary = CumlArray(inp)

    in_mem_type = determine_array_memtype(inp)

    # Series objects expose no array interface directly; convert to a raw
    # numpy array first.
    if isinstance(inp, PandasSeries):
        converted_inp = inp.to_numpy()
    elif isinstance(inp, CudfSeries):
        converted_inp = cp.asnumpy(inp.to_cupy())
    else:
        converted_inp = inp

    try:
        inp_ai = converted_inp.__cuda_array_interface__
    except AttributeError:
        inp_ai = converted_inp.__array_interface__

    ary_ai = ary._array_interface

    # Check Array Interface equality.
    assert inp_ai["shape"] == ary_ai["shape"]
    assert inp_ai["typestr"] == ary_ai["typestr"]
    # Data pointers only match when no copy to another memory type was made.
    if (
        not isinstance(inp, (PandasSeries, CudfSeries))
        and determine_array_memtype(inp) is global_settings.memory_type
    ):
        assert inp_ai["data"] == ary_ai["data"]

    # Mismatch for one-dimensional arrays:
    if inp_ai.get("strides", None) is not None:
        assert inp_ai["strides"] == ary_ai["strides"]

    # Compare in whichever memory space can see both buffers.
    if in_mem_type.is_device_accessible:
        joint_mem_type = in_mem_type
    else:
        joint_mem_type = mem_type

    # Check equality
    inp_arr = joint_mem_type.xpy.asarray(converted_inp)
    out_arr = joint_mem_type.xpy.asarray(ary)
    assert joint_mem_type.xpy.all(
        inp_arr == out_arr
    ) or joint_mem_type.xpy.all(
        joint_mem_type.xpy.isnan(inp_arr)
        == joint_mem_type.xpy.isnan(out_arr)
    )
@given(
    inp=cuml_array_inputs(),
    to_serialize_mem_type=cuml_array_mem_types(),
    from_serialize_mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_serialize(inp, to_serialize_mem_type, from_serialize_mem_type):
    """serialize/deserialize round-trip preserves data and metadata, even
    across different memory types."""
    with using_memory_type(to_serialize_mem_type):
        ary = CumlArray(data=inp)
        header, frames = ary.serialize()
    with using_memory_type(from_serialize_mem_type):
        ary2 = CumlArray.deserialize(header, frames)

    assert pickle.loads(header["type-serialized"]) is CumlArray
    _assert_equal(inp, ary2)

    assert ary._array_interface["shape"] == ary2._array_interface["shape"]
    # Restricting the strides check due to
    # https://github.com/cupy/cupy/issues/5897
    if not (
        len(ary.shape) > 1
        and (
            (ary.order == "C" and ary.shape[0] == 1)
            or (ary.order == "F" and ary.shape[-1] == 1)
        )
    ):
        assert (
            ary._array_interface["strides"]
            == ary2._array_interface["strides"]
        )
    assert (
        ary._array_interface["typestr"] == ary2._array_interface["typestr"]
    )
    # Deserialization lands in the currently active memory type.
    assert ary2.mem_type is global_settings.memory_type

    if isinstance(inp, (cudf.Series, pd.Series)):
        assert ary.order == ary2.order
@pytest.mark.parametrize("protocol", [4, 5])
@given(
    inp=cuml_array_inputs(),
    to_serialize_mem_type=cuml_array_mem_types(),
    from_serialize_mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_pickle(protocol, inp, to_serialize_mem_type, from_serialize_mem_type):
    """pickle round-trip preserves data and metadata; protocol 5 must use
    out-of-band buffers."""
    with using_memory_type(to_serialize_mem_type):
        # Generate CumlArray
        ary = CumlArray(data=inp)

        # Prepare keyword arguments.
        dumps_kwargs = {"protocol": protocol}
        loads_kwargs = {}
        f = []
        len_f = 0
        # Protocol 5 supports out-of-band buffers; expect exactly one.
        if protocol >= 5:
            dumps_kwargs["buffer_callback"] = f.append
            loads_kwargs["buffers"] = f
            len_f = 1

        a = pickle.dumps(ary, **dumps_kwargs)

    with using_memory_type(from_serialize_mem_type):
        b = pickle.loads(a, **loads_kwargs)

    assert ary._array_interface["shape"] == b._array_interface["shape"]
    # Restricting the strides check due to
    # https://github.com/cupy/cupy/issues/5897
    if not (len(ary.shape) > 1 and (ary.shape[0] == 1 or ary.shape[-1] == 1)):
        assert ary._array_interface["strides"] == b._array_interface["strides"]
    assert ary._array_interface["typestr"] == b._array_interface["typestr"]

    # Check equality
    assert len(f) == len_f
    _assert_equal(inp, b)

    if isinstance(inp, (cudf.Series, pd.Series)):
        # skipping one dimensional ary order test
        assert ary.order == b.order
@given(inp=cuml_array_inputs(), mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_deepcopy(inp, mem_type):
    """deepcopy produces an equal array backed by distinct memory."""
    with using_memory_type(mem_type):
        # Generate CumlArray
        ary = CumlArray(data=inp)

        # Perform deepcopy
        b = deepcopy(ary)

        # Check equality
        _assert_equal(inp, b)
        # The copy must not share the original's buffer.
        assert ary.ptr != b.ptr

        assert ary._array_interface["shape"] == b._array_interface["shape"]
        # Restricting the strides check due to
        # https://github.com/cupy/cupy/issues/5897
        if not (
            len(ary.shape) > 1 and (ary.shape[0] == 1 or ary.shape[-1] == 1)
        ):
            assert (
                ary._array_interface["strides"]
                == b._array_interface["strides"]
            )
        assert ary._array_interface["typestr"] == b._array_interface["typestr"]

        if isinstance(inp, (cudf.Series, pd.Series)):
            # skipping one dimensional ary order test
            assert ary.order == b.order
@pytest.mark.parametrize("operation", [operator.add, operator.sub])
@given(
    a=cuml_array_inputs(),
    mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_cumlary_binops(operation, a, mem_type):
    """Binary operators on CumlArrays match the same op on raw inputs."""
    with using_memory_type(mem_type):
        b = deepcopy(a)
        expected = operation(a, b)
        actual = operation(CumlArray(a), CumlArray(b))
        _assert_equal(expected, actual)
@pytest.mark.parametrize("order", ["F", "C"])
@given(mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_sliced_array_owner(order, mem_type):
    """
    When slicing a CumlArray, a new object can be created created which
    previously had an incorrect owner. This was due to the requirement by
    `cudf.core.Buffer` that all data be in "u1" form. CumlArray would satisfy
    this requirement by calling
    `cp.asarray(data).ravel(order='A').view('u1')`. If the slice is not
    contiguous, this would create an intermediate object with no references
    that would be cleaned up by GC causing an error when using the memory
    """
    mem_type = MemoryType.from_str(mem_type)
    xpy = mem_type.xpy

    # Create 2 copies of a random array
    random_arr = xpy.array(
        xpy.random.random((500, 4)), dtype=np.float32, order=order
    )
    arr = xpy.array(random_arr, copy=True)
    with using_memory_type(mem_type):
        cuml_array = CumlArray(random_arr)

    # Make sure we have 2 pieces of data
    if mem_type.is_device_accessible:
        assert arr.data.ptr != cuml_array.ptr
    else:
        assert arr.__array_interface__["data"][0] != cuml_array.ptr

    # Since these are C arrays, slice off the first column to ensure they are
    # non-contiguous
    cuml_slice = cuml_array[1:, 1:]
    arr_slice = arr[1:, 1:]

    # Delete the input object just to be sure
    del random_arr

    # Make sure to cleanup any objects. Forces deletion of intermediate owner
    # object
    gc.collect()

    # Calling `to_output` forces use of the pointer. This can fail with a cuda
    # error on `cupy.cuda.runtime.pointerGetAttributes(cuml_slice.ptr)` in CUDA
    # < 11.0 or cudaErrorInvalidDevice in CUDA > 11.0 (unclear why it changed)
    assert xpy.all(
        cuml_slice.to_output("array", output_mem_type=mem_type) == arr_slice
    )
@given(
    input_type=cuml_array_input_types(),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(min_dims=1, max_dims=5),
    order=cuml_array_orders(),
)
@settings(deadline=None)
def test_array_to_memory_order(input_type, dtype, shape, order):
    """array_to_memory_order must recover the order an input was built with."""
    arr = create_cuml_array_input(input_type, dtype, shape, order)
    detected = array_to_memory_order(arr, default=order)
    assert detected == order
@given(
    input_type=st.sampled_from(("cupy", "numpy")),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(min_dims=1, max_dims=5),
    order=cuml_array_orders(),
)
@settings(deadline=None)
def test_order_to_strides(input_type, dtype, shape, order):
    """_order_to_strides must reproduce the strides the array library picks."""
    arr = create_cuml_array_input(input_type, dtype, shape, order)
    # Normalize a scalar shape to a 1-tuple before computing strides.
    normalized_shape = (shape,) if isinstance(shape, int) else shape
    computed = np.array(_order_to_strides(order, normalized_shape, dtype))
    expected = np.array(arr.strides)
    assert np.all(computed == expected)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_incremental_pca.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.exceptions import NotFittedError
from cuml.testing.utils import array_equal
from cuml.decomposition.incremental_pca import _svd_flip
from cuml.decomposition import IncrementalPCA as cuIPCA
from cuml.datasets import make_blobs
from sklearn.decomposition import IncrementalPCA as skIPCA
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@pytest.mark.parametrize(
    "nrows, ncols, n_components, sparse_input, density, sparse_format,"
    " batch_size_divider, whiten",
    [
        (500, 15, 2, True, 0.4, "csr", 5, True),
        (5000, 25, 12, False, 0.07, "csc", 10, False),
        (5000, 15, None, True, 0.4, "csc", 5, False),
        (500, 25, 2, False, 0.07, "csr", 10, False),
        (5000, 25, 12, False, 0.07, "csr", 10, True),
        (500, 2500, 9, False, 0.07, "csr", 50, True),
        (500, 250, 14, True, 0.07, "csr", 1, True),
    ],
)
@pytest.mark.no_bad_cuml_array_check
def test_fit(
    nrows,
    ncols,
    n_components,
    sparse_input,
    density,
    sparse_format,
    batch_size_divider,
    whiten,
):
    """Fit/transform/inverse_transform round-trip must agree with sklearn.

    Builds either a sparse random matrix or dense blobs, fits cuML's and
    sklearn's IncrementalPCA with identical settings, and compares the
    reconstructed (inverse-transformed) data.
    """
    if sparse_format == "csc":
        pytest.skip(
            "cupyx.scipy.sparse.csc.csc_matrix does not support"
            " indexing as of cupy 7.6.0"
        )
    if sparse_input:
        X = cupyx.scipy.sparse.random(
            nrows,
            ncols,
            density=density,
            random_state=10,
            format=sparse_format,
        )
    else:
        X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
    batch_size = int(nrows / batch_size_divider)
    gpu_model = cuIPCA(
        n_components=n_components, whiten=whiten, batch_size=batch_size
    )
    gpu_model.fit(X)
    gpu_recon = gpu_model.inverse_transform(gpu_model.transform(X))
    # Move the data to host and repeat with sklearn's implementation.
    host_X = X.get() if sparse_input else cp.asnumpy(X)
    sk_model = skIPCA(
        n_components=n_components, whiten=whiten, batch_size=batch_size
    )
    sk_model.fit(host_X)
    sk_recon = sk_model.inverse_transform(sk_model.transform(host_X))
    assert array_equal(gpu_recon, sk_recon, 5e-5, with_sign=True)
@pytest.mark.parametrize(
    "nrows, ncols, n_components, density, batch_size_divider, whiten",
    [
        (500, 15, 2, 0.07, 5, False),
        (500, 15, 2, 0.07, 5, True),
        (5000, 25, 12, 0.07, 10, False),
        (5000, 15, 2, 0.4, 5, True),
        (500, 25, 12, 0.4, 10, False),
        (5000, 4, 2, 0.1, 100, False),
    ],
)
@pytest.mark.no_bad_cuml_array_check
def test_partial_fit(
    nrows, ncols, n_components, density, batch_size_divider, whiten
):
    """Chunk-wise partial_fit on cuML's IPCA must match sklearn's."""
    X, _ = make_blobs(n_samples=nrows, n_features=ncols, random_state=10)
    chunk = int(nrows / batch_size_divider)

    def feed_batches(model, data):
        # Stream the dataset into the model one chunk at a time.
        for start in range(0, nrows, chunk):
            model.partial_fit(data[start : start + chunk].copy())

    gpu_model = cuIPCA(n_components=n_components, whiten=whiten)
    feed_batches(gpu_model, X)
    gpu_recon = gpu_model.inverse_transform(gpu_model.transform(X))
    host_X = cp.asnumpy(X)
    sk_model = skIPCA(n_components=n_components, whiten=whiten)
    feed_batches(sk_model, host_X)
    sk_recon = sk_model.inverse_transform(sk_model.transform(host_X))
    assert array_equal(gpu_recon, sk_recon, 5e-5, with_sign=True)
def test_exceptions():
    """IncrementalPCA must raise the documented errors on invalid usage."""
    sparse_data = cupyx.scipy.sparse.eye(10)
    model = cuIPCA()
    # partial_fit rejects sparse input outright.
    with pytest.raises(TypeError):
        model.partial_fit(sparse_data)
    dense_data = sparse_data.toarray()
    # transform/inverse_transform before any fit must fail.
    with pytest.raises(NotFittedError):
        model.transform(dense_data)
    with pytest.raises(NotFittedError):
        model.inverse_transform(dense_data)
    # n_components may exceed neither the sample count nor the feature count.
    with pytest.raises(ValueError):
        cuIPCA(n_components=8).fit(dense_data[:5])
    with pytest.raises(ValueError):
        cuIPCA(n_components=8).fit(dense_data[:, :5])
def test_svd_flip():
    """Sign-flipped SVD factors must still reconstruct the original matrix."""
    matrix = cp.array(range(-10, 80)).reshape((9, 10))
    u, s, v = cp.linalg.svd(matrix, full_matrices=False)
    # Both sign conventions (u-based and v-based) must leave u @ diag(s) @ v
    # equal to the input.
    for u_based in (True, False):
        u_adj, v_adj = _svd_flip(u, v, u_based_decision=u_based)
        assert array_equal(cp.dot(u_adj * s, v_adj), matrix)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_multiclass.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml import LogisticRegression as cuLog
from cuml import multiclass as cu_multiclass
import sys
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
# As tests directory is not a module, we need to add it to the path
sys.path.insert(0, ".")
from test_linear_model import make_classification_dataset # noqa: E402
@pytest.mark.parametrize("strategy", ["ovr", "ovo"])
@pytest.mark.parametrize("use_wrapper", [True, False])
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("num_classes", [3])
@pytest.mark.parametrize("column_info", [[10, 4]])
def test_logistic_regression(
    strategy, use_wrapper, nrows, num_classes, column_info, dtype=np.float32
):
    """Multiclass wrappers around LogisticRegression must reach >0.7 accuracy.

    Exercises both the generic MulticlassClassifier wrapper and the dedicated
    OneVsOne/OneVsRest classes for each strategy.
    """
    ncols, n_info = column_info
    X_train, X_test, y_train, y_test = make_classification_dataset(
        datatype=dtype,
        nrows=nrows,
        ncols=ncols,
        n_info=n_info,
        num_classes=num_classes,
    )
    y_train = y_train.astype(dtype)
    y_test = y_test.astype(dtype)
    base_estimator = cuLog()
    if use_wrapper:
        clf = cu_multiclass.MulticlassClassifier(
            base_estimator, strategy=strategy
        )
    elif strategy == "ovo":
        clf = cu_multiclass.OneVsOneClassifier(base_estimator)
    else:
        clf = cu_multiclass.OneVsRestClassifier(base_estimator)
    clf.fit(X_train, y_train)
    assert clf.score(X_test, y_test) > 0.7
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_random_forest.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import treelite
from sklearn.model_selection import train_test_split
from sklearn.datasets import (
fetch_california_housing,
make_classification,
make_regression,
load_iris,
load_breast_cancer,
)
from sklearn.metrics import (
accuracy_score,
mean_squared_error,
mean_tweedie_deviance,
)
from sklearn.ensemble import RandomForestRegressor as skrfr
from sklearn.ensemble import RandomForestClassifier as skrfc
import cuml.internals.logger as logger
from cuml.testing.utils import (
get_handle,
unit_param,
quality_param,
stress_param,
)
from cuml.metrics import r2_score
from cuml.ensemble import RandomForestRegressor as curfr
from cuml.ensemble import RandomForestClassifier as curfc
import cuml
from cuml.internals.safe_imports import gpu_only_import_from
import os
import json
import random
from cuml.internals.safe_imports import cpu_only_import
import pytest
import warnings
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
# Module-wide filter: ignore cuML's "For reproducible results ..." warning
# (presumably emitted by the random-forest estimators under test; expected
# noise here -- confirm the emitting code path if this filter stops matching).
pytestmark = pytest.mark.filterwarnings(
    "ignore: For reproducible results(.*)" "::cuml[.*]"
)
@pytest.fixture(
    scope="session",
    params=[
        unit_param({"n_samples": 350, "n_features": 20, "n_informative": 10}),
        quality_param(
            {"n_samples": 5000, "n_features": 200, "n_informative": 80}
        ),
        stress_param(
            {"n_samples": 500000, "n_features": 400, "n_informative": 180}
        ),
    ],
)
def small_clf(request):
    """Session-scoped binary-classification dataset, sized per test tier."""
    cfg = request.param
    return make_classification(
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
        n_clusters_per_class=1,
        n_informative=cfg["n_informative"],
        random_state=123,
        n_classes=2,
    )
@pytest.fixture(
    scope="session",
    params=[
        unit_param({"n_samples": 350, "n_features": 30, "n_informative": 15}),
        quality_param(
            {"n_samples": 5000, "n_features": 200, "n_informative": 80}
        ),
        stress_param(
            {"n_samples": 500000, "n_features": 400, "n_informative": 180}
        ),
    ],
)
def mclass_clf(request):
    """Session-scoped 10-class classification dataset, sized per test tier."""
    cfg = request.param
    return make_classification(
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
        n_clusters_per_class=1,
        n_informative=cfg["n_informative"],
        random_state=123,
        n_classes=10,
    )
@pytest.fixture(
    scope="session",
    params=[
        unit_param({"n_samples": 500, "n_features": 20, "n_informative": 10}),
        quality_param(
            {"n_samples": 5000, "n_features": 200, "n_informative": 50}
        ),
        stress_param(
            {"n_samples": 500000, "n_features": 400, "n_informative": 100}
        ),
    ],
)
def large_clf(request):
    """Session-scoped binary-classification dataset (larger unit tier)."""
    cfg = request.param
    return make_classification(
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
        n_clusters_per_class=1,
        n_informative=cfg["n_informative"],
        random_state=123,
        n_classes=2,
    )
@pytest.fixture(
    scope="session",
    params=[
        unit_param({"n_samples": 1500, "n_features": 20, "n_informative": 10}),
        quality_param(
            {"n_samples": 12000, "n_features": 200, "n_informative": 100}
        ),
        stress_param(
            {"n_samples": 500000, "n_features": 500, "n_informative": 350}
        ),
    ],
)
def large_reg(request):
    """Session-scoped synthetic regression dataset, sized per test tier."""
    cfg = request.param
    return make_regression(
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
        n_informative=cfg["n_informative"],
        random_state=123,
    )
# Parameter sets for the `special_reg` fixture below. The "mode" key selects
# the data source ("quality" uses the California-housing dataset; other modes
# use synthetic data built from the remaining keys).
special_reg_params = [
    unit_param(
        {
            "mode": "unit",
            "n_samples": 500,
            "n_features": 20,
            "n_informative": 10,
        }
    ),
    quality_param(
        {
            "mode": "quality",
            "n_samples": 500,
            "n_features": 20,
            "n_informative": 10,
        }
    ),
    quality_param({"mode": "quality", "n_features": 200, "n_informative": 50}),
    stress_param(
        {
            "mode": "stress",
            "n_samples": 500,
            "n_features": 20,
            "n_informative": 10,
        }
    ),
    stress_param({"mode": "stress", "n_features": 200, "n_informative": 50}),
    stress_param(
        {
            "mode": "stress",
            "n_samples": 1000,
            "n_features": 400,
            "n_informative": 100,
        }
    ),
]
@pytest.fixture(scope="session", params=special_reg_params)
def special_reg(request):
    """Regression data: real housing data in "quality" mode, else synthetic."""
    cfg = request.param
    if cfg["mode"] == "quality":
        return fetch_california_housing(return_X_y=True)
    return make_regression(
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
        n_informative=cfg["n_informative"],
        random_state=123,
    )
@pytest.mark.parametrize("max_depth", [2, 4])
@pytest.mark.parametrize(
    "split_criterion", ["poisson", "gamma", "inverse_gaussian"]
)
def test_tweedie_convergence(max_depth, split_criterion):
    """An RF regressor trained with a Tweedie criterion must beat a plain
    MSE-criterion forest on Tweedie deviance for matching data.

    For each criterion, labels are sampled from the matching distribution
    (Poisson/Gamma/Wald) and two otherwise-identical single-tree forests are
    compared on ``mean_tweedie_deviance`` at the corresponding power.
    """
    np.random.seed(33)
    bootstrap = None
    max_features = 1.0
    n_estimators = 1
    min_impurity_decrease = 1e-5
    n_datapoints = 1000
    # Per-criterion Tweedie power, label generator, and generator args.
    tweedie = {
        "poisson": {"power": 1, "gen": np.random.poisson, "args": [0.01]},
        "gamma": {"power": 2, "gen": np.random.gamma, "args": [2.0]},
        "inverse_gaussian": {
            "power": 3,
            "gen": np.random.wald,
            "args": [0.1, 2.0],
        },
    }
    # generating random dataset with tweedie distribution
    X = np.random.random((n_datapoints, 4)).astype(np.float32)
    y = tweedie[split_criterion]["gen"](
        *tweedie[split_criterion]["args"], size=n_datapoints
    ).astype(np.float32)
    tweedie_preds = (
        curfr(
            split_criterion=split_criterion,
            max_depth=max_depth,
            n_estimators=n_estimators,
            bootstrap=bootstrap,
            max_features=max_features,
            min_impurity_decrease=min_impurity_decrease,
        )
        .fit(X, y)
        .predict(X)
    )
    mse_preds = (
        curfr(
            split_criterion=2,
            max_depth=max_depth,
            n_estimators=n_estimators,
            bootstrap=bootstrap,
            max_features=max_features,
            min_impurity_decrease=min_impurity_decrease,
        )
        .fit(X, y)
        .predict(X)
    )
    # mean_tweedie_deviance requires strictly positive predictions for these
    # powers, so drop rows where the MSE model predicted <= 0; the same mask
    # is applied to both models to keep the comparison fair.
    mask = mse_preds > 0
    mse_tweedie_deviance = mean_tweedie_deviance(
        y[mask], mse_preds[mask], power=tweedie[split_criterion]["power"]
    )
    tweedie_tweedie_deviance = mean_tweedie_deviance(
        y[mask], tweedie_preds[mask], power=tweedie[split_criterion]["power"]
    )
    # model trained on tweedie data with
    # tweedie criterion must perform better on tweedie loss
    assert mse_tweedie_deviance >= tweedie_tweedie_deviance
@pytest.mark.parametrize(
    "max_samples", [unit_param(1.0), quality_param(0.90), stress_param(0.95)]
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("max_features", [1.0, "auto", "log2", "sqrt"])
def test_rf_classification(small_clf, datatype, max_samples, max_features):
    """cuML RF classifier accuracy: GPU (FIL) predictions must stay within
    0.07 of both sklearn's accuracy and cuML's own CPU-path accuracy."""
    use_handle = True
    X, y = small_clf
    X = X.astype(datatype)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Initialize, fit and predict using cuML's
    # random forest classification model
    cuml_model = curfc(
        max_features=max_features,
        max_samples=max_samples,
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=40,
        handle=handle,
        max_leaves=-1,
        max_depth=16,
    )
    cuml_model.fit(X_train, y_train)
    # GPU (FIL) and CPU prediction paths over the same fitted model.
    fil_preds = cuml_model.predict(
        X_test, predict_model="GPU", threshold=0.5, algo="auto"
    )
    cu_preds = cuml_model.predict(X_test, predict_model="CPU")
    fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
    cuml_acc = accuracy_score(y_test, cu_preds)
    fil_acc = accuracy_score(y_test, fil_preds)
    # Skip the (slow) sklearn comparison for the stress-sized dataset.
    if X.shape[0] < 500000:
        sk_model = skrfc(
            n_estimators=40,
            max_depth=16,
            min_samples_split=2,
            max_features=max_features,
            random_state=10,
        )
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_acc = accuracy_score(y_test, sk_preds)
        assert fil_acc >= (sk_acc - 0.07)
    assert fil_acc >= (
        cuml_acc - 0.07
    )  # to be changed to 0.02. see issue #3910: https://github.com/rapidsai/cuml/issues/3910 # noqa
@pytest.mark.parametrize(
    "max_samples", [unit_param(1.0), quality_param(0.90), stress_param(0.95)]
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
def test_rf_classification_unorder(
    small_clf, datatype, max_samples, max_features=1, a=2, b=5
):
    """Same accuracy checks as test_rf_classification, but with labels mapped
    away from a contiguous 0-based range via an affine transform."""
    use_handle = True
    X, y = small_clf
    X = X.astype(datatype)
    y = y.astype(np.int32)
    # affine transformation: with the defaults (a=2, b=5) the binary labels
    # {0, 1} become {5, 7} -- neither 0-based nor contiguous.
    y = a * y + b
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Initialize, fit and predict using cuML's
    # random forest classification model
    cuml_model = curfc(
        max_features=max_features,
        max_samples=max_samples,
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=40,
        handle=handle,
        max_leaves=-1,
        max_depth=16,
    )
    cuml_model.fit(X_train, y_train)
    fil_preds = cuml_model.predict(
        X_test, predict_model="GPU", threshold=0.5, algo="auto"
    )
    cu_preds = cuml_model.predict(X_test, predict_model="CPU")
    fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
    cuml_acc = accuracy_score(y_test, cu_preds)
    fil_acc = accuracy_score(y_test, fil_preds)
    # Skip the (slow) sklearn comparison for the stress-sized dataset.
    if X.shape[0] < 500000:
        sk_model = skrfc(
            n_estimators=40,
            max_depth=16,
            min_samples_split=2,
            max_features=max_features,
            random_state=10,
        )
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_acc = accuracy_score(y_test, sk_preds)
        assert fil_acc >= (sk_acc - 0.07)
    assert fil_acc >= (
        cuml_acc - 0.07
    )  # to be changed to 0.02. see issue #3910: https://github.com/rapidsai/cuml/issues/3910 # noqa
@pytest.mark.parametrize(
    "max_samples", [unit_param(1.0), quality_param(0.90), stress_param(0.95)]
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "max_features,n_bins",
    [
        (1.0, 16),
        (1.0, 11),
        ("auto", 128),
        ("log2", 100),
        ("sqrt", 100),
        (1.0, 17),
        (1.0, 32),
    ],
)
def test_rf_regression(
    special_reg, datatype, max_features, max_samples, n_bins
):
    """cuML RF regressor R^2: GPU (FIL) predictions must stay within 0.07 of
    sklearn's R^2 and within 0.02 of cuML's own CPU-path R^2."""
    use_handle = True
    X, y = special_reg
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Initialize and fit using cuML's random forest regression model
    cuml_model = curfr(
        max_features=max_features,
        max_samples=max_samples,
        n_bins=n_bins,
        split_criterion=2,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=50,
        handle=handle,
        max_leaves=-1,
        max_depth=16,
        accuracy_metric="mse",
    )
    cuml_model.fit(X_train, y_train)
    # predict using FIL
    fil_preds = cuml_model.predict(X_test, predict_model="GPU")
    cu_preds = cuml_model.predict(X_test, predict_model="CPU")
    fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
    cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype)
    fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)
    # Initialize, fit and predict using
    # sklearn's random forest regression model
    if X.shape[0] < 1000:  # mode != "stress"
        sk_model = skrfr(
            n_estimators=50,
            max_depth=16,
            min_samples_split=2,
            max_features=max_features,
            random_state=10,
        )
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype)
        assert fil_r2 >= (sk_r2 - 0.07)
    assert fil_r2 >= (cu_r2 - 0.02)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
def test_rf_classification_seed(small_clf, datatype):
    """Two cuML RF classifiers built with the same random_state (and
    n_streams=1) must be fully reproducible: both GPU (FIL) and CPU
    predictions from a re-trained clone have to match the originals exactly.
    """
    X, y = small_clf
    X = X.astype(datatype)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    for i in range(8):
        # Fix: the upper bound was previously the float literal ``1e5``;
        # random.randint requires integer bounds (non-integer arguments
        # raise TypeError on Python >= 3.12).
        seed = random.randint(100, 100000)
        # Initialize, fit and predict using cuML's
        # random forest classification model
        cu_class = curfc(random_state=seed, n_streams=1)
        cu_class.fit(X_train, y_train)
        # predict using FIL
        fil_preds_orig = cu_class.predict(X_test, predict_model="GPU")
        cu_preds_orig = cu_class.predict(X_test, predict_model="CPU")
        cu_acc_orig = accuracy_score(y_test, cu_preds_orig)
        fil_preds_orig = np.reshape(fil_preds_orig, np.shape(cu_preds_orig))
        fil_acc_orig = accuracy_score(y_test, fil_preds_orig)
        # Re-train an identical model with the same seed ...
        cu_class2 = curfc(random_state=seed, n_streams=1)
        cu_class2.fit(X_train, y_train)
        # predict using FIL
        fil_preds_rerun = cu_class2.predict(X_test, predict_model="GPU")
        cu_preds_rerun = cu_class2.predict(X_test, predict_model="CPU")
        cu_acc_rerun = accuracy_score(y_test, cu_preds_rerun)
        fil_preds_rerun = np.reshape(fil_preds_rerun, np.shape(cu_preds_rerun))
        fil_acc_rerun = accuracy_score(y_test, fil_preds_rerun)
        # ... and require identical accuracies and element-wise identical
        # predictions on both prediction paths.
        assert fil_acc_orig == fil_acc_rerun
        assert cu_acc_orig == cu_acc_rerun
        assert (fil_preds_orig == fil_preds_rerun).all()
        assert (cu_preds_orig == cu_preds_rerun).all()
@pytest.mark.parametrize(
    "datatype", [(np.float64, np.float32), (np.float32, np.float64)]
)
@pytest.mark.parametrize("convert_dtype", [True, False])
@pytest.mark.filterwarnings("ignore:To use pickling(.*)::cuml[.*]")
def test_rf_classification_float64(small_clf, datatype, convert_dtype):
    """RF classifier with mismatched train/test dtypes: train on datatype[0],
    predict on datatype[1], exercising the convert_dtype GPU-predict path."""
    X, y = small_clf
    X = X.astype(datatype[0])
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Cast the test split to the *other* dtype to force a conversion.
    X_test = X_test.astype(datatype[1])
    # Initialize, fit and predict using cuML's
    # random forest classification model
    cuml_model = curfc()
    cuml_model.fit(X_train, y_train)
    cu_preds = cuml_model.predict(X_test, predict_model="CPU")
    cu_acc = accuracy_score(y_test, cu_preds)
    # sklearn random forest classification model
    # initialization, fit and predict
    if X.shape[0] < 500000:
        sk_model = skrfc(max_depth=16, random_state=10)
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_acc = accuracy_score(y_test, sk_preds)
        assert cu_acc >= (sk_acc - 0.07)
    # predict using cuML's GPU based prediction
    fil_preds = cuml_model.predict(
        X_test, predict_model="GPU", convert_dtype=convert_dtype
    )
    fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
    fil_acc = accuracy_score(y_test, fil_preds)
    assert fil_acc >= (
        cu_acc - 0.07
    )  # to be changed to 0.02. see issue #3910: https://github.com/rapidsai/cuml/issues/3910 # noqa
@pytest.mark.parametrize(
    "datatype", [(np.float64, np.float32), (np.float32, np.float64)]
)
@pytest.mark.filterwarnings("ignore:To use pickling(.*)::cuml[.*]")
def test_rf_regression_float64(large_reg, datatype):
    """RF regressor with mismatched train/test dtypes: train on datatype[0],
    predict on datatype[1] with convert_dtype=True on the GPU path."""
    X, y = large_reg
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    X_train = X_train.astype(datatype[0])
    y_train = y_train.astype(datatype[0])
    X_test = X_test.astype(datatype[1])
    y_test = y_test.astype(datatype[1])
    # Initialize, fit and predict using cuML's
    # random forest regression model
    cuml_model = curfr()
    cuml_model.fit(X_train, y_train)
    cu_preds = cuml_model.predict(X_test, predict_model="CPU")
    cu_r2 = r2_score(y_test, cu_preds, convert_dtype=datatype[0])
    # sklearn random forest regression model
    # initialization, fit and predict
    if X.shape[0] < 500000:
        sk_model = skrfr(max_depth=16, random_state=10)
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype[0])
        assert cu_r2 >= (sk_r2 - 0.09)
    # predict using cuML's GPU based prediction
    fil_preds = cuml_model.predict(
        X_test, predict_model="GPU", convert_dtype=True
    )
    fil_preds = np.reshape(fil_preds, np.shape(cu_preds))
    fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype[0])
    assert fil_r2 >= (cu_r2 - 0.02)
def check_predict_proba(test_proba, baseline_proba, y_test, rel_err):
    """Require ``test_proba``'s MSE against one-hot labels to be within a
    relative factor of ``baseline_proba``'s MSE.

    Using relative error is more stable than an absolute bound when tree
    parameters, column count, or class count change.
    """
    # Build the one-hot encoding of the true labels.
    one_hot = np.zeros(np.shape(baseline_proba))
    for row, label in enumerate(y_test):
        one_hot[row, label] = 1
    baseline_mse = mean_squared_error(one_hot, baseline_proba)
    test_mse = mean_squared_error(one_hot, test_proba)
    assert test_mse <= baseline_mse * (1.0 + rel_err)
def rf_classification(
    datatype, array_type, max_features, max_samples, fixture
):
    """Shared checker for cuML RF classification tests.

    Trains a cuML RF classifier on the dataset from ``fixture`` (as numpy
    arrays or cuDF objects per ``array_type``), asserts that GPU predictions
    are the argmax of predict_proba, that CPU/GPU accuracies agree, and --
    for non-stress sizes -- that both beat sklearn's accuracy minus 0.07.
    ``datatype`` is a (train_dtype, test_dtype) pair.
    """
    X, y = fixture
    X = X.astype(datatype[0])
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Test split intentionally uses the second dtype of the pair.
    X_test = X_test.astype(datatype[1])
    n_streams = 1
    handle, stream = get_handle(True, n_streams=n_streams)
    # Initialize, fit and predict using cuML's
    # random forest classification model
    cuml_model = curfc(
        max_features=max_features,
        max_samples=max_samples,
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=999,
        n_estimators=40,
        handle=handle,
        max_leaves=-1,
        max_depth=16,
        n_streams=n_streams,
    )
    if array_type == "dataframe":
        # Exercise the cuDF input path; results come back as cuDF objects.
        X_train_df = cudf.DataFrame(X_train)
        y_train_df = cudf.Series(y_train)
        X_test_df = cudf.DataFrame(X_test)
        cuml_model.fit(X_train_df, y_train_df)
        cu_proba_gpu = cuml_model.predict_proba(X_test_df).to_numpy()
        cu_preds_cpu = cuml_model.predict(
            X_test_df, predict_model="CPU"
        ).to_numpy()
        cu_preds_gpu = cuml_model.predict(
            X_test_df, predict_model="GPU"
        ).to_numpy()
    else:
        cuml_model.fit(X_train, y_train)
        cu_proba_gpu = cuml_model.predict_proba(X_test)
        cu_preds_cpu = cuml_model.predict(X_test, predict_model="CPU")
        cu_preds_gpu = cuml_model.predict(X_test, predict_model="GPU")
    # GPU class predictions must be exactly the argmax of the probabilities.
    np.testing.assert_array_equal(
        cu_preds_gpu, np.argmax(cu_proba_gpu, axis=1)
    )
    cu_acc_cpu = accuracy_score(y_test, cu_preds_cpu)
    cu_acc_gpu = accuracy_score(y_test, cu_preds_gpu)
    assert cu_acc_cpu == pytest.approx(cu_acc_gpu, abs=0.01, rel=0.1)
    # sklearn random forest classification model
    # initialization, fit and predict
    if y.size < 500000:
        sk_model = skrfc(
            n_estimators=40,
            max_depth=16,
            min_samples_split=2,
            max_features=max_features,
            random_state=10,
        )
        sk_model.fit(X_train, y_train)
        sk_preds = sk_model.predict(X_test)
        sk_acc = accuracy_score(y_test, sk_preds)
        sk_proba = sk_model.predict_proba(X_test)
        assert cu_acc_cpu >= sk_acc - 0.07
        assert cu_acc_gpu >= sk_acc - 0.07
        # 0.06 is the highest relative error observed on CI, within
        # 0.0061 absolute error boundaries seen previously
        check_predict_proba(cu_proba_gpu, sk_proba, y_test, 0.1)
@pytest.mark.parametrize("datatype", [(np.float32, np.float64)])
@pytest.mark.parametrize("array_type", ["dataframe", "numpy"])
def test_rf_classification_multi_class(mclass_clf, datatype, array_type):
    # 10-class dataset; delegates to the shared rf_classification checker
    # with max_features=1.0 and max_samples=1.0.
    rf_classification(datatype, array_type, 1.0, 1.0, mclass_clf)
@pytest.mark.parametrize("datatype", [(np.float32, np.float64)])
@pytest.mark.parametrize("max_samples", [unit_param(1.0), stress_param(0.95)])
@pytest.mark.parametrize("max_features", [1.0, "auto", "log2", "sqrt"])
def test_rf_classification_proba(
    small_clf, datatype, max_samples, max_features
):
    # Binary dataset as numpy arrays; the shared checker also validates
    # predict_proba against sklearn via check_predict_proba.
    rf_classification(datatype, "numpy", max_features, max_samples, small_clf)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "fil_sparse_format", ["not_supported", True, "auto", False]
)
@pytest.mark.parametrize(
    "algo", ["auto", "naive", "tree_reorg", "batch_tree_reorg"]
)
def test_rf_classification_sparse(
    small_clf, datatype, fil_sparse_format, algo
):
    """RF classifier with sparse-FIL prediction options.

    Unsupported (fil_sparse_format, algo) combinations must raise ValueError;
    supported ones must match score(), the converted FIL model, and the
    treelite export, and stay within 0.07 of sklearn's accuracy.
    """
    use_handle = True
    # Fix: local was misspelled ``num_treees``.
    num_trees = 50
    X, y = small_clf
    X = X.astype(datatype)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Initialize, fit and predict using cuML's
    # random forest classification model
    cuml_model = curfc(
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=num_trees,
        handle=handle,
        max_leaves=-1,
        max_depth=40,
    )
    cuml_model.fit(X_train, y_train)
    # Dense-only algos and the sentinel "not_supported" must be rejected.
    if (
        not fil_sparse_format
        or algo == "tree_reorg"
        or algo == "batch_tree_reorg"
    ) or fil_sparse_format == "not_supported":
        with pytest.raises(ValueError):
            fil_preds = cuml_model.predict(
                X_test,
                predict_model="GPU",
                threshold=0.5,
                fil_sparse_format=fil_sparse_format,
                algo=algo,
            )
    else:
        fil_preds = cuml_model.predict(
            X_test,
            predict_model="GPU",
            threshold=0.5,
            fil_sparse_format=fil_sparse_format,
            algo=algo,
        )
        fil_preds = np.reshape(fil_preds, np.shape(y_test))
        fil_acc = accuracy_score(y_test, fil_preds)
        # predict() accuracy must agree with score() on the same data.
        np.testing.assert_almost_equal(
            fil_acc, cuml_model.score(X_test, y_test)
        )
        # The standalone FIL model converted from the forest must predict
        # identically.
        fil_model = cuml_model.convert_to_fil_model()
        with cuml.using_output_type("numpy"):
            fil_model_preds = fil_model.predict(X_test)
            fil_model_acc = accuracy_score(y_test, fil_model_preds)
            assert fil_acc == fil_model_acc
        # Treelite export must preserve the ensemble dimensions.
        tl_model = cuml_model.convert_to_treelite_model()
        assert num_trees == tl_model.num_trees
        assert X.shape[1] == tl_model.num_features
        if X.shape[0] < 500000:
            # Consistency: use the same tree count as the cuML model.
            sk_model = skrfc(
                n_estimators=num_trees,
                max_depth=40,
                min_samples_split=2,
                random_state=10,
            )
            sk_model.fit(X_train, y_train)
            sk_preds = sk_model.predict(X_test)
            sk_acc = accuracy_score(y_test, sk_preds)
            assert fil_acc >= (sk_acc - 0.07)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "fil_sparse_format", ["not_supported", True, "auto", False]
)
@pytest.mark.parametrize(
    "algo", ["auto", "naive", "tree_reorg", "batch_tree_reorg"]
)
def test_rf_regression_sparse(special_reg, datatype, fil_sparse_format, algo):
    """RF regressor with sparse-FIL prediction options.

    Unsupported (fil_sparse_format, algo) combinations must raise ValueError;
    supported ones must match the converted FIL model's R^2, preserve the
    treelite export's dimensions, and stay within 0.07 of sklearn's R^2.
    """
    use_handle = True
    # Fix: local was misspelled ``num_treees``.
    num_trees = 50
    X, y = special_reg
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Initialize and fit using cuML's random forest regression model
    cuml_model = curfr(
        n_bins=16,
        split_criterion=2,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=num_trees,
        handle=handle,
        max_leaves=-1,
        max_depth=40,
        accuracy_metric="mse",
    )
    cuml_model.fit(X_train, y_train)
    # predict using FIL; dense-only algos and "not_supported" must raise.
    if (
        not fil_sparse_format
        or algo == "tree_reorg"
        or algo == "batch_tree_reorg"
    ) or fil_sparse_format == "not_supported":
        with pytest.raises(ValueError):
            fil_preds = cuml_model.predict(
                X_test,
                predict_model="GPU",
                fil_sparse_format=fil_sparse_format,
                algo=algo,
            )
    else:
        fil_preds = cuml_model.predict(
            X_test,
            predict_model="GPU",
            fil_sparse_format=fil_sparse_format,
            algo=algo,
        )
        fil_preds = np.reshape(fil_preds, np.shape(y_test))
        fil_r2 = r2_score(y_test, fil_preds, convert_dtype=datatype)
        # The standalone FIL model converted from the forest must score
        # identically.
        fil_model = cuml_model.convert_to_fil_model()
        with cuml.using_output_type("numpy"):
            fil_model_preds = fil_model.predict(X_test)
            fil_model_preds = np.reshape(fil_model_preds, np.shape(y_test))
            fil_model_r2 = r2_score(
                y_test, fil_model_preds, convert_dtype=datatype
            )
            assert fil_r2 == fil_model_r2
        # Treelite export must preserve the ensemble dimensions.
        tl_model = cuml_model.convert_to_treelite_model()
        assert num_trees == tl_model.num_trees
        assert X.shape[1] == tl_model.num_features
        # Initialize, fit and predict using
        # sklearn's random forest regression model
        if X.shape[0] < 1000:  # mode != "stress":
            # Consistency: use the same tree count as the cuML model.
            sk_model = skrfr(
                n_estimators=num_trees,
                max_depth=40,
                min_samples_split=2,
                random_state=10,
            )
            sk_model.fit(X_train, y_train)
            sk_preds = sk_model.predict(X_test)
            sk_r2 = r2_score(y_test, sk_preds, convert_dtype=datatype)
            assert fil_r2 >= (sk_r2 - 0.07)
@pytest.mark.xfail(reason="Need rapidsai/rmm#415 to detect memleak robustly")
@pytest.mark.memleak
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("fil_sparse_format", [True, False, "auto"])
@pytest.mark.parametrize(
    "n_iter", [unit_param(5), quality_param(30), stress_param(80)]
)
def test_rf_memory_leakage(small_clf, datatype, fil_sparse_format, n_iter):
    """Repeated RF fit/predict cycles must not grow device memory usage.

    Compares free device memory against a baseline captured after a warmup
    fit; any nonzero delta after a subsequent fit or predict is a leak.
    """
    use_handle = True
    X, y = small_clf
    X = X.astype(datatype)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # Create a handle for the cuml model
    handle, stream = get_handle(use_handle, n_streams=1)
    # Warmup. Some modules that are used in RF allocate space on the device
    # and consume memory. This is to make sure that the allocation is done
    # before the first call to get_memory_info.
    base_model = curfc(handle=handle)
    base_model.fit(X_train, y_train)
    handle.sync()  # just to be sure
    # Baseline: free device memory after the warmup allocations settle.
    free_mem = cuda.current_context().get_memory_info()[0]

    def test_for_memory_leak():
        # One fit + two predicts; free memory must match the baseline after
        # each step.
        cuml_mods = curfc(handle=handle)
        cuml_mods.fit(X_train, y_train)
        handle.sync()  # just to be sure
        # Calculate the memory free after fitting the cuML model
        delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
        assert delta_mem == 0
        for i in range(2):
            cuml_mods.predict(
                X_test,
                predict_model="GPU",
                fil_sparse_format=fil_sparse_format,
            )
            handle.sync()  # just to be sure
            # Calculate the memory free after predicting the cuML model
            delta_mem = free_mem - cuda.current_context().get_memory_info()[0]
            assert delta_mem == 0

    for i in range(n_iter):
        test_for_memory_leak()
@pytest.mark.parametrize("max_features", [1.0, "auto", "log2", "sqrt"])
@pytest.mark.parametrize("max_depth", [10, 13, 16])
@pytest.mark.parametrize("n_estimators", [10, 20, 100])
@pytest.mark.parametrize("n_bins", [8, 9, 10])
def test_create_classification_model(
    max_features, max_depth, n_estimators, n_bins
):
    """get_params()/set_params() round-trip must preserve RF hyper-parameters."""
    # random forest classification model
    cuml_model = curfc(
        max_features=max_features,
        n_bins=n_bins,
        n_estimators=n_estimators,
        max_depth=max_depth,
    )
    params = cuml_model.get_params()
    cuml_model2 = curfc()
    cuml_model2.set_params(**params)
    # Fix: local was misspelled ``verfiy_params``.
    verify_params = cuml_model2.get_params()
    assert params["max_features"] == verify_params["max_features"]
    assert params["max_depth"] == verify_params["max_depth"]
    assert params["n_estimators"] == verify_params["n_estimators"]
    assert params["n_bins"] == verify_params["n_bins"]
@pytest.mark.parametrize("n_estimators", [10, 20, 100])
@pytest.mark.parametrize("n_bins", [8, 9, 10])
def test_multiple_fits_classification(large_clf, n_estimators, n_bins):
    """Refitting the same RF classifier must leave its params untouched."""
    X, y = large_clf
    X = X.astype(np.float32)
    y = y.astype(np.int32)

    model = curfc(n_bins=n_bins, n_estimators=n_estimators, max_depth=10)
    # Fit twice in a row on identical data.
    model.fit(X, y)
    model.fit(X, y)

    # The hyperparameters must survive the repeated fits.
    params = model.get_params()
    assert params["n_estimators"] == n_estimators
    assert params["n_bins"] == n_bins
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([100, 50]),
        quality_param([200, 100]),
        stress_param([500, 350]),
    ],
)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize("n_estimators", [10, 20, 100])
@pytest.mark.parametrize("n_bins", [8, 9, 10])
def test_multiple_fits_regression(column_info, nrows, n_estimators, n_bins):
    """Refitting the same RF regressor must leave its params untouched."""
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        random_state=123,
    )
    X = X.astype(np.float32)
    # NOTE(review): labels are cast to int32 even though this is a regression
    # dataset — presumably harmless for this params-survival check; confirm.
    y = y.astype(np.int32)

    model = curfr(n_bins=n_bins, n_estimators=n_estimators, max_depth=10)
    # Fit three times in a row on identical data.
    for _ in range(3):
        model.fit(X, y)

    # The hyperparameters must survive the repeated fits.
    params = model.get_params()
    assert params["n_estimators"] == n_estimators
    assert params["n_bins"] == n_bins
@pytest.mark.parametrize("n_estimators", [5, 10, 20])
@pytest.mark.parametrize("detailed_text", [True, False])
def test_rf_get_text(n_estimators, detailed_text):
    """The text dump of a fitted RF must be non-empty and list every tree."""
    X, y = make_classification(
        n_samples=500,
        n_features=10,
        n_clusters_per_class=1,
        n_informative=5,
        random_state=94929,
        n_classes=2,
    )
    X = X.astype(np.float32)
    y = y.astype(np.int32)

    # Create a handle for the cuml model
    handle, stream = get_handle(True, n_streams=1)

    # Initialize cuML Random Forest classification model
    model = curfc(
        handle=handle,
        max_features=1.0,
        max_samples=1.0,
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=23707,
        n_streams=1,
        n_estimators=n_estimators,
        max_leaves=-1,
        max_depth=16,
    )
    model.fit(X, y)

    text_output = (
        model.get_detailed_text() if detailed_text
        else model.get_summary_text()
    )

    # Test 1: Output is non-zero
    assert text_output != ""

    # Test 2: every tree appears exactly once in the dump.
    tree_count = sum(
        1
        for line in text_output.split("\n")
        if line.strip().startswith("Tree #")
    )
    assert tree_count == n_estimators
@pytest.mark.parametrize("max_depth", [1, 2, 3, 5, 10, 15, 20])
@pytest.mark.parametrize("n_estimators", [5, 10, 20])
@pytest.mark.parametrize("estimator_type", ["regression", "classification"])
def test_rf_get_json(estimator_type, max_depth, n_estimators):
    """Validate the JSON dump of a fitted random forest model.

    The dump must be non-empty, contain exactly one entry per tree, and a
    by-hand traversal of the JSON trees must reproduce the model's own
    predictions.
    """
    X, y = make_classification(
        n_samples=350,
        n_features=20,
        n_clusters_per_class=1,
        n_informative=10,
        random_state=123,
        n_classes=2,
    )
    X = X.astype(np.float32)
    if estimator_type == "classification":
        cuml_model = curfc(
            max_features=1.0,
            max_samples=1.0,
            n_bins=16,
            split_criterion=0,
            min_samples_leaf=2,
            random_state=23707,
            n_streams=1,
            n_estimators=n_estimators,
            max_leaves=-1,
            max_depth=max_depth,
        )
        y = y.astype(np.int32)
    elif estimator_type == "regression":
        cuml_model = curfr(
            max_features=1.0,
            max_samples=1.0,
            n_bins=16,
            min_samples_leaf=2,
            random_state=23707,
            n_streams=1,
            n_estimators=n_estimators,
            max_leaves=-1,
            max_depth=max_depth,
        )
        y = y.astype(np.float32)
    else:
        assert False

    # Train model on the data
    cuml_model.fit(X, y)

    json_out = cuml_model.get_json()
    json_obj = json.loads(json_out)

    # Test 1: Output is non-zero
    assert "" != json_out

    # Test 2: JSON object contains correct number of trees
    assert isinstance(json_obj, list)
    assert len(json_obj) == n_estimators

    # Test 3: Traverse JSON trees and get the same predictions as cuML RF
    def predict_with_json_tree(tree, x):
        # Leaf nodes carry only "leaf_value"; internal nodes must describe
        # their split and both children.
        if "children" not in tree:
            assert "leaf_value" in tree
            return tree["leaf_value"]
        assert "split_feature" in tree
        assert "split_threshold" in tree
        assert "yes" in tree
        assert "no" in tree
        # Small epsilon guards against float rounding of the dumped threshold.
        if x[tree["split_feature"]] <= tree["split_threshold"] + 1e-5:
            return predict_with_json_tree(tree["children"][0], x)
        return predict_with_json_tree(tree["children"][1], x)

    def predict_with_json_rf_classifier(rf, x):
        # Returns the class with the highest vote summed over all trees.
        predictions = []
        for tree in rf:
            predictions.append(np.array(predict_with_json_tree(tree, x)))
        predictions = np.sum(predictions, axis=0)
        return np.argmax(predictions)

    def predict_with_json_rf_regressor(rf, x):
        # Regression prediction is the mean of the per-tree leaf values.
        pred = 0.0
        for tree in rf:
            pred += predict_with_json_tree(tree, x)[0]
        return pred / len(rf)

    if estimator_type == "classification":
        expected_pred = cuml_model.predict(X).astype(np.int32)

        for idx, row in enumerate(X):
            majority_vote = predict_with_json_rf_classifier(json_obj, row)
            assert expected_pred[idx] == majority_vote
    elif estimator_type == "regression":
        expected_pred = cuml_model.predict(X).astype(np.float32)
        pred = []
        for idx, row in enumerate(X):
            pred.append(predict_with_json_rf_regressor(json_obj, row))
        pred = np.array(pred, dtype=np.float32)
        # NOTE: a leftover debug `print(json_obj)` and a redundant per-element
        # np.isclose loop were removed; the vectorized comparison below is the
        # actual check.
        np.testing.assert_almost_equal(pred, expected_pred, decimal=6)
@pytest.mark.xfail(
    reason="Needs refactoring/debugging due to sporadic failures"
    "https://github.com/rapidsai/cuml/issues/5528"
)
@pytest.mark.memleak
@pytest.mark.parametrize("estimator_type", ["classification"])
def test_rf_host_memory_leak(large_clf, estimator_type):
    """Repeated fits must not grow host (RSS) memory beyond a small margin."""
    import gc
    import os

    try:
        import psutil
    except ImportError:
        pytest.skip("psutil not installed")

    process = psutil.Process(os.getpid())

    X, y = large_clf
    X = X.astype(np.float32)
    params = {"max_depth": 50}
    if estimator_type == "classification":
        base_model = curfc(max_depth=10, n_estimators=100, random_state=123)
        y = y.astype(np.int32)
    else:
        base_model = curfr(max_depth=10, n_estimators=100, random_state=123)
        y = y.astype(np.float32)

    # Pre-fit once - this is our baseline and memory usage
    # should not significantly exceed it after later fits
    base_model.fit(X, y)
    gc.collect()
    initial_baseline_mem = process.memory_info().rss

    for _ in range(5):
        base_model.fit(X, y)
        base_model.set_params(**params)
        gc.collect()
        final_mem = process.memory_info().rss

    # Some tiny allocations may occur, but we should not leak
    # without bounds, which previously happened
    assert (final_mem - initial_baseline_mem) < 2.4e6
@pytest.mark.xfail(
    reason="Needs refactoring/debugging due to sporadic failures"
    "https://github.com/rapidsai/cuml/issues/5528"
)
@pytest.mark.memleak
@pytest.mark.parametrize("estimator_type", ["regression", "classification"])
@pytest.mark.parametrize("i", list(range(100)))
def test_concat_memory_leak(large_clf, estimator_type, i):
    """Repeatedly concatenate treelite handles and require that host (RSS)
    memory stays within a small margin of the post-first-concat baseline.

    ``i`` only parametrizes repetitions of the whole test.
    """
    import gc
    import os

    try:
        import psutil
    except ImportError:
        pytest.skip("psutil not installed")

    process = psutil.Process(os.getpid())

    X, y = large_clf
    X = X.astype(np.float32)

    # Build a series of RF models
    n_models = 10
    if estimator_type == "classification":
        base_models = [
            curfc(max_depth=10, n_estimators=100, random_state=123)
            for i in range(n_models)
        ]
        y = y.astype(np.int32)
    elif estimator_type == "regression":
        base_models = [
            curfr(max_depth=10, n_estimators=100, random_state=123)
            for i in range(n_models)
        ]
        y = y.astype(np.float32)
    else:
        assert False

    # Pre-fit once - this is our baseline and memory usage
    # should not significantly exceed it after later fits
    for model in base_models:
        model.fit(X, y)

    # Just concatenate over and over in a loop
    concat_models = base_models[1:]
    init_model = base_models[0]
    other_handles = [
        model._obtain_treelite_handle() for model in concat_models
    ]
    # First concatenation happens before the baseline read so that any
    # one-time allocations are excluded from the leak measurement.
    init_model._concatenate_treelite_handle(other_handles)

    gc.collect()
    initial_baseline_mem = process.memory_info().rss
    for i in range(10):
        init_model._concatenate_treelite_handle(other_handles)
        gc.collect()
        used_mem = process.memory_info().rss
        logger.debug(
            "memory at rep %2d: %d m"
            % (i, (used_mem - initial_baseline_mem) / 1e6)
        )

    gc.collect()
    used_mem = process.memory_info().rss
    logger.info(
        "Final memory delta: %d" % ((used_mem - initial_baseline_mem) / 1e6)
    )

    # increasing margin to avoid very infrequent failures
    assert (used_mem - initial_baseline_mem) < 1.1e6
def test_rf_nbins_small(small_clf):
    """Fitting on fewer samples than ``n_bins`` must emit a clamping warning."""
    X, y = small_clf
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )

    model = curfc()
    # Fitting on only 3 rows (fewer than the default n_bins) must warn.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        model.fit(X_train[0:3, :], y_train[0:3])
        expected = (
            "The number of bins, `n_bins` is greater than "
            "the number of samples used for training. "
            "Changing `n_bins` to number of training samples."
        )
        assert expected in str(caught[-1].message)
@pytest.mark.parametrize("split_criterion", [2], ids=["mse"])
def test_rf_regression_with_identical_labels(split_criterion):
    """A constant-label regression fit must yield a single root-only tree."""
    X = np.array([[-1, 0], [0, 1], [2, 0], [0, 3], [-2, 0]], dtype=np.float32)
    y = np.array([1, 1, 1, 1, 1], dtype=np.float32)
    # Degenerate case: every label is identical, so no split can improve the
    # criterion and the regressor must emit an empty tree (root node only).
    model = curfr(
        max_features=1.0,
        max_samples=1.0,
        n_bins=5,
        bootstrap=False,
        split_criterion=split_criterion,
        min_samples_leaf=1,
        min_samples_split=2,
        random_state=0,
        n_streams=1,
        n_estimators=1,
        max_depth=1,
    )
    model.fit(X, y)

    model_dump = json.loads(model.get_json())
    assert len(model_dump) == 1
    assert model_dump[0] == {
        "nodeid": 0,
        "leaf_value": [1.0],
        "instance_count": 5,
    }
def test_rf_regressor_gtil_integration(tmpdir):
    """RF regressor predictions round-trip through a Treelite checkpoint + GTIL."""
    X, y = fetch_california_housing(return_X_y=True)
    X, y = X.astype(np.float32), y.astype(np.float32)
    model = curfr(max_depth=3, random_state=0, n_estimators=10)
    model.fit(X, y)
    expected_pred = model.predict(X)

    # Serialize to a Treelite checkpoint and reload it.
    checkpoint_path = os.path.join(tmpdir, "checkpoint.tl")
    model.convert_to_treelite_model().to_treelite_checkpoint(checkpoint_path)
    tl_model = treelite.Model.deserialize(checkpoint_path)

    # GTIL must reproduce the GPU predictions.
    np.testing.assert_almost_equal(
        treelite.gtil.predict(tl_model, X), expected_pred, decimal=5
    )
def test_rf_binary_classifier_gtil_integration(tmpdir):
    """Binary RF classifier round-trips through a Treelite checkpoint + GTIL."""
    X, y = load_breast_cancer(return_X_y=True)
    X, y = X.astype(np.float32), y.astype(np.int32)
    model = curfc(max_depth=3, random_state=0, n_estimators=10)
    model.fit(X, y)
    expected_pred = model.predict(X)

    # Serialize to a Treelite checkpoint and reload it.
    checkpoint_path = os.path.join(tmpdir, "checkpoint.tl")
    model.convert_to_treelite_model().to_treelite_checkpoint(checkpoint_path)
    tl_model = treelite.Model.deserialize(checkpoint_path)

    # GTIL must reproduce the GPU predictions.
    np.testing.assert_almost_equal(
        treelite.gtil.predict(tl_model, X), expected_pred, decimal=5
    )
def test_rf_multiclass_classifier_gtil_integration(tmpdir):
    """Multiclass RF probabilities round-trip through Treelite + GTIL."""
    X, y = load_iris(return_X_y=True)
    X, y = X.astype(np.float32), y.astype(np.int32)
    model = curfc(max_depth=3, random_state=0, n_estimators=10)
    model.fit(X, y)
    expected_prob = model.predict_proba(X)

    # Serialize to a Treelite checkpoint and reload it.
    checkpoint_path = os.path.join(tmpdir, "checkpoint.tl")
    model.convert_to_treelite_model().to_treelite_checkpoint(checkpoint_path)
    tl_model = treelite.Model.deserialize(checkpoint_path)

    # GTIL margins must reproduce the GPU class probabilities.
    np.testing.assert_almost_equal(
        treelite.gtil.predict(tl_model, X, pred_margin=True),
        expected_prob,
        decimal=5,
    )
@pytest.mark.parametrize(
    "estimator, make_data",
    [
        (curfc, make_classification),
        (curfr, make_regression),
    ],
)
def test_rf_min_samples_split_with_small_float(estimator, make_data):
    """``min_samples_split`` given as a tiny float must not raise.

    Non-regression test for gh-4613.
    """
    X, y = make_data(random_state=0)
    model = estimator(min_samples_split=0.0001, random_state=0, n_estimators=2)
    # Completing the fit without an error is the assertion here.
    model.fit(X, y)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_doctest.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
import contextlib
import doctest
import inspect
import io
import cuml
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
def _name_in_all(parent, name):
return name in getattr(parent, "__all__", [])
def _is_public_name(parent, name):
return not name.startswith("_")
def _find_doctests_in_obj(obj, finder=None, criteria=None):
"""Find all doctests in an object.
Parameters
----------
obj : module or class
The object to search for docstring examples.
finder : doctest.DocTestFinder, optional
The DocTestFinder object to use. If not provided, a DocTestFinder is
constructed.
criteria : callable, optional
Callable indicating whether to recurse over members of the provided
object. If not provided, names not defined in the object's ``__all__``
property are ignored.
Yields
------
doctest.DocTest
The next doctest found in the object.
"""
if finder is None:
finder = doctest.DocTestFinder()
if criteria is None:
criteria = _name_in_all
for docstring in finder.find(obj):
if docstring.examples:
yield docstring
for name, member in inspect.getmembers(obj):
# Only recurse over members matching the criteria
if not criteria(obj, name):
continue
# Recurse over the public API of modules (objects defined in the
# module's __all__)
if inspect.ismodule(member):
yield from _find_doctests_in_obj(
member, finder, criteria=_name_in_all
)
# Recurse over the public API of classes (attributes not prefixed with
# an underscore)
if inspect.isclass(member):
# Temporary hack to get tests to pass
if "MultinomialNB" in member.__name__:
continue
if "RandomForestClassifier" in member.__name__:
continue
if "Lasso" in member.__name__:
continue
if "ElasticNet" in member.__name__:
continue
yield from _find_doctests_in_obj(
member, finder, criteria=_is_public_name
)
if inspect.isfunction(member):
yield from _find_doctests_in_obj(member, finder)
@pytest.mark.parametrize(
    "docstring",
    _find_doctests_in_obj(cuml),
    ids=lambda docstring: docstring.name,
)
def test_docstring(docstring):
    """Execute one collected docstring example and require that it passes."""
    if docstring.name == "Handle":
        pytest.skip("Docstring is tested in RAFT.")

    # Whitespace differences are ignored and "..." matches any output, which
    # keeps memory addresses and imprecise floats from failing the run.
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    runner = doctest.DocTestRunner(optionflags=flags)

    # Pre-seed names that examples may use without importing them first.
    docstring.globs = dict(cudf=cudf, np=np, cuml=cuml)

    # Capture stdout and include failing outputs in the traceback.
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        runner.run(docstring)

    results = runner.summarize()
    try:
        assert not results.failed, (
            f"{results.failed} of {results.attempted} doctests failed for "
            f"{docstring.name}:\n{captured.getvalue()}"
        )
    except AssertionError:
        # If some failed but all the failures were due to lack of multiGPU
        # support, we can skip. This assumes any MG-related failure means all
        # doctests failed for that reason, which is heavy-handed but far
        # simpler than attributing individual failing lines.
        if (
            "cuML has not been built with multiGPU support"
            in captured.getvalue()
        ):
            pytest.skip("Doctest requires MG support.")
        raise
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_array_sparse.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.safe_imports import cpu_only_import
scipy_sparse = cpu_only_import("scipy.sparse")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
test_input_types = ["cupy", "scipy"]
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("sparse_format", ["csr", "coo", "csc"])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("convert_format", [True, False])
def test_input(input_type, sparse_format, dtype, convert_format):
    """SparseCumlArray accepts CSR directly; other formats need conversion."""
    rand_func = cupyx.scipy.sparse if input_type == "cupy" else scipy_sparse
    X = rand_func.random(
        100, 100, format=sparse_format, density=0.5, dtype=dtype
    )

    if not convert_format and sparse_format != "csr":
        # Non-CSR input without permission to convert must be rejected.
        with pytest.raises(ValueError):
            SparseCumlArray(X, convert_format=convert_format)
        return

    X_m = SparseCumlArray(X, convert_format=convert_format)
    assert X.shape == X_m.shape
    assert X.nnz == X_m.nnz
    assert X.dtype == X_m.dtype

    # Just a sanity check
    assert isinstance(X_m.indptr, CumlArray)
    assert isinstance(X_m.indices, CumlArray)
    assert isinstance(X_m.data, CumlArray)
    assert X_m.indptr.dtype == cp.int32
    assert X_m.indices.dtype == cp.int32
    assert X_m.data.dtype == dtype
def test_nonsparse_input_fails():
    """A dense array is not a valid SparseCumlArray input."""
    dense = cp.random.random((100, 100))
    with pytest.raises(ValueError):
        SparseCumlArray(dense)
@pytest.mark.parametrize("input_type", test_input_types)
def test_convert_to_dtype(input_type):
    """convert_to_dtype casts values; omitting it keeps the input dtype."""
    rand_func = cupyx.scipy.sparse if input_type == "cupy" else scipy_sparse
    X = rand_func.random(100, 100, format="csr", density=0.5, dtype=cp.float64)

    converted = SparseCumlArray(X, convert_to_dtype=cp.float32)
    assert converted.dtype == cp.float32
    # Index arrays stay 32-bit regardless of the value dtype.
    assert converted.indptr.dtype == cp.int32
    assert converted.indices.dtype == cp.int32
    assert converted.data.dtype == cp.float32

    unconverted = SparseCumlArray(X)
    assert unconverted.dtype == X.dtype
@pytest.mark.parametrize("input_type", test_input_types)
def test_convert_index(input_type):
    """Index arrays default to int32 but can be kept as int64 on request."""
    rand_func = cupyx.scipy.sparse if input_type == "cupy" else scipy_sparse
    X = rand_func.random(100, 100, format="csr", density=0.5, dtype=cp.float64)
    X.indptr = X.indptr.astype(cp.int64)
    X.indices = X.indices.astype(cp.int64)

    # Will convert to 32-bit by default
    narrowed = SparseCumlArray(X)
    assert narrowed.indptr.dtype == cp.int32
    assert narrowed.indices.dtype == cp.int32

    widened = SparseCumlArray(X, convert_index=cp.int64)
    assert widened.indptr.dtype == cp.int64
    assert widened.indices.dtype == cp.int64
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("output_type", test_input_types)
@pytest.mark.parametrize("output_format", [None, "coo", "csc"])
def test_output(input_type, output_type, dtype, output_format):
    """``to_output`` must honor both the target package and sparse format.

    ``output_format=None`` keeps the stored CSR layout; the matrix values
    must be preserved exactly in every case.
    """
    rand_func = cupyx.scipy.sparse if input_type == "cupy" else scipy_sparse
    X = rand_func.random(100, 100, format="csr", density=0.5, dtype=dtype)
    X_m = SparseCumlArray(X)

    output = X_m.to_output(output_type, output_format=output_format)

    # Pick the expected matrix class from the target package + format; this
    # replaces two duplicated isinstance ladders from the original version.
    target = scipy_sparse if output_type == "scipy" else cupyx.scipy.sparse
    expected_cls = {
        None: target.csr_matrix,
        "coo": target.coo_matrix,
        "csc": target.csc_matrix,
    }.get(output_format)
    if expected_cls is None:
        # Typo fixed: the failure message previously read "unecpected".
        pytest.fail("unexpected output format")
    assert isinstance(output, expected_cls)

    cp.testing.assert_array_equal(X.todense(), output.todense())
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_solver_attributes.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.linear_model import MBSGDRegressor as cumlMBSGRegressor
from cuml.linear_model import MBSGDClassifier as cumlMBSGClassifier
from cuml import LogisticRegression as cuLog
from cuml.linear_model import ElasticNet as cumlElastic
from cuml.linear_model import Lasso as cumlLasso
from cuml.datasets import make_blobs
def test_mbsgd_regressor_attributes():
    """A fitted MBSGD regressor exposes its expected public attributes."""
    X, y = make_blobs()
    model = cumlMBSGRegressor()
    model.fit(X, y)

    expected = (
        "dtype",
        "solver_model",
        "coef_",
        "intercept_",
        "l1_ratio",
        "n_cols",
        "loss",
        "eta0",
        "batch_size",
        "epochs",
    )
    for name in expected:
        assert hasattr(model, name)
def test_logistic_regression_attributes():
    """A fitted logistic regression exposes its expected public attributes."""
    X, y = make_blobs()
    model = cuLog().fit(X, y, convert_dtype=True)

    expected = (
        "dtype",
        "solver_model",
        "coef_",
        "intercept_",
        "l1_ratio",
        "n_cols",
        "C",
        "penalty",
        "fit_intercept",
        "solver",
    )
    for name in expected:
        assert hasattr(model, name)
def test_mbsgd_classifier_attributes():
    """A fitted MBSGD classifier exposes its expected public attributes."""
    X, y = make_blobs()
    model = cumlMBSGClassifier()
    model.fit(X, y)

    expected = (
        "dtype",
        "solver_model",
        "coef_",
        "intercept_",
        "l1_ratio",
        "n_cols",
        "eta0",
        "batch_size",
        "fit_intercept",
        "penalty",
    )
    for name in expected:
        assert hasattr(model, name)
def test_elastic_net_attributes():
    """A fitted ElasticNet model exposes its expected public attributes."""
    X, y = make_blobs()
    model = cumlElastic(fit_intercept=False)
    model.fit(X, y)

    expected = (
        "dtype",
        "solver_model",
        "coef_",
        "intercept_",
        "l1_ratio",
        "n_cols",
        "alpha",
        "max_iter",
        "fit_intercept",
    )
    for name in expected:
        assert hasattr(model, name)
def test_lasso_attributes():
    """A fitted Lasso model exposes its expected public attributes."""
    X, y = make_blobs()
    clf = cumlLasso()
    clf.fit(X, y)

    # "solver_model" was listed twice in the original attribute list; the
    # duplicate added no coverage and has been dropped.
    attrs = [
        "dtype",
        "solver_model",
        "coef_",
        "intercept_",
        "l1_ratio",
        "n_cols",
    ]
    for attr in attrs:
        assert hasattr(clf, attr)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_compose.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.test_preproc_utils import assert_allclose
from sklearn.preprocessing import (
StandardScaler as skStandardScaler,
Normalizer as skNormalizer,
PolynomialFeatures as skPolynomialFeatures,
OneHotEncoder as skOneHotEncoder,
)
from cuml.preprocessing import (
StandardScaler as cuStandardScaler,
Normalizer as cuNormalizer,
PolynomialFeatures as cuPolynomialFeatures,
OneHotEncoder as cuOneHotEncoder,
)
from sklearn.compose import (
ColumnTransformer as skColumnTransformer,
make_column_transformer as sk_make_column_transformer,
make_column_selector as sk_make_column_selector,
)
from cuml.compose import (
ColumnTransformer as cuColumnTransformer,
make_column_transformer as cu_make_column_transformer,
make_column_selector as cu_make_column_selector,
)
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
from cuml.testing.test_preproc_utils import (
clf_dataset,
sparse_clf_dataset,
) # noqa: F401
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
pdDataFrame = cpu_only_import_from("pandas", "DataFrame")
cuDataFrame = gpu_only_import_from("cudf", "DataFrame")
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
@pytest.mark.parametrize(
    "transformer_weights", [None, {"scaler": 2.4, "normalizer": 1.8}]
)
def test_column_transformer(
    clf_dataset, remainder, transformer_weights  # noqa: F811
):
    """cuML's ColumnTransformer must match scikit-learn on dense data."""
    X_np, X = clf_dataset

    sk_selec1 = [0, 2]
    sk_selec2 = [1, 3]
    if isinstance(X, (pdDataFrame, cuDataFrame)):
        # DataFrames are addressed by column name rather than position.
        cu_selec1 = ["c" + str(i) for i in sk_selec1]
        cu_selec2 = ["c" + str(i) for i in sk_selec2]
    else:
        cu_selec1 = sk_selec1
        cu_selec2 = sk_selec2

    cu_transformer = cuColumnTransformer(
        [
            ("scaler", cuStandardScaler(), cu_selec1),
            ("normalizer", cuNormalizer(), cu_selec2),
        ],
        remainder=remainder,
        transformer_weights=transformer_weights,
    )
    ft_X = cu_transformer.fit_transform(X)
    t_X = cu_transformer.transform(X)
    assert type(t_X) == type(X)

    sk_transformer = skColumnTransformer(
        [
            ("scaler", skStandardScaler(), sk_selec1),
            ("normalizer", skNormalizer(), sk_selec2),
        ],
        remainder=remainder,
        transformer_weights=transformer_weights,
    )
    sk_t_X = sk_transformer.fit_transform(X_np)

    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
@pytest.mark.parametrize(
    "transformer_weights", [None, {"scaler": 2.4, "normalizer": 1.8}]
)
@pytest.mark.parametrize("sparse_threshold", [0.2, 0.8])
def test_column_transformer_sparse(
    sparse_clf_dataset,
    remainder,  # noqa: F811
    transformer_weights,
    sparse_threshold,
):
    """cuML's ColumnTransformer must match scikit-learn on sparse data."""
    X_np, X = sparse_clf_dataset
    if X.format == "csc":
        pytest.xfail()
    dataset_density = X.nnz / X.size

    cu_transformer = cuColumnTransformer(
        [
            ("scaler", cuStandardScaler(with_mean=False), [0, 2]),
            ("normalizer", cuNormalizer(), [1, 3]),
        ],
        remainder=remainder,
        transformer_weights=transformer_weights,
        sparse_threshold=sparse_threshold,
    )
    ft_X = cu_transformer.fit_transform(X)
    t_X = cu_transformer.transform(X)
    if dataset_density < sparse_threshold:
        # Sparse input -> sparse output if dataset_density > sparse_threshold
        # else sparse input -> dense output
        assert type(t_X) == type(X)

    sk_transformer = skColumnTransformer(
        [
            ("scaler", skStandardScaler(with_mean=False), [0, 2]),
            ("normalizer", skNormalizer(), [1, 3]),
        ],
        remainder=remainder,
        transformer_weights=transformer_weights,
        sparse_threshold=sparse_threshold,
    )
    sk_t_X = sk_transformer.fit_transform(X_np)

    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_make_column_transformer(clf_dataset, remainder):  # noqa: F811
    """make_column_transformer must match scikit-learn on dense data."""
    X_np, X = clf_dataset

    sk_selec1 = [0, 2]
    sk_selec2 = [1, 3]
    if isinstance(X, (pdDataFrame, cuDataFrame)):
        # DataFrames are addressed by column name rather than position.
        cu_selec1 = ["c" + str(i) for i in sk_selec1]
        cu_selec2 = ["c" + str(i) for i in sk_selec2]
    else:
        cu_selec1 = sk_selec1
        cu_selec2 = sk_selec2

    cu_transformer = cu_make_column_transformer(
        (cuStandardScaler(), cu_selec1),
        (cuNormalizer(), cu_selec2),
        remainder=remainder,
    )
    ft_X = cu_transformer.fit_transform(X)
    t_X = cu_transformer.transform(X)
    assert type(t_X) == type(X)

    sk_transformer = sk_make_column_transformer(
        (skStandardScaler(), sk_selec1),
        (skNormalizer(), sk_selec2),
        remainder=remainder,
    )
    sk_t_X = sk_transformer.fit_transform(X_np)

    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
@pytest.mark.parametrize("sparse_threshold", [0.2, 0.8])
def test_make_column_transformer_sparse(
    sparse_clf_dataset, remainder, sparse_threshold  # noqa: F811
):
    """make_column_transformer must match scikit-learn on sparse data."""
    X_np, X = sparse_clf_dataset
    if X.format == "csc":
        pytest.xfail()
    dataset_density = X.nnz / X.size

    cu_transformer = cu_make_column_transformer(
        (cuStandardScaler(with_mean=False), [0, 2]),
        (cuNormalizer(), [1, 3]),
        remainder=remainder,
        sparse_threshold=sparse_threshold,
    )
    ft_X = cu_transformer.fit_transform(X)
    t_X = cu_transformer.transform(X)
    if dataset_density < sparse_threshold:
        # Sparse input -> sparse output if dataset_density > sparse_threshold
        # else sparse input -> dense output
        assert type(t_X) == type(X)

    sk_transformer = sk_make_column_transformer(
        (skStandardScaler(with_mean=False), [0, 2]),
        (skNormalizer(), [1, 3]),
        remainder=remainder,
        sparse_threshold=sparse_threshold,
    )
    sk_t_X = sk_transformer.fit_transform(X_np)

    assert_allclose(ft_X, sk_t_X)
    assert_allclose(t_X, sk_t_X)
@pytest.mark.skip(
    reason="scikit-learn replaced get_feature_names with "
    "get_feature_names_out"
    "https://github.com/rapidsai/cuml/issues/5159"
)
def test_column_transformer_get_feature_names(clf_dataset):  # noqa: F811
    """Feature names from cuML's transformer should mirror scikit-learn's."""
    X_np, X = clf_dataset

    cu_transformer = cuColumnTransformer(
        [("PolynomialFeatures", cuPolynomialFeatures(), [0, 2])]
    )
    cu_transformer.fit_transform(X)
    cu_feature_names = cu_transformer.get_feature_names()

    sk_transformer = skColumnTransformer(
        [("PolynomialFeatures", skPolynomialFeatures(), [0, 2])]
    )
    sk_transformer.fit_transform(X_np)
    sk_feature_names = sk_transformer.get_feature_names()

    assert cu_feature_names == sk_feature_names
def test_column_transformer_named_transformers_(clf_dataset):  # noqa: F811
    """named_transformers_ keys must line up with scikit-learn's."""
    X_np, X = clf_dataset

    cu_transformer = cuColumnTransformer(
        [("PolynomialFeatures", cuPolynomialFeatures(), [0, 2])]
    )
    cu_transformer.fit_transform(X)
    cu_named = cu_transformer.named_transformers_

    sk_transformer = skColumnTransformer(
        [("PolynomialFeatures", skPolynomialFeatures(), [0, 2])]
    )
    sk_transformer.fit_transform(X_np)
    sk_named = sk_transformer.named_transformers_

    assert cu_named.keys() == sk_named.keys()
def test_make_column_selector():
    """Selecting columns by dtype and by name pattern must match sklearn."""
    X_np = pdDataFrame(
        {
            "city": ["London", "London", "Paris", "Sallisaw"],
            "rating": [5, 3, 4, 5],
            "temperature": [21.0, 21.0, 24.0, 28.0],
        }
    )
    X = cudf.from_pandas(X_np)

    cu_transformer = cuColumnTransformer(
        [
            (
                "ohe",
                cuOneHotEncoder(),
                cu_make_column_selector(dtype_exclude=np.number),
            ),
            (
                "scaler",
                cuStandardScaler(),
                cu_make_column_selector(dtype_include=np.integer),
            ),
            (
                "normalizer",
                cuNormalizer(),
                cu_make_column_selector(pattern="temp"),
            ),
        ],
        remainder="drop",
    )
    t_X = cu_transformer.fit_transform(X)

    sk_transformer = skColumnTransformer(
        [
            (
                "ohe",
                skOneHotEncoder(),
                sk_make_column_selector(dtype_exclude=np.number),
            ),
            (
                "scaler",
                skStandardScaler(),
                sk_make_column_selector(dtype_include=np.integer),
            ),
            (
                "normalizer",
                skNormalizer(),
                sk_make_column_selector(pattern="temp"),
            ),
        ],
        remainder="drop",
    )
    sk_t_X = sk_transformer.fit_transform(X_np)

    assert_allclose(t_X, sk_t_X)
    assert type(t_X) == type(X)
def test_column_transformer_index(clf_dataset): # noqa: F811
X_np, X = clf_dataset
if not isinstance(X, (pdDataFrame, cuDataFrame)):
pytest.skip()
cu_transformers = [("scaler", cuStandardScaler(), X.columns)]
transformer = cuColumnTransformer(cu_transformers)
transformer.fit_transform(X)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_random_projection.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import has_scipy
from sklearn.datasets import make_blobs
from sklearn.random_projection import (
johnson_lindenstrauss_min_dim as sklearn_johnson_lindenstrauss_min_dim,
)
from cuml.random_projection import (
johnson_lindenstrauss_min_dim as cuml_johnson_lindenstrauss_min_dim,
)
from cuml.random_projection import (
GaussianRandomProjection,
SparseRandomProjection,
)
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("method", ["gaussian", "sparse"])
def test_random_projection_fit(datatype, method):
    """Fitting either projection model on a blob dataset must not crash."""
    data, target = make_blobs(n_samples=800, centers=400, n_features=3000)
    data = data.astype(datatype)
    target = target.astype(datatype)

    cls = (
        GaussianRandomProjection
        if method == "gaussian"
        else SparseRandomProjection
    )
    model = cls(eps=0.2)
    model.fit(data)

    assert True  # Did not crash
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("method", ["gaussian", "sparse"])
def test_random_projection_fit_transform(datatype, method):
    """Embedded pairwise distances must satisfy the Johnson-Lindenstrauss bound."""
    if not has_scipy():
        pytest.skip(
            "Skipping test_random_projection_fit_transform because "
            + "Scipy is missing"
        )
    from scipy.spatial.distance import pdist

    eps = 0.2
    data, target = make_blobs(n_samples=800, centers=400, n_features=3000)
    data = data.astype(datatype)
    target = target.astype(datatype)

    cls = (
        GaussianRandomProjection
        if method == "gaussian"
        else SparseRandomProjection
    )
    model = cls(eps=eps)
    model.fit(data)
    embedded = model.transform(data)

    dist_before = pdist(data)
    dist_after = pdist(embedded)

    # JL lemma: every pairwise distance is preserved within a (1 +/- eps) band.
    lower = (1.0 - eps) * dist_before
    upper = (1.0 + eps) * dist_before
    assert np.all(lower <= dist_after) and np.all(dist_after <= upper)
def test_johnson_lindenstrauss_min_dim():
    """cuML's JL minimum-dimension helper must agree with scikit-learn's."""
    n_tests = 10000
    sample_counts = np.random.randint(low=50, high=1e10, size=n_tests)
    eps_values = np.random.rand(n_tests) + 1e-17  # range (0,1)

    for n_samples, eps in zip(sample_counts, eps_values):
        assert cuml_johnson_lindenstrauss_min_dim(
            n_samples, eps=eps
        ) == sklearn_johnson_lindenstrauss_min_dim(n_samples, eps=eps)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("method", ["sparse"])
def test_random_projection_fit_transform_default(datatype, method):
    """Default-constructed projections must respect a loose JL bound (eps=0.8)."""
    if not has_scipy():
        pytest.skip(
            "Skipping test_random_projection_fit_transform_default "
            + "because Scipy is missing"
        )
    from scipy.spatial.distance import pdist

    eps = 0.8
    data, target = make_blobs(n_samples=30, centers=4, n_features=5000)
    data = data.astype(datatype)
    target = target.astype(datatype)

    model = (
        GaussianRandomProjection()
        if method == "gaussian"
        else SparseRandomProjection()
    )
    model.fit(data)
    embedded = model.transform(data)

    dist_before = pdist(data)
    dist_after = pdist(embedded)

    # JL lemma with the default (loose) tolerance.
    assert np.all((1.0 - eps) * dist_before <= dist_after)
    assert np.all(dist_after <= (1.0 + eps) * dist_before)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_class_enumerator.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml
from cuml.testing.utils import ClassEnumerator
def test_class_enumerator():
    """ClassEnumerator must honor exclusions and custom constructors."""

    class SomeModule:
        class SomeClass(cuml.Base):
            pass

        class ExcludedClass(cuml.Base):
            pass

        class CustomConstructorClass(cuml.Base):
            def __init__(self, *, some_parameter):
                self.some_parameter = some_parameter

            def __eq__(self, other):
                return self.some_parameter == other.some_parameter

    enumerator = ClassEnumerator(
        module=SomeModule,
        exclude_classes=[SomeModule.ExcludedClass],
        custom_constructors={
            "CustomConstructorClass": lambda: SomeModule.CustomConstructorClass(
                some_parameter=1
            )
        },
    )
    found = enumerator.get_models()

    expected = {
        "SomeClass": SomeModule.SomeClass,
        "CustomConstructorClass": lambda: SomeModule.CustomConstructorClass(
            some_parameter=1
        ),
    }

    # A direct `found == expected` comparison is impossible because
    # CustomConstructorClass maps to a lambda, so compare piecewise.
    assert len(found) == len(expected) == 2
    assert found["SomeClass"] == expected["SomeClass"]
    assert (
        found["CustomConstructorClass"]()
        == expected["CustomConstructorClass"]()
    )
def test_class_enumerator_actual_module():
    """Enumerating cuml.linear_model must apply exclusions and custom ctors."""
    enumerator = ClassEnumerator(
        module=cuml.linear_model,
        exclude_classes=[
            cuml.LinearRegression,
            cuml.MBSGDClassifier,
            cuml.MBSGDRegressor,
        ],
        custom_constructors={
            "LogisticRegression": lambda: cuml.LogisticRegression(handle=1)
        },
    )
    found = enumerator.get_models()

    expected = {
        "ElasticNet": cuml.ElasticNet,
        "Lasso": cuml.Lasso,
        "LogisticRegression": lambda: cuml.LogisticRegression(handle=1),
        "Ridge": cuml.Ridge,
    }

    # The custom constructor is a lambda, so verify its effect first and
    # then compare the remaining (plain-class) entries directly.
    assert (
        found["LogisticRegression"]().handle
        == expected["LogisticRegression"]().handle
    )
    found.pop("LogisticRegression")
    expected.pop("LogisticRegression")
    assert found == expected
def test_class_enumerator_empty_module():
    """A module holding no cuml.Base subclasses must yield an empty mapping."""

    class EmptyModule:
        pass

    assert ClassEnumerator(EmptyModule).get_models() == {}
def test_class_enumerator_parameters():
    """Each ClassEnumerator argument must independently shape the result."""

    class SomeModule:
        class SomeClass(cuml.Base):
            def __eq__(self, other):
                return type(other) == type(self)

    plain = ClassEnumerator(module=SomeModule).get_models()
    excluded = ClassEnumerator(
        module=SomeModule, exclude_classes=[SomeModule.SomeClass]
    ).get_models()
    custom = ClassEnumerator(
        module=SomeModule,
        custom_constructors={"SomeClass": lambda: SomeModule.SomeClass()},
    ).get_models()

    assert plain == {"SomeClass": SomeModule.SomeClass}
    assert excluded == {}
    assert len(custom) == 1 and custom["SomeClass"]() == SomeModule.SomeClass()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_holtwinters.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.metrics import r2_score
from statsmodels.tsa.holtwinters import ExponentialSmoothing as sm_ES
from cuml.tsa.holtwinters import ExponentialSmoothing as cuml_ES
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
# Monthly international airline passenger counts, 12 values per row
# (presumably the classic Box & Jenkins "AirPassengers" series — verify).
airpassengers = [
    112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
    115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
    145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
    171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
    196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
    204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
    242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
    284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
    315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
    340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
]
# Monthly atmospheric CO2 readings, 12 values per row
# (presumably Mauna Loa concentrations in ppm — verify against source).
co2 = [
    315.42, 316.31, 316.50, 317.56, 318.13, 318.00, 316.39, 314.65, 313.68, 313.18, 314.66, 315.43,
    316.27, 316.81, 317.42, 318.87, 319.87, 319.43, 318.01, 315.74, 314.00, 313.68, 314.84, 316.03,
    316.73, 317.54, 318.38, 319.31, 320.42, 319.61, 318.42, 316.63, 314.83, 315.16, 315.94, 316.85,
    317.78, 318.40, 319.53, 320.42, 320.85, 320.45, 319.45, 317.25, 316.11, 315.27, 316.53, 317.53,
    318.58, 318.92, 319.70, 321.22, 322.08, 321.31, 319.58, 317.61, 316.05, 315.83, 316.91, 318.20,
    319.41, 320.07, 320.74, 321.40, 322.06, 321.73, 320.27, 318.54, 316.54, 316.71, 317.53, 318.55,
    319.27, 320.28, 320.73, 321.97, 322.00, 321.71, 321.05, 318.71, 317.66, 317.14, 318.70, 319.25,
    320.46, 321.43, 322.23, 323.54, 323.91, 323.59, 322.24, 320.20, 318.48, 317.94, 319.63, 320.87,
    322.17, 322.34, 322.88, 324.25, 324.83, 323.93, 322.38, 320.76, 319.10, 319.24, 320.56, 321.80,
    322.40, 322.99, 323.73, 324.86, 325.40, 325.20, 323.98, 321.95, 320.18, 320.09, 321.16, 322.74,
]
# Monthly birth counts, 12 values per row
# (presumably the New York births-per-10,000 series — verify against source).
nybirths = [
    26.663, 23.598, 26.931, 24.740, 25.806, 24.364, 24.477, 23.901, 23.175, 23.227, 21.672, 21.870,
    21.439, 21.089, 23.709, 21.669, 21.752, 20.761, 23.479, 23.824, 23.105, 23.110, 21.759, 22.073,
    21.937, 20.035, 23.590, 21.672, 22.222, 22.123, 23.950, 23.504, 22.238, 23.142, 21.059, 21.573,
    21.548, 20.000, 22.424, 20.615, 21.761, 22.874, 24.104, 23.748, 23.262, 22.907, 21.519, 22.025,
    22.604, 20.894, 24.677, 23.673, 25.320, 23.583, 24.671, 24.454, 24.122, 24.252, 22.084, 22.991,
    23.287, 23.049, 25.076, 24.037, 24.430, 24.667, 26.451, 25.618, 25.014, 25.110, 22.964, 23.981,
    23.798, 22.270, 24.775, 22.646, 23.988, 24.737, 26.276, 25.816, 25.210, 25.199, 23.162, 24.707,
    24.364, 22.644, 25.565, 24.062, 25.431, 24.635, 27.009, 26.606, 26.268, 26.462, 25.246, 25.180,
    24.657, 23.304, 26.982, 26.199, 27.210, 26.122, 26.706, 26.878, 26.152, 26.379, 24.712, 25.688,
    24.990, 24.239, 26.721, 23.475, 24.767, 26.219, 28.361, 28.599, 27.914, 27.784, 25.693, 26.881,
]
@pytest.mark.parametrize("seasonal", ["additive", "multiplicative"])
@pytest.mark.parametrize("h", [12, 24])
@pytest.mark.parametrize("datatype", [np.float64])
def test_singlets_holtwinters(seasonal, h, datatype):
    """Single-series cuML forecasts must track statsmodels' R^2 closely."""
    global airpassengers
    airpassengers = np.asarray(airpassengers, dtype=datatype)
    train = airpassengers[:-h]
    test = airpassengers[-h:]

    if seasonal == "multiplicative":
        pytest.xfail("Statsmodels nan errors with gcc 9.3 (Issue #3384)")

    sm_model = sm_ES(
        train,
        initialization_method="heuristic",
        seasonal=seasonal,
        seasonal_periods=12,
    ).fit()

    cu_model = cuml_ES(train, seasonal=seasonal, seasonal_periods=12)
    cu_model.fit()

    cu_r2 = r2_score(cu_model.forecast(h).to_numpy(), test)
    sm_r2 = r2_score(sm_model.forecast(h), test)

    # Either cuML matches/beats statsmodels, or it stays within tolerance.
    assert (cu_r2 >= sm_r2) or (abs(cu_r2 - sm_r2) < 2e-1)
@pytest.mark.parametrize("seasonal", ["additive", "multiplicative"])
@pytest.mark.parametrize("h", [12, 24])
@pytest.mark.parametrize("datatype", [np.float64])
def test_multits_holtwinters(seasonal, h, datatype):
    """A two-series cuML fit must match per-series statsmodels baselines."""
    global airpassengers, co2
    airpassengers = np.asarray(airpassengers, dtype=datatype)
    co2 = np.asarray(co2, dtype=datatype)

    if seasonal == "multiplicative":
        pytest.xfail("Statsmodels nan errors with gcc 9.3 (Issue #3384)")

    air_train, air_test = airpassengers[:-h], airpassengers[-h:]
    co2_train, co2_test = co2[:-h], co2[-h:]

    stacked = np.asarray([air_train, co2_train], dtype=datatype)
    cu_model = cuml_ES(stacked, seasonal=seasonal, seasonal_periods=12, ts_num=2)

    sm_air = sm_ES(
        air_train,
        initialization_method="heuristic",
        seasonal=seasonal,
        seasonal_periods=12,
    )
    sm_co2 = sm_ES(
        co2_train,
        initialization_method="heuristic",
        seasonal=seasonal,
        seasonal_periods=12,
    )

    cu_model.fit()
    sm_air = sm_air.fit()
    sm_co2 = sm_co2.fit()

    sm_air_r2 = r2_score(sm_air.forecast(h), air_test)
    sm_co2_r2 = r2_score(sm_co2.forecast(h), co2_test)

    # Per-series forecasts selected by index.
    cu_air_r2 = r2_score(cu_model.forecast(h, 0).to_numpy(), air_test)
    cu_co2_r2 = r2_score(cu_model.forecast(h, 1).to_numpy(), co2_test)
    assert (cu_air_r2 >= sm_air_r2) or (abs(cu_air_r2 - sm_air_r2) < 4)
    assert (cu_co2_r2 >= sm_co2_r2) or (abs(cu_co2_r2 - sm_co2_r2) < 4)

    # Forecasting every series at once must behave the same way.
    full_pred = cu_model.forecast(h)
    air_cu_r2 = r2_score(full_pred[0].to_numpy(), air_test)
    co2_cu_r2 = r2_score(full_pred[1].to_numpy(), co2_test)
    assert (air_cu_r2 >= sm_air_r2) or (abs(air_cu_r2 - sm_air_r2) < 4)
    assert (co2_cu_r2 >= sm_co2_r2) or (abs(co2_cu_r2 - sm_co2_r2) < 4)
@pytest.mark.parametrize("seasonal", ["additive", "mul"])
def test_seasonal_holtwinters(seasonal):
    """Both seasonal spellings ('additive'/'mul') must fit and forecast cleanly."""
    global airpassengers, co2, nybirths
    stacked = np.asarray([airpassengers, co2, nybirths], dtype=np.float64)
    model = cuml_ES(stacked, seasonal=seasonal, ts_num=3)
    model.fit()
    model.forecast(5)
@pytest.mark.parametrize("idx", [0, 1, 2, None])
@pytest.mark.parametrize("h", [12, 24])
def test_series_holtwinters(idx, h):
    """Per-series accessors must work for each index and for idx=None (all)."""
    global airpassengers, co2, nybirths
    stacked = np.asarray([airpassengers, co2, nybirths], dtype=np.float64)
    model = cuml_ES(stacked, ts_num=3)
    model.fit()
    model.forecast(h, idx)
    model.score(idx)
    model.get_level(idx)
    model.get_trend(idx)
    model.get_season(idx)
@pytest.mark.parametrize("frequency", [7, 12])
@pytest.mark.parametrize("start_periods", [2, 3])
def test_start_freq_holtwinters(frequency, start_periods):
    """Non-default seasonal_periods/start_periods combos must fit cleanly."""
    global airpassengers, co2, nybirths
    stacked = np.asarray([airpassengers, co2, nybirths], dtype=np.float64)
    model = cuml_ES(
        stacked,
        ts_num=3,
        seasonal_periods=frequency,
        start_periods=start_periods,
    )
    model.fit()
    model.forecast(5)
@pytest.mark.parametrize("eps", [1, 2.24e-5, 2.24e-7])
def test_eps_holtwinters(eps):
    """A range of solver tolerances must all fit and forecast without error."""
    global airpassengers, co2, nybirths
    stacked = np.asarray([airpassengers, co2, nybirths], dtype=np.float64)
    model = cuml_ES(stacked, eps=eps, ts_num=3)
    model.fit()
    model.forecast(5)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
def test_inputs_holtwinters(datatype):
    """Both float dtypes must support the full fit/forecast/accessor API."""
    global airpassengers, co2, nybirths
    stacked = np.asarray([airpassengers, co2, nybirths], dtype=datatype)
    model = cuml_ES(stacked, ts_num=3)
    model.fit()
    model.forecast(5)
    model.score()
    model.get_level(0)
    model.get_trend(1)
    model.get_season(2)
@pytest.mark.parametrize("level", [1, 3, 5, 10])
def test_get_level(level):
    """A constant series must be smoothed to that same constant level."""
    model = cuml_ES(np.array([level] * 100, dtype=np.float64))
    model.fit()
    assert pytest.approx(model.get_level().to_numpy(), 1e-4) == level
@pytest.mark.parametrize("slope", [1, 2, -2, 0.5])
def test_get_trend(slope):
    """A perfectly linear series must recover its slope as the trend."""
    line = np.arange(0, 100 * slope, slope, dtype=np.float64)
    model = cuml_ES(line)
    model.fit()
    assert pytest.approx(model.get_trend().to_numpy(), 1e-4) == slope
@pytest.mark.parametrize("freq", [1, 0.5, 5])
def test_get_season(freq):
    """Seasonal components of a sine input must alternate around a base value."""
    model = cuml_ES(np.sin(np.arange(0, 100) * freq))
    model.fit()
    seasons = model.get_season().to_numpy()

    even_idx = np.arange(0, 98, 2)
    odd_idx = even_idx + 1
    base = seasons[0]

    # Even positions carry the base component, odd positions its negation.
    assert pytest.approx(seasons[even_idx], 1e-4) == base
    assert pytest.approx(seasons[odd_idx], 1e-4) == -1 * base
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_mbsgd_classifier.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from cuml.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.linear_model import MBSGDClassifier as cumlMBSGClassifier
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.fixture(
    scope="module",
    params=[
        unit_param([500, 20, 10, np.float32]),
        unit_param([500, 20, 10, np.float64]),
        quality_param([5000, 100, 50, np.float32]),
        quality_param([5000, 100, 50, np.float64]),
        stress_param([500000, 1000, 500, np.float32]),
        stress_param([500000, 1000, 500, np.float64]),
    ],
    ids=[
        "500-20-10-f32",
        "500-20-10-f64",
        "5000-100-50-f32",
        "5000-100-50-f64",
        "500000-1000-500-f32",
        "500000-1000-500-f64",
    ],
)
def make_dataset(request):
    """Yield (nrows, X_train, X_test, y_train, y_test) for the param size/dtype."""
    nrows, ncols, n_info, datatype = request.param
    X, y = make_classification(
        n_samples=nrows,
        n_informative=n_info,
        n_features=ncols,
        random_state=10,
    )
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=10
    )
    # Re-cast the labels after the split to pin their dtype.
    return (
        nrows,
        X_train,
        X_test,
        y_train.astype(datatype),
        y_test.astype(datatype),
    )
@pytest.mark.xfail(reason="Related to CuPy 9.0 update (see issue #3813)")
@pytest.mark.parametrize(
    # Grouped those tests to reduce the total number of individual tests
    # while still keeping good coverage of the different features of MBSGD
    ("lrate", "penalty", "loss"),
    [
        ("constant", "none", "log"),
        ("invscaling", "l2", "hinge"),
        ("adaptive", "l1", "squared_loss"),
        ("constant", "elasticnet", "hinge"),
    ],
)
@pytest.mark.filterwarnings("ignore:Maximum::sklearn[.*]")
def test_mbsgd_classifier_vs_skl(lrate, penalty, loss, make_dataset):
    """cuML MBSGD accuracy must stay within 0.08 of sklearn's SGDClassifier."""
    nrows, X_train, X_test, y_train, y_test = make_dataset
    if nrows >= 500000:
        # The sklearn baseline is only run on non-stress dataset sizes.
        return

    cu_clf = cumlMBSGClassifier(
        learning_rate=lrate,
        eta0=0.005,
        epochs=100,
        fit_intercept=True,
        batch_size=2,
        tol=0.0,
        penalty=penalty,
    )
    cu_clf.fit(X_train, y_train)
    cu_acc = accuracy_score(
        cp.asnumpy(cu_clf.predict(X_test)), cp.asnumpy(y_test)
    )

    skl_clf = SGDClassifier(
        learning_rate=lrate,
        eta0=0.005,
        max_iter=100,
        fit_intercept=True,
        tol=0.0,
        penalty=penalty,
        random_state=0,
    )
    skl_clf.fit(cp.asnumpy(X_train), cp.asnumpy(y_train))
    skl_acc = accuracy_score(
        skl_clf.predict(cp.asnumpy(X_test)), cp.asnumpy(y_test)
    )

    assert cu_acc >= skl_acc - 0.08
@pytest.mark.xfail(reason="Related to CuPy 9.0 update (see issue #3813)")
@pytest.mark.parametrize(
    # Grouped those tests to reduce the total number of individual tests
    # while still keeping good coverage of the different features of MBSGD
    ("lrate", "penalty", "loss"),
    [
        ("constant", "none", "log"),
        ("invscaling", "l2", "hinge"),
        ("adaptive", "l1", "squared_loss"),
        ("constant", "elasticnet", "hinge"),
    ],
)
def test_mbsgd_classifier(lrate, penalty, loss, make_dataset):
    """MBSGD with a proportional batch size must clear a 0.79 accuracy floor."""
    nrows, X_train, X_test, y_train, y_test = make_dataset

    clf = cumlMBSGClassifier(
        learning_rate=lrate,
        eta0=0.005,
        epochs=100,
        fit_intercept=True,
        batch_size=nrows / 100,
        tol=0.0,
        penalty=penalty,
    )
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    acc = accuracy_score(cp.asnumpy(predictions), cp.asnumpy(y_test))
    assert acc > 0.79
@pytest.mark.xfail(reason="Related to CuPy 9.0 update (see issue #3813)")
def test_mbsgd_classifier_default(make_dataset):
    """Default hyperparameters must reach at least 0.69 accuracy."""
    nrows, X_train, X_test, y_train, y_test = make_dataset

    clf = cumlMBSGClassifier(batch_size=nrows / 10)
    clf.fit(X_train, y_train)
    acc = accuracy_score(cp.asnumpy(clf.predict(X_test)), cp.asnumpy(y_test))
    assert acc >= 0.69
def test_mbsgd_classifier_set_params():
    """set_params must have the same effect as constructor arguments."""
    x = np.linspace(0, 1, 50)
    y = (x > 0.5).astype(cp.int32)

    def fitted_coef(model):
        # Helper: fit on the toy threshold problem and return the coefficient.
        model.fit(x, y)
        return model.coef_

    coef_default = fitted_coef(cumlMBSGClassifier())
    coef_ctor = fitted_coef(cumlMBSGClassifier(epochs=20, loss="hinge"))

    tweaked = cumlMBSGClassifier()
    tweaked.set_params(**{"epochs": 20, "loss": "hinge"})
    coef_set = fitted_coef(tweaked)

    # Changed hyperparameters must change the fit, and set_params must
    # reproduce the constructor-configured result exactly.
    assert coef_default != coef_ctor
    assert coef_ctor == coef_set
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_label_binarizer.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.preprocessing import LabelBinarizer
from cuml.testing.utils import array_equal
from cuml.common import has_scipy
from sklearn.preprocessing import LabelBinarizer as skLB
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize(
    "labels",
    [
        ([1, 4, 5, 2, 0, 1, 6, 2, 3, 4], [4, 2, 6, 3, 2, 0, 1]),
        ([9, 8, 2, 1, 3, 4], [8, 2, 1, 2, 2]),
    ],
)
@pytest.mark.parametrize("dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("sparse_output", [True, False])
def test_basic_functions(labels, dtype, sparse_output):
    """LabelBinarizer round-trip must match scikit-learn.

    Fits on one label set, transforms another, and checks classes_, the
    binarized matrix shape, the sparse payload (against scikit-learn) and
    the inverse transform.
    """
    fit_labels, xform_labels = labels

    skl_bin = skLB(sparse_output=sparse_output)
    skl_bin.fit(fit_labels)

    fit_labels = cp.asarray(fit_labels, dtype=dtype)
    xform_labels = cp.asarray(xform_labels, dtype=dtype)

    binarizer = LabelBinarizer(sparse_output=sparse_output)
    binarizer.fit(fit_labels)

    assert array_equal(binarizer.classes_.get(), np.unique(fit_labels.get()))

    xformed = binarizer.transform(xform_labels)

    if sparse_output:
        skl_bin_xformed = skl_bin.transform(xform_labels.get())

        if has_scipy():
            import scipy.sparse
        else:
            pytest.skip(
                "Skipping test_basic_functions(sparse_output=True) "
                + "because Scipy is missing"
            )

        skl_csr = scipy.sparse.coo_matrix(skl_bin_xformed).tocsr()
        cuml_csr = xformed

        # BUG FIX: the comparison result used to be discarded, so a
        # mismatching sparse payload could never fail this test.
        assert array_equal(skl_csr.data, cuml_csr.data.get())

        # #todo: Support sparse inputs
        # xformed = xformed.todense().astype(dtype)

    assert xformed.shape[1] == binarizer.classes_.shape[0]

    original = binarizer.inverse_transform(xformed)
    assert array_equal(original.get(), xform_labels.get())
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_input_utils.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pandas import Series as pdSeries
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.input_utils import convert_dtype
from cuml.common import has_cupy
from cuml.internals.input_utils import input_to_cupy_array
from cuml.common import input_to_host_array
from cuml.common import input_to_cuml_array, CumlArray
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
nbcuda = gpu_only_import_from("numba", "cuda")
pdDF = cpu_only_import_from("pandas", "DataFrame")
###############################################################################
# Parameters #
###############################################################################
test_dtypes_all = [
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
test_dtypes_acceptable = [np.float32, np.float64]
test_input_types = ["numpy", "numba", "cupy", "cudf", "pandas", "cuml"]
test_num_rows = [1, 100]
test_num_cols = [1, 100]
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("num_rows", test_num_rows)
@pytest.mark.parametrize("num_cols", test_num_cols)
@pytest.mark.parametrize("order", ["C", "F", "K"])
def test_input_to_cuml_array(dtype, input_type, num_rows, num_cols, order):
    """input_to_cuml_array must round-trip values, shape and dtype."""
    input_data, real_data = get_input(
        input_type, num_rows, num_cols, dtype, order=order
    )
    if input_type == "cupy" and input_data is None:
        pytest.skip("cupy not installed")

    arr, rows, cols, out_dtype = input_to_cuml_array(input_data, order=order)

    np.testing.assert_equal(arr.to_output("numpy"), real_data)
    assert rows == num_rows == arr.shape[0] == len(arr)
    assert cols == num_cols == arr.shape[1]
    assert dtype == out_dtype == arr.dtype

    del input_data
    del real_data
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", ["numba", "cupy"])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("order_check", ["C", "F"])
def test_fail_on_order(dtype, input_type, order, order_check):
    """fail_on_order must raise only when the requested order differs."""
    # Only exercised for numba/cupy inputs; cudf and numpy inputs are
    # order-converted by their own libraries.
    input_data, real_data = get_input(input_type, 10, 10, dtype, order=order)
    if input_type == "cupy" and input_data is None:
        pytest.skip("cupy not installed")

    if order == order_check:
        input_to_cuml_array(input_data, fail_on_order=False, order=order)
        return

    with pytest.raises(ValueError):
        input_to_cuml_array(input_data, fail_on_order=True, order=order_check)
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("from_order", ["C", "F"])
@pytest.mark.parametrize("to_order", ["C", "F", "K"])
def test_convert_matrix_order_cuml_array(
    dtype, input_type, from_order, to_order
):
    """Order conversion must produce the requested layout with equal values."""
    input_data, real_data = get_input(
        input_type, 10, 10, dtype, order=from_order
    )

    # Every branch of the original test issued this exact call; the split
    # only existed for a (since disabled) UserWarning check on the
    # numpy/cupy/numba conversion path, so a single call is equivalent.
    converted, *_ = input_to_cuml_array(
        input_data, fail_on_order=False, order=to_order
    )

    if to_order == "K":
        # 'K' keeps the source layout; cudf is always F, pandas always C.
        if input_type in ["cudf"]:
            assert converted.order == "F"
        elif input_type in ["pandas"]:
            assert converted.order == "C"
        else:
            assert converted.order == from_order
    else:
        assert converted.order == to_order

    np.testing.assert_equal(real_data, converted.to_output("numpy"))
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("shape", [(1, 10), (10, 1)])
@pytest.mark.parametrize("from_order", ["C", "F"])
@pytest.mark.parametrize("to_order", ["C", "F", "K"])
def test_convert_vector_order_cuml_array(
    dtype, input_type, shape, from_order, to_order
):
    """Vector-shaped inputs must survive any order conversion unchanged."""
    rows, cols = shape
    input_data, real_data = get_input(
        input_type, rows, cols, dtype, order=from_order
    )

    converted, *_ = input_to_cuml_array(
        input_data, fail_on_order=False, order=to_order
    )

    np.testing.assert_equal(real_data, converted.to_output("numpy"))
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("num_rows", test_num_rows)
@pytest.mark.parametrize("num_cols", test_num_cols)
@pytest.mark.parametrize("order", ["C", "F"])
def test_input_to_host_array(dtype, input_type, num_rows, num_cols, order):
    """input_to_host_array must return host data with matching metadata.

    Checks the values, row/column counts, and the reported dtype against
    the generated input.
    """
    input_data, real_data = get_input(
        input_type, num_rows, num_cols, dtype, order=order
    )
    if input_type == "cupy" and input_data is None:
        pytest.skip("cupy not installed")

    X, n_rows, n_cols, res_dtype = input_to_host_array(input_data, order=order)

    np.testing.assert_equal(X, real_data)
    assert n_rows == num_rows
    assert n_cols == num_cols
    # BUG FIX: the returned dtype previously shadowed the `dtype`
    # parameter, turning the old `assert dtype == dtype` into a tautology.
    # Compare the reported dtype against the requested one instead.
    assert res_dtype == dtype

    del input_data
    del real_data
@pytest.mark.parametrize("dtype", test_dtypes_all)
@pytest.mark.parametrize("check_dtype", test_dtypes_all)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dtype_check(dtype, check_dtype, input_type, order):
    """check_dtype must pass on a match and raise TypeError otherwise."""
    float16_involved = dtype == np.float16 or check_dtype == np.float16
    if float16_involved and input_type != "numpy":
        pytest.xfail("float16 not yet supported by numba/cuDF")

    if dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
        if input_type in ["cudf", "pandas"]:
            pytest.xfail("unsigned int types not yet supported")

    input_data, _ = get_input(input_type, 10, 10, dtype, order=order)
    if input_type == "cupy" and input_data is None:
        pytest.skip("cupy not installed")

    if dtype == check_dtype:
        _, _, _, got_dtype = input_to_cuml_array(
            input_data, check_dtype=check_dtype, order=order
        )
        assert got_dtype == check_dtype
    else:
        with pytest.raises(TypeError):
            input_to_cuml_array(
                input_data, check_dtype=check_dtype, order=order
            )
@pytest.mark.parametrize("num_rows", test_num_rows)
@pytest.mark.parametrize("num_cols", test_num_cols)
@pytest.mark.parametrize("to_dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("from_dtype", test_dtypes_all)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("order", ["C", "F"])
def test_convert_input_dtype(
    from_dtype, to_dtype, input_type, num_rows, num_cols, order
):
    """convert_dtype must cast values correctly and avoid copies when a no-op."""
    if from_dtype == np.float16 and input_type != "numpy":
        pytest.xfail("float16 not yet supported by numba/cuDF")

    if from_dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
        if input_type == "cudf":
            pytest.xfail(
                "unsigned int types not yet supported by \
                cuDF"
            )
        elif not has_cupy():
            pytest.xfail(
                "unsigned int types not yet supported by \
                cuDF and cuPy is not installed."
            )

    input_data, real_data = get_input(
        input_type,
        num_rows,
        num_cols,
        from_dtype,
        out_dtype=to_dtype,
        order=order,
    )
    if input_type == "cupy" and input_data is None:
        pytest.skip("cupy not installed")

    converted_data = convert_dtype(input_data, to_dtype=to_dtype)

    # Compare on the host; the accessor depends on the container type.
    if input_type == "numpy":
        np.testing.assert_equal(converted_data, real_data)
    elif input_type in ("cudf", "pandas"):
        np.testing.assert_equal(converted_data.to_numpy(), real_data)
    else:
        np.testing.assert_equal(converted_data.copy_to_host(), real_data)

    if from_dtype == to_dtype:
        # A same-dtype conversion must not copy the underlying buffer.
        check_ptr(converted_data, input_data, input_type)
@pytest.mark.parametrize("dtype", test_dtypes_acceptable)
@pytest.mark.parametrize("input_type", ["numpy", "cupy"])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("contiguous", [True, False])
@pytest.mark.parametrize("force_contiguous", [True, False])
def test_non_contiguous_to_contiguous_input(
    dtype, input_type, order, contiguous, force_contiguous
):
    """Feed a (possibly non-contiguous) view to input_to_cuml_array and check
    that force_contiguous yields a contiguous array with unchanged values."""
    input_data, real_data = get_input(input_type, 10, 8, dtype, order=order)

    if contiguous:
        data_view = input_data
    else:
        # Slice along the axis that breaks contiguity for the given order.
        if order == "F":
            data_view, real_data = input_data[:-3], real_data[:-3]
        else:
            data_view, real_data = input_data[:, :-3], real_data[:, :-3]

    cumlary, *_ = input_to_cuml_array(
        data_view, force_contiguous=force_contiguous
    )

    if force_contiguous:
        assert cumlary.is_contiguous

    np.testing.assert_equal(real_data, cumlary.to_output("numpy"))
@pytest.mark.parametrize("input_type", ["cudf", "pandas"])
@pytest.mark.parametrize("num_rows", test_num_rows)
@pytest.mark.parametrize("num_cols", test_num_cols)
@pytest.mark.parametrize("order", ["C", "F"])
def test_indexed_inputs(input_type, num_rows, num_cols, order):
    """The index of a dataframe/series input must survive conversion to
    CumlArray and be propagated to indexed output formats."""
    # Single-column inputs are exercised through the Series code path.
    if num_cols == 1:
        input_type += "-series"

    index = np.arange(num_rows, 2 * num_rows)
    input_data, real_data = get_input(
        input_type, num_rows, num_cols, np.float32, index=index
    )

    X, n_rows, n_cols, res_dtype = input_to_cuml_array(input_data, order=order)

    # The index must be carried on the CumlArray itself ...
    np.testing.assert_equal(X.index.to_numpy(), index)

    # ... and on every indexed output format it converts to.
    for fmt in ("cudf", "pandas"):
        out = X.to_output(fmt)
        np.testing.assert_equal(out.index.to_numpy(), index)
###############################################################################
# Utility Functions #
###############################################################################
def check_numpy_order(ary, order):
    """Return True when ``ary`` is contiguous in the requested ``order``
    ("F" for column-major, anything else is treated as row-major)."""
    flag_name = "f_contiguous" if order == "F" else "c_contiguous"
    return getattr(ary.flags, flag_name)
def check_ptr(a, b, input_type):
    """Assert that ``a`` and ``b`` share the same underlying data buffer(s)."""
    if input_type == "cudf":
        # Compare the base buffer pointer of every column pair.
        for (_, col_a), (_, col_b) in zip(a._data.items(), b._data.items()):
            with cudf.core.buffer.acquire_spill_lock():
                ptr_a = col_a.base_data.get_ptr(mode="read")
                ptr_b = col_b.base_data.get_ptr(mode="read")
                assert ptr_a == ptr_b
    else:
        # pandas containers expose the buffer through their backing ndarray.
        if input_type == "pandas":
            a, b = a.values, b.values

        def data_ptr(arr):
            # Device arrays implement the CUDA array interface; host
            # arrays fall back to the numpy array interface.
            try:
                return arr.__cuda_array_interface__["data"][0]
            except AttributeError:
                return arr.__array_interface__["data"][0]

        assert data_ptr(a) == data_ptr(b)
def get_input(
    type, nrows, ncols, dtype, order="C", out_dtype=False, index=None
):
    """Build a random ``(nrows, ncols)`` matrix in the container named by
    ``type`` and return it together with a numpy reference copy.

    When ``out_dtype`` is given, the reference copy is cast to that dtype so
    callers can validate dtype conversions against it.
    """
    rand_mat = cp.array(
        cp.random.rand(nrows, ncols) * 10, dtype=dtype, order=order
    )
    host_mat = cp.asnumpy(rand_mat)

    if type == "numpy":
        result = np.array(host_mat, order=order)
    elif type == "cupy":
        result = rand_mat
    elif type == "numba":
        result = nbcuda.as_cuda_array(rand_mat)
    elif type == "cudf":
        result = cudf.DataFrame(rand_mat, index=index)
    elif type == "cudf-series":
        result = cudf.Series(rand_mat.reshape(nrows), index=index)
    elif type == "pandas":
        result = pdDF(host_mat, index=index)
    elif type == "pandas-series":
        result = pdSeries(host_mat.reshape(nrows), index=index)
    elif type == "cuml":
        result = CumlArray(data=rand_mat)

    # Reference data lives on the host, optionally cast to out_dtype.
    reference = host_mat.astype(out_dtype) if out_dtype else host_mat
    return result, np.array(reference, order=order)
def test_tocupy_missing_values_handling():
    """Nulls in cudf inputs must surface as NaN (with a float64 upcast) when
    fail_on_null=False, and must raise ValueError when fail_on_null=True."""
    # No nulls: the integer dtype is preserved.
    df = cudf.DataFrame(data=[[7, 2, 3], [4, 5, 6], [10, 5, 9]])
    array, n_rows, n_cols, dtype = input_to_cupy_array(df, fail_on_null=False)
    assert isinstance(array, cp.ndarray)
    assert str(array.dtype) == "int64"

    # A null in a DataFrame upcasts to float64 and becomes NaN.
    df = cudf.DataFrame(data=[[7, 2, 3], [4, None, 6], [10, 5, 9]])
    array, n_rows, n_cols, dtype = input_to_cupy_array(df, fail_on_null=False)
    assert isinstance(array, cp.ndarray)
    assert str(array.dtype) == "float64"
    assert cp.isnan(array[1, 1])

    # The same upcast applies to a Series with a null.
    df = cudf.Series(data=[7, None, 3])
    array, n_rows, n_cols, dtype = input_to_cupy_array(df, fail_on_null=False)
    assert str(array.dtype) == "float64"
    assert cp.isnan(array[1])

    # fail_on_null=True must reject inputs containing nulls.
    with pytest.raises(ValueError):
        df = cudf.Series(data=[7, None, 3])
        array, n_rows, n_cols, dtype = input_to_cupy_array(
            df, fail_on_null=True
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_internals_api.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from cuml.manifold import UMAP
from cuml.internals import GraphBasedDimRedCallback
from sklearn.datasets import load_digits
# Shared fixture data for every test in this module: the scikit-learn
# handwritten-digits dataset (features and labels).
digits = load_digits()
data, target = digits.data, digits.target
class CustomCallback(GraphBasedDimRedCallback):
    """Callback that records which GraphBasedDimRedCallback hooks fired so a
    test can verify that the estimator drove every stage."""

    # Class-level defaults; shadowed on the instance once a hook fires.
    preprocess_event = False
    epoch_event = 0
    train_event = False

    def __init__(self, skip_init=False):
        # skip_init=True deliberately omits base-class initialization so the
        # estimator's error path for uninitialized callbacks can be tested.
        if not skip_init:
            super().__init__()

    def check(self):
        # Every stage must have fired, with more than 10 epochs observed.
        assert self.preprocess_event
        assert self.epoch_event > 10
        assert self.train_event

    def on_preprocess_end(self, embeddings):
        self.preprocess_event = True

    def on_epoch_end(self, embeddings):
        self.epoch_event += 1

    def on_train_end(self, embeddings):
        self.train_event = True
@pytest.mark.parametrize("n_components", [2, 4, 8])
def test_internals_api(n_components):
    """UMAP must invoke every callback hook during fit, and must reject a
    callback whose base class was never initialized."""
    # Fully-initialized callback: every hook should fire during fit().
    callback = CustomCallback()
    reducer = UMAP(n_components=n_components, callback=callback)
    reducer.fit(data)
    callback.check()

    # A callback that skipped super().__init__() must raise at fit time.
    uninitialized = CustomCallback(skip_init=True)
    model = UMAP(n_epochs=10, callback=uninitialized)
    with pytest.raises(ValueError):
        model.fit_transform(data)
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.