repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_label_binarizer.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.dask.preprocessing import LabelBinarizer
from cuml.testing.utils import array_equal
import dask
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize(
    "labels",
    [
        ([1, 4, 5, 2, 0, 1, 6, 2, 3, 4], [4, 2, 6, 3, 2, 0, 1]),
        ([9, 8, 2, 1, 3, 4], [8, 2, 1, 2, 2]),
    ],
)
@pytest.mark.parametrize("multipart", [True, False])
def test_basic_functions(labels, multipart, client):
    """Round-trip fit/transform/inverse_transform for dask LabelBinarizer."""
    fit_labels, xform_labels = labels

    fit_arr = cp.asarray(fit_labels, dtype=np.int32)
    fit_darr = dask.array.from_array(fit_arr)
    xform_arr = cp.asarray(xform_labels, dtype=np.int32)
    xform_darr = dask.array.from_array(xform_arr)
    if multipart:
        # One-element chunks exercise the multi-partition code path.
        fit_darr = fit_darr.rechunk((1,))
        xform_darr = xform_darr.rechunk((1,))

    binarizer = LabelBinarizer(client=client, sparse_output=False)
    binarizer.fit(fit_darr)
    # The learned classes must be exactly the unique fit labels.
    assert array_equal(
        cp.asnumpy(binarizer.classes_), np.unique(cp.asnumpy(fit_arr))
    )

    xformed = binarizer.transform(xform_darr)
    xformed = xformed.map_blocks(lambda x: x.get(), dtype=cp.float32)
    xformed.compute_chunk_sizes()
    # One output column per learned class.
    assert xformed.compute().shape[1] == binarizer.classes_.shape[0]

    # Inverting the binarized output must recover the original labels.
    recovered = binarizer.inverse_transform(xformed)
    assert array_equal(cp.asnumpy(recovered.compute()), xform_labels)
@pytest.mark.parametrize(
    "labels",
    [
        ([1, 4, 5, 2, 0, 1, 6, 2, 3, 4], [4, 2, 6, 3, 2, 0, 1]),
        ([9, 8, 2, 1, 3, 4], [8, 2, 1, 2, 2]),
    ],
)
@pytest.mark.xfail(
    raises=ValueError,
    reason="Sparse output disabled until "
    "Dask supports sparse CuPy "
    "arrays",
)
def test_sparse_output_fails(labels, client):
    """Constructing with sparse_output=True is expected to raise ValueError."""
    LabelBinarizer(client=client, sparse_output=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_serialization.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distributed.protocol.serialize import serialize
from cuml.naive_bayes.naive_bayes import MultinomialNB
from cuml.internals.array_sparse import SparseCumlArray
from cuml.dask.linear_model import LinearRegression
from cuml.internals.safe_imports import gpu_only_import
from dask import array as da
from sklearn.datasets import make_regression
import numpy as np
import pickle
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
def test_register_naive_bayes_serialization():
    """
    Validate that register_serialization hooked up the expected Dask
    serializers for a fitted MultinomialNB; the serializers themselves
    are assumed well-tested upstream.
    """
    model = MultinomialNB()
    features = cupyx.scipy.sparse.random(1, 5)
    targets = cp.array([0])
    model.fit(features, targets)
    # Unfortunately, Dask has no `unregister` function and Pytest
    # shares the same process so cannot test the base-state here.
    for expected in ("cuda", "dask"):
        header, _frames = serialize(model, serializers=[expected])
        assert header["serializer"] == expected
def test_sparse_cumlarray_serialization():
    """A SparseCumlArray serializes via both the cuda and dask protocols."""
    sparse_input = cupyx.scipy.sparse.random(10, 5, format="csr", density=0.9)
    wrapped = SparseCumlArray(sparse_input)
    for expected in ("cuda", "dask"):
        header, _frames = serialize(wrapped, serializers=[expected])
        assert header["serializer"] == expected
def test_serialize_mnmg_model(client):
    """A fitted MNMG LinearRegression survives a pickle round-trip."""
    X, y = make_regression(n_samples=1000, n_features=20, random_state=0)
    dX, dy = da.from_array(X), da.from_array(y)

    model = LinearRegression(client)
    model.fit(dX, dy)

    clone = pickle.loads(pickle.dumps(model))
    # Coefficients must be preserved by serialization.
    assert np.allclose(clone.coef_, model.coef_)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_metrics.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.array as da
from cuml.dask.metrics import confusion_matrix
from cuml.testing.utils import stress_param, generate_random_labels
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
import pytest
from cuml.internals.safe_imports import gpu_only_import
from itertools import chain, permutations
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize("chunks", ["auto", 2, 1])
def test_confusion_matrix(client, chunks):
    """Multiclass confusion matrix matches a hand-computed reference."""
    labels_true = da.from_array(cp.array([2, 0, 2, 2, 0, 1]), chunks=chunks)
    labels_pred = da.from_array(cp.array([0, 0, 2, 2, 0, 2]), chunks=chunks)
    expected = cp.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
    result = confusion_matrix(labels_true, labels_pred)
    cp.testing.assert_array_equal(result, expected)
@pytest.mark.mg
@pytest.mark.parametrize("chunks", ["auto", 2, 1])
def test_confusion_matrix_binary(client, chunks):
    """Binary confusion matrix unravels to the expected (tn, fp, fn, tp)."""
    labels_true = da.from_array(cp.array([0, 1, 0, 1]), chunks=chunks)
    labels_pred = da.from_array(cp.array([1, 1, 1, 0]), chunks=chunks)
    tn, fp, fn, tp = confusion_matrix(labels_true, labels_pred).ravel()
    expected = cp.array([0, 2, 1, 1])
    cp.testing.assert_array_equal(expected, cp.array([tn, fp, fn, tp]))
@pytest.mark.mg
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("problem_type", ["binary", "multiclass"])
def test_confusion_matrix_random(n_samples, dtype, problem_type, client):
    """Confusion matrix on random labels agrees with scikit-learn's."""
    n_labels = 2 if problem_type == "binary" else 1000
    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, n_labels, n_samples).astype(dtype),
        as_cupy=True,
    )
    cm = confusion_matrix(da.from_array(y_true), da.from_array(y_pred))
    ref = sk_confusion_matrix(np_y_true, np_y_pred)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.mg
@pytest.mark.parametrize(
    "normalize, expected_results",
    [
        ("true", 0.333333333),
        ("pred", 0.333333333),
        ("all", 0.1111111111),
        (None, 2),
    ],
)
def test_confusion_matrix_normalize(normalize, expected_results, client):
    """Each normalization mode yields a matrix of a single expected value."""
    truth = cp.array([0, 1, 2] * 6)
    # All 3! permutations of (0, 1, 2) produce a perfectly uniform
    # confusion pattern, so every entry of the matrix is identical.
    predictions = cp.array(list(chain(*permutations([0, 1, 2]))))
    result = confusion_matrix(
        da.from_array(truth), da.from_array(predictions), normalize=normalize
    )
    cp.testing.assert_allclose(result, cp.array(expected_results))
@pytest.mark.mg
@pytest.mark.parametrize("labels", [(0, 1), (2, 1), (2, 1, 4, 7), (2, 20)])
def test_confusion_matrix_multiclass_subset_labels(labels, client):
    """Selecting/reordering classes via `labels` matches scikit-learn."""
    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, 3, 10).astype(np.int32), as_cupy=True
    )
    ref = sk_confusion_matrix(np_y_true, np_y_pred, labels=labels)
    cm = confusion_matrix(
        da.from_array(y_true),
        da.from_array(y_pred),
        labels=cp.array(labels, dtype=np.int32),
    )
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.mg
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("weights_dtype", ["int", "float"])
def test_confusion_matrix_random_weights(
    n_samples, dtype, weights_dtype, client
):
    """Sample-weighted confusion matrix agrees with scikit-learn's."""
    y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
        lambda rng: rng.randint(0, 10, n_samples).astype(dtype), as_cupy=True
    )
    rs = np.random.RandomState(0)
    if weights_dtype == "int":
        sample_weight = rs.randint(0, 10, n_samples)
    else:
        sample_weight = rs.rand(n_samples)

    ref = sk_confusion_matrix(
        np_y_true, np_y_pred, sample_weight=sample_weight
    )
    cm = confusion_matrix(
        da.from_array(y_true),
        da.from_array(y_pred),
        sample_weight=da.from_array(cp.array(sample_weight)),
    )
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_nearest_neighbors.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import array_equal
from sklearn.neighbors import KNeighborsClassifier
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.dask.common import utils as dask_utils
from cuml.common import has_scipy
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
dask_cudf = gpu_only_import("dask_cudf")
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
def predict(neigh_ind, _y, n_neighbors):
    """Majority-vote prediction from neighbor indices.

    Returns the per-row modal label of ``_y[neigh_ind]`` together with
    the fraction of the ``n_neighbors`` votes the winning label received.
    Raises RuntimeError when scipy is unavailable.
    """
    if not has_scipy():
        raise RuntimeError("Scipy is needed to run predict()")
    import scipy.stats as stats

    votes = _y[neigh_ind.astype(np.int64)]
    ypred, count = stats.mode(votes, axis=1)
    return ypred.ravel(), count.ravel() * 1.0 / n_neighbors
def _prep_training_data(
    c, X_train, X_train_partitions_per_worker=None, reverse_order=False
):
    """Distribute X_train as a dask-cuDF frame persisted across all workers.

    `reverse_order` assigns partitions to the workers in reversed order,
    exercising worker-placement-sensitive code paths.
    """
    workers = list(c.has_what().keys())
    if reverse_order:
        workers = workers[::-1]

    X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
    X_train_df = dask_cudf.from_cudf(
        X_cudf, npartitions=X_train_partitions_per_worker * len(workers)
    )
    (X_train_df,) = dask_utils.persist_across_workers(
        c, [X_train_df], workers=workers
    )
    return X_train_df
def _scale_rows(client, nrows):
workers = list(client.scheduler_info()["workers"].keys())
n_workers = len(workers)
return n_workers * nrows
@pytest.mark.parametrize(
    "nrows", [unit_param(300), quality_param(1e6), stress_param(5e8)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
    "nclusters", [unit_param(5), quality_param(10), stress_param(15)]
)
@pytest.mark.parametrize(
    "n_neighbors", [unit_param(10), quality_param(4), stress_param(100)]
)
@pytest.mark.parametrize(
    "n_parts",
    [unit_param(1), unit_param(5), quality_param(7), stress_param(50)],
)
@pytest.mark.parametrize(
    "streams_per_handle,reverse_worker_order", [(5, True), (10, False)]
)
def test_compare_skl(
    nrows,
    ncols,
    nclusters,
    n_parts,
    n_neighbors,
    streams_per_handle,
    reverse_worker_order,
    client,
):
    """Compare dask NearestNeighbors against scikit-learn on blob data.

    Checks that the kneighbors() indices rarely disagree with sklearn's
    and that majority-vote predictions from those indices match sklearn's
    predictions exactly.
    """
    from cuml.dask.neighbors import NearestNeighbors as daskNN
    from sklearn.datasets import make_blobs

    nrows = _scale_rows(client, nrows)

    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=nclusters,
        random_state=0,
    )
    X = X.astype(np.float32)

    X_cudf = _prep_training_data(client, X, n_parts, reverse_worker_order)

    from dask.distributed import wait

    wait(X_cudf)

    # Training data should be evenly balanced across the workers.
    dist = np.array([len(v) for v in client.has_what().values()])
    assert np.all(dist == dist[0])

    cumlModel = daskNN(
        n_neighbors=n_neighbors, streams_per_handle=streams_per_handle
    )
    cumlModel.fit(X_cudf)
    out_d, out_i = cumlModel.kneighbors(X_cudf)
    local_i = np.array(out_i.compute().to_numpy(), dtype="int64")

    sklModel = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
    skl_y_hat = sklModel.predict(X)
    y_hat, _ = predict(local_i, y, n_neighbors)
    sk_d, sk_i = sklModel.kneighbors(X)
    sk_i = sk_i.astype("int64")

    # Each query point must be its own nearest neighbor.
    assert array_equal(local_i[:, 0], np.arange(nrows))

    # BUG FIX: this previously counted only positive differences
    # (len(diff[diff > 0])), silently ignoring mismatches where the cuml
    # index was larger than sklearn's. Count every disagreement instead.
    diff = sk_i - local_i
    n_diff = np.count_nonzero(diff != 0)
    perc_diff = n_diff / (nrows * n_neighbors)
    assert perc_diff <= 3e-3

    assert array_equal(y_hat, skl_y_hat)
@pytest.mark.parametrize("nrows", [unit_param(1000), stress_param(1e5)])
@pytest.mark.parametrize("ncols", [unit_param(10), stress_param(500)])
@pytest.mark.parametrize("n_parts", [unit_param(10), stress_param(100)])
@pytest.mark.parametrize("batch_size", [unit_param(100), stress_param(1e3)])
def test_batch_size(nrows, ncols, n_parts, batch_size, client):
    """kneighbors with an explicit batch_size still classifies blobs exactly."""
    n_neighbors = 10
    n_clusters = 5
    from cuml.dask.neighbors import NearestNeighbors as daskNN
    from sklearn.datasets import make_blobs
    # One copy of the data per worker keeps per-worker load constant.
    nrows = _scale_rows(client, nrows)
    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=n_clusters,
        random_state=0,
    )
    X = X.astype(np.float32)
    X_cudf = _prep_training_data(client, X, n_parts)
    cumlModel = daskNN(
        n_neighbors=n_neighbors, batch_size=batch_size, streams_per_handle=5
    )
    cumlModel.fit(X_cudf)
    out_d, out_i = cumlModel.kneighbors(X_cudf)
    local_i = out_i.compute().to_numpy()
    # Majority vote over well-separated blobs should recover the labels.
    y_hat, _ = predict(local_i, y, n_neighbors)
    assert array_equal(y_hat, y)
def test_return_distance(client):
    """return_distance toggles between indices-only and a (dist, ind) pair."""
    k = 5
    n_feats = 50
    n_samples = _scale_rows(client, 50)

    from cuml.dask.neighbors import NearestNeighbors as daskNN
    from sklearn.datasets import make_blobs

    X, y = make_blobs(n_samples=n_samples, n_features=n_feats, random_state=0)
    X_cudf = _prep_training_data(client, X.astype(np.float32), 1)

    model = daskNN(streams_per_handle=5)
    model.fit(X_cudf)

    # Without distances: a single collection of shape (n_samples, k).
    indices_only = model.kneighbors(X_cudf, k, return_distance=False)
    assert not isinstance(indices_only, tuple)
    assert indices_only.compute().shape == (n_samples, k)

    # With distances: a (distances, indices) pair.
    pair = model.kneighbors(X_cudf, k, return_distance=True)
    assert isinstance(pair, tuple)
    assert len(pair) == 2
def test_default_n_neighbors(client):
    """kneighbors defaults to the single-GPU NN default when k is omitted."""
    k = 15
    n_feats = 50
    n_samples = _scale_rows(client, 50)

    from cuml.dask.neighbors import NearestNeighbors as daskNN
    from cuml.neighbors.nearest_neighbors_mg import (
        NearestNeighborsMG as cumlNN,
    )
    from sklearn.datasets import make_blobs

    X, y = make_blobs(n_samples=n_samples, n_features=n_feats, random_state=0)
    X_cudf = _prep_training_data(client, X.astype(np.float32), 1)

    model = daskNN(streams_per_handle=5)
    model.fit(X_cudf)
    # No k given anywhere: result width must equal the SG default.
    neighbors = model.kneighbors(X_cudf, return_distance=False)
    assert neighbors.shape[1] == cumlNN().n_neighbors

    model = daskNN(n_neighbors=k)
    model.fit(X_cudf)
    # Explicit k: result width must equal k.
    neighbors = model.kneighbors(X_cudf, k, return_distance=False)
    assert neighbors.shape[1] == k
def test_one_query_partition(client):
    """kneighbors must not crash when the query set has one partition."""
    from cuml.dask.neighbors import NearestNeighbors as daskNN
    from cuml.dask.datasets import make_blobs

    X_train, _ = make_blobs(n_samples=4000, n_features=16, n_parts=8)
    X_test, _ = make_blobs(n_samples=200, n_features=16, n_parts=1)

    model = daskNN(n_neighbors=4)
    model.fit(X_train)
    model.kneighbors(X_test)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_utils.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from dask.distributed import wait
from cuml.dask.common import raise_exception_from_futures
def _raise_exception():
raise ValueError("intentional exception")
def test_dask_exceptions(client):
    """An exception raised inside a future surfaces as RuntimeError."""
    future = client.submit(_raise_exception)
    wait(future)
    with pytest.raises(RuntimeError):
        raise_exception_from_futures([future])
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_ridge_regression.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.dask.common import utils as dask_utils
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_regression
from cuml.internals.safe_imports import cpu_only_import
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
dask_cudf = gpu_only_import("dask_cudf")
cudf = gpu_only_import("cudf")
pytestmark = pytest.mark.mg
def _prep_training_data(c, X_train, y_train, partitions_per_worker):
    """Distribute (X_train, y_train) as dask-cuDF collections persisted
    evenly across all workers of client `c`."""
    workers = c.has_what().keys()
    n_partitions = partitions_per_worker * len(workers)

    X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
    X_train_df = dask_cudf.from_cudf(X_cudf, npartitions=n_partitions)

    # Flatten y to a 1-D cuDF Series before partitioning.
    y_series = cudf.Series(np.array(pd.DataFrame(y_train).values)[:, 0])
    y_train_df = dask_cudf.from_cudf(y_series, npartitions=n_partitions)

    X_train_df, y_train_df = dask_utils.persist_across_workers(
        c, [X_train_df, y_train_df], workers=workers
    )
    return X_train_df, y_train_df
def make_regression_dataset(datatype, nrows, ncols, n_info):
    """Generate a (nrows, ncols) regression dataset cast to `datatype`.

    BUG FIX: `n_info` was previously ignored and `n_informative` was
    hard-coded to 5. The parameter is now honored; all current callers
    pass n_info=5, so their behavior is unchanged.
    """
    X, y = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    return X.astype(datatype), y.astype(datatype)
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1e4])
@pytest.mark.parametrize("ncols", [10])
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("normalize", [False])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("delayed", [True, False])
def test_ridge(
    nrows, ncols, n_parts, fit_intercept, normalize, datatype, delayed, client
):
    """Fit dask Ridge on synthetic data and check in-sample MSE is small."""
    from cuml.dask.linear_model import Ridge as cumlRidge_dask
    n_info = 5
    nrows = int(nrows)
    ncols = int(ncols)
    X, y = make_regression_dataset(datatype, nrows, ncols, n_info)
    X_df, y_df = _prep_training_data(client, X, y, n_parts)
    lr = cumlRidge_dask(
        alpha=0.5, fit_intercept=fit_intercept, normalize=normalize
    )
    lr.fit(X_df, y_df)
    # `delayed` toggles the lazy vs. eager distributed prediction path.
    ret = lr.predict(X_df, delayed=delayed)
    # In-sample error on well-conditioned synthetic data should be tiny.
    error_cuml = mean_squared_error(y, ret.compute().to_pandas().values)
    assert error_cuml < 1e-1
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_kneighbors_classifier.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.neighbors import KNeighborsClassifier as lKNNClf
from cuml.dask.neighbors import KNeighborsClassifier as dKNNClf
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import dask.array as da
import dask.dataframe as dd
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import_from
DataFrame = gpu_only_import_from("cudf", "DataFrame")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
def generate_dask_array(np_array, n_parts):
    """Split a 2-D numpy array into a dask array with `n_parts` row chunks.

    BUG FIX: the leftover rows were previously computed as
    ``n_samples % n_samples_per_part``, which is wrong whenever the true
    remainder of ``n_samples / n_parts`` is a multiple of the per-part
    size (e.g. 10 rows over 4 parts produced chunks summing to 8).
    Using ``n_samples % n_parts`` makes the chunks always sum to
    ``n_samples``.
    """
    n_samples = np_array.shape[0]
    n_samples_per_part = n_samples // n_parts
    chunks = [n_samples_per_part] * n_parts
    # Give the last chunk the remainder so the chunk sizes sum correctly.
    chunks[-1] += n_samples % n_parts
    return da.from_array(np_array, chunks=(tuple(chunks), -1))
@pytest.fixture(
    scope="module",
    params=[
        unit_param(
            {
                "n_samples": 3000,
                "n_features": 30,
                "n_classes": 5,
                "n_targets": 2,
            }
        ),
        quality_param(
            {
                "n_samples": 8000,
                "n_features": 35,
                "n_classes": 12,
                "n_targets": 3,
            }
        ),
        stress_param(
            {
                "n_samples": 20000,
                "n_features": 40,
                "n_classes": 12,
                "n_targets": 4,
            }
        ),
    ],
)
def dataset(request):
    """Build a noisy multi-target classification dataset and split it.

    Generates 20% extra multilabel samples, keeps only rows that have at
    least `n_targets` positive labels (truncated/shuffled to exactly
    `n_targets` label indices each), adds Gaussian noise to X, and
    returns train_test_split(X, y, test_size=0.3).
    """
    X, y = make_multilabel_classification(
        n_samples=int(request.param["n_samples"] * 1.2),
        n_features=request.param["n_features"],
        n_classes=request.param["n_classes"],
        n_labels=request.param["n_classes"],
        length=request.param["n_targets"],
    )
    new_x = []
    new_y = []
    for i in range(y.shape[0]):
        # Indices of the positive labels for sample i.
        a = np.argwhere(y[i] == 1)[:, 0]
        if len(a) >= request.param["n_targets"]:
            new_x.append(i)
            # Pick a random subset of exactly n_targets label indices.
            np.random.shuffle(a)
            a = a[: request.param["n_targets"]]
            new_y.append(a)
        # Stop once enough qualifying samples were collected.
        if len(new_x) >= request.param["n_samples"]:
            break
    X = X[new_x]
    noise = np.random.normal(0, 5.0, X.shape)
    X += noise
    y = np.array(new_y)
    return train_test_split(X, y, test_size=0.3)
def exact_match(l_outputs, d_outputs):
    """Assert distributed predictions (almost) exactly match local ones."""
    # Shapes must agree before any element-wise comparison.
    assert l_outputs.shape == d_outputs.shape
    # A query counts as correct only when every target column matches;
    # at least 95% of queries must be fully correct.
    row_matches = np.all(l_outputs == d_outputs, axis=1)
    assert np.mean(row_matches) > 0.95
def check_probabilities(l_probas, d_probas):
    """Assert per-class probability arrays agree between local and dask."""
    assert len(l_probas) == len(d_probas)
    for local_p, dist_p in zip(l_probas, d_probas):
        assert local_p.shape == dist_p.shape
        # At least 95% of the individual probabilities must match exactly.
        assert np.mean(local_p == dist_p) > 0.95
@pytest.mark.parametrize("datatype", ["dask_array", "dask_cudf"])
@pytest.mark.parametrize("parameters", [(1, 3, 256), (8, 8, 256), (9, 3, 128)])
def test_predict_and_score(dataset, datatype, parameters, client):
    """Distributed KNN classifier predictions and score match the local one."""
    n_neighbors, n_parts, batch_size = parameters
    X_train, X_test, y_train, y_test = dataset
    # Local (single-GPU) reference model.
    l_model = lKNNClf(n_neighbors=n_neighbors)
    l_model.fit(X_train, y_train)
    l_outputs = l_model.predict(X_test)
    handmade_local_score = np.mean(y_test == l_outputs)
    handmade_local_score = round(handmade_local_score, 3)
    # Distribute data as dask arrays (optionally converted to dask-cuDF).
    X_train = generate_dask_array(X_train, n_parts)
    X_test = generate_dask_array(X_test, n_parts)
    y_train = generate_dask_array(y_train, n_parts)
    y_test = generate_dask_array(y_test, n_parts)
    if datatype == "dask_cudf":
        X_train = to_dask_cudf(X_train, client)
        X_test = to_dask_cudf(X_test, client)
        y_train = to_dask_cudf(y_train, client)
        y_test = to_dask_cudf(y_test, client)
    d_model = dKNNClf(
        client=client, n_neighbors=n_neighbors, batch_size=batch_size
    )
    d_model.fit(X_train, y_train)
    d_outputs = d_model.predict(X_test, convert_dtype=True)
    d_outputs = d_outputs.compute()
    # Normalize the distributed result to a numpy array for comparison.
    d_outputs = (
        d_outputs.to_numpy() if isinstance(d_outputs, DataFrame) else d_outputs
    )
    exact_match(l_outputs, d_outputs)
    # Distributed score should agree with the hand-computed local score.
    distributed_score = d_model.score(X_test, y_test)
    distributed_score = round(distributed_score, 3)
    assert distributed_score == pytest.approx(handmade_local_score, abs=1e-2)
@pytest.mark.parametrize("datatype", ["dask_array", "dask_cudf"])
@pytest.mark.parametrize("parameters", [(1, 3, 256), (8, 8, 256), (9, 3, 128)])
def test_predict_proba(dataset, datatype, parameters, client):
    """Distributed predict_proba matches the local KNN classifier's output."""
    n_neighbors, n_parts, batch_size = parameters
    X_train, X_test, y_train, y_test = dataset
    # Local (single-GPU) reference probabilities.
    l_model = lKNNClf(n_neighbors=n_neighbors)
    l_model.fit(X_train, y_train)
    l_probas = l_model.predict_proba(X_test)
    X_train = generate_dask_array(X_train, n_parts)
    X_test = generate_dask_array(X_test, n_parts)
    y_train = generate_dask_array(y_train, n_parts)
    if datatype == "dask_cudf":
        X_train = to_dask_cudf(X_train, client)
        X_test = to_dask_cudf(X_test, client)
        y_train = to_dask_cudf(y_train, client)
    d_model = dKNNClf(client=client, n_neighbors=n_neighbors)
    d_model.fit(X_train, y_train)
    d_probas = d_model.predict_proba(X_test, convert_dtype=True)
    d_probas = da.compute(d_probas)[0]
    if datatype == "dask_cudf":
        # Convert each per-target cuDF result back to a numpy array
        # (Series results gain a trailing axis to match the local shape).
        d_probas = list(
            map(
                lambda o: o.to_numpy()
                if isinstance(o, DataFrame)
                else o.to_numpy()[..., np.newaxis],
                d_probas,
            )
        )
    check_probabilities(l_probas, d_probas)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_predict_1D_labels(input_type, client):
    """Fitting and predicting with 1-D labels must not crash."""
    X, y = make_classification(n_samples=10000)

    if input_type == "array":
        dX, dy = da.from_array(X), da.from_array(y)
    elif input_type == "dataframe":
        dX = dd.from_pandas(cudf.DataFrame(X), npartitions=1)
        dy = dd.from_pandas(cudf.Series(y), npartitions=1)

    model = dKNNClf()
    model.fit(dX, dy)
    model.predict(dX)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/experimental/test_filex.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import os
import pandas as pd
from math import ceil
from cuml.experimental import ForestInference
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml.internals.import_utils import has_lightgbm, has_xgboost
from cuml.common.device_selection import using_device_type
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
GradientBoostingClassifier,
GradientBoostingRegressor,
RandomForestClassifier,
RandomForestRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
)
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.model_selection import train_test_split
import treelite
if has_xgboost():
import xgboost as xgb
# pytestmark = pytest.mark.skip
def simulate_data(
    m,
    n,
    k=2,
    n_informative="auto",
    random_state=None,
    classification=True,
    bias=0.0,
):
    """Simulate an (m, n) float32 dataset.

    With n_informative="auto", a fifth of the features are informative.
    Returns (features, labels) with labels flattened to 1-D float32.
    """
    if n_informative == "auto":
        n_informative = n // 5

    if classification:
        maker = make_classification
        extra_kwargs = {"n_redundant": n - n_informative, "n_classes": k}
    else:
        maker = make_regression
        extra_kwargs = {"n_targets": 1, "bias": bias}

    features, labels = maker(
        n_samples=m,
        n_features=n,
        n_informative=n_informative,
        random_state=random_state,
        **extra_kwargs,
    )
    features = np.c_[features].astype(np.float32)
    labels = np.c_[labels].astype(np.float32).flatten()
    return features, labels
# Absolute tolerance for comparing FIL predict_proba against XGBoost,
# keyed by "is multiclass": False -> binary classification, True -> multiclass.
proba_atol = {False: 3e-7, True: 3e-6}
def _build_and_save_xgboost(
    model_path,
    X_train,
    y_train,
    classification=True,
    num_rounds=5,
    n_classes=2,
    xgboost_params=None,
):
    """Trains a small xgboost model and saves it to model_path.

    `xgboost_params` entries override the defaults chosen here.
    Returns the trained Booster.

    FIX: the default for `xgboost_params` was a mutable dict ({}), the
    classic shared-default pitfall; use None and substitute inside.
    """
    dtrain = xgb.DMatrix(X_train, label=y_train)
    # instantiate params (deliberately deep trees)
    params = {"eval_metric": "error", "max_depth": 25}
    # learning task params
    if classification:
        if n_classes == 2:
            params["objective"] = "binary:logistic"
        else:
            params["num_class"] = n_classes
            params["objective"] = "multi:softprob"
    else:
        params["objective"] = "reg:squarederror"
        params["base_score"] = 0.0
    # Caller-supplied parameters take precedence over the defaults.
    params.update(xgboost_params or {})
    bst = xgb.train(params, dtrain, num_rounds)
    bst.save_model(model_path)
    return bst
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize(
    "n_rows", [unit_param(1000), quality_param(10000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_columns", [unit_param(30), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
    "num_rounds",
    [unit_param(1), unit_param(5), quality_param(50), stress_param(90)],
)
@pytest.mark.parametrize("n_classes", [2, 5, 25])
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_fil_classification(
    train_device,
    infer_device,
    n_rows,
    n_columns,
    num_rounds,
    n_classes,
    tmp_path,
):
    """FIL classification predictions must match XGBoost's, before and
    after fm.optimize(), for every train/infer device combination."""
    with using_device_type(train_device):
        # settings
        classification = True  # classification problem (vs. regression)
        random_state = np.random.RandomState(43210)
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=random_state,
            classification=classification,
        )
        # identify shape and indices
        n_rows, n_columns = X.shape
        X_train, X_validation, y_train, y_validation = train_test_split(
            X, y, train_size=0.8, random_state=0
        )
        model_path = os.path.join(tmp_path, "xgb_class.model")
        bst = _build_and_save_xgboost(
            model_path,
            X_train,
            y_train,
            num_rounds=num_rounds,
            classification=classification,
            n_classes=n_classes,
        )
        dvalidation = xgb.DMatrix(X_validation, label=y_validation)
        # Binary models emit a probability column; multiclass emit one
        # column per class, so the hard labels are derived differently.
        if n_classes == 2:
            xgb_preds = bst.predict(dvalidation)
            xgb_preds_int = np.around(xgb_preds)
        else:
            xgb_preds = bst.predict(dvalidation)
            xgb_preds_int = xgb_preds.argmax(axis=1)
        xgb_acc = accuracy_score(y_validation, xgb_preds_int)
        fm = ForestInference.load(model_path, output_class=True)
    with using_device_type(infer_device):
        fil_preds = np.reshape(
            np.asarray(fm.predict(X_validation, threshold=0.50)),
            xgb_preds_int.shape,
        )
        fil_proba = np.reshape(
            np.asarray(fm.predict_proba(X_validation)), xgb_preds.shape
        )
        # Re-run after auto-tuning; results must be unchanged.
        fm.optimize(batch_size=len(X_validation))
        fil_preds_opt = np.reshape(
            np.asarray(fm.predict(X_validation, threshold=0.50)),
            xgb_preds_int.shape,
        )
        fil_proba_opt = np.reshape(
            np.asarray(fm.predict_proba(X_validation)), xgb_preds.shape
        )
    fil_acc = accuracy_score(y_validation, fil_preds)
    assert fil_acc == pytest.approx(xgb_acc, abs=0.01)
    assert array_equal(fil_preds, xgb_preds_int)
    assert array_equal(fil_preds_opt, fil_preds)
    np.testing.assert_allclose(
        fil_proba, xgb_preds, atol=proba_atol[n_classes > 2]
    )
    np.testing.assert_allclose(
        fil_proba_opt, fil_proba, atol=proba_atol[n_classes > 2]
    )
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize(
    "n_rows", [unit_param(1000), quality_param(10000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_columns", [unit_param(20), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
    "num_rounds", [unit_param(5), quality_param(10), stress_param(90)]
)
@pytest.mark.parametrize(
    "max_depth", [unit_param(3), unit_param(7), stress_param(11)]
)
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_fil_regression(
    train_device,
    infer_device,
    n_rows,
    n_columns,
    num_rounds,
    tmp_path,
    max_depth,
):
    """FIL regression predictions must closely match XGBoost's, before
    and after fm.optimize(), for every train/infer device combination."""
    with using_device_type(train_device):
        # settings
        classification = False  # regression problem
        random_state = np.random.RandomState(43210)
        X, y = simulate_data(
            n_rows,
            n_columns,
            random_state=random_state,
            classification=classification,
            bias=10.0,
        )
        # identify shape and indices
        n_rows, n_columns = X.shape
        train_size = 0.80
        X_train, X_validation, y_train, y_validation = train_test_split(
            X, y, train_size=train_size, random_state=0
        )
        model_path = os.path.join(tmp_path, "xgb_reg.model")
        bst = _build_and_save_xgboost(
            model_path,
            X_train,
            y_train,
            classification=classification,
            num_rounds=num_rounds,
            xgboost_params={"max_depth": max_depth},
        )
        dvalidation = xgb.DMatrix(X_validation, label=y_validation)
        xgb_preds = bst.predict(dvalidation)
        xgb_mse = mean_squared_error(y_validation, xgb_preds)
        fm = ForestInference.load(model_path, output_class=False)
    with using_device_type(infer_device):
        fil_preds = np.asarray(fm.predict(X_validation))
        fil_preds = np.reshape(fil_preds, np.shape(xgb_preds))
        fil_mse = mean_squared_error(y_validation, fil_preds)
        # Re-run after auto-tuning on the validation batch; results
        # must be unchanged.
        fm.optimize(data=X_validation)
        fil_preds_opt = np.asarray(fm.predict(X_validation))
        fil_preds_opt = np.reshape(fil_preds_opt, np.shape(xgb_preds))
    assert fil_mse == pytest.approx(xgb_mse, abs=0.01)
    assert np.allclose(fil_preds, xgb_preds, 1e-3)
    assert np.allclose(fil_preds_opt, fil_preds, 1e-3)
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("n_rows", [1000])
@pytest.mark.parametrize("n_columns", [30])
# Skip depth 20 for dense tests
@pytest.mark.parametrize(
    "max_depth,storage_type",
    [(2, False), (2, True), (10, False), (10, True), (20, True)],
)
# When n_classes=25, fit a single estimator only to reduce test time
@pytest.mark.parametrize(
    "n_classes,model_class,n_estimators,precision",
    [
        (2, GradientBoostingClassifier, 1, "native"),
        (2, GradientBoostingClassifier, 10, "native"),
        (2, RandomForestClassifier, 1, "native"),
        (5, RandomForestClassifier, 1, "native"),
        (2, RandomForestClassifier, 10, "native"),
        (5, RandomForestClassifier, 10, "native"),
        (2, ExtraTreesClassifier, 1, "native"),
        (2, ExtraTreesClassifier, 10, "native"),
        (5, GradientBoostingClassifier, 1, "native"),
        (5, GradientBoostingClassifier, 10, "native"),
        (25, GradientBoostingClassifier, 1, "native"),
        (25, RandomForestClassifier, 1, "native"),
        (2, RandomForestClassifier, 10, "float32"),
        (2, RandomForestClassifier, 10, "float64"),
        (5, RandomForestClassifier, 10, "float32"),
        (5, RandomForestClassifier, 10, "float64"),
    ],
)
def test_fil_skl_classification(
    train_device,
    infer_device,
    n_rows,
    n_columns,
    n_estimators,
    max_depth,
    n_classes,
    storage_type,
    precision,
    model_class,
):
    """FIL models imported from scikit-learn classifiers match the original.

    Accuracy must match within a class-count-dependent threshold, binary
    labels must match exactly, and probabilities (before and after
    ``fm.optimize()``) must agree within ``proba_atol``.
    """
    with using_device_type(train_device):
        # synthesize a classification problem
        classification = True
        random_state = np.random.RandomState(43210)
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=random_state,
            classification=classification,
        )
        # identify shape and indices
        train_size = 0.80
        X_train, X_validation, y_train, y_validation = train_test_split(
            X, y, train_size=train_size, random_state=0
        )
        init_kwargs = {
            "n_estimators": n_estimators,
            "max_depth": max_depth,
        }
        if model_class in [RandomForestClassifier, ExtraTreesClassifier]:
            init_kwargs["max_features"] = 0.3
            init_kwargs["n_jobs"] = -1
        else:
            # model_class == GradientBoostingClassifier
            init_kwargs["init"] = "zero"
        skl_model = model_class(**init_kwargs, random_state=random_state)
        skl_model.fit(X_train, y_train)
        skl_preds = skl_model.predict(X_validation)
        skl_preds_int = np.around(skl_preds)
        skl_proba = skl_model.predict_proba(X_validation)
        skl_acc = accuracy_score(y_validation, skl_preds_int)
        fm = ForestInference.load_from_sklearn(
            skl_model, precision=precision, output_class=True
        )
    with using_device_type(infer_device):
        fil_preds = np.asarray(fm.predict(X_validation, threshold=0.50))
        fil_preds = np.reshape(fil_preds, np.shape(skl_preds_int))
        fil_acc = accuracy_score(y_validation, fil_preds)
        # fil_acc is within p99 error bars of skl_acc (diff == 0.017 +- 0.012)
        # however, some tests have a delta as big as 0.04.
        # sklearn uses float64 thresholds, while FIL uses float32
        # TODO(levsnv): once FIL supports float64 accuracy, revisit thresholds
        threshold = 1e-5 if n_classes == 2 else 0.1
        assert fil_acc == pytest.approx(skl_acc, abs=threshold)
        if n_classes == 2:
            assert array_equal(fil_preds, skl_preds_int)
        fil_proba = np.asarray(fm.predict_proba(X_validation))
        # NOTE(review): optimize() receives X with an added leading axis —
        # confirm this 3-D input is intentional
        fm.optimize(data=np.expand_dims(X_validation, 0))
        fil_proba_opt = np.asarray(fm.predict_proba(X_validation))
        try:
            fil_proba = np.reshape(fil_proba, np.shape(skl_proba))
            fil_proba_opt = np.reshape(fil_proba_opt, np.shape(skl_proba))
        except ValueError:
            # fallback: FIL probabilities did not reshape to sklearn's
            # (n, 2) layout, so compare against the positive class only
            skl_proba = skl_proba[:, 1]
            fil_proba = np.reshape(fil_proba, np.shape(skl_proba))
            fil_proba_opt = np.reshape(fil_proba_opt, np.shape(skl_proba))
        np.testing.assert_allclose(
            fil_proba, skl_proba, atol=proba_atol[n_classes > 2]
        )
        np.testing.assert_allclose(
            fil_proba_opt, fil_proba, atol=proba_atol[n_classes > 2]
        )
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("n_rows", [1000])
@pytest.mark.parametrize("n_columns", [20])
@pytest.mark.parametrize(
    "n_classes,model_class,n_estimators",
    [
        (1, GradientBoostingRegressor, 1),
        (1, GradientBoostingRegressor, 10),
        (1, RandomForestRegressor, 1),
        (1, RandomForestRegressor, 10),
        (5, RandomForestRegressor, 1),
        (5, RandomForestRegressor, 10),
        (1, ExtraTreesRegressor, 1),
        (1, ExtraTreesRegressor, 10),
        (5, GradientBoostingRegressor, 10),
    ],
)
@pytest.mark.parametrize("max_depth", [2, 10, 20])
@pytest.mark.parametrize("storage_type", [False, True])
@pytest.mark.skip("https://github.com/rapidsai/cuml/issues/5138")
def test_fil_skl_regression(
    train_device,
    infer_device,
    n_rows,
    n_columns,
    n_classes,
    model_class,
    n_estimators,
    max_depth,
    storage_type,
):
    """FIL loaded from scikit-learn regressors matches their predictions.

    Currently skipped (see linked issue). Checks MSE is no worse than
    sklearn's and that plain and ``optimize()``-ed FIL predictions agree
    element-wise within 1.2e-3.
    """
    with using_device_type(train_device):
        # skip depth 20 for dense tests
        if max_depth == 20 and not storage_type:
            return
        # synthesize a regression problem
        random_state = np.random.RandomState(43210)
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=random_state,
            classification=False,
        )
        # identify shape and indices
        train_size = 0.80
        X_train, X_validation, y_train, y_validation = train_test_split(
            X, y, train_size=train_size, random_state=0
        )
        init_kwargs = {
            "n_estimators": n_estimators,
            "max_depth": max_depth,
        }
        if model_class in [RandomForestRegressor, ExtraTreesRegressor]:
            init_kwargs["max_features"] = 0.3
            init_kwargs["n_jobs"] = -1
        else:
            # model_class == GradientBoostingRegressor
            init_kwargs["init"] = "zero"
        skl_model = model_class(**init_kwargs)
        skl_model.fit(X_train, y_train)
        skl_preds = skl_model.predict(X_validation)
        skl_mse = mean_squared_error(y_validation, skl_preds)
        fm = ForestInference.load_from_sklearn(skl_model, output_class=False)
    with using_device_type(infer_device):
        fil_preds = np.asarray(fm.predict(X_validation))
        fil_preds = np.reshape(fil_preds, np.shape(skl_preds))
        fm.optimize(batch_size=len(X_validation))
        fil_preds_opt = np.asarray(fm.predict(X_validation))
        fil_preds_opt = np.reshape(fil_preds_opt, np.shape(skl_preds))
    fil_mse = mean_squared_error(y_validation, fil_preds)
    # FIL may only be negligibly worse than sklearn (float32 rounding slack)
    assert fil_mse <= skl_mse * (1.0 + 1e-6) + 1e-4
    np.testing.assert_allclose(fil_preds, skl_preds, atol=1.2e-3)
    np.testing.assert_allclose(fil_preds_opt, fil_preds, atol=1.2e-3)
@pytest.fixture(scope="session", params=["binary", "json"])
def small_classifier_and_preds(tmpdir_factory, request):
    """Session fixture: a small binary XGBoost classifier in both formats.

    Trains once per serialization format (legacy binary vs JSON), saves the
    model under a temp dir, and returns a
    ``(model_path, model_type, X, xgb_preds)`` tuple where ``xgb_preds``
    are XGBoost's in-sample predictions.
    """
    X, y = simulate_data(500, 10, random_state=43210, classification=True)
    ext = "json" if request.param == "json" else "model"
    model_type = "xgboost_json" if request.param == "json" else "xgboost"
    model_path = str(
        tmpdir_factory.mktemp("models").join(f"small_class.{ext}")
    )
    bst = _build_and_save_xgboost(model_path, X, y)
    # just do within-sample since it's not an accuracy test
    dtrain = xgb.DMatrix(X, label=y)
    xgb_preds = bst.predict(dtrain)
    return (model_path, model_type, X, xgb_preds)
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
@pytest.mark.parametrize("precision", ["native", "float32", "float64"])
def test_precision_xgboost(
    train_device, infer_device, precision, small_classifier_and_preds
):
    """FIL reproduces XGBoost class labels at every supported precision."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    with using_device_type(train_device):
        forest = ForestInference.load(
            model_path,
            model_type=model_type,
            output_class=True,
            precision=precision,
        )
    with using_device_type(infer_device):
        expected_labels = np.around(xgb_preds)
        predicted = np.reshape(
            np.asarray(forest.predict(X, threshold=0.50)),
            np.shape(expected_labels),
        )
    assert np.allclose(predicted, expected_labels, 1e-3)
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize("threads_per_tree", [2, 4, 8, 16, 32])
def test_threads_per_tree(
    train_device, infer_device, threads_per_tree, small_classifier_and_preds
):
    """Varying FIL's ``chunk_size`` must not change predictions or probas."""
    with using_device_type(train_device):
        model_path, model_type, X, xgb_preds = small_classifier_and_preds
        fm = ForestInference.load(
            model_path, output_class=True, model_type=model_type
        )
    with using_device_type(infer_device):
        # run both predict paths with the parametrized chunk size
        fil_preds = np.asarray(fm.predict(X, chunk_size=threads_per_tree))
        fil_proba = np.asarray(
            fm.predict_proba(X, chunk_size=threads_per_tree)
        )
        fil_proba = np.reshape(fil_proba, xgb_preds.shape)
        np.testing.assert_allclose(
            fil_proba, xgb_preds, atol=proba_atol[False]
        )
        # rounded XGBoost probabilities are the reference class labels
        xgb_preds_int = np.around(xgb_preds)
        fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))
    assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_output_args(train_device, infer_device, small_classifier_and_preds):
    """With output_class=False, FIL's raw output matches XGBoost's."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    with using_device_type(train_device):
        forest = ForestInference.load(
            model_path, output_class=False, model_type=model_type
        )
    with using_device_type(infer_device):
        raw_preds = np.reshape(
            forest.predict(np.asarray(X)), np.shape(xgb_preds)
        )
    np.testing.assert_allclose(raw_preds, xgb_preds, atol=1e-3)
def to_categorical(features, n_categorical, invalid_frac, random_state):
    """Return the data in two formats: pandas (LightGBM) and numpy (FIL).

    LightGBM needs a DataFrame with Categorical columns to fit on
    categorical features; the second, fp32-compatible matrix additionally
    carries injected invalid categories and is for prediction only.
    """
    features = features.copy()  # avoid clobbering the caller's matrix
    # hash() lets callers pass either an int seed or a RandomState object
    rng = np.random.default_rng(hash(random_state))
    n_features = features.shape[1]
    bins = 100
    # take the leading columns as the categorical block and quantize each
    # column into ~`bins` integer categories (reduce across rows, axis=0)
    cat_block = features[:, :n_categorical]
    cat_block = cat_block - cat_block.min(axis=0, keepdims=True)
    cat_block /= cat_block.max(axis=0, keepdims=True)
    cat_block = (cat_block * bins).astype(int)
    # shuffle column positions so categorical and numerical columns mix
    shuffled = rng.choice(n_features, n_features, replace=False, shuffle=True)
    columns = {}
    for pos in range(n_features):
        dest = shuffled[pos]
        if pos < n_categorical:
            raw = cat_block[:, pos]
            columns[dest] = pd.Series(
                pd.Categorical(raw, categories=np.unique(raw))
            )
        else:
            columns[dest] = pd.Series(features[:, pos])
    fit_df = pd.DataFrame(columns)
    # inject out-of-vocabulary categories into the predict matrix only
    n_invalid = ceil(cat_block.size * invalid_frac)
    bad_flat_idx = rng.choice(
        a=cat_block.size,
        size=n_invalid,
        replace=False,
        shuffle=False,
    )
    cat_block.flat[bad_flat_idx] += bins
    # rebuild the dense matrix and apply the same column shuffle
    predict_matrix = np.concatenate(
        [cat_block, features[:, n_categorical:]], axis=1
    )
    predict_matrix[:, shuffled] = predict_matrix
    return fit_df, predict_matrix
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("num_classes", [2, 5])
@pytest.mark.parametrize("n_categorical", [0, 5])
@pytest.mark.skipif(not has_lightgbm(), reason="need to install lightgbm")
def test_lightgbm(
    train_device, infer_device, tmp_path, num_classes, n_categorical
):
    """FIL must reproduce LightGBM output, including categorical features
    with unseen (invalid) categories injected at predict time.
    """
    import lightgbm as lgb
    # categorical runs use a smaller, fully-informative feature set
    if n_categorical > 0:
        n_features = 10
        n_rows = 1000
        n_informative = n_features
    else:
        n_features = 10 if num_classes == 2 else 50
        n_rows = 500
        n_informative = "auto"
    X, y = simulate_data(
        n_rows,
        n_features,
        num_classes,
        n_informative=n_informative,
        random_state=43210,
        classification=True,
    )
    if n_categorical > 0:
        # X_fit is a DataFrame with Categorical columns (for LightGBM);
        # X_predict is a dense matrix carrying 10% invalid categories
        X_fit, X_predict = to_categorical(
            X,
            n_categorical=n_categorical,
            invalid_frac=0.1,
            random_state=43210,
        )
    else:
        X_fit, X_predict = X, X
    train_data = lgb.Dataset(X_fit, label=y)
    num_round = 5
    model_path = str(os.path.join(tmp_path, "lgb.model"))
    if num_classes == 2:
        # binary classification via the low-level Booster API
        param = {
            "objective": "binary",
            "metric": "binary_logloss",
            "num_class": 1,
        }
        bst = lgb.train(param, train_data, num_round)
        bst.save_model(model_path)
        with using_device_type(train_device):
            fm = ForestInference.load(
                model_path, output_class=True, model_type="lightgbm"
            )
        # binary classification
        gbm_proba = bst.predict(X_predict)
        with using_device_type(infer_device):
            fil_proba = fm.predict_proba(X_predict)[:, 0]
            gbm_preds = (gbm_proba > 0.5).astype(float)
            fil_preds = fm.predict(X_predict)[:, 0]
            assert array_equal(gbm_preds, fil_preds)
            np.testing.assert_allclose(
                gbm_proba, fil_proba, atol=proba_atol[num_classes > 2]
            )
    else:
        # multi-class classification via the sklearn-style wrapper
        lgm = lgb.LGBMClassifier(
            objective="multiclass",
            boosting_type="gbdt",
            n_estimators=num_round,
        )
        lgm.fit(X_fit, y)
        lgm.booster_.save_model(model_path)
        lgm_preds = lgm.predict(X_predict).astype(int)
        with using_device_type(train_device):
            fm = ForestInference.load(
                model_path, output_class=True, model_type="lightgbm"
            )
        # sanity check: wrapper labels equal argmax of the booster's probas
        assert array_equal(
            lgm.booster_.predict(X_predict).argmax(axis=1), lgm_preds
        )
        with using_device_type(infer_device):
            assert array_equal(lgm_preds, fm.predict(X_predict))
            # lightgbm uses float64 thresholds, while FIL uses float32
            np.testing.assert_allclose(
                lgm.predict_proba(X_predict),
                fm.predict_proba(X_predict),
                atol=proba_atol[num_classes > 2],
            )
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("n_classes", [2, 5, 25])
@pytest.mark.parametrize("num_boost_round", [10, 100])
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_predict_per_tree(
    train_device, infer_device, n_classes, num_boost_round, tmp_path
):
    """predict_per_tree output sums to XGBoost margins and matches GTIL.

    Per-tree outputs, summed per class, are compared against XGBoost's raw
    margins and against treelite GTIL's per-tree predictions; output after
    ``fm.optimize()`` must match the unoptimized output.
    """
    n_rows = 1000
    n_columns = 30
    with using_device_type(train_device):
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=0,
            classification=True,
        )
        model_path = os.path.join(tmp_path, "xgb_class.model")
        # base_score 0.0 for multi-class — presumably so the summed tree
        # outputs equal the raw margins without an offset; confirm if changed
        xgboost_params = {"base_score": (0.5 if n_classes == 2 else 0.0)}
        bst = _build_and_save_xgboost(
            model_path,
            X,
            y,
            num_rounds=num_boost_round,
            classification=True,
            n_classes=n_classes,
            xgboost_params=xgboost_params,
        )
        fm = ForestInference.load(model_path, output_class=True)
        tl_model = treelite.Model.from_xgboost(bst)
        pred_per_tree_tl = treelite.gtil.predict_per_tree(tl_model, X)
    with using_device_type(infer_device):
        pred_per_tree = fm.predict_per_tree(X)
        margin_pred = bst.predict(xgb.DMatrix(X), output_margin=True)
        if n_classes == 2:
            expected_shape = (n_rows, num_boost_round)
            sum_by_class = np.sum(pred_per_tree, axis=1)
        else:
            # per-tree columns are laid out round-robin by class, hence the
            # class_id::n_classes stride when summing each class's trees
            expected_shape = (n_rows, num_boost_round * n_classes)
            sum_by_class = np.column_stack(
                tuple(
                    np.sum(pred_per_tree[:, class_id::n_classes], axis=1)
                    for class_id in range(n_classes)
                )
            )
        fm.optimize(batch_size=len(X), predict_method="predict_per_tree")
        pred_per_tree_opt = fm.predict_per_tree(X)
    assert pred_per_tree.shape == expected_shape
    np.testing.assert_almost_equal(sum_by_class, margin_pred, decimal=3)
    np.testing.assert_almost_equal(
        pred_per_tree, pred_per_tree_tl, decimal=3
    )
    np.testing.assert_almost_equal(
        pred_per_tree_opt, pred_per_tree, decimal=3
    )
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("n_classes", [5, 25])
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_predict_per_tree_with_vector_leaf(
    train_device, infer_device, n_classes, tmp_path
):
    """predict_per_tree on vector-leaf forests (sklearn RF) matches GTIL.

    Each tree emits a per-class vector; the average over trees must equal
    the model's predict_proba, and per-tree output must match treelite
    GTIL before and after ``fm.optimize()``.
    """
    n_rows = 1000
    n_columns = 30
    n_estimators = 10
    with using_device_type(train_device):
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=0,
            classification=True,
        )
        skl_model = RandomForestClassifier(
            max_depth=3, random_state=0, n_estimators=n_estimators
        )
        skl_model.fit(X, y)
        tl_model = treelite.sklearn.import_model(skl_model)
        pred_per_tree_tl = treelite.gtil.predict_per_tree(tl_model, X)
        fm = ForestInference.load_from_sklearn(
            skl_model, precision="native", output_class=True
        )
    with using_device_type(infer_device):
        pred_per_tree = fm.predict_per_tree(X)
        fm.optimize(batch_size=len(X), predict_method="predict_per_tree")
        pred_per_tree_opt = fm.predict_per_tree(X)
        margin_pred = skl_model.predict_proba(X)
        # one (n_classes,) leaf vector per tree and row
        assert pred_per_tree.shape == (n_rows, n_estimators, n_classes)
        avg_by_class = np.sum(pred_per_tree, axis=1) / n_estimators
        np.testing.assert_almost_equal(avg_by_class, margin_pred, decimal=3)
        np.testing.assert_almost_equal(
            pred_per_tree, pred_per_tree_tl, decimal=3
        )
        np.testing.assert_almost_equal(
            pred_per_tree_opt, pred_per_tree, decimal=3
        )
@pytest.mark.parametrize("train_device", ("cpu", "gpu"))
@pytest.mark.parametrize("infer_device", ("cpu", "gpu"))
@pytest.mark.parametrize("n_classes", [2, 5, 25])
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_apply(train_device, infer_device, n_classes, tmp_path):
    """fm.apply() must return the same leaf indices as XGBoost pred_leaf."""
    n_rows = 1000
    n_columns = 30
    num_boost_round = 10
    with using_device_type(train_device):
        X, y = simulate_data(
            n_rows,
            n_columns,
            n_classes,
            random_state=0,
            classification=True,
        )
        model_path = os.path.join(tmp_path, "xgb_class.model")
        xgboost_params = {"base_score": (0.5 if n_classes == 2 else 0.0)}
        bst = _build_and_save_xgboost(
            model_path,
            X,
            y,
            num_rounds=num_boost_round,
            classification=True,
            n_classes=n_classes,
            xgboost_params=xgboost_params,
        )
        fm = ForestInference.load(
            model_path, output_class=True, model_type="xgboost"
        )
    with using_device_type(infer_device):
        # leaf indices as int32 to match XGBoost's pred_leaf output
        pred_leaf = fm.apply(X).astype(np.int32)
        expected_pred_leaf = bst.predict(xgb.DMatrix(X), pred_leaf=True)
        # one leaf index per tree (times n_classes for multi-class models)
        if n_classes == 2:
            expected_shape = (n_rows, num_boost_round)
        else:
            expected_shape = (n_rows, num_boost_round * n_classes)
        assert pred_leaf.shape == expected_shape
        np.testing.assert_equal(pred_leaf, expected_pred_leaf)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_explainer_permutation_shap.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import (
ClassEnumerator,
get_shap_values,
create_synthetic_dataset,
)
from cuml import PermutationExplainer
import sklearn.neighbors
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# enumerate the cuml estimator classes once at import time; available for
# parametrizing tests over all models
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
###############################################################################
# End to end tests #
###############################################################################
@pytest.mark.parametrize(
    "model", [cuml.LinearRegression, cuml.KNeighborsRegressor, cuml.SVR]
)
def test_regression_datasets(exact_shap_regression_dataset, model):
    """Permutation SHAP additivity on regression models.

    SHAP's efficiency property requires each row's shap values to sum to
    ``f(x) - expected_value``; verify it for both the cuml model and its
    scikit-learn counterpart on the first three explained rows.
    """
    X_train, X_test, y_train, y_test = exact_shap_regression_dataset

    # explain the cuml estimator and its sklearn equivalent the same way
    fitted_models = []
    fitted_models.append(model().fit(X_train, y_train))
    fitted_models.append(cuml_skl_class_dict[model]().fit(X_train, y_train))

    for mod in fitted_models:
        explainer, shap_values = get_shap_values(
            model=mod.predict,
            background_dataset=X_train,
            explained_dataset=X_test,
            explainer=PermutationExplainer,
        )

        fx = mod.predict(X_test)
        exp_v = explainer.expected_value

        for i in range(3):
            # BUG FIX: the previous form `(sum - abs(diff)) <= 1e-5` was
            # trivially true whenever the left side was negative; compare
            # the absolute deviation from the signed difference instead.
            assert (
                abs(np.sum(cp.asnumpy(shap_values[i])) - (fx[i] - exp_v))
                <= 1e-5
            )
def test_exact_classification_datasets(exact_shap_classification_dataset):
    """Permutation SHAP additivity on probabilistic SVC classifiers.

    For each class's shap values, the per-row sum must equal
    ``predict_proba(x)[class] - expected_value[class]``; checked for both
    the cuml and sklearn SVC on the first three explained rows.
    """
    X_train, X_test, y_train, y_test = exact_shap_classification_dataset

    models = []
    models.append(cuml.SVC(probability=True).fit(X_train, y_train))
    models.append(sklearn.svm.SVC(probability=True).fit(X_train, y_train))

    for mod in models:
        explainer, shap_values = get_shap_values(
            model=mod.predict_proba,
            background_dataset=X_train,
            explained_dataset=X_test,
            explainer=PermutationExplainer,
        )

        fx = mod.predict_proba(X_test)
        exp_v = explainer.expected_value

        for i in range(3):
            # BUG FIX: the previous `(sum - abs(diff)) <= 1e-5` comparisons
            # were trivially true whenever the left side was negative; use
            # the absolute deviation from the signed difference per class.
            assert (
                abs(
                    np.sum(cp.asnumpy(shap_values[0][i]))
                    - (fx[i][0] - exp_v[0])
                )
                <= 1e-5
            )
            assert (
                abs(
                    np.sum(cp.asnumpy(shap_values[1][i]))
                    - (fx[i][1] - exp_v[1])
                )
                <= 1e-5
            )
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("n_features", [11, 50])
@pytest.mark.parametrize("n_background", [10, 50])
@pytest.mark.parametrize("model", [cuml.LinearRegression, cuml.SVR])
@pytest.mark.parametrize("npermutations", [20])
def test_different_parameters(
    dtype, n_features, n_background, model, npermutations
):
    """Shap-value sums stay within 3% of f(x) - E[f(X)] across dtypes,
    feature counts and background sizes."""
    cp.random.seed(42)
    X_train, X_test, y_train, y_test = create_synthetic_dataset(
        n_samples=n_background + 5,
        n_features=n_features,
        test_size=5,
        noise=0.1,
        dtype=dtype,
    )
    estimator = model().fit(X_train, y_train)
    explainer = PermutationExplainer(
        model=estimator.predict, data=X_train, is_gpu_model=True
    )
    shap_vals = explainer.shap_values(X_test, npermutations=npermutations)
    baseline = float(explainer.expected_value)
    preds = estimator.predict(X_test)
    for row in range(5):
        ratio = abs(np.sum(cp.asnumpy(shap_vals[row]))) / abs(
            preds[row] - baseline
        )
        assert 0.97 <= ratio <= 1.03
###############################################################################
# Functional tests #
###############################################################################
def test_not_shuffled_explanation(exact_shap_regression_dataset):
    """Compare against values precomputed with an unshuffled permutation.

    Permutation SHAP is less predictable than kernel SHAP (even versus the
    mainline SHAP package), so instead of a direct comparison we run a
    single non-shuffled permutation (testing=True) and check against
    values precomputed the exact same way.
    """
    X_train, X_test, y_train, y_test = exact_shap_regression_dataset
    regressor = cuml.LinearRegression().fit(X_train, y_train)
    explainer = PermutationExplainer(model=regressor.predict, data=X_train)
    values = explainer.shap_values(X_test[0], npermutations=1, testing=True)
    assert np.allclose(
        values, not_shuffled_shap_values, rtol=1e-04, atol=1e-04
    )
# For a linear model explained against a single all-zeros background row
# with an all-ones foreground row, each feature's exact SHAP value is its
# regression coefficient.
def test_permutation(exact_shap_regression_dataset):
    """SHAP values of a linear model equal its coefficients in this setup."""
    X_train, _, y_train, _ = exact_shap_regression_dataset
    # train an arbitrary model to obtain some coefficients
    regressor = cuml.LinearRegression().fit(X_train, y_train)
    n_cols = X_train.shape[1]
    # features contribute nothing when 'off' (zeros) and exactly their
    # coefficient when 'on' (ones)
    background = np.zeros((1, n_cols))
    foreground = np.ones((1, n_cols))
    explainer = PermutationExplainer(
        model=regressor.predict, data=background
    )
    values = explainer.shap_values(
        foreground,
        npermutations=5,
    )
    assert np.allclose(regressor.coef_, values, rtol=1e-04, atol=1e-04)
###############################################################################
#                          Precomputed results                                #
#                          and testing variables                              #
###############################################################################

# Maps each cuml regressor under test to its scikit-learn counterpart so the
# same SHAP checks can be run against both implementations.
cuml_skl_class_dict = {
    cuml.LinearRegression: sklearn.linear_model.LinearRegression,
    cuml.KNeighborsRegressor: sklearn.neighbors.KNeighborsRegressor,
    cuml.SVR: sklearn.svm.SVR,
}

# values were precomputed with python code and with a modified version
# of SHAP's permutationExplainer that did not shuffle the indexes for the
# permutations, giving us a test of the calculations in our implementation
not_shuffled_shap_values = [
    -1.3628101e00,
    -1.0234560e02,
    1.3428497e-01,
    -6.1764000e01,
    2.6702881e-04,
    -3.4455948e00,
    -1.0159061e02,
    3.4058895e00,
    4.1598404e01,
    7.2152489e01,
    -2.1964169e00,
]
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_explainer_base.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml import LinearRegression as cuLR
from cuml.explainer.base import SHAPBase
from pylibraft.common.handle import Handle
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@pytest.mark.parametrize("handle", [True, False])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, None])
@pytest.mark.parametrize("order", ["C", None])
def test_init_explainer_base_init_cuml_model(handle, dtype, order):
    """SHAPBase construction around a fitted cuml model.

    Checks background metadata (ncols/nrows/feature names), GPU-model
    detection, order inference from the model, and handle reuse.
    """
    # NOTE(review): the `dtype` parametrization is unused below — SHAPBase
    # always receives dtype=None here; confirm whether this is intended
    bg = np.arange(10).reshape(5, 2).astype(np.float32)
    y = np.arange(5).astype(np.float32)
    bg_df = cudf.DataFrame(bg)
    model = cuLR().fit(bg, y)
    # `handle` arrives as a bool from parametrize and is replaced here by a
    # real Handle (True) or None (False)
    if handle:
        handle = Handle()
    else:
        handle = None
    explainer = SHAPBase(
        model=model.predict,
        background=bg_df,
        order=order,
        link="identity",
        verbose=2,
        random_state=None,
        is_gpu_model=None,
        handle=handle,
        dtype=None,
        output_type=None,
    )
    assert explainer.ncols == 2
    assert explainer.nrows == 5
    assert np.all(cp.asnumpy(explainer.background) == bg)
    assert np.all(explainer.feature_names == bg_df.columns)
    # a cuml model must be auto-detected as a GPU model
    assert explainer.is_gpu_model
    # check that we infer the order from the model (F for LinearRegression) if
    # it is not passed explicitly
    if order is None:
        assert explainer.order == "F"
    else:
        assert explainer.order == order
    # check that we keep the model's handle if one is not passed explicitly
    if handle is not None:
        assert explainer.handle == handle
    else:
        assert explainer.handle == model.handle
@pytest.mark.parametrize("handle", [True, False])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, None])
@pytest.mark.parametrize("order", ["C", None])
@pytest.mark.parametrize("is_gpu_model", [True, False, None])
@pytest.mark.parametrize("output_type", ["cupy", None])
def test_init_explainer_base_init_abritrary_model(
    handle, dtype, order, is_gpu_model, output_type
):
    """SHAPBase construction around an arbitrary (non-cuml) callable.

    Checks background metadata, gpu-model flag handling, output-type
    defaulting, order_default fallback, and handle reuse/creation.
    """
    bg = np.arange(10).reshape(5, 2).astype(np.float32)
    # `handle` arrives as a bool from parametrize and is replaced here by a
    # real Handle (True) or None (False)
    if handle:
        handle = Handle()
    else:
        handle = None
    explainer = SHAPBase(
        model=dummy_func,
        background=bg,
        order=order,
        order_default="F",
        link="identity",
        verbose=2,
        random_state=None,
        is_gpu_model=is_gpu_model,
        handle=handle,
        dtype=None,
        output_type=output_type,
    )
    assert explainer.ncols == 2
    assert explainer.nrows == 5
    assert np.all(cp.asnumpy(explainer.background) == bg)
    # both None and False mean "not a GPU model" for an arbitrary callable
    if not is_gpu_model or is_gpu_model is None:
        assert not explainer.is_gpu_model
    else:
        assert explainer.is_gpu_model
    if output_type is not None:
        assert explainer.output_type == output_type
    else:
        assert explainer.output_type == "numpy"
    # check that explainer defaults to order_default if order is not passed
    # explicitly
    if order is None:
        assert explainer.order == "F"
    else:
        assert explainer.order == order
    # check that we keep the passed handle, or that a new one was created
    if handle is not None:
        assert explainer.handle == handle
    else:
        # BUG FIX: the isinstance result was previously computed but never
        # asserted, so this branch validated nothing
        assert isinstance(explainer.handle, Handle)
def test_init_explainer_base_wrong_dtype():
    """Constructing SHAPBase with a non-float dtype must raise ValueError."""
    with pytest.raises(ValueError):
        bad = SHAPBase(
            model=dummy_func, background=np.ones(10), dtype=np.int32
        )
        bad.ncols
def dummy_func(x):
    """Identity stand-in used where SHAPBase requires a model callable."""
    return x
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_explainer_kernel_shap.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from cuml.testing.utils import (
create_synthetic_dataset,
ClassEnumerator,
get_shap_values,
)
from cuml.datasets import make_regression
from cuml.internals.import_utils import has_shap
from cuml.internals.import_utils import has_scipy
from cuml import KernelExplainer
from cuml import Lasso
import sklearn.neighbors
import pytest
import math
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
def assert_and_log(
    cu_shap_values, golden_result_values, fx, expected, tolerance=1e-02
):
    """Assert shap values match golden values and sum to fx - expected.

    Prints the offending arrays before failing so mismatches are visible
    in the captured test output.
    """
    values_match = np.allclose(
        cu_shap_values, golden_result_values, rtol=tolerance, atol=tolerance
    )
    sums_match = np.allclose(
        1.00, np.sum(cp.asnumpy(cu_shap_values)) / (fx - expected), rtol=1e-01
    )
    if not values_match:
        print("cu_shap_values: ")
        print(cu_shap_values)
        print("golden_result_values")
        print(golden_result_values)
    if not sums_match:
        print(np.sum(cp.asnumpy(cu_shap_values)))
    assert sums_match
    assert values_match
###############################################################################
# End to end tests #
###############################################################################
@pytest.mark.parametrize(
    "model", [cuml.LinearRegression, cuml.KNeighborsRegressor, cuml.SVR]
)
def test_exact_regression_datasets(exact_shap_regression_dataset, model):
    """Kernel SHAP values on regression models match precomputed goldens.

    Both the cuml estimator and its scikit-learn counterpart are explained
    and the first three explained rows are compared (values and additivity)
    via assert_and_log.
    """
    X_train, X_test, y_train, y_test = exact_shap_regression_dataset
    # NOTE(review): this local `models` shadows the module-level `models`
    # built by ClassEnumerator — consider renaming
    models = []
    models.append(model().fit(X_train, y_train))
    models.append(cuml_skl_class_dict[model]().fit(X_train, y_train))
    for mod in models:
        explainer, shap_values = get_shap_values(
            model=mod.predict,
            background_dataset=X_train,
            explained_dataset=X_test,
            explainer=KernelExplainer,
        )
        for i in range(3):
            print(i)
            assert_and_log(
                shap_values[i],
                # golden values are precomputed per estimator class
                golden_regression_results[model][i],
                mod.predict(X_test[i].reshape(1, X_test.shape[1])),
                explainer.expected_value,
            )
def test_exact_classification_datasets(exact_shap_classification_dataset):
    """Exact KernelExplainer SHAP values for probabilistic SVCs (cuML and
    sklearn) must match the precomputed golden classification results."""
    X_train, X_test, y_train, y_test = exact_shap_classification_dataset

    classifiers = [
        cuml.SVC(probability=True).fit(X_train, y_train),
        sklearn.svm.SVC(probability=True).fit(X_train, y_train),
    ]

    for mod in classifiers:
        explainer, shap_values = get_shap_values(
            model=mod.predict_proba,
            background_dataset=X_train,
            explained_dataset=X_test,
            explainer=KernelExplainer,
        )
        # Some values are very small, which mean our tolerance here needs to be
        # a little looser to avoid false positives from comparisons like
        # 0.00348627 - 0.00247397. The loose tolerance still tests that the
        # distribution of the values matches.
        for class_idx, class_shap_values in enumerate(shap_values):
            assert_and_log(
                class_shap_values[0],
                golden_classification_result[class_idx],
                float(mod.predict_proba(X_test)[0][class_idx]),
                explainer.expected_value[class_idx],
                tolerance=1e-01,
            )
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("n_features", [10, 30])
@pytest.mark.parametrize("n_background", [10, 30])
@pytest.mark.parametrize("model", [cuml.TruncatedSVD, cuml.PCA])
def test_kernel_shap_standalone(dtype, n_features, n_background, model):
    """KernelExplainer additivity check for multi-output transforms.

    PCA/TSVD ``transform`` returns arrays of shape (nrows x ncomponents),
    so the explainer yields one list of SHAP values per component. For
    each test row and each component, the SHAP values must sum to
    ``fx - expected_value`` (the additivity property).
    """
    X_train, X_test, y_train, y_test = create_synthetic_dataset(
        n_samples=n_background + 3,
        n_features=n_features,
        test_size=3,
        noise=0.1,
        dtype=dtype,
    )
    mod = model(n_components=3).fit(X_train, y_train)
    explainer, shap_values = get_shap_values(
        model=mod.transform,
        background_dataset=X_train,
        explained_dataset=X_test,
        explainer=KernelExplainer,
    )
    exp_v = explainer.expected_value
    for sv_idx in range(3):
        # pca and tsvd transform give results back nested
        fx = mod.transform(X_test[sv_idx].reshape(1, n_features))[0]
        for comp_idx in range(3):
            # Bug fix: the previous assertion was
            # `sum(shap) - abs(fx - exp_v) <= 1e-5`, which passes
            # trivially whenever the sum is small or negative. Additivity
            # (sum(shap) == fx - exp_v, the convention confirmed by
            # assert_and_log above) requires bounding the absolute
            # difference instead.
            assert (
                abs(
                    np.sum(shap_values[comp_idx][sv_idx])
                    - (fx[comp_idx] - exp_v[comp_idx])
                )
                <= 1e-5
            )
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("n_features", [11, 15])
@pytest.mark.parametrize("n_background", [30])
@pytest.mark.parametrize("model", [cuml.SVR])
def test_kernel_gpu_cpu_shap(dtype, n_features, n_background, model):
    """Check KernelExplainer additivity on an SVR and, when the reference
    ``shap`` package is installed, compare GPU and CPU SHAP values."""
    X_train, X_test, y_train, y_test = create_synthetic_dataset(
        n_samples=n_background + 3,
        n_features=n_features,
        test_size=3,
        noise=0.1,
        dtype=dtype,
    )
    mod = model().fit(X_train, y_train)
    explainer, shap_values = get_shap_values(
        model=mod.predict,
        background_dataset=X_train,
        explained_dataset=X_test,
        explainer=KernelExplainer,
    )
    exp_v = explainer.expected_value
    fx = mod.predict(X_test)
    for test_idx in range(3):
        # Bug fix: the previous assertion was
        # `sum(shap) - abs(fx - exp_v) <= 1e-5`, which passes trivially
        # whenever the sum is small or negative. Additivity requires
        # bounding |sum(shap) - (fx - exp_v)| instead.
        assert (
            abs(np.sum(shap_values[test_idx]) - (fx[test_idx] - exp_v))
            <= 1e-5
        )

    if has_shap():
        import shap

        # CPU reference implementation on the same data.
        explainer = shap.KernelExplainer(mod.predict, cp.asnumpy(X_train))
        cpu_shap_values = explainer.shap_values(cp.asnumpy(X_test))

        assert np.allclose(
            shap_values, cpu_shap_values, rtol=1e-01, atol=1e-01
        )
def test_kernel_housing_dataset(housing_dataset):
    """Smoke test: KernelExplainer on a cuML random forest trained on the
    housing dataset must reproduce the precomputed SHAP values."""
    X, y, _ = housing_dataset

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=42
    )

    # making all float32 to use gpu predict on random forest
    X_train = X_train.astype(np.float32)
    y_train = y_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    y_test = y_test.astype(np.float32)

    cumodel = cuml.RandomForestRegressor().fit(X_train, y_train)

    # Only a background sample of 100 rows and 2 explained rows keep this fast.
    explainer = KernelExplainer(
        model=cumodel.predict, data=X_train[:100], output_type="numpy"
    )
    cu_shap_values = explainer.shap_values(X_test[:2])

    assert np.allclose(
        cu_shap_values, housing_regression_result, rtol=1e-01, atol=1e-01
    )
###############################################################################
# Single function unit tests #
###############################################################################
def test_binom_coef():
    """The internal binomial coefficient must agree with scipy's binom."""
    if has_scipy():
        from scipy.special import binom
    for k in range(1, 101):
        result = cuml.explainer.kernel_shap._binomCoef(100, k)
        if has_scipy():
            assert math.isclose(result, binom(100, k), rel_tol=1e-15)
def test_shapley_kernel():
    """_shapley_kernel(10, i) must match the precomputed reference weights."""
    for subset_size, expected in enumerate(shapley_kernel_results):
        assert (
            cuml.explainer.kernel_shap._shapley_kernel(10, subset_size)
            == expected
        )
def test_full_powerset():
    """The full powerset over 5 features must match the precomputed rows
    and Shapley kernel weights."""
    ps, w = cuml.explainer.kernel_shap._powerset(
        5, 2, 2**5 - 2, full_powerset=True
    )
    for row in range(len(ps)):
        assert np.all(ps[row] == full_powerset_result[row])
        assert math.isclose(w[row], full_powerset_weight_result[row])
def test_partial_powerset():
    """A partial powerset over 6 features must match the precomputed rows
    and Shapley kernel weights."""
    ps, w = cuml.explainer.kernel_shap._powerset(6, 3, 42)
    for row in range(len(ps)):
        assert np.all(ps[row] == partial_powerset_result[row])
        assert math.isclose(w[row], partial_powerset_weight_result[row])
@pytest.mark.parametrize("full_powerset", [True, False])
def test_get_number_of_exact_random_samples(full_powerset):
    """Sample-budget split: with a budget exceeding 2**ncols the full
    powerset is enumerated exactly; otherwise the budget is divided
    between exact and random samples."""
    if full_powerset:
        # Budget larger than the powerset -> all 2**10 - 2 subsets exact.
        nsamples, expected = 2**10 + 1, (1022, 0, 5)
    else:
        nsamples, expected = 100, (20, 80, 2)

    (
        nsamples_exact,
        nsamples_random,
        ind,
    ) = cuml.explainer.kernel_shap._get_number_of_exact_random_samples(
        10, nsamples
    )
    assert (nsamples_exact, nsamples_random, ind) == expected
def test_generate_nsamples_weights():
    """Random subset sizes must stay in {randind, randind + 1} and each
    sampled size must yield a matched pair of Shapley kernel weights."""
    samples, w = cuml.explainer.kernel_shap._generate_nsamples_weights(
        ncols=20,
        nsamples=30,
        nsamples_exact=10,
        nsamples_random=20,
        randind=5,
        dtype=np.float32,
    )
    # check that all our samples are between 5 and 6, and the weights in pairs
    # are generated correctly
    shapley_kernel = cuml.explainer.kernel_shap._shapley_kernel
    for idx, sample in enumerate(samples):
        assert sample in [5, 6]
        expected_weight = shapley_kernel(20, int(sample))
        assert w[idx * 2] == expected_weight
        assert w[idx * 2 + 1] == expected_weight
@pytest.mark.parametrize(
    "l1_type", ["auto", "aic", "bic", "num_features(3)", 0.2]
)
def test_l1_regularization(exact_shap_regression_dataset, l1_type):
    # currently this is a code test, not mathematical results test.
    # Hard to test without falling into testing the underlying algorithms
    # which are out of this unit test scope.
    mask, weights = cuml.explainer.kernel_shap._powerset(
        5, 2, 2**5 - 2, full_powerset=True
    )
    targets = cp.random.rand(mask.shape[0])

    nonzero = cuml.explainer.kernel_shap._l1_regularization(
        X=cp.asarray(mask).astype(np.float32),
        y=cp.asarray(targets).astype(np.float32),
        weights=cp.asarray(weights),
        expected_value=0.0,
        fx=0.0,
        link_fn=cuml.explainer.common.identity,
        l1_reg=l1_type,
    )
    assert isinstance(nonzero, cp.ndarray)
@pytest.mark.skip(reason="Currently failing for unknown reasons.")
def test_typeerror_input():
    """Regression test: KernelExplainer.shap_values must not raise a
    TypeError on plain array input."""
    X, y = make_regression(n_samples=100, n_features=10, random_state=10)
    clf = Lasso()
    clf.fit(X, y)
    exp = KernelExplainer(model=clf.predict, data=X, nsamples=10)
    # Idiom fix: the previous `try/except TypeError: assert False` hid the
    # traceback. Calling shap_values directly makes pytest report any
    # unexpected exception (including TypeError) as a failure with full
    # context.
    _ = exp.shap_values(X)
###############################################################################
# Precomputed results #
# and testing variables #
###############################################################################
# "golden" results obtained by running brute force Kernel SHAP notebook from
# https://github.com/slundberg/shap/blob/master/notebooks/kernel_explainer/Simple%20Kernel%20SHAP.ipynb
# and confirmed with SHAP package.
# Maps cuML regressor class -> 3 explained rows x 11 per-feature golden
# SHAP values, shared by the cuML model and its sklearn counterpart.
golden_regression_results = {
    cuml.LinearRegression: [
        [
            -1.3628216e00,
            -1.0234555e02,
            1.3433075e-01,
            -6.1763966e01,
            2.6035309e-04,
            -3.4455872e00,
            -1.0159061e02,
            3.4058199e00,
            4.1598396e01,
            7.2152481e01,
            -2.1964417e00,
        ],
        [
            -8.6558792e01,
            8.9456577e00,
            -3.6405910e01,
            1.0574381e01,
            -4.1580200e-04,
            -5.8939896e01,
            4.8407948e01,
            1.4475842e00,
            -2.0742226e01,
            6.6378265e01,
            -3.5134201e01,
        ],
        [
            -1.3722158e01,
            -2.9430325e01,
            -8.0079269e01,
            1.2096907e02,
            1.0681152e-03,
            -5.4266449e01,
            -3.1012087e01,
            -7.9640961e-01,
            7.7072838e01,
            1.5370981e01,
            -2.4032040e01,
        ],
    ],
    cuml.KNeighborsRegressor: [
        [
            4.3210926,
            -47.497078,
            -4.523407,
            -35.49657,
            -5.5174675,
            -14.158726,
            -51.303787,
            -2.6457424,
            12.230529,
            52.345207,
            6.3014755,
        ],
        [
            -52.036957,
            2.4158602,
            -20.302296,
            15.428952,
            5.9823637,
            -20.046719,
            22.46046,
            -4.762917,
            -6.20145,
            37.457417,
            5.3511925,
        ],
        [
            -8.803419,
            -7.4095736,
            -48.113777,
            57.21296,
            1.0490589,
            -37.94751,
            -20.748789,
            -0.22258139,
            28.204493,
            4.5492225,
            0.5797138,
        ],
    ],
    cuml.SVR: [
        [
            3.53810340e-02,
            -8.11021507e-01,
            3.34369540e-02,
            -8.68727207e-01,
            1.06804073e-03,
            -1.14741415e-01,
            -1.35545099e00,
            3.87545109e-01,
            4.43311602e-01,
            1.08623052e00,
            2.65314579e-02,
        ],
        [
            -1.39247358e00,
            5.91157824e-02,
            -4.33764964e-01,
            1.04503572e-01,
            -4.41753864e-03,
            -1.09017754e00,
            5.90143979e-01,
            1.08445108e-01,
            -2.26831138e-01,
            9.69056726e-01,
            -1.18437767e-01,
        ],
        [
            -1.28573015e-01,
            -2.33658075e-01,
            -1.02735841e00,
            1.47447693e00,
            -1.99043751e-03,
            -1.11328888e00,
            -4.66209412e-01,
            -1.02243885e-01,
            8.18460345e-01,
            2.20144764e-01,
            -9.62769389e-02,
        ],
    ],
}
# For testing predict proba, we get one array of shap values per class
# (binary problem: the second class's values are the negation of the first).
golden_classification_result = [
    [
        0.00152159,
        0.00247397,
        0.00250474,
        0.00155965,
        0.0113184,
        -0.01153999,
        0.19297145,
        0.17027254,
        0.00850102,
        -0.01293354,
        -0.00088981,
    ],
    [
        -0.00152159,
        -0.00247397,
        -0.00250474,
        -0.00155965,
        -0.0113184,
        0.01153999,
        -0.19297145,
        -0.17027254,
        -0.00850102,
        0.01293354,
        0.00088981,
    ],
]
# Golden SHAP values for 2 explained rows x 8 features of the housing
# dataset, used by test_kernel_housing_dataset.
housing_regression_result = np.array(
    [
        [
            -0.73860609,
            0.00557072,
            -0.05829297,
            -0.01582018,
            -0.01010366,
            -0.23167623,
            -0.470639,
            -0.07584473,
        ],
        [
            -0.6410764,
            0.01369913,
            -0.09492759,
            0.02654463,
            -0.00911134,
            -0.05953105,
            -0.51266433,
            -0.0853608,
        ],
    ],
    dtype=np.float32,
)
# cuML estimator class -> equivalent scikit-learn estimator class.
cuml_skl_class_dict = {
    cuml.LinearRegression: sklearn.linear_model.LinearRegression,
    cuml.KNeighborsRegressor: sklearn.neighbors.KNeighborsRegressor,
    cuml.SVR: sklearn.svm.SVR,
}
# results for individual function unit tests
# Expected _shapley_kernel(10, i) for i in 0..10 (endpoints use the large
# sentinel weight 10000).
shapley_kernel_results = [
    10000,
    0.1,
    0.0125,
    0.0035714285714285713,
    0.0017857142857142857,
    0.0014285714285714286,
    0.0017857142857142857,
    0.0035714285714285713,
    0.0125,
    0.1,
    10000,
]
# Expected binary rows of the full powerset over 5 features (subset sizes
# 1..4; the empty and full subsets are excluded).
full_powerset_result = [
    [1.0, 0.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 1.0],
    [1.0, 1.0, 0.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 1.0, 0.0],
    [1.0, 0.0, 0.0, 0.0, 1.0],
    [0.0, 1.0, 1.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 1.0, 0.0],
    [0.0, 1.0, 0.0, 0.0, 1.0],
    [0.0, 0.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 1.0, 0.0, 1.0],
    [0.0, 0.0, 0.0, 1.0, 1.0],
    [1.0, 1.0, 1.0, 0.0, 0.0],
    [1.0, 1.0, 0.0, 1.0, 0.0],
    [1.0, 1.0, 0.0, 0.0, 1.0],
    [1.0, 0.0, 1.0, 1.0, 0.0],
    [1.0, 0.0, 1.0, 0.0, 1.0],
    [1.0, 0.0, 0.0, 1.0, 1.0],
    [0.0, 1.0, 1.0, 1.0, 0.0],
    [0.0, 1.0, 1.0, 0.0, 1.0],
    [0.0, 1.0, 0.0, 1.0, 1.0],
    [0.0, 0.0, 1.0, 1.0, 1.0],
    [1.0, 1.0, 1.0, 1.0, 0.0],
    [1.0, 1.0, 1.0, 0.0, 1.0],
    [1.0, 1.0, 0.0, 1.0, 1.0],
    [1.0, 0.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 1.0, 1.0, 1.0],
]
# Shapley kernel weight for each row of full_powerset_result above.
full_powerset_weight_result = np.array(
    [
        0.2,
        0.2,
        0.2,
        0.2,
        0.2,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.06666667,
        0.2,
        0.2,
        0.2,
        0.2,
        0.2,
    ],
    dtype=np.float32,
)
# Expected rows for a partial powerset over 6 features: each subset is
# immediately followed by its complement (paired sampling).
partial_powerset_result = [
    [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
    [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 0.0, 1.0, 1.0, 1.0],
    [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
    [1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
    [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
    [1.0, 1.0, 1.0, 1.0, 0.0, 1.0],
    [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    [1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
    [1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
    [1.0, 0.0, 1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 1.0, 1.0, 1.0],
    [1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
    [0.0, 1.0, 1.0, 0.0, 1.0, 1.0],
    [1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
    [0.0, 1.0, 1.0, 1.0, 0.0, 1.0],
    [1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    [0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
    [0.0, 1.0, 1.0, 0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 1.0, 1.0, 1.0],
    [0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 0.0, 1.0, 1.0],
    [0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
    [1.0, 0.0, 1.0, 1.0, 0.0, 1.0],
    [0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
    [1.0, 0.0, 1.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 1.0, 1.0, 0.0, 0.0],
    [1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
    [0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
    [1.0, 1.0, 0.0, 1.0, 0.0, 1.0],
    [0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
    [1.0, 1.0, 0.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
    [1.0, 1.0, 1.0, 0.0, 0.0, 1.0],
    [0.0, 0.0, 0.0, 1.0, 0.0, 1.0],
    [1.0, 1.0, 1.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 1.0, 1.0],
    [1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
]
# Shapley kernel weight for each row of partial_powerset_result above.
partial_powerset_weight_result = np.array(
    [
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.16666667,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
        0.041666668,
    ],
    dtype=np.float32,
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_gpu_treeshap.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import as_type
import cuml
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.ensemble import RandomForestRegressor as curfr
from cuml.common.exceptions import NotFittedError
from cuml.internals.import_utils import has_sklearn
from cuml.internals.import_utils import has_lightgbm, has_shap
from cuml.explainer.tree_shap import TreeExplainer
from hypothesis import given, settings, assume, HealthCheck, strategies as st
from cuml.internals.safe_imports import gpu_only_import
import json
import pytest
import treelite
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cp = gpu_only_import("cupy")
cudf = gpu_only_import("cudf")
# Skip the entire module: these tests are disabled while issue #4729 is open.
pytestmark = pytest.mark.skip
# See issue #4729
# Xgboost disabled due to CI failures
xgb = None
def has_xgboost():
    # Hard-coded False while xgboost is disabled in CI (see issue #4729);
    # all xgboost-dependent tests below are skipped via skipif.
    return False
if has_lightgbm():
import lightgbm as lgb
if has_shap():
import shap
if has_sklearn():
from sklearn.datasets import make_regression, make_classification
from sklearn.ensemble import RandomForestRegressor as sklrfr
from sklearn.ensemble import RandomForestClassifier as sklrfc
def make_classification_with_categorical(
    *,
    n_samples,
    n_features,
    n_categorical,
    n_informative,
    n_redundant,
    n_repeated,
    n_classes,
    random_state,
    numeric_dtype=np.float32,
):
    """Generate a classification problem whose first ``n_categorical``
    columns are converted to pandas categoricals via quantile binning.

    Returns a pandas DataFrame ``X`` and a numpy array ``y``; every class
    label is guaranteed to appear at least once in ``y``.
    """
    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_redundant=n_redundant,
        n_repeated=n_repeated,
        n_classes=n_classes,
        random_state=random_state,
        n_clusters_per_class=min(2, n_features),
    )
    X, y = X.astype(numeric_dtype), y.astype(numeric_dtype)

    # Turn the leading columns categorical by binning into (up to) quartiles.
    n_rows = np.atleast_1d(y).shape[0]
    X = pd.DataFrame({f"f{i}": X[:, i] for i in range(n_features)})
    n_bins = min(4, n_rows)
    for col_idx in range(n_categorical):
        name = f"f{col_idx}"
        X[name] = pd.qcut(X[name], n_bins, labels=range(n_bins))

    # make sure each target exists
    y[0:n_classes] = range(n_classes)
    assert len(np.unique(y)) == n_classes
    return X, y
def make_regression_with_categorical(
    *,
    n_samples,
    n_features,
    n_categorical,
    n_informative,
    random_state,
    numeric_dtype=np.float32,
    n_targets=1,
):
    """Generate a regression problem whose first ``n_categorical`` columns
    are converted to pandas categoricals via quantile binning.

    Returns a pandas DataFrame ``X`` and a numpy array ``y``.
    """
    X, y = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_targets=n_targets,
        random_state=random_state,
    )
    X, y = X.astype(numeric_dtype), y.astype(numeric_dtype)

    # Turn the leading columns categorical by binning into (up to) quartiles.
    n_rows = np.atleast_1d(y).shape[0]
    X = pd.DataFrame({f"f{i}": X[:, i] for i in range(n_features)})
    n_bins = min(4, n_rows)
    for col_idx in range(n_categorical):
        name = f"f{col_idx}"
        X[name] = pd.qcut(X[name], n_bins, labels=range(n_bins))
    return X, y
def count_categorical_split(tl_model):
    """Return the number of categorical split nodes in a Treelite model.

    Parses the model's JSON dump and counts every node whose
    ``split_type`` field equals ``"categorical"``.
    """
    dump = json.loads(tl_model.dump_as_json(pretty_print=False))
    return sum(
        1
        for tree in dump["trees"]
        for node in tree["nodes"]
        if node.get("split_type") == "categorical"
    )
@pytest.mark.parametrize(
    "objective",
    [
        "reg:linear",
        "reg:squarederror",
        "reg:squaredlogerror",
        "reg:pseudohubererror",
    ],
)
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_xgb_regressor(objective):
    """TreeExplainer on an XGBoost regressor (via treelite) must match the
    reference shap package, including on inputs containing NaN."""
    X, y = make_regression(
        n_samples=100,
        n_features=8,
        n_informative=8,
        n_targets=1,
        random_state=2021,
    )
    # Ensure that the label exceeds -1
    y += (-0.5) - np.min(y)
    assert np.all(y > -1)
    X, y = X.astype(np.float32), y.astype(np.float32)

    dtrain = xgb.DMatrix(X, label=y)
    booster_params = {
        "objective": objective,
        "base_score": 0.5,
        "seed": 0,
        "max_depth": 6,
        "tree_method": "gpu_hist",
        "predictor": "gpu_predictor",
    }
    xgb_model = xgb.train(
        booster_params,
        dtrain,
        num_boost_round=10,
        evals=[(dtrain, "train")],
    )
    tl_model = treelite.Model.from_xgboost(xgb_model)

    # Corrupt ~10% of the entries with NaN to exercise missing-value paths.
    X_test = X.copy()
    rng = np.random.default_rng(seed=0)
    nan_positions = rng.choice(
        X.size, size=int(np.floor(X.size * 0.1)), replace=False
    )
    X_test.ravel()[nan_positions] = np.nan

    explainer = TreeExplainer(model=tl_model)
    out = explainer.shap_values(X_test)

    ref_explainer = shap.explainers.Tree(model=xgb_model)
    np.testing.assert_almost_equal(
        out, ref_explainer.shap_values(X_test), decimal=5
    )
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_explainer.expected_value, decimal=5
    )
@pytest.mark.parametrize(
    "objective,n_classes",
    [
        ("binary:logistic", 2),
        ("binary:hinge", 2),
        ("binary:logitraw", 2),
        ("count:poisson", 4),
        ("rank:pairwise", 5),
        ("rank:ndcg", 5),
        ("rank:map", 5),
        ("multi:softmax", 5),
        ("multi:softprob", 5),
    ],
    ids=[
        "binary:logistic",
        "binary:hinge",
        "binary:logitraw",
        "count:poisson",
        "rank:pairwise",
        "rank:ndcg",
        "rank:map",
        "multi:softmax",
        "multi:softprob",
    ],
)
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_xgb_classifier(objective, n_classes):
    """TreeExplainer must agree with the reference shap package for XGBoost
    classifiers across many objectives, including NaN inputs."""
    X, y = make_classification(
        n_samples=100,
        n_features=8,
        n_informative=8,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        random_state=2021,
    )
    X, y = X.astype(np.float32), y.astype(np.float32)

    dtrain = xgb.DMatrix(X, label=y)
    booster_params = {
        "objective": objective,
        "base_score": 0.5,
        "seed": 0,
        "max_depth": 6,
        "tree_method": "gpu_hist",
        "predictor": "gpu_predictor",
    }
    if objective.startswith("rank:"):
        # Ranking objectives require query groups: 10 groups of 10 rows.
        dtrain.set_group([10] * 10)
    if n_classes > 2 and objective.startswith("multi:"):
        booster_params["num_class"] = n_classes
    xgb_model = xgb.train(booster_params, dtrain=dtrain, num_boost_round=10)

    # Corrupt ~10% of the entries with NaN to exercise missing-value paths.
    X_test = X.copy()
    rng = np.random.default_rng(seed=0)
    nan_positions = rng.choice(
        X.size, size=int(np.floor(X.size * 0.1)), replace=False
    )
    X_test.ravel()[nan_positions] = np.nan

    explainer = TreeExplainer(model=xgb_model)
    out = explainer.shap_values(X_test)

    ref_explainer = shap.explainers.Tree(model=xgb_model)
    np.testing.assert_almost_equal(
        out, ref_explainer.shap_values(X_test), decimal=5
    )
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_explainer.expected_value, decimal=5
    )
def test_degenerate_cases():
    """Un-fitted models must be rejected and depth-0 (constant) trees must
    produce all-zero SHAP values with the constant as expected value."""
    cuml_model = curfr(
        max_features=1.0,
        max_samples=0.1,
        n_bins=128,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=10,
        max_leaves=-1,
        max_depth=16,
        accuracy_metric="mse",
    )
    # Attempt to import un-fitted model
    with pytest.raises(NotFittedError):
        TreeExplainer(model=cuml_model)

    # Constant labels force depth-0 trees.
    rng = np.random.default_rng(seed=0)
    X = rng.standard_normal(size=(100, 8), dtype=np.float32)
    y = np.ones(shape=(100,), dtype=np.float32)
    cuml_model.fit(X, y)
    explainer = TreeExplainer(model=cuml_model)
    out = explainer.shap_values(X)
    # Since the output is always 1.0 no matter the input, SHAP values for all
    # features are zero, as feature values don't have any effect on the output.
    # The bias (expected_value) is 1.0.
    assert np.all(out == 0)
    assert explainer.expected_value == 1.0
@pytest.mark.parametrize("input_type", ["numpy", "cupy", "cudf"])
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_cuml_rf_regressor(input_type):
    """Additivity check for a cuML RF regressor: SHAP values plus the
    expected value must sum to the prediction, for every input container."""
    n_samples = 100
    X, y = make_regression(
        n_samples=n_samples,
        n_features=8,
        n_informative=8,
        n_targets=1,
        random_state=2021,
    )
    X, y = X.astype(np.float32), y.astype(np.float32)
    if input_type == "cupy":
        X, y = cp.array(X), cp.array(y)
    elif input_type == "cudf":
        X, y = cudf.DataFrame(X), cudf.Series(y)

    cuml_model = curfr(
        max_features=1.0,
        max_samples=0.1,
        n_bins=128,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=10,
        max_leaves=-1,
        max_depth=16,
        accuracy_metric="mse",
    )
    cuml_model.fit(X, y)
    pred = cuml_model.predict(X)

    explainer = TreeExplainer(model=cuml_model)
    out = explainer.shap_values(X)

    # Normalize everything back to host numpy arrays for comparison.
    if input_type == "numpy":
        expected_value = explainer.expected_value
    else:
        pred = pred.to_numpy() if input_type == "cudf" else pred.get()
        out = out.get()
        expected_value = explainer.expected_value.get()

    # SHAP values should add up to predicted score
    np.testing.assert_almost_equal(
        np.sum(out, axis=1) + expected_value, pred, decimal=4
    )
@pytest.mark.parametrize("input_type", ["numpy", "cupy", "cudf"])
@pytest.mark.parametrize("n_classes", [2, 5])
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_cuml_rf_classifier(n_classes, input_type):
    """Additivity check for a cuML RF classifier: per class, SHAP values
    plus that class's expected value must sum to predict_proba output."""
    n_samples = 100
    X, y = make_classification(
        n_samples=n_samples,
        n_features=8,
        n_informative=8,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        random_state=2021,
    )
    X, y = X.astype(np.float32), y.astype(np.float32)
    if input_type == "cupy":
        X, y = cp.array(X), cp.array(y)
    elif input_type == "cudf":
        X, y = cudf.DataFrame(X), cudf.Series(y)

    cuml_model = curfc(
        max_features=1.0,
        max_samples=0.1,
        n_bins=128,
        min_samples_leaf=2,
        random_state=123,
        n_streams=1,
        n_estimators=10,
        max_leaves=-1,
        max_depth=16,
        accuracy_metric="mse",
    )
    cuml_model.fit(X, y)
    pred = cuml_model.predict_proba(X)

    explainer = TreeExplainer(model=cuml_model)
    out = explainer.shap_values(X)

    # Normalize everything back to host numpy arrays for comparison.
    if input_type == "numpy":
        expected_value = explainer.expected_value
    else:
        pred = pred.to_numpy() if input_type == "cudf" else pred.get()
        out = out.get()
        expected_value = explainer.expected_value.get()

    # SHAP values should add up to predicted score
    expected_value = expected_value.reshape(-1, 1)
    shap_sum = np.sum(out, axis=2) + np.tile(expected_value, (1, n_samples))
    np.testing.assert_almost_equal(
        shap_sum, np.transpose(pred, (1, 0)), decimal=4
    )
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_sklearn_rf_regressor():
    """TreeExplainer must match the reference shap package on a
    scikit-learn random forest regressor."""
    X, y = make_regression(
        n_samples=100,
        n_features=8,
        n_informative=8,
        n_targets=1,
        random_state=2021,
    )
    X, y = X.astype(np.float32), y.astype(np.float32)
    skl_model = sklrfr(
        max_features=1.0,
        max_samples=0.1,
        min_samples_leaf=2,
        random_state=123,
        n_estimators=10,
        max_depth=16,
    ).fit(X, y)

    explainer = TreeExplainer(model=skl_model)
    ref_explainer = shap.explainers.Tree(model=skl_model)
    np.testing.assert_almost_equal(
        explainer.shap_values(X), ref_explainer.shap_values(X), decimal=5
    )
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_explainer.expected_value, decimal=5
    )
@pytest.mark.parametrize("n_classes", [2, 3, 5])
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_sklearn_rf_classifier(n_classes):
    """TreeExplainer must match the reference shap package on a
    scikit-learn random forest classifier (positive class only for
    binary problems)."""
    X, y = make_classification(
        n_samples=100,
        n_features=8,
        n_informative=8,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        random_state=2021,
    )
    X, y = X.astype(np.float32), y.astype(np.float32)
    skl_model = sklrfc(
        max_features=1.0,
        max_samples=0.1,
        min_samples_leaf=2,
        random_state=123,
        n_estimators=10,
        max_depth=16,
    ).fit(X, y)

    explainer = TreeExplainer(model=skl_model)
    out = explainer.shap_values(X)

    ref_explainer = shap.explainers.Tree(model=skl_model)
    correct_out = np.array(ref_explainer.shap_values(X))
    expected_value = ref_explainer.expected_value
    if n_classes == 2:
        # The reference explainer reports both classes for a binary
        # problem; cuML reports only the positive class.
        correct_out = correct_out[1, :, :]
        expected_value = expected_value[1:]

    np.testing.assert_almost_equal(out, correct_out, decimal=5)
    np.testing.assert_almost_equal(
        explainer.expected_value, expected_value, decimal=5
    )
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
def test_xgb_toy_categorical():
    """TreeExplainer on a tiny XGBoost model with one categorical feature
    must match XGBoost's own pred_contribs output."""
    X = pd.DataFrame(
        {
            "dummy": np.zeros(5, dtype=np.float32),
            "x": np.array([0, 1, 2, 3, 4], dtype=np.int32),
        }
    )
    y = np.array([0, 0, 1, 1, 1], dtype=np.float32)
    X["x"] = X["x"].astype("category")
    dtrain = xgb.DMatrix(X, y, enable_categorical=True)
    booster_params = {
        "tree_method": "gpu_hist",
        "eval_metric": "error",
        "objective": "binary:logistic",
        "max_depth": 2,
        "min_child_weight": 0,
        "lambda": 0,
    }
    xgb_model = xgb.train(
        booster_params, dtrain, num_boost_round=1, evals=[(dtrain, "train")]
    )

    explainer = TreeExplainer(model=xgb_model)
    out = explainer.shap_values(X)

    # pred_contribs returns SHAP values with a trailing bias column.
    contribs = xgb_model.predict(dtrain, pred_contribs=True)
    np.testing.assert_almost_equal(out, contribs[:, :-1], decimal=5)
    np.testing.assert_almost_equal(
        explainer.expected_value, contribs[0, -1], decimal=5
    )
@pytest.mark.parametrize("n_classes", [2, 3])
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_xgb_classifier_with_categorical(n_classes):
    """TreeExplainer vs. XGBoost's own ``pred_contribs`` on a classifier
    trained with categorical features, evaluated on inputs containing NaN."""
    n_samples = 100
    n_features = 8
    X, y = make_classification_with_categorical(
        n_samples=n_samples,
        n_features=n_features,
        n_categorical=4,
        n_informative=n_features,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        random_state=2022,
    )
    dtrain = xgb.DMatrix(X, y, enable_categorical=True)
    params = {
        "tree_method": "gpu_hist",
        "max_depth": 6,
        "base_score": 0.5,
        "seed": 0,
        "predictor": "gpu_predictor",
    }
    if n_classes == 2:
        params["objective"] = "binary:logistic"
        params["eval_metric"] = "logloss"
    else:
        params["objective"] = "multi:softprob"
        params["eval_metric"] = "mlogloss"
        params["num_class"] = n_classes
    xgb_model = xgb.train(
        params, dtrain, num_boost_round=10, evals=[(dtrain, "train")]
    )
    # Sanity check: the trained model must actually contain categorical splits.
    assert count_categorical_split(treelite.Model.from_xgboost(xgb_model)) > 0

    # Insert NaN randomly into X
    X_test = X.values.copy()
    n_nan = int(np.floor(X.size * 0.1))
    rng = np.random.default_rng(seed=0)
    index_nan = rng.choice(X.size, size=n_nan, replace=False)
    X_test.ravel()[index_nan] = np.nan

    explainer = TreeExplainer(model=xgb_model)
    out = explainer.shap_values(X_test)

    # pred_contribs yields SHAP values with a trailing bias column.
    dtest = xgb.DMatrix(X_test)
    ref_out = xgb_model.predict(
        dtest, pred_contribs=True, validate_features=False
    )
    if n_classes == 2:
        # Binary: (rows, features + 1) -> split off the bias column.
        ref_out, ref_expected_value = ref_out[:, :-1], ref_out[0, -1]
    else:
        # Multi-class: reorder to (classes, rows, features + 1) before
        # splitting off the per-class bias column.
        ref_out = ref_out.transpose((1, 0, 2))
        ref_out, ref_expected_value = ref_out[:, :, :-1], ref_out[:, 0, -1]
    np.testing.assert_almost_equal(out, ref_out, decimal=5)
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_expected_value, decimal=5
    )
@pytest.mark.skipif(not has_xgboost(), reason="need to install xgboost")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
def test_xgb_regressor_with_categorical():
    """TreeExplainer vs. XGBoost's own pred_contribs on a regressor trained
    with categorical features."""
    n_features = 8
    X, y = make_regression_with_categorical(
        n_samples=100,
        n_features=n_features,
        n_categorical=4,
        n_informative=n_features,
        random_state=2022,
    )
    dtrain = xgb.DMatrix(X, y, enable_categorical=True)
    booster_params = {
        "tree_method": "gpu_hist",
        "max_depth": 6,
        "base_score": 0.5,
        "seed": 0,
        "predictor": "gpu_predictor",
        "objective": "reg:squarederror",
        "eval_metric": "rmse",
    }
    xgb_model = xgb.train(
        booster_params, dtrain, num_boost_round=10, evals=[(dtrain, "train")]
    )
    # The trained model must actually contain categorical splits.
    assert count_categorical_split(treelite.Model.from_xgboost(xgb_model)) > 0

    explainer = TreeExplainer(model=xgb_model)
    out = explainer.shap_values(X)

    # pred_contribs returns SHAP values with a trailing bias column.
    contribs = xgb_model.predict(dtrain, pred_contribs=True)
    shap_ref, expected_ref = contribs[:, :-1], contribs[0, -1]
    np.testing.assert_almost_equal(out, shap_ref, decimal=5)
    np.testing.assert_almost_equal(
        explainer.expected_value, expected_ref, decimal=5
    )
@pytest.mark.skipif(not has_lightgbm(), reason="need to install lightgbm")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
def test_lightgbm_regressor_with_categorical():
    """TreeExplainer must match the reference shap package on a LightGBM
    regressor trained with categorical features."""
    n_features = 8
    n_categorical = 8
    X, y = make_regression_with_categorical(
        n_samples=100,
        n_features=n_features,
        n_categorical=n_categorical,
        n_informative=n_features,
        random_state=2022,
    )
    dtrain = lgb.Dataset(X, label=y, categorical_feature=range(n_categorical))
    booster_params = {
        "num_leaves": 64,
        "seed": 0,
        "objective": "regression",
        "metric": "rmse",
        "min_data_per_group": 1,
    }
    lgb_model = lgb.train(
        booster_params,
        dtrain,
        num_boost_round=10,
        valid_sets=[dtrain],
        valid_names=["train"],
    )
    # The trained model must actually contain categorical splits.
    assert count_categorical_split(treelite.Model.from_lightgbm(lgb_model)) > 0

    explainer = TreeExplainer(model=lgb_model)
    ref_explainer = shap.explainers.Tree(model=lgb_model)
    np.testing.assert_almost_equal(
        explainer.shap_values(X), ref_explainer.shap_values(X), decimal=5
    )
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_explainer.expected_value, decimal=5
    )
@pytest.mark.parametrize("n_classes", [2, 3])
@pytest.mark.skipif(not has_lightgbm(), reason="need to install lightgbm")
@pytest.mark.skipif(not has_sklearn(), reason="need to install scikit-learn")
@pytest.mark.skipif(not has_shap(), reason="need to install shap")
def test_lightgbm_classifier_with_categorical(n_classes):
    """TreeExplainer must match the reference shap package on a LightGBM
    classifier trained with categorical features, on inputs with NaN."""
    n_samples = 100
    n_features = 8
    n_categorical = 8
    X, y = make_classification_with_categorical(
        n_samples=n_samples,
        n_features=n_features,
        n_categorical=n_categorical,
        n_informative=n_features,
        n_redundant=0,
        n_repeated=0,
        n_classes=n_classes,
        random_state=2022,
    )
    dtrain = lgb.Dataset(X, label=y, categorical_feature=range(n_categorical))
    params = {"num_leaves": 64, "seed": 0, "min_data_per_group": 1}
    if n_classes == 2:
        params["objective"] = "binary"
        params["metric"] = "binary_logloss"
    else:
        params["objective"] = "multiclass"
        params["metric"] = "multi_logloss"
        params["num_class"] = n_classes
    lgb_model = lgb.train(
        params,
        dtrain,
        num_boost_round=10,
        valid_sets=[dtrain],
        valid_names=["train"],
    )
    # Sanity check: the trained model must actually contain categorical splits.
    assert count_categorical_split(treelite.Model.from_lightgbm(lgb_model)) > 0

    # Insert NaN randomly into X
    X_test = X.values.copy()
    n_nan = int(np.floor(X.size * 0.1))
    rng = np.random.default_rng(seed=0)
    index_nan = rng.choice(X.size, size=n_nan, replace=False)
    X_test.ravel()[index_nan] = np.nan

    explainer = TreeExplainer(model=lgb_model)
    out = explainer.shap_values(X_test)

    ref_explainer = shap.explainers.Tree(model=lgb_model)
    ref_out = np.array(ref_explainer.shap_values(X_test))
    if n_classes == 2:
        # Binary: the reference reports both classes; cuML reports only the
        # positive class, so compare against class index 1.
        ref_out = ref_out[1, :, :]
        ref_expected_value = ref_explainer.expected_value[1]
    else:
        ref_expected_value = ref_explainer.expected_value
    np.testing.assert_almost_equal(out, ref_out, decimal=5)
    np.testing.assert_almost_equal(
        explainer.expected_value, ref_expected_value, decimal=5
    )
def learn_model(draw, X, y, task, learner, n_estimators, n_targets):
    """Train the requested model and return it with its raw predictions.

    Parameters
    ----------
    draw : hypothesis draw function used to sample booleans/objectives
    X, y : training data (DataFrame/array as produced by shap_strategy)
    task : "regression" or "classification"
    learner : one of "xgb", "rf", "skl_rf", "lgbm"
    n_estimators : number of trees / boosting rounds
    n_targets : number of outputs (classes for classification)

    Returns
    -------
    (model, pred) where ``pred`` is the margin / probability output the
    SHAP values are expected to sum to.
    """
    # for lgbm or xgb return the booster or sklearn object?
    use_sklearn_estimator = draw(st.booleans())
    if learner == "xgb":
        assume(has_xgboost())
        if task == "regression":
            objective = draw(
                st.sampled_from(["reg:squarederror", "reg:pseudohubererror"])
            )
            model = xgb.XGBRegressor(
                n_estimators=n_estimators,
                tree_method="gpu_hist",
                objective=objective,
                enable_categorical=True,
                verbosity=0,
            ).fit(X, y)
        elif task == "classification":
            valid_objectives = [
                "binary:logistic",
                "binary:hinge",
                "binary:logitraw",
                "count:poisson",
            ]
            if n_targets > 2:
                valid_objectives += [
                    "rank:pairwise",
                    "rank:ndcg",
                    "rank:map",
                    "multi:softmax",
                    "multi:softprob",
                ]
            objective = draw(st.sampled_from(valid_objectives))
            model = xgb.XGBClassifier(
                n_estimators=n_estimators,
                tree_method="gpu_hist",
                objective=objective,
                enable_categorical=True,
                verbosity=0,
            ).fit(X, y)
        # Margin output (not transformed probability) matches SHAP sums
        pred = model.predict(X, output_margin=True)
        if not use_sklearn_estimator:
            model = model.get_booster()
        return model, pred
    elif learner == "rf":
        # NOTE(review): this literal was "GPU " (with a stray trailing
        # space); normalized to "GPU". Confirm the old value only worked by
        # falling through to cuml's non-"CPU" branch.
        predict_model = "GPU" if y.dtype == np.float32 else "CPU"
        if task == "regression":
            model = cuml.ensemble.RandomForestRegressor(
                n_estimators=n_estimators
            )
            model.fit(X, y)
            pred = model.predict(X, predict_model=predict_model)
        elif task == "classification":
            model = cuml.ensemble.RandomForestClassifier(
                n_estimators=n_estimators
            )
            model.fit(X, y)
            pred = model.predict_proba(X)
        return model, pred
    elif learner == "skl_rf":
        assume(has_sklearn())
        if task == "regression":
            model = sklrfr(n_estimators=n_estimators)
            model.fit(X, y)
            pred = model.predict(X)
        elif task == "classification":
            model = sklrfc(n_estimators=n_estimators)
            model.fit(X, y)
            pred = model.predict_proba(X)
        return model, pred
    elif learner == "lgbm":
        assume(has_lightgbm())
        if task == "regression":
            model = lgb.LGBMRegressor(n_estimators=n_estimators).fit(X, y)
        elif task == "classification":
            model = lgb.LGBMClassifier(n_estimators=n_estimators).fit(X, y)
        pred = model.predict(X, raw_score=True)
        if not use_sklearn_estimator:
            model = model.booster_
        return model, pred
@st.composite
def shap_strategy(draw):
    """Hypothesis strategy producing (X, y, model, predictions) tuples.

    Covers regression/classification tasks across xgboost, cuml RF,
    sklearn RF and lightgbm learners, with optional categorical features
    and NaNs; known-broken combinations are filtered out with assume().
    """
    task = draw(st.sampled_from(["regression", "classification"]))
    n_estimators = draw(st.integers(1, 16))
    n_samples = draw(st.integers(2, 100))
    n_features = draw(st.integers(2, 100))
    learner = draw(st.sampled_from(["xgb", "rf", "skl_rf", "lgbm"]))
    # Only xgboost and lightgbm models handle categoricals/NaNs here
    supports_categorical = learner in ["xgb", "lgbm"]
    supports_nan = learner in ["xgb", "lgbm"]
    if task == "classification":
        n_targets = draw(st.integers(2, 5))
    else:
        n_targets = 1
    n_targets = min(n_targets, n_features)
    n_targets = min(n_targets, n_samples)
    has_categoricals = draw(st.booleans()) and supports_categorical
    dtype = draw(st.sampled_from([np.float32, np.float64]))
    if has_categoricals:
        n_categorical = draw(st.integers(1, n_features))
    else:
        n_categorical = 0
    has_nan = not has_categoricals and supports_nan
    # Filter issues and invalid examples here
    if task == "classification" and learner == "rf":
        # No way to predict_proba with RandomForestClassifier
        # trained on 64-bit data
        # https://github.com/rapidsai/cuml/issues/4663
        assume(dtype == np.float32)
    if task == "regression" and learner == "skl_rf":
        # multi-output regression not working
        # https://github.com/dmlc/treelite/issues/375
        assume(n_targets == 1)
    # treelite considers a binary classification model to have
    # n_classes=1, which produces an unexpected output shape
    # in the shap values
    if task == "classification" and learner == "skl_rf":
        assume(n_targets > 2)
    # ensure we get some variation in test datasets
    dataset_seed = draw(st.integers(1, 5))
    if task == "classification":
        X, y = make_classification_with_categorical(
            n_samples=n_samples,
            n_features=n_features,
            n_categorical=n_categorical,
            n_informative=n_features,
            n_redundant=0,
            n_repeated=0,
            random_state=dataset_seed,
            n_classes=n_targets,
            numeric_dtype=dtype,
        )
    else:
        X, y = make_regression_with_categorical(
            n_samples=n_samples,
            n_features=n_features,
            n_categorical=n_categorical,
            n_informative=n_features,
            random_state=dataset_seed,
            numeric_dtype=dtype,
            n_targets=n_targets,
        )
    if has_nan:
        # set about half the first column to nan
        X.iloc[np.random.randint(0, n_samples, n_samples // 2), 0] = np.nan
    assert len(X.select_dtypes(include="category").columns) == n_categorical
    model, preds = learn_model(
        draw, X, y, task, learner, n_estimators, n_targets
    )
    # convert any DataFrame categorical columns to numeric
    return X.astype(dtype), y.astype(dtype), model, preds
def check_efficiency(expected_value, pred, shap_values):
    """Assert the SHAP efficiency property: per-row SHAP values plus the
    expected value reconstruct the model prediction (tolerance 1e-3)."""
    if shap_values.ndim <= 2:
        reconstructed = shap_values.sum(axis=-1) + expected_value
        assert np.allclose(reconstructed, pred, 1e-3, 1e-3)
    else:
        # One slab of SHAP values per output/class.
        for target, slab in enumerate(shap_values):
            reconstructed = slab.sum(axis=-1) + expected_value[target]
            assert np.allclose(reconstructed, pred[:, target], 1e-3, 1e-3)
def check_efficiency_interactions(expected_value, pred, shap_values):
    """Assert the efficiency property for interaction values: summing each
    row's interaction matrix plus the expected value reconstructs the
    prediction (tolerance 1e-3)."""
    if shap_values.ndim <= 3:
        total = shap_values.sum(axis=(-2, -1)) + expected_value
        assert np.allclose(total, pred, 1e-3, 1e-3)
    else:
        # One interaction tensor per output/class.
        for target, slab in enumerate(shap_values):
            total = slab.sum(axis=(-2, -1)) + expected_value[target]
            assert np.allclose(total, pred[:, target], 1e-3, 1e-3)
# Generating input data/models can be time consuming and triggers
# hypothesis HealthCheck
@settings(
    deadline=None,
    max_examples=20,
    suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large],
)
@given(
    shap_strategy(),
    st.sampled_from(["shapley-interactions", "shapley-taylor"]),
)
def test_with_hypothesis(params, interactions_method):
    """Property-based test: SHAP values and interaction values must satisfy
    the efficiency property for randomly generated models/datasets, for
    both the path-dependent and interventional algorithms.
    """
    X, y, model, preds = params
    explainer = TreeExplainer(model=model)
    shap_values = explainer.shap_values(X)
    shap_interactions = explainer.shap_interaction_values(
        X, method=interactions_method
    )
    check_efficiency(explainer.expected_value, preds, shap_values)
    check_efficiency_interactions(
        explainer.expected_value, preds, shap_interactions
    )
    # Interventional
    # A small resampled background set keeps this algorithm cheap.
    explainer = TreeExplainer(
        model=model, data=X.sample(n=15, replace=True, random_state=0)
    )
    interventional_shap_values = explainer.shap_values(X)
    check_efficiency(
        explainer.expected_value, preds, interventional_shap_values
    )
def test_wrong_inputs():
    """Invalid TreeExplainer inputs must raise clear, specific errors."""
    X = np.array([[0.0, 2.0], [1.0, 0.5]])
    y = np.array([0, 1])
    model = cuml.ensemble.RandomForestRegressor().fit(X, y)
    # background/X different dtype
    with pytest.raises(
        ValueError, match="Expected background data" " to have the same dtype"
    ):
        explainer = TreeExplainer(model=model, data=X.astype(np.float32))
        explainer.shap_values(X)
    # background/X different number columns
    with pytest.raises(RuntimeError):
        explainer = TreeExplainer(model=model, data=X[:, 0:1])
        explainer.shap_values(X)
    # interaction values are only defined for the path-dependent algorithm
    with pytest.raises(
        ValueError,
        match="Interventional algorithm not"
        " supported for interactions. Please"
        " specify data as None in constructor.",
    ):
        explainer = TreeExplainer(model=model, data=X.astype(np.float32))
        explainer.shap_interaction_values(X)
    with pytest.raises(ValueError, match="Unknown interactions method."):
        explainer = TreeExplainer(model=model)
        explainer.shap_interaction_values(X, method="asdasd")
def test_different_algorithms_different_output():
    """The interventional vs. path-dependent algorithms, and the two
    interaction methods, must produce different outputs — i.e. the
    requested algorithm is actually the one being executed."""
    rng = np.random.RandomState(3)
    features = rng.normal(size=(100, 10))
    target = rng.normal(size=100)
    forest = cuml.ensemble.RandomForestRegressor().fit(features, target)
    with_background = TreeExplainer(model=forest, data=features)
    path_dependent = TreeExplainer(model=forest)
    assert not np.all(
        path_dependent.shap_values(features)
        == with_background.shap_values(features)
    )
    interactions = path_dependent.shap_interaction_values(
        features, method="shapley-interactions"
    )
    taylor = path_dependent.shap_interaction_values(
        features, method="shapley-taylor"
    )
    assert not np.all(interactions == taylor)
@settings(deadline=None)
@given(st.sampled_from(["numpy", "cupy", "cudf", "pandas"]))
def test_input_types(input_type):
    """Smoke test: TreeExplainer.shap_values accepts numpy/cupy/cudf/pandas
    inputs without crashing."""
    # simple test to not crash on different input data-frames
    X = np.array([[0.0, 2.0], [1.0, 0.5]])
    y = np.array([0, 1])
    X, y = as_type(input_type, X, y)
    model = cuml.ensemble.RandomForestRegressor().fit(X, y)
    explainer = TreeExplainer(model=model)
    explainer.shap_values(X)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_explainer_common.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pylibraft.common.handle import Handle
from sklearn.linear_model import LinearRegression as skreg
from cuml.datasets import make_regression
from cuml.testing.utils import ClassEnumerator
from cuml.explainer.common import model_func_call
from cuml.explainer.common import link_dict
from cuml.explainer.common import get_tag_from_model_func
from cuml.explainer.common import get_link_fn_from_str_or_fn
from cuml.explainer.common import get_handle_from_cuml_model_func
from cuml.explainer.common import get_dtype_from_model_func
from cuml.explainer.common import get_cai_ptr
from cuml import PCA
from cuml import LinearRegression as reg
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Enumerate every estimator class exposed by the cuml package so the helper
# functions below can be exercised against the full model surface.
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
# Estimator tags every cuml model is expected to define (mirroring
# scikit-learn's estimator tags); "preferred_input_order" is special-cased
# in test_get_tag_from_model_func because it may legitimately be absent.
_default_tags = [
    "preferred_input_order",
    "X_types_gpu",
    "non_deterministic",
    "requires_positive_X",
    "requires_positive_y",
    "X_types",
    "poor_score",
    "no_validation",
    "multioutput",
    "allow_nan",
    "stateless",
    "multilabel",
    "_skip_test",
    "_xfail_checks",
    "multioutput_only",
    "binary_only",
    "requires_fit",
    "requires_y",
    "pairwise",
]
def test_get_dtype_from_model_func():
    """get_dtype_from_model_func infers the training dtype from a fitted
    model's predict function and returns None when it cannot."""
    X, y = make_regression(
        n_samples=81,
        n_features=10,
        noise=0.1,
        random_state=42,
        dtype=np.float32,
    )
    # checking model with float32 dtype
    model_f32 = reg().fit(X, y)
    assert get_dtype_from_model_func(model_f32.predict) == np.float32
    # checking model with float64 dtype
    X = X.astype(np.float64)
    y = y.astype(np.float64)
    model_f64 = reg().fit(X, y)
    assert get_dtype_from_model_func(model_f64.predict) == np.float64
    # checking model that has not been fitted yet
    model_not_fit = reg()
    assert get_dtype_from_model_func(model_not_fit.predict) is None
    # checking arbitrary function
    def dummy_func(x):
        return x + x
    assert get_dtype_from_model_func(dummy_func) is None
def test_get_gpu_tag_from_model_func():
    """cuml models report their GPU tags via get_tag_from_model_func, while
    arbitrary functions and sklearn models fall back to the defaults."""
    # test getting the gpu tags from the model that we use in explainers
    model = reg()
    order = get_tag_from_model_func(
        func=model.predict, tag="preferred_input_order", default="C"
    )
    assert order == "F"
    out_types = get_tag_from_model_func(
        func=model.predict, tag="X_types_gpu", default=False
    )
    assert isinstance(out_types, list)
    assert "2darray" in out_types
    # checking arbitrary function
    # (dummy_func is the module-level helper defined at the bottom of this
    # file; it carries no cuml tags, so the defaults are returned)
    order = get_tag_from_model_func(
        func=dummy_func, tag="preferred_input_order", default="C"
    )
    assert order == "C"
    out_types = get_tag_from_model_func(
        func=dummy_func, tag="X_types_gpu", default=False
    )
    assert out_types is False
    model2 = skreg()
    out_types = get_tag_from_model_func(
        func=model2.predict, tag="X_types_gpu", default=False
    )
    assert out_types is False
@pytest.mark.parametrize("model", list(models.values()))
def test_get_tag_from_model_func(model):
    """Every default tag resolves to a real value (not the sentinel
    default) for every cuml model; preferred_input_order is exempt."""
    instance = create_dummy_model(model)
    sentinel = "FFF"
    for tag in _default_tags:
        value = get_tag_from_model_func(
            func=instance.get_param_names, tag=tag, default=sentinel
        )
        if tag == "preferred_input_order":
            continue
        assert value != sentinel
@pytest.mark.parametrize("model", list(models.values()))
def test_get_handle_from_cuml_model_func(model):
    """A raft Handle can be obtained (creating one on demand) from every
    cuml model."""
    mod = create_dummy_model(model)
    handle = get_handle_from_cuml_model_func(
        mod.get_param_names, create_new=True
    )
    assert isinstance(handle, Handle)
@pytest.mark.parametrize("create_new", [True, False])
def test_get_handle_from_dummy_func(create_new):
    """For a non-cuml function a Handle is returned only when create_new is
    requested; otherwise the result is not a Handle."""
    handle = get_handle_from_cuml_model_func(dummy_func, create_new=create_new)
    res = isinstance(handle, Handle)
    assert res == create_new
def test_model_func_call_gpu():
    """model_func_call returns cupy arrays for both GPU and CPU model
    functions and propagates TypeErrors when gpu_model is mislabeled."""
    X, y = make_regression(
        n_samples=81,
        n_features=10,
        noise=0.1,
        random_state=42,
        dtype=np.float32,
    )
    model = reg().fit(X, y)
    z = model_func_call(X=X, model_func=model.predict, gpu_model=True)
    assert isinstance(z, cp.ndarray)
    # CPU function gets a numpy array; the result still comes back as cupy
    z = model_func_call(
        X=cp.asnumpy(X), model_func=dummy_func, gpu_model=False
    )
    assert isinstance(z, cp.ndarray)
    # dummy_func only accepts numpy arrays, so claiming gpu_model must fail
    with pytest.raises(TypeError):
        z = model_func_call(X=X, model_func=dummy_func, gpu_model=True)
    model = PCA(n_components=10).fit(X)
    z = model_func_call(X=X, model_func=model.transform, gpu_model=True)
    assert isinstance(z, cp.ndarray)
def test_get_cai_ptr():
    """get_cai_ptr returns the CUDA-array-interface device pointer for GPU
    arrays and raises TypeError for host (numpy) arrays."""
    device_arr = cp.ones(10)
    expected = device_arr.__cuda_array_interface__["data"][0]
    assert get_cai_ptr(device_arr) == expected
    host_arr = np.ones(10)
    with pytest.raises(TypeError):
        get_cai_ptr(host_arr)
@pytest.mark.parametrize("link_function", ["identity", "logit"])
def test_get_link_fn_from_str(link_function):
    """String link names resolve to the callables registered in link_dict,
    including their inverse."""
    fn = get_link_fn_from_str_or_fn(link_function)
    a = cp.ones(10)
    assert cp.all(fn(a) == link_dict[link_function](a))
    assert cp.all(fn.inverse(a) == link_dict[link_function].inverse(a))
def test_get_link_fn_from_wrong_str():
    """Unknown link-function names raise ValueError."""
    with pytest.raises(ValueError):
        get_link_fn_from_str_or_fn("this_is_wrong")
def test_get_link_fn_from_fn():
    """A callable link function is accepted only once it defines an
    ``inverse`` attribute; the pair is then used as-is."""
    def dummylink(x):
        return 2 * x
    # check we raise error if link has no inverse
    with pytest.raises(TypeError):
        get_link_fn_from_str_or_fn(dummylink)
    def dummylink_inv(x):
        return x / 2
    dummylink.inverse = dummylink_inv
    fn = get_link_fn_from_str_or_fn(dummylink)
    assert fn(2) == 4
    assert fn.inverse(2) == 1
def create_dummy_model(model):
    """Instantiate ``model`` with no arguments, falling back to a length-10
    zero vector for estimators whose constructor requires an array."""
    try:
        return model()
    except TypeError:
        return model(np.zeros(10))
def dummy_func(x):
    """Row-wise mean of a NumPy array; rejects any other input type so the
    tests can exercise the gpu_model/TypeError paths."""
    if isinstance(x, np.ndarray):
        return np.mean(x, axis=1)
    raise TypeError("x must be a NumPy array")
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_shap_plotting.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import KernelExplainer as cuKE
from cuml import LinearRegression
# set this variable to True if you want to see the charts
show_plots = False
def get_shap_values(
    explainer, dataset, train_or_test="test", api_type="raw_shap_values"
):
    """Compute SHAP values for one split of ``dataset``.

    Parameters
    ----------
    explainer : object exposing ``shap_values(X)`` and ``__call__(X)``
    dataset : 4-tuple whose first two entries are the explained splits.
        NOTE(review): unpacked here as (X_test, X_train, ...) while the
        fixture unpacks (X_train, X_test, ...) — confirm the intended order.
    train_or_test : "test" or "train" — which split to explain
    api_type : "raw_shap_values" to call ``explainer.shap_values`` or
        "explanation_object" to call ``explainer(...)``

    Returns
    -------
    (shap_values, explained_dataset)

    Raises
    ------
    ValueError
        For an unrecognized ``train_or_test`` or ``api_type`` (previously
        these fell through to an UnboundLocalError).
    """
    X_test, X_train, _, _ = dataset
    if train_or_test == "test":
        explained_dataset = X_test
    elif train_or_test == "train":
        explained_dataset = X_train
    else:
        raise ValueError(
            "Unknown train_or_test value: {!r}".format(train_or_test)
        )
    if api_type == "raw_shap_values":
        shap_values = explainer.shap_values(explained_dataset)
    elif api_type == "explanation_object":
        shap_values = explainer(explained_dataset)
    else:
        raise ValueError("Unknown api_type value: {!r}".format(api_type))
    return shap_values, explained_dataset
@pytest.fixture(scope="module")
def explainer(exact_shap_regression_dataset):
    """Module-scoped KernelExplainer over a linear model fitted on the
    shared exact-SHAP regression dataset."""
    X_train, X_test, y_train, y_test = exact_shap_regression_dataset
    mod = LinearRegression().fit(X_train, y_train)
    explainer = cuKE(model=mod.predict, data=X_train)
    return explainer
def test_bar(explainer, exact_shap_regression_dataset):
    """Smoke-test shap's bar plot with a cuml-produced Explanation."""
    shap = pytest.importorskip("shap")
    values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        api_type="explanation_object",
    )
    shap.plots.bar(values, show=show_plots)
def test_decision_plot(explainer, exact_shap_regression_dataset):
    """Smoke-test shap.decision_plot with raw SHAP values."""
    shap = pytest.importorskip("shap")
    values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        api_type="raw_shap_values",
    )
    shap.decision_plot(0, values, show=show_plots)
def test_dependence_plot(explainer, exact_shap_regression_dataset):
    """Smoke-test shap.dependence_plot with raw SHAP values."""
    shap = pytest.importorskip("shap")
    shap_values, data = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        train_or_test="train",
        api_type="raw_shap_values",
    )
    shap.dependence_plot(0, shap_values, data, show=show_plots)
@pytest.mark.skip(
    reason="matplotlib has been updated. "
    "ref: https://github.com/rapidsai/cuml/issues/4893"
)
def test_heatmap(explainer, exact_shap_regression_dataset):
    """Smoke-test shap's heatmap plot (currently skipped, see reason)."""
    shap = pytest.importorskip("shap")
    shap_values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        api_type="explanation_object",
    )
    shap.plots.heatmap(shap_values, show=show_plots)
def test_summary(explainer, exact_shap_regression_dataset):
    """Smoke-test shap.summary_plot with raw SHAP values."""
    # (docstring previously said "bar plot" — copy-paste error)
    shap = pytest.importorskip("shap")
    shap_values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        train_or_test="train",
        api_type="raw_shap_values",
    )
    shap.summary_plot(shap_values, show=show_plots)
def test_violin(explainer, exact_shap_regression_dataset):
    """Smoke-test shap's violin plot with raw SHAP values."""
    # (docstring previously said "bar plot" — copy-paste error)
    shap = pytest.importorskip("shap")
    shap_values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        train_or_test="train",
        api_type="raw_shap_values",
    )
    shap.plots.violin(shap_values, show=show_plots)
def test_waterfall(explainer, exact_shap_regression_dataset):
    """Smoke-test shap's waterfall plot for the first explained row."""
    # (docstring previously said "bar plot" — copy-paste error)
    shap = pytest.importorskip("shap")
    shap_values, _ = get_shap_values(
        explainer=explainer,
        dataset=exact_shap_regression_dataset,
        api_type="explanation_object",
    )
    shap.plots.waterfall(shap_values[0], show=show_plots)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/explainer/test_sampling.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.explainer.sampling import kmeans_sampling
from cuml.internals.safe_imports import gpu_only_import_from
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cuda = gpu_only_import_from("numba", "cuda")
@pytest.mark.parametrize(
    "input_type",
    ["cudf-df", "cudf-series", "pandas-df", "pandas-series", "cupy", "numpy"],
)
def test_kmeans_input(input_type):
    """kmeans_sampling accepts many array/frame types and returns the
    summary in the same container type as the input.

    NOTE(review): the "numba" branches below are currently dead code — the
    parametrize list does not include "numba".
    """
    X = cp.array(
        [[0, 10], [1, 24], [0, 52], [0, 48.0], [0.2, 23], [1, 24], [1, 23]]
    )
    if input_type == "cudf-df":
        X = cudf.DataFrame(X)
    elif input_type == "cudf-series":
        X = cudf.Series(X[:, 1])
    elif input_type == "numba":
        X = cuda.as_cuda_array(X)
    elif input_type == "pandas-df":
        X = pd.DataFrame(cp.asnumpy(X))
    elif input_type == "pandas-series":
        X = pd.Series(cp.asnumpy(X[:, 1]))
    elif input_type == "numpy":
        X = cp.asnumpy(X)
    # Summarize the 7 points above down to k=2 representatives
    summary = kmeans_sampling(X, k=2, detailed=True)
    if input_type == "cudf-df":
        cp.testing.assert_array_equal(
            summary[0].values, [[1.0, 23.0], [0.0, 52.0]]
        )
        assert isinstance(summary[0], cudf.DataFrame)
    elif input_type == "pandas-df":
        cp.testing.assert_array_equal(
            summary[0].values, [[1.0, 23.0], [0.0, 52.0]]
        )
        assert isinstance(summary[0], pd.DataFrame)
    elif input_type == "numpy":
        cp.testing.assert_array_equal(summary[0], [[1.0, 23.0], [0.0, 52.0]])
        assert isinstance(summary[0], np.ndarray)
    elif input_type == "cudf-series":
        cp.testing.assert_array_equal(summary[0].values.tolist(), [23.0, 52.0])
        assert isinstance(summary[0], cudf.core.series.Series)
    elif input_type == "pandas-series":
        cp.testing.assert_array_equal(
            summary[0].to_numpy().flatten(), [23.0, 52.0]
        )
        assert isinstance(summary[0], pd.Series)
    elif input_type == "numba":
        cp.testing.assert_array_equal(
            cp.array(summary[0]).tolist(), [[1.0, 23.0], [0.0, 52.0]]
        )
        assert isinstance(summary[0], cuda.devicearray.DeviceNDArray)
    elif input_type == "cupy":
        cp.testing.assert_array_equal(
            summary[0].tolist(), [[1.0, 23.0], [0.0, 52.0]]
        )
        assert isinstance(summary[0], cp.ndarray)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/blobs.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
import cuml.internals
from collections.abc import Iterable
from cuml.internals.safe_imports import gpu_only_import
from cuml.datasets.utils import _create_rs_generator
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator,
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def _get_centers(rs, centers, center_box, n_samples, n_features, dtype):
if isinstance(n_samples, numbers.Integral):
# Set n_centers by looking at centers arg
if centers is None:
centers = 3
if isinstance(centers, numbers.Integral):
n_centers = centers
centers = rs.uniform(
center_box[0],
center_box[1],
size=(n_centers, n_features),
dtype=dtype,
)
else:
if n_features != centers.shape[1]:
raise ValueError(
"Expected `n_features` to be equal to"
" the length of axis 1 of centers array"
)
n_centers = centers.shape[0]
else:
# Set n_centers by looking at [n_samples] arg
n_centers = len(n_samples)
if centers is None:
centers = rs.uniform(
center_box[0],
center_box[1],
size=(n_centers, n_features),
dtype=dtype,
)
try:
assert len(centers) == n_centers
except TypeError:
raise ValueError(
"Parameter `centers` must be array-like. "
"Got {!r} instead".format(centers)
)
except AssertionError:
raise ValueError(
"Length of `n_samples` not consistent"
" with number of centers. Got n_samples = {} "
"and centers = {}".format(n_samples, centers)
)
else:
if n_features != centers.shape[1]:
raise ValueError(
"Expected `n_features` to be equal to"
" the length of axis 1 of centers array"
)
return centers, n_centers
@nvtx_annotate(message="datasets.make_blobs", domain="cuml_python")
@cuml.internals.api_return_generic()
def make_blobs(
    n_samples=100,
    n_features=2,
    centers=None,
    cluster_std=1.0,
    center_box=(-10.0, 10.0),
    shuffle=True,
    random_state=None,
    return_centers=False,
    order="F",
    dtype="float32",
):
    """Generate isotropic Gaussian blobs for clustering.

    Parameters
    ----------
    n_samples : int or array-like, optional (default=100)
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [`n_centers`, `n_features`], optional
        (default=None)
        The number of centers to generate, or the fixed center locations.
        If `n_samples` is an int and centers is None, 3 centers are generated.
        If `n_samples` is array-like, centers must be
        either None or an array of length equal to the length of `n_samples`.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
    return_centers : bool, optional (default=False)
        If True, then return the centers of each cluster
    order: str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples

    Returns
    -------
    X : device array of shape [n_samples, n_features]
        The generated samples.
    y : device array of shape [n_samples]
        The integer labels for cluster membership of each sample.
    centers : device array, shape [n_centers, n_features]
        The centers of each cluster. Only returned if
        ``return_centers=True``.

    Examples
    --------
    .. code-block:: python

        >>> from sklearn.datasets import make_blobs
        >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
        ...                   random_state=0)
        >>> print(X.shape)
        (10, 2)
        >>> y
        array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
        >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
        ...                   random_state=0)
        >>> print(X.shape)
        (10, 2)
        >>> y
        array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    # Set the default output type to "cupy". This will be ignored if the user
    # has set `cuml.global_settings.output_type`. Only necessary for array
    # generation methods that do not take an array as input
    cuml.internals.set_api_output_type("cupy")
    generator = _create_rs_generator(random_state=random_state)
    centers, n_centers = _get_centers(
        generator, centers, center_box, n_samples, n_features, dtype
    )
    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
        raise ValueError(
            "Length of `clusters_std` not consistent with "
            "number of centers. Got centers = {} "
            "and cluster_std = {}".format(centers, cluster_std)
        )
    if isinstance(cluster_std, numbers.Real):
        # Broadcast a scalar std to one entry per center
        cluster_std = cp.full(len(centers), cluster_std)
    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        # Split n_samples as evenly as possible among the centers
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers
        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1
    # NOTE(review): when `n_samples` is array-like, `n_samples * n_features`
    # repeats the sequence instead of multiplying a count, which does not
    # match the docstring's array-like contract — confirm this code path.
    X = cp.zeros(n_samples * n_features, dtype=dtype)
    X = X.reshape((n_samples, n_features), order=order)
    y = cp.zeros(n_samples, dtype=dtype)
    if shuffle:
        # Assign each sample to a center by a multinomial draw, so realized
        # per-center counts only approximate n_samples_per_center.
        proba_samples_per_center = np.array(n_samples_per_center) / np.sum(
            n_samples_per_center
        )
        np_seed = int(generator.randint(n_samples, size=1))
        np.random.seed(np_seed)
        shuffled_sample_indices = cp.array(
            np.random.choice(
                n_centers, n_samples, replace=True, p=proba_samples_per_center
            )
        )
        for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
            center_indices = cp.where(shuffled_sample_indices == i)
            y[center_indices[0]] = i
            X_k = generator.normal(
                scale=std,
                size=(len(center_indices[0]), n_features),
                dtype=dtype,
            )
            # NOTE: Adding the loc explicitly as cupy has a bug
            # when calling generator.normal with an array for loc.
            # cupy.random.normal, however, works with the same
            # arguments
            cp.add(X_k, centers[i], out=X_k)
            X[center_indices[0], :] = X_k
    else:
        # Without shuffling, fill contiguous [start, stop) ranges per center
        stop = 0
        for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
            start, stop = stop, stop + n_samples_per_center[i]
            y[start:stop] = i
            X_k = generator.normal(
                scale=std, size=(n, n_features), dtype=dtype
            )
            cp.add(X_k, centers[i], out=X_k)
            X[start:stop, :] = X_k
    if return_centers:
        return X, y, centers
    else:
        return X, y
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources for the datasets package. add_module_gpu_default()
# presumably appends each .pyx to cython_sources when its algorithm groups
# are enabled — confirm against the helper's definition.
set(cython_sources "")
add_module_gpu_default("arima.pyx" ${arima_algo} ${datasets_algo})
add_module_gpu_default("regression.pyx" ${regression_algo} ${datasets_algo})
# Build one extension module per source, linked against the single-GPU cuml
# libraries, with a "datasets_" prefix on the generated module targets.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX datasets_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/regression.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
np = cpu_only_import('numpy')
import cuml.internals
from cuml.internals.array import CumlArray
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from libcpp cimport bool
from libc.stdint cimport uint64_t, uintptr_t
from random import randint
# C++ entry points from libcuml that generate the regression problem on the
# GPU; the two declarations are overloads for float and double data.
cdef extern from "cuml/datasets/make_regression.hpp" namespace "ML":
    void cpp_make_regression "ML::Datasets::make_regression" (
        const handle_t& handle,
        float* out,
        float* values,
        long n_rows,
        long n_cols,
        long n_informative,
        float* coef,
        long n_targets,
        float bias,
        long effective_rank,
        float tail_strength,
        float noise,
        bool shuffle,
        uint64_t seed)
    void cpp_make_regression "ML::Datasets::make_regression" (
        const handle_t& handle,
        double* out,
        double* values,
        long n_rows,
        long n_cols,
        long n_informative,
        double* coef,
        long n_targets,
        double bias,
        long effective_rank,
        double tail_strength,
        double noise,
        bool shuffle,
        uint64_t seed)
# Accepted aliases for the output precision; only float32 and float64 are
# supported by the C++ kernels declared above.
inp_to_dtype = {
    'single': np.float32,
    'float': np.float32,
    'double': np.float64,
    np.float32: np.float32,
    np.float64: np.float64
}
@nvtx_annotate(message="datasets.make_regression", domain="cuml_python")
@cuml.internals.api_return_generic()
def make_regression(
    n_samples=100,
    n_features=2,
    n_informative=2,
    n_targets=1,
    bias=0.0,
    effective_rank=None,
    tail_strength=0.5,
    noise=0.0,
    shuffle=True,
    coef=False,
    random_state=None,
    dtype='single',
    handle=None
) -> typing.Union[typing.Tuple[CumlArray, CumlArray],
                  typing.Tuple[CumlArray, CumlArray, CumlArray]]:
    """Generate a random regression problem.

    See https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html

    Examples
    --------
    .. code-block:: python

        >>> from cuml.datasets.regression import make_regression
        >>> from cuml.linear_model import LinearRegression

        >>> # Create regression problem
        >>> data, values = make_regression(n_samples=200, n_features=12,
        ...                                n_informative=7, bias=-4.2,
        ...                                noise=0.3, random_state=10)

        >>> # Perform a linear regression on this problem
        >>> lr = LinearRegression(fit_intercept = True, normalize = False,
        ...                       algorithm = "eig")
        >>> reg = lr.fit(data, values)
        >>> print(reg.coef_) # doctest: +SKIP
        [-2.6980877e-02  7.7027252e+01  1.1498465e+01  8.5468025e+00
          5.8548538e+01  6.0772545e+01  3.6876743e+01  4.0023815e+01
          4.3908358e-03 -2.0275116e-02  3.5066366e-02 -3.4512520e-02]

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=2)
        The number of features.
    n_informative : int, optional (default=2)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain
            most of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, RandomState instance or None (default)
        Seed for the random number generator for dataset creation.
    dtype: string or numpy dtype (default: 'single')
        Type of the data. Possible values: float32, float64, 'single', 'float'
        or 'double'.
    handle: cuml.Handle
        If it is None, a new one is created just for this function call

    Returns
    -------
    out : device array of shape [n_samples, n_features]
        The input samples.
    values : device array of shape [n_samples, n_targets]
        The output values.
    coef : device array of shape [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """  # noqa: E501
    # Set the default output type to "cupy". This will be ignored if the user
    # has set `cuml.global_settings.output_type`. Only necessary for array
    # generation methods that do not take an array as input
    cuml.internals.set_api_output_type("cupy")

    if dtype not in ['single', 'float', 'double', np.float32, np.float64]:
        raise TypeError("dtype must be either 'float' or 'double'")
    else:
        dtype = inp_to_dtype[dtype]

    # The C++ API encodes "no effective rank requested" as -1.
    if effective_rank is None:
        effective_rank = -1

    handle = Handle() if handle is None else handle
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()

    # Pre-allocate the device buffers that the C++ generator fills in-place.
    out = CumlArray.zeros((n_samples, n_features), dtype=dtype, order='C')
    cdef uintptr_t out_ptr = out.ptr

    values = CumlArray.zeros((n_samples, n_targets), dtype=dtype, order='C')
    cdef uintptr_t values_ptr = values.ptr

    cdef uintptr_t coef_ptr
    coef_ptr = <uintptr_t> NULL
    if coef:
        coefs = CumlArray.zeros((n_features, n_targets),
                                dtype=dtype,
                                order='C')
        coef_ptr = coefs.ptr

    if random_state is None:
        # Use an integer bound: `random.randint(0, 1e18)` passed a float,
        # which is deprecated since Python 3.10 and a TypeError on 3.12+.
        random_state = randint(0, 10**18)

    if dtype == np.float32:
        cpp_make_regression(handle_[0], <float*> out_ptr,
                            <float*> values_ptr, <long> n_samples,
                            <long> n_features, <long> n_informative,
                            <float*> coef_ptr, <long> n_targets, <float> bias,
                            <long> effective_rank, <float> tail_strength,
                            <float> noise, <bool> shuffle,
                            <uint64_t> random_state)
    else:
        cpp_make_regression(handle_[0], <double*> out_ptr,
                            <double*> values_ptr, <long> n_samples,
                            <long> n_features, <long> n_informative,
                            <double*> coef_ptr, <long> n_targets,
                            <double> bias, <long> effective_rank,
                            <double> tail_strength, <double> noise,
                            <bool> shuffle, <uint64_t> random_state)

    if coef:
        return out, values, coefs
    else:
        return out, values
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/arima.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.array import CumlArray as cumlArray
import cuml.internals
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from cuml.tsa.arima cimport ARIMAOrder
from libc.stdint cimport uint64_t, uintptr_t
from random import randint
# C++ prototypes of ML::Datasets::make_arima, overloaded for float32 and
# float64. `out` is a pre-allocated device buffer of shape
# (n_obs, batch_size) that the C++ implementation fills.
cdef extern from "cuml/datasets/make_arima.hpp" namespace "ML":
    void cpp_make_arima "ML::Datasets::make_arima" (
        const handle_t& handle,
        float* out,
        int batch_size,
        int n_obs,
        ARIMAOrder order,
        float scale,
        float noise_scale,
        float intercept_scale,
        uint64_t seed
    )

    void cpp_make_arima "ML::Datasets::make_arima" (
        const handle_t& handle,
        double* out,
        int batch_size,
        int n_obs,
        ARIMAOrder order,
        double scale,
        double noise_scale,
        double intercept_scale,
        uint64_t seed
    )
# Accepted dtype specifiers mapped to their canonical numpy dtypes.
inp_to_dtype = {
    'single': np.float32,
    'float': np.float32,
    'double': np.float64,
    np.float32: np.float32,
    np.float64: np.float64
}
@cuml.internals.api_return_array()
def make_arima(batch_size=1000, n_obs=100, order=(1, 1, 1),
               seasonal_order=(0, 0, 0, 0), intercept=False,
               random_state=None, dtype='double',
               handle=None):
    """Generates a dataset of time series by simulating an ARIMA process
    of a given order.

    Examples
    --------
    .. code-block:: python

        from cuml.datasets import make_arima
        y = make_arima(1000, 100, (2,1,2), (0,1,2,12), 0)

    Parameters
    ----------
    batch_size: int
        Number of time series to generate
    n_obs: int
        Number of observations per series
    order : Tuple[int, int, int]
        Order (p, d, q) of the simulated ARIMA process
    seasonal_order: Tuple[int, int, int, int]
        Seasonal ARIMA order (P, D, Q, s) of the simulated ARIMA process
    intercept: bool or int
        Whether to include a constant trend mu in the simulated ARIMA process
    random_state: int, RandomState instance or None (default)
        Seed for the random number generator for dataset creation.
    dtype: string or numpy dtype (default: 'double')
        Type of the data. Possible values: float32, float64, 'single', 'float'
        or 'double'
    handle: cuml.Handle
        If it is None, a new one is created just for this function call

    Returns
    -------
    out: array-like, shape (n_obs, batch_size)
        Array of the requested type containing the generated dataset
    """
    cdef ARIMAOrder cpp_order
    cpp_order.p, cpp_order.d, cpp_order.q = order
    cpp_order.P, cpp_order.D, cpp_order.Q, cpp_order.s = seasonal_order
    cpp_order.k = <int>intercept
    cpp_order.n_exog = 0

    # Set the default output type to "cupy". This will be ignored if the user
    # has set `cuml.global_settings.output_type`. Only necessary for array
    # generation methods that do not take an array as input
    cuml.internals.set_api_output_type("cupy")

    # Define some parameters based on the order. The intercept scale shrinks
    # with the total amount of differencing (d + D) to keep series bounded.
    scale = 1.0
    noise_scale = 0.2
    intercept_scale = [1.0, 0.2, 0.01][cpp_order.d + cpp_order.D]

    if dtype not in ['single', 'float', 'double', np.float32, np.float64]:
        raise TypeError("dtype must be either 'float' or 'double'")
    else:
        dtype = inp_to_dtype[dtype]

    handle = Handle() if handle is None else handle
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()

    # Pre-allocate the device buffer that the C++ generator fills in-place.
    out = cumlArray.empty((n_obs, batch_size), dtype=dtype, order='F')
    cdef uintptr_t out_ptr = <uintptr_t> out.ptr

    if random_state is None:
        # Use an integer bound: `random.randint(0, 1e18)` passed a float,
        # which is deprecated since Python 3.10 and a TypeError on 3.12+.
        random_state = randint(0, 10**18)

    if dtype == np.float32:
        cpp_make_arima(handle_[0], <float*> out_ptr, <int> batch_size,
                       <int> n_obs, cpp_order, <float> scale,
                       <float> noise_scale, <float> intercept_scale,
                       <uint64_t> random_state)
    else:
        cpp_make_arima(handle_[0], <double*> out_ptr, <int> batch_size,
                       <int> n_obs, cpp_order, <double> scale,
                       <double> noise_scale, <double> intercept_scale,
                       <uint64_t> random_state)

    return out
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/classification.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals
from cuml.internals.import_utils import has_sklearn
from cuml.datasets.utils import _create_rs_generator
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator,
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions.

    Samples are drawn without replacement from the 2**dimensions vertices
    of the unit hypercube, so each returned row is unique.
    """
    if not has_sklearn():
        raise RuntimeError(
            "Scikit-learn is needed to run \
             make_classification."
        )

    from sklearn.utils.random import sample_without_replacement

    # sample_without_replacement enumerates at most 2**30 candidates; for
    # higher dimensions, fill the extra columns with unconstrained random
    # bits and recurse on the first 30 dimensions (which stay distinct, so
    # the concatenated rows stay distinct too).
    # NOTE(review): the extra bits use the global numpy RNG (np.random),
    # not `rng` — presumably seeded by the caller; verify reproducibility.
    if dimensions > 30:
        return np.hstack(
            [
                np.random.randint(2, size=(samples, dimensions - 30)),
                _generate_hypercube(samples, 30, rng),
            ]
        )
    random_state = int(rng.randint(dimensions))
    # Draw vertex indices, then view as big-endian bytes so unpackbits
    # yields each index's 32 bits; keep the low `dimensions` bits as the
    # binary coordinates.
    out = sample_without_replacement(
        2**dimensions, samples, random_state=random_state
    ).astype(dtype=">u4", copy=False)
    out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:]
    return out
@nvtx_annotate(message="datasets.make_classification", domain="cuml_python")
@cuml.internals.api_return_generic()
def make_classification(
    n_samples=100,
    n_features=20,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    random_state=None,
    order="F",
    dtype="float32",
    _centroids=None,
    _informative_covariance=None,
    _redundant_covariance=None,
    _repeated_indices=None,
):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length :py:`2*class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, `X` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    :py:`X[:, :n_informative + n_redundant + n_repeated]`.

    Examples
    --------
    .. code-block:: python

        >>> from cuml.datasets.classification import make_classification

        >>> X, y = make_classification(n_samples=10, n_features=4,
        ...                            n_informative=2, n_classes=2,
        ...                            random_state=10)

        >>> print(X) # doctest: +SKIP
        [[-1.7974224   0.24425316  0.39062843 -0.38293394]
        [ 0.6358963   1.4161923   0.06970507 -0.16085647]
        [-0.22802866 -1.1827322   0.3525861   0.276615  ]
        [ 1.7308872   0.43080002  0.05048406  0.29837844]
        [-1.9465544   0.5704457  -0.8997551  -0.27898186]
        [ 1.0575483  -0.9171263   0.09529338  0.01173469]
        [ 0.7917619  -1.0638094  -0.17599393 -0.06420116]
        [-0.6686142  -0.13951421 -0.6074711   0.21645583]
        [-0.88968956 -0.914443    0.1302423   0.02924336]
        [-0.8817671  -0.84549576  0.1845096   0.02556021]]

        >>> print(y)
        [0 1 0 1 1 0 0 1 0 0]

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        :py:`n_features-n_informative-n_redundant-n_repeated` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
        (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if :py:`len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order: str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    _centroids: array of centroids of shape (n_clusters, n_informative)
    _informative_covariance: array for covariance between informative features
        of shape (n_clusters, n_informative, n_informative)
    _redundant_covariance: array for covariance between redundant features
        of shape (n_informative, n_redundant)
    _repeated_indices: array of indices for the repeated features
        of shape (n_repeated, )

    Returns
    -------
    X : device array of shape [n_samples, n_features]
        The generated samples.
    y : device array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1]_ and was designed to generate
    the "Madelon" dataset. How we optimized for GPUs:

    1. Firstly, we generate X from a standard univariate instead of zeros.
       This saves memory as we don't need to generate univariates each
       time for each feature class (informative, repeated, etc.) while
       also providing the added speedup of generating a big matrix
       on GPU
    2. We generate :py:`order=F` construction. We exploit the
       fact that X is a generated from a univariate normal, and
       covariance is introduced with matrix multiplications. Which means,
       we can generate X as a 1D array and just reshape it to the
       desired order, which only updates the metadata and eliminates
       copies
    3. Lastly, we also shuffle by construction. Centroid indices are
       permuted for each sample, and then we construct the data for
       each centroid. This shuffle works for both :py:`order=C` and
       :py:`order=F` and eliminates any need for secondary copies

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    cuml.internals.set_api_output_type("cupy")

    generator = _create_rs_generator(random_state)
    # Seed the global numpy RNG from the cupy generator so host-side draws
    # (hypercube bits, cluster assignment) follow random_state as well.
    np_seed = int(generator.randint(n_samples, size=1))
    np.random.seed(np_seed)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError(
            "Number of informative, redundant and repeated "
            "features must sum to less than the number of total"
            " features"
        )
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(
            msg.format(
                n_classes,
                n_clusters_per_class,
                n_informative,
                2**n_informative,
            )
        )
    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError(
                "Weights specified but incompatible with number " "of classes."
            )
        if len(weights) == n_classes - 1:
            # Infer the last class weight so the weights sum to 1.
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)
    ]
    # Hand out the rounding remainder one sample at a time.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y: X starts as N(0, 1) noise and informative columns
    # are overwritten in-place below (see Notes, optimization 1).
    X = generator.randn(n_samples * n_features, dtype=dtype)
    X = X.reshape((n_samples, n_features), order=order)
    y = cp.zeros(n_samples, dtype=np.int64)

    # Build the polytope whose vertices become cluster centroids
    if _centroids is None:
        centroids = cp.array(
            _generate_hypercube(n_clusters, n_informative, generator)
        ).astype(dtype, copy=False)
    else:
        centroids = _centroids
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1, dtype=dtype)
        centroids *= generator.rand(1, n_informative, dtype=dtype)

    # Create redundant features
    if n_redundant > 0:
        if _redundant_covariance is None:
            B = 2 * generator.rand(n_informative, n_redundant, dtype=dtype) - 1
        else:
            B = _redundant_covariance

    # Create each cluster; a variant of make_blobs
    if shuffle:
        # Shuffle by construction: assign each sample a random cluster
        # (see Notes, optimization 3).
        proba_samples_per_cluster = np.array(n_samples_per_cluster) / np.sum(
            n_samples_per_cluster
        )
        shuffled_sample_indices = cp.array(
            np.random.choice(
                n_clusters,
                n_samples,
                replace=True,
                p=proba_samples_per_cluster,
            )
        )
        for k, centroid in enumerate(centroids):
            centroid_indices = cp.where(shuffled_sample_indices == k)
            y[centroid_indices[0]] = k % n_classes

            X_k = X[centroid_indices[0], :n_informative]

            if _informative_covariance is None:
                A = (
                    2
                    * generator.rand(n_informative, n_informative, dtype=dtype)
                    - 1
                )
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)

            # NOTE: This could be done outside the loop, but a current
            # cupy bug does not allow that
            # https://github.com/cupy/cupy/issues/3284
            if n_redundant > 0:
                X[
                    centroid_indices[0],
                    n_informative : n_informative + n_redundant,
                ] = cp.dot(X_k, B)

            X_k += centroid  # shift the cluster to a vertex
            X[centroid_indices[0], :n_informative] = X_k
    else:
        stop = 0
        for k, centroid in enumerate(centroids):
            start, stop = stop, stop + n_samples_per_cluster[k]
            y[start:stop] = k % n_classes  # assign labels
            X_k = X[start:stop, :n_informative]  # slice a view of the cluster

            if _informative_covariance is None:
                A = (
                    2
                    * generator.rand(n_informative, n_informative, dtype=dtype)
                    - 1
                )
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)  # introduce random covariance

            if n_redundant > 0:
                X[
                    start:stop, n_informative : n_informative + n_redundant
                ] = cp.dot(X_k, B)

            X_k += centroid  # shift the cluster to a vertex
            X[start:stop, :n_informative] = X_k

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        if _repeated_indices is None:
            indices = (
                (n - 1) * generator.rand(n_repeated, dtype=dtype) + 0.5
            ).astype(np.intp)
        else:
            indices = _repeated_indices
        X[:, n : n + n_repeated] = X[:, indices]

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples, dtype=dtype) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=int(flip_mask.sum()))

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features, dtype=dtype) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features, dtype=dtype)
    X *= scale

    return X, y
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.datasets.arima import make_arima
from cuml.datasets.blobs import make_blobs
from cuml.datasets.regression import make_regression
from cuml.datasets.classification import make_classification
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/datasets/utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def _create_rs_generator(random_state):
    """
    This is a utility function that returns an instance of CuPy RandomState.

    Parameters
    ----------
    random_state : None, int, or CuPy RandomState
        The random_state from which the CuPy random state is generated

    Returns
    -------
    cupy.random.RandomState
        A new RandomState seeded with `random_state` (or unseeded when
        None), or `random_state` itself if it already is one.

    Raises
    ------
    ValueError
        If `random_state` is of any other type.
    """
    if isinstance(random_state, (type(None), int)):
        return cp.random.RandomState(seed=random_state)
    elif isinstance(random_state, cp.random.RandomState):
        return random_state
    else:
        # The previous message omitted None, which is a valid value.
        raise ValueError(
            "random_state must be None, an int, or a CuPy RandomState"
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/testing/test_preproc_utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import input_to_cuml_array
from scipy.sparse import coo_matrix as cpu_coo_matrix
from scipy.sparse import csc_matrix as cpu_csc_matrix
from cupyx.scipy.sparse import coo_matrix as gpu_coo_matrix
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.datasets import make_classification, make_blobs
from cuml.internals.safe_imports import cpu_only_import_from
np_assert_allclose = cpu_only_import_from("numpy.testing", "assert_allclose")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
gpu_sparse = gpu_only_import("cupyx.scipy.sparse")
cpu_sparse = cpu_only_import("scipy.sparse")
gpu_csr_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csr_matrix")
gpu_csc_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csc_matrix")
cpu_csr_matrix = cpu_only_import_from("scipy.sparse", "csr_matrix")
def to_output_type(array, output_type, order="F"):
    """Used to convert arrays while creating datasets for testing.

    Parameters
    ----------
    array : array
        Input array to convert (dense, or a scipy/cupy sparse matrix)
    output_type : string
        Target format to convert to. Sparse targets: 'scipy_csr',
        'scipy_csc', 'scipy_coo', 'cupy_csr', 'cupy_csc', 'cupy_coo'.
        Dense targets: anything CumlArray.to_output accepts, e.g.
        'numpy', 'cupy', 'dataframe', 'cudf', 'series', 'numba'.
    order : string (default='F')
        Memory layout used when converting to a dense output.

    Returns
    -------
    Converted array
    """
    # Sparse targets first: these never go through CumlArray.
    if output_type == "scipy_csr":
        return cpu_sparse.csr_matrix(array.get())
    if output_type == "scipy_csc":
        return cpu_sparse.csc_matrix(array.get())
    if output_type == "scipy_coo":
        return cpu_sparse.coo_matrix(array.get())
    # cupy sparse targets: convert between formats only when needed.
    if output_type == "cupy_csr":
        if array.format in ["csc", "coo"]:
            return array.tocsr()
        else:
            return array
    if output_type == "cupy_csc":
        if array.format in ["csr", "coo"]:
            return array.tocsc()
        else:
            return array
    if output_type == "cupy_coo":
        if array.format in ["csr", "csc"]:
            return array.tocoo()
        else:
            return array

    # Sparse input with a dense target: densify first.
    if cpu_sparse.issparse(array):
        if output_type == "numpy":
            return array.todense()
        elif output_type == "cupy":
            return cp.array(array.todense())
        else:
            array = array.todense()
    elif gpu_sparse.issparse(array):
        if output_type == "numpy":
            return array.get().todense()
        elif output_type == "cupy":
            return array.todense()
        else:
            array = array.todense()

    cuml_array = input_to_cuml_array(array, order=order)[0]
    # A 2D array cannot become a single cudf Series; fall back to a frame.
    if output_type == "series" and len(array.shape) > 1:
        output_type = "cudf"
    output = cuml_array.to_output(output_type)

    # Give dataframe outputs predictable column names: c0, c1, ...
    if output_type in ["dataframe", "cudf"]:
        renaming = {i: "c" + str(i) for i in range(output.shape[1])}
        output = output.rename(columns=renaming)
    return output
def create_rand_clf(random_state):
    """Build a random 500x20 classification feature matrix (labels dropped)."""
    features, _ = make_classification(
        n_samples=500,
        n_features=20,
        n_clusters_per_class=1,
        n_informative=12,
        n_classes=5,
        order="C",
        random_state=random_state,
    )
    return features
def create_rand_blobs(random_state):
    """Build a random 500x20 blobs feature matrix (labels dropped)."""
    features, _ = make_blobs(
        n_samples=500,
        n_features=20,
        centers=20,
        order="C",
        random_state=random_state,
    )
    return features
def create_rand_integers(random_state):
    """Build a 500x20 array of random integers in [0, 30) stored as float64."""
    cp.random.seed(random_state)
    values = cp.random.randint(30, size=(500, 20))
    return values.astype(cp.float64)
def create_positive_rand(random_state):
    """Build a 500x20 array of strictly positive float64 values (>= 0.1)."""
    cp.random.seed(random_state)
    data = cp.random.rand(500, 20).astype(cp.float64)
    return cp.abs(data) + 0.1
def convert(dataset, conversion_format):
    """Return (numpy copy of dataset, dataset converted to conversion_format)."""
    as_requested = to_output_type(dataset, conversion_format)
    as_numpy = cp.asnumpy(dataset)
    return as_numpy, as_requested
def sparsify_and_convert(dataset, conversion_format, sparsify_ratio=0.3):
    """Randomly set values to 0 and produce a sparse array.

    Note: `dataset` is modified in-place (zeros are written into it).

    Parameters
    ----------
    dataset : array
        Input array to convert
    conversion_format : string
        Type of sparse array :
        - scipy-csr: SciPy CSR sparse array
        - scipy-csc: SciPy CSC sparse array
        - scipy-coo: SciPy COO sparse array
        - cupy-csr: CuPy CSR sparse array
        - cupy-csc: CuPy CSC sparse array
        - cupy-coo: CuPy COO sparse array
    sparsify_ratio: float [0-1]
        Ratio of zeros in the sparse array

    Returns
    -------
    Tuple of (SciPy CSR baseline, converted sparse array)
    """
    # Zero out a random subset of entries to reach the requested sparsity.
    random_loc = cp.random.choice(
        dataset.size, int(dataset.size * sparsify_ratio), replace=False
    )
    dataset.ravel()[random_loc] = 0

    # scipy constructors need a host array.
    if conversion_format.startswith("scipy"):
        dataset = cp.asnumpy(dataset)

    if conversion_format == "scipy-csr":
        converted_dataset = cpu_csr_matrix(dataset)
    elif conversion_format == "scipy-csc":
        converted_dataset = cpu_csc_matrix(dataset)
    elif conversion_format == "scipy-coo":
        converted_dataset = cpu_coo_matrix(dataset)
    elif conversion_format == "cupy-csr":
        converted_dataset = gpu_csr_matrix(dataset)
    elif conversion_format == "cupy-csc":
        converted_dataset = gpu_csc_matrix(dataset)
    elif conversion_format == "cupy-coo":
        # cupy's COO constructor does not take a dense device array directly;
        # go through a host COO matrix instead.
        np_array = cp.asnumpy(dataset)
        np_coo_array = cpu_coo_matrix(np_array)
        converted_dataset = gpu_coo_matrix(np_coo_array)

    # The baseline comparison copy is always a host CSR matrix.
    if conversion_format.startswith("cupy"):
        dataset = cp.asnumpy(dataset)
    return cpu_csr_matrix(dataset), converted_dataset
@pytest.fixture(
    scope="session", params=["numpy", "dataframe", "cupy", "cudf", "numba"]
)
def clf_dataset(request, random_seed):
    """Classification features as a (numpy baseline, converted copy) pair."""
    clf = create_rand_clf(random_seed)
    return convert(clf, request.param)
@pytest.fixture(
    scope="session", params=["numpy", "dataframe", "cupy", "cudf", "numba"]
)
def blobs_dataset(request, random_seed):
    """Blobs features as a (numpy baseline, converted copy) pair."""
    blobs = create_rand_blobs(random_seed)
    return convert(blobs, request.param)
@pytest.fixture(
    scope="session", params=["numpy", "dataframe", "cupy", "cudf", "numba"]
)
def int_dataset(request, random_seed):
    """Integer dataset with ~30% of entries overwritten at shared locations.

    Returns three (numpy baseline, converted copy) pairs — the same
    positions filled with 0, 1 and NaN respectively.
    """
    randint = create_rand_integers(random_seed)
    cp.random.seed(random_seed)
    # Pick the replacement positions once so all three variants align.
    random_loc = cp.random.choice(
        randint.size, int(randint.size * 0.3), replace=False
    )
    zero_filled = randint.copy().ravel()
    zero_filled[random_loc] = 0
    zero_filled = zero_filled.reshape(randint.shape)
    zero_filled = convert(zero_filled, request.param)
    one_filled = randint.copy().ravel()
    one_filled[random_loc] = 1
    one_filled = one_filled.reshape(randint.shape)
    one_filled = convert(one_filled, request.param)
    nan_filled = randint.copy().ravel()
    nan_filled[random_loc] = cp.nan
    nan_filled = nan_filled.reshape(randint.shape)
    nan_filled = convert(nan_filled, request.param)
    return zero_filled, one_filled, nan_filled
@pytest.fixture(
    scope="session", params=["scipy-csr", "scipy-csc", "cupy-csr", "cupy-csc"]
)
def sparse_clf_dataset(request, random_seed):
    """Sparsified classification features as (scipy CSR baseline, converted)."""
    clf = create_rand_clf(random_seed)
    return sparsify_and_convert(clf, request.param)
@pytest.fixture(
    scope="session",
    params=[
        "scipy-csr",
        "scipy-csc",
        "scipy-coo",
        "cupy-csr",
        "cupy-csc",
        "cupy-coo",
    ],
)
def sparse_dataset_with_coo(request, random_seed):
    """Like sparse_clf_dataset, additionally covering the COO formats."""
    clf = create_rand_clf(random_seed)
    return sparsify_and_convert(clf, request.param)
@pytest.fixture(
    scope="session", params=["scipy-csr", "scipy-csc", "cupy-csr", "cupy-csc"]
)
def sparse_blobs_dataset(request, random_seed):
    """Sparsified blobs features as (scipy CSR baseline, converted)."""
    blobs = create_rand_blobs(random_seed)
    return sparsify_and_convert(blobs, request.param)
@pytest.fixture(
    scope="session", params=["scipy-csr", "scipy-csc", "cupy-csr", "cupy-csc"]
)
def sparse_int_dataset(request, random_seed):
    """Sparsified integer dataset as (scipy CSR baseline, converted)."""
    randint = create_rand_integers(random_seed)
    return sparsify_and_convert(randint, request.param)
@pytest.fixture(
    scope="session",
    params=[
        ("scipy-csr", np.nan),
        ("scipy-csc", np.nan),
        ("cupy-csr", np.nan),
        ("cupy-csc", np.nan),
        ("scipy-csr", 1.0),
        ("scipy-csc", 1.0),
        ("cupy-csr", 1.0),
        ("cupy-csc", 1.0),
    ],
)
def sparse_imputer_dataset(request, random_seed):
    """Random integer data with ~30% of entries set to a missing value.

    Returns ``(missing_value, X_sparse_csc, X_dense)``; the sparse matrix
    is converted to CSC layout as the imputer tests expect.
    """
    datatype, val = request.param
    randint = create_rand_integers(random_seed)
    # Seed explicitly so the chosen missing-value locations are
    # reproducible, consistent with the sibling fixtures
    # (int_dataset, nan_filled_positive) which seed before choice().
    cp.random.seed(random_seed)
    random_loc = cp.random.choice(
        randint.size, int(randint.size * 0.3), replace=False
    )
    randint.ravel()[random_loc] = val
    X_sp, X = sparsify_and_convert(randint, datatype, sparsify_ratio=0.15)
    X_sp = X_sp.tocsc()
    return val, X_sp, X
@pytest.fixture(
    scope="session", params=["numpy", "dataframe", "cupy", "cudf", "numba"]
)
def nan_filled_positive(request, random_seed):
    """Positive random data with ~30% of entries replaced by NaN."""
    data = create_positive_rand(random_seed)
    cp.random.seed(random_seed)
    nan_loc = cp.random.choice(
        data.size, int(data.size * 0.3), replace=False
    )
    data.ravel()[nan_loc] = cp.nan
    return convert(data, request.param)
@pytest.fixture(scope="session", params=["scipy-csc", "cupy-csc"])
def sparse_nan_filled_positive(request, random_seed):
    """Sparsified positive random data with ~30% of entries set to NaN."""
    data = create_positive_rand(random_seed)
    cp.random.seed(random_seed)
    nan_loc = cp.random.choice(
        data.size, int(data.size * 0.3), replace=False
    )
    data.ravel()[nan_loc] = cp.nan
    return sparsify_and_convert(data, request.param)
def assert_allclose(actual, desired, rtol=1e-05, atol=1e-05, ratio_tol=None):
    """Compare two array-likes after coercing both to numpy.

    When ``ratio_tol`` is given, at most that fraction of elements may
    differ (exact comparison); otherwise defer to numpy's allclose
    assertion with ``rtol``/``atol``.
    """
    if not isinstance(actual, np.ndarray):
        actual = to_output_type(actual, "numpy")
    if not isinstance(desired, np.ndarray):
        desired = to_output_type(desired, "numpy")
    if ratio_tol:
        assert actual.shape == desired.shape
        mismatch_ratio = (actual != desired).sum() / actual.size
        assert mismatch_ratio <= ratio_tol
    else:
        return np_assert_allclose(actual, desired, rtol=rtol, atol=atol)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/testing/strategies.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.array import CumlArray
from cuml.internals.safe_imports import cpu_only_import, gpu_only_import
from hypothesis import assume
from hypothesis.extra.numpy import (
array_shapes,
arrays,
floating_dtypes,
integer_dtypes,
)
from hypothesis.strategies import (
composite,
integers,
just,
none,
one_of,
sampled_from,
)
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Container types accepted as input when constructing a CumlArray.
_CUML_ARRAY_INPUT_TYPES = ["numpy", "cupy", "series"]
# Numeric dtypes exercised by the CumlArray test strategies.
_CUML_ARRAY_DTYPES = [
    np.float16,
    np.float32,
    np.float64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
]
# Memory layouts (column-major and row-major) to generate.
_CUML_ARRAY_ORDERS = ["F", "C"]
# Output types understood by CumlArray.to_output.
_CUML_ARRAY_OUTPUT_TYPES = [
    "cudf",
    "cupy",
    "dataframe",
    "numba",
    "numpy",
    "series",
]
# TODO(wphicks): Once all memory types are supported, just directly
# iterate on the enum values
_CUML_ARRAY_MEM_TYPES = ("host", "device")
# Dtypes that cudf Series cannot represent; strategies skip these when the
# drawn input type is cudf-based.
UNSUPPORTED_CUDF_DTYPES = [
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
    np.float16,
]
@composite
def cuml_array_input_types(draw):
    """Strategy drawing one of the supported cuml array input types."""
    strategy = sampled_from(_CUML_ARRAY_INPUT_TYPES)
    return draw(strategy)
@composite
def cuml_array_output_types(draw):
    """Strategy drawing one of the supported cuml array output types."""
    strategy = sampled_from(_CUML_ARRAY_OUTPUT_TYPES)
    return draw(strategy)
@composite
def cuml_array_output_dtypes(draw):
    """Strategy drawing one of the supported cuml array output dtypes."""
    strategy = sampled_from(_CUML_ARRAY_DTYPES)
    return draw(strategy)
@composite
def cuml_array_dtypes(draw):
    """Strategy drawing one of the supported cuml array dtypes."""
    strategy = sampled_from(_CUML_ARRAY_DTYPES)
    return draw(strategy)
@composite
def cuml_array_orders(draw):
    """Strategy drawing one of the supported cuml array memory orders."""
    strategy = sampled_from(_CUML_ARRAY_ORDERS)
    return draw(strategy)
@composite
def cuml_array_mem_types(draw):
    """Strategy drawing one of the supported cuml array memory types.

    Managed memory is deliberately excluded because it is not yet
    officially supported.
    """
    strategy = sampled_from(_CUML_ARRAY_MEM_TYPES)
    return draw(strategy)
@composite
def cuml_array_shapes(
    draw, *, min_dims=1, max_dims=2, min_side=1, max_side=None
):
    """
    Generates cuml array shapes.

    Draws either a full nd-shape tuple or a bare integer length.

    See also: hypothesis.extra.numpy.array_shapes()

    Parameters
    ----------
    min_dims: int, default=1
        Returned shapes will have at least this number of dimensions.
    max_dims: int, default=2
        Returned shapes will have at most this number of dimensions.
    min_side: int, default=1
        Returned shapes will have at least this size in any dimension.
    max_side: int | None, default=min_side + 9
        Returned shapes will have at most this size in any dimension.

    Returns
    -------
    Shapes for cuml array inputs.
    """
    if max_side is None:
        max_side = min_side + 9
    if not 1 <= min_dims <= max_dims:
        raise ValueError(
            "Arguments violate condition 1 <= min_dims <= max_dims."
        )
    if not 0 < min_side < max_side:
        raise ValueError(
            "Arguments violate condition 0 < min_side < max_side."
        )
    tuple_shapes = array_shapes(
        min_dims=min_dims,
        max_dims=max_dims,
        min_side=min_side,
        max_side=max_side,
    )
    scalar_shapes = integers(min_side, max_side)
    return draw(one_of(tuple_shapes, scalar_shapes))
def create_cuml_array_input(input_type, dtype, shape, order):
    """
    Create a valid cuml array input filled with ones.

    Parameters
    ----------
    input_type: str | None, default=cupy
        One of "numpy", "cupy", "series"; None maps to "cupy".
    dtype: Data type specifier
        A numpy/cupy compatible data type, e.g., numpy.float64.
    shape: int | tuple[int]
        Dimensions of the array to generate.
    order : str in {'C', 'F'}
        Memory order of the generated array.

    Returns
    -------
    A cuml array input array.
    """
    if input_type is None:
        input_type = "cupy"
    # cudf Series cannot hold these dtypes nor >1-dimensional data.
    is_multidim = isinstance(shape, tuple) and len(shape) > 1
    assume(
        not (
            input_type == "series"
            and (dtype in UNSUPPORTED_CUDF_DTYPES or is_multidim)
        )
    )
    base = cp.ones(shape, dtype=dtype, order=order)
    if input_type == "cupy":
        return base
    if input_type == "numpy":
        return np.array(cp.asnumpy(base), dtype=dtype, order=order)
    if input_type == "series":
        return cudf.Series(base)
    raise ValueError(
        "The value for 'input_type' must be "
        f"one of {', '.join(_CUML_ARRAY_INPUT_TYPES)}."
    )
@composite
def cuml_array_inputs(
    draw,
    input_types=cuml_array_input_types(),
    dtypes=cuml_array_dtypes(),
    shapes=cuml_array_shapes(),
    orders=cuml_array_orders(),
):
    """
    Generates valid inputs for cuml arrays.

    Parameters
    ----------
    input_types: SearchStrategy[("numpy", "cupy", "series")], \
            default=cuml_array_input_types()
        A search strategy for the type of array input.
    dtypes: SearchStrategy[np.dtype], default=cuml_array_dtypes()
        A search strategy for a numpy/cupy compatible data type.
    shapes: SearchStrategy[int | tuple[int]], default=cuml_array_shapes()
        A search strategy for array shapes.
    orders : str in {'C', 'F'}, default=cuml_array_orders()
        A search strategy for array orders.

    Returns
    -------
    A strategy for valid cuml array inputs.
    """
    input_type = draw(input_types)
    dtype = draw(dtypes)
    shape = draw(shapes)
    order = draw(orders)

    # cudf Series cannot hold unsupported dtypes nor >1-dimensional data.
    is_multidim = isinstance(shape, tuple) and len(shape) > 1
    assume(
        not (
            input_type == "series"
            and (dtype in UNSUPPORTED_CUDF_DTYPES or is_multidim)
        )
    )

    raw = draw(arrays(dtype=dtype, shape=shape))
    if input_type == "numpy":
        result = np.asarray(raw, order=order)
    elif input_type == "cupy":
        result = cp.array(raw, dtype=dtype, order=order)
    elif input_type == "series":
        result = cudf.Series(raw)
    else:
        raise ValueError(
            "The value for 'input_type' must be "
            f"one of {', '.join(_CUML_ARRAY_INPUT_TYPES)}."
        )

    # Cupy currently does not support masked arrays.
    cai = getattr(result, "__cuda_array_interface__", dict())
    assume(cai.get("mask") is None)
    return result
@composite
def cuml_arrays(
    draw,
    input_types=cuml_array_input_types(),
    dtypes=cuml_array_dtypes(),
    shapes=cuml_array_shapes(),
    orders=cuml_array_orders(),
    mem_types=cuml_array_mem_types(),
):
    """
    Generates CumlArray instances.

    Parameters
    ----------
    input_types: SearchStrategy[("numpy", "cupy", "series")], \
            default=cuml_array_input_types()
        A search strategy for the type of array input.
    dtypes: SearchStrategy[np.dtype], default=cuml_array_dtypes()
        A search strategy for a numpy/cupy compatible data type.
    shapes: SearchStrategy[int | tuple[int]], default=cuml_array_shapes()
        A search strategy for array shapes.
    orders : str in {'C', 'F'}, default=cuml_array_orders()
        A search strategy for array orders.
    mem_types : SearchStrategy[str], default=cuml_array_mem_types()
        A search strategy for the backing memory type.

    Returns
    -------
    A strategy for cuml arrays.
    """
    raw_input = create_cuml_array_input(
        input_type=draw(input_types),
        dtype=draw(dtypes),
        shape=draw(shapes),
        order=draw(orders),
    )
    mem_type = draw(mem_types)
    return CumlArray(data=raw_input, mem_type=mem_type)
def _get_limits(strategy):
"""Try to find the strategy's limits.
Raises AttributeError if limits cannot be determined.
"""
# unwrap if lazy
strategy = getattr(strategy, "wrapped_strategy", strategy)
try:
yield getattr(strategy, "value") # just(...)
except AttributeError:
# assume numbers strategy
yield strategy.start
yield strategy.stop
@composite
def standard_datasets(
    draw,
    dtypes=floating_dtypes(),
    n_samples=integers(min_value=0, max_value=200),
    n_features=integers(min_value=0, max_value=200),
    *,
    n_targets=just(1),
):
    """
    Returns a strategy to generate standard estimator input datasets.

    Parameters
    ----------
    dtypes: SearchStrategy[np.dtype], default=floating_dtypes()
        Returned arrays will have a dtype drawn from these types.
    n_samples: SearchStrategy[int], \
            default=integers(min_value=0, max_value=200)
        Returned arrays will have number of rows drawn from these values.
    n_features: SearchStrategy[int], \
            default=integers(min_value=0, max_value=200)
        Returned arrays will have number of columns drawn from these values.
    n_targets: SearchStrategy[int], default=just(1)
        Determines the number of targets returned datasets may contain.

    Returns
    -------
    X: SearchStrategy[array] (n_samples, n_features)
        The search strategy for input samples.
    y: SearchStrategy[array] (n_samples,) or (n_samples, n_targets)
        The search strategy for output samples.
    """
    n_rows = draw(n_samples)
    n_cols = draw(n_features)
    n_outputs = draw(n_targets)
    X_strategy = arrays(dtype=dtypes, shape=(n_rows, n_cols))
    y_strategy = arrays(dtype=dtypes, shape=(n_rows, n_outputs))
    return draw(X_strategy), draw(y_strategy)
def combined_datasets_strategy(*datasets, name=None, doc=None):
    """
    Combine multiple dataset strategy factories into a single one.

    Each factory in ``datasets`` is called with the shared parameters
    (dtypes, n_samples, n_features); the returned composite strategy then
    draws from one of the resulting strategies.

    Parameters
    ----------
    *datasets: Callable[[dtypes, n_samples, n_features], SearchStrategy]
        Functions returning a dataset search strategy for the given args.
    name: str | None
        Name of the returned strategy; defaults to "datasets".
    doc: str | None
        Doc-string of the returned strategy; defaults to a generic one.

    Returns
    -------
    Datasets search strategy: SearchStrategy[array], SearchStrategy[array]
    """

    @composite
    def strategy(
        draw,
        dtypes=floating_dtypes(),
        n_samples=integers(min_value=0, max_value=200),
        n_features=integers(min_value=0, max_value=200),
    ):
        """Datasets strategy composed of multiple datasets strategies."""
        options = [
            dataset(dtypes, n_samples, n_features) for dataset in datasets
        ]
        return draw(one_of(options))

    strategy.__name__ = name if name is not None else "datasets"
    if doc is not None:
        strategy.__doc__ = doc
    return strategy
@composite
def split_datasets(
    draw,
    datasets,
    test_sizes=None,
):
    """
    Split a generic datasets strategy into train and test subsets.

    The resulting split is guaranteed to have at least one sample in both
    the train and the test part.

    Note: Uses sklearn.model_selection.train_test_split under the hood.

    Parameters
    ----------
    datasets: SearchStrategy[dataset]
        A search strategy for datasets (e.g. standard_datasets()).
    test_sizes: SearchStrategy[int | float], default=None
        Strategy for the test size; integers must lie strictly between one
        and the sample count, floats strictly between 0 and 1. Defaults to
        a strategy producing a valid unbiased split.

    Returns
    -------
    (X_train, X_test, y_train, y_test): tuple[SearchStrategy[array], ...]
        The train-test split drawn from the provided datasets strategy.
    """
    X, y = draw(datasets)
    assume(len(X) > 1)

    if test_sizes is None:
        # Default: any split leaving at least one sample on each side.
        test_sizes = integers(1, max(1, len(X) - 1))
    test_size = draw(test_sizes)

    # Reject draws that would produce an empty train or test subset.
    if isinstance(test_size, float):
        assume(int(len(X) * test_size) > 0)
        assume(int(len(X) * (1.0 - test_size)) > 0)
    elif isinstance(test_size, int):
        assume(1 < test_size < len(X))

    return train_test_split(X, y, test_size=test_size)
@composite
def standard_regression_datasets(
    draw,
    dtypes=floating_dtypes(),
    n_samples=integers(min_value=100, max_value=200),
    n_features=integers(min_value=100, max_value=200),
    *,
    n_informative=None,
    n_targets=just(1),
    bias=just(0.0),
    effective_rank=none(),
    tail_strength=just(0.5),
    noise=just(0.0),
    shuffle=just(True),
    random_state=None,
):
    """
    Returns a strategy to generate regression problem input datasets.
    Note:
    This function uses the sklearn.datasets.make_regression function to
    generate the regression problem from the provided search strategies.

    Parameters
    ----------
    dtypes: SearchStrategy[np.dtype]
        Returned arrays will have a dtype drawn from these types.
    n_samples: SearchStrategy[int]
        Returned arrays will have number of rows drawn from these values.
    n_features: SearchStrategy[int]
        Returned arrays will have number of columns drawn from these values.
    n_informative: SearchStrategy[int], default=none
        A search strategy for the number of informative features. If none,
        will use 10% of the actual number of features, but not less than 1
        unless the number of features is zero.
    n_targets: SearchStrategy[int], default=just(1)
        A search strategy for the number of targets, that means the number of
        columns of the returned y output array.
    bias: SearchStrategy[float], default=just(0.0)
        A search strategy for the bias term.
    effective_rank=none()
        If not None, a search strategy for the effective rank of the input data
        for the regression problem. See sklearn.dataset.make_regression() for a
        detailed explanation of this parameter.
    tail_strength: SearchStrategy[float], default=just(0.5)
        See sklearn.dataset.make_regression() for a detailed explanation of
        this parameter.
    noise: SearchStrategy[float], default=just(0.0)
        A search strategy for the standard deviation of the gaussian noise.
    shuffle: SearchStrategy[bool], default=just(True)
        A boolean search strategy to determine whether samples and features
        are shuffled.
    random_state: int, RandomState instance or None, default=None
        Pass a random state or integer to determine the random number
        generation for data set generation.

    Returns
    -------
    (X, y): SearchStrategy[array], SearchStrategy[array]
        A tuple of search strategies for arrays subject to the constraints of
        the provided parameters.
    """
    n_features_ = draw(n_features)
    if n_informative is None:
        # Default: ~10% of the drawn feature count, but at least 1 whenever
        # any features exist (max(min(n, 1), 0.1 * n) is 0 only for n == 0).
        n_informative = just(max(min(n_features_, 1), int(0.1 * n_features_)))
    X, y = make_regression(
        n_samples=draw(n_samples),
        n_features=n_features_,
        n_informative=draw(n_informative),
        n_targets=draw(n_targets),
        bias=draw(bias),
        effective_rank=draw(effective_rank),
        tail_strength=draw(tail_strength),
        noise=draw(noise),
        shuffle=draw(shuffle),
        random_state=random_state,
    )
    # Cast both arrays to a single dtype drawn from `dtypes`.
    dtype_ = draw(dtypes)
    return X.astype(dtype_), y.astype(dtype_)
regression_datasets = combined_datasets_strategy(
standard_datasets,
standard_regression_datasets,
name="regression_datasets",
doc="""
Returns strategy for the generation of regression problem datasets.
Drawn from the standard_datasets and the standard_regression_datasets
strategies.
""",
)
@composite
def standard_classification_datasets(
    draw,
    dtypes=floating_dtypes(),
    n_samples=integers(min_value=100, max_value=200),
    n_features=integers(min_value=10, max_value=20),
    *,
    n_informative=None,
    n_redundant=None,
    n_repeated=just(0),
    n_classes=just(2),
    n_clusters_per_class=just(2),
    weights=none(),
    flip_y=just(0.01),
    class_sep=just(1.0),
    hypercube=just(True),
    shift=just(0.0),
    scale=just(1.0),
    shuffle=just(True),
    random_state=None,
    labels_dtypes=integer_dtypes(),
):
    """
    Returns a strategy to generate classification problem input datasets.

    Wraps sklearn.datasets.make_classification: drawn values from the
    provided search strategies are forwarded to it, and the resulting
    feature/label arrays are cast to dtypes drawn from ``dtypes`` and
    ``labels_dtypes``. Draws that violate make_classification's
    requirements (feature composition, cluster count) are rejected
    via hypothesis ``assume``.
    """
    n_features_ = draw(n_features)
    if n_informative is None:
        try:
            # Try to meet:
            # log_2(n_classes * n_clusters_per_class) <= n_informative
            n_classes_min = min(_get_limits(n_classes))
            n_clusters_per_class_min = min(_get_limits(n_clusters_per_class))
            n_informative_min = int(
                np.ceil(np.log2(n_classes_min * n_clusters_per_class_min))
            )
        except AttributeError:
            # Otherwise aim for 10% of n_features, but at least 1.
            n_informative_min = max(1, int(0.1 * n_features_))
        n_informative = just(min(n_features_, n_informative_min))
    if n_redundant is None:
        # Default: ~10% of the feature count, but at least 1 when possible.
        n_redundant = just(max(min(n_features_, 1), int(0.1 * n_features_)))
    # Check whether the
    # log_2(n_classes * n_clusters_per_class) <= n_informative
    # inequality can in principle be met.
    try:
        n_classes_min = min(_get_limits(n_classes))
        n_clusters_per_class_min = min(_get_limits(n_clusters_per_class))
        n_informative_max = max(_get_limits(n_informative))
    except AttributeError:
        pass  # unable to determine limits
    else:
        if (
            np.log2(n_classes_min * n_clusters_per_class_min)
            > n_informative_max
        ):
            raise ValueError(
                "Assumptions cannot be met, the following inequality must "
                "hold: log_2(n_classes * n_clusters_per_class) "
                "<= n_informative ."
            )
    # Check base assumption concerning the composition of feature vectors.
    n_informative_ = draw(n_informative)
    n_redundant_ = draw(n_redundant)
    n_repeated_ = draw(n_repeated)
    assume(n_informative_ + n_redundant_ + n_repeated_ <= n_features_)
    # Check base assumption concerning relationship of number of clusters and
    # informative features.
    n_classes_ = draw(n_classes)
    n_clusters_per_class_ = draw(n_clusters_per_class)
    assume(np.log2(n_classes_ * n_clusters_per_class_) <= n_informative_)
    X, y = make_classification(
        n_samples=draw(n_samples),
        n_features=n_features_,
        n_informative=n_informative_,
        n_redundant=n_redundant_,
        n_repeated=n_repeated_,
        n_classes=n_classes_,
        n_clusters_per_class=n_clusters_per_class_,
        weights=draw(weights),
        flip_y=draw(flip_y),
        class_sep=draw(class_sep),
        hypercube=draw(hypercube),
        shift=draw(shift),
        scale=draw(scale),
        shuffle=draw(shuffle),
        random_state=random_state,
    )
    return X.astype(draw(dtypes)), y.astype(draw(labels_dtypes))
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/testing/utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from cuml.internals.mem_type import MemoryType
from cuml.internals.input_utils import input_to_cuml_array, is_array_like
from cuml.internals.base import Base
import cuml
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, brier_score_loss
from sklearn.datasets import make_classification, make_regression
from sklearn import datasets
from pylibraft.common.cuda import Stream
from sklearn.datasets import make_regression as skl_make_reg
from numba.cuda.cudadrv.devicearray import DeviceNDArray
from numbers import Number
from cuml.internals.safe_imports import gpu_only_import_from
from itertools import dropwhile
from copy import deepcopy
from cuml.internals.safe_imports import cpu_only_import
import inspect
from textwrap import dedent, indent
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cuda = gpu_only_import_from("numba", "cuda")
cudf = gpu_only_import("cudf")
def array_difference(a, b, with_sign=True):
    """Sum of absolute elementwise differences between two array-likes."""
    a, b = to_nparray(a), to_nparray(b)
    if len(a) == 0 and len(b) == 0:
        return 0
    if not with_sign:
        # Compare magnitudes only.
        a, b = np.abs(a), np.abs(b)
    return np.sum(np.abs(a - b))
class array_equal:
    """
    Comparison functor for two numpy-coercible arrays.

    Truthiness reports approximate equality: two elements match when they
    are within ``unit_tol`` of each other, and the arrays are considered
    equal when fewer than ``total_tol`` (as a fraction) of the elements
    mismatch.
    """

    def __init__(self, a, b, unit_tol=1e-4, total_tol=1e-4, with_sign=True):
        self.a = to_nparray(a)
        self.b = to_nparray(b)
        self.unit_tol = unit_tol
        self.total_tol = total_tol
        self.with_sign = with_sign

    def compute_difference(self):
        """Summed absolute difference between the two operands."""
        return array_difference(self.a, self.b, with_sign=self.with_sign)

    def __bool__(self):
        # Two empty arrays are trivially equal.
        if len(self.a) == 0 and len(self.b) == 0:
            return True
        if self.with_sign:
            lhs, rhs = self.a, self.b
        else:
            lhs, rhs = np.abs(self.a), np.abs(self.b)
        n_mismatched = np.sum(np.abs(lhs - rhs) > self.unit_tol)
        return bool(n_mismatched / lhs.size < self.total_tol)

    def __eq__(self, other):
        # Allow direct comparison against a plain bool in assertions.
        if isinstance(other, bool):
            return bool(self) == other
        return super().__eq__(other)

    def _repr(self, threshold=None):
        name = self.__class__.__name__
        return (
            [f"<{name}: "]
            + f"{np.array2string(self.a, threshold=threshold)} ".splitlines()
            + f"{np.array2string(self.b, threshold=threshold)} ".splitlines()
            + [
                f"unit_tol={self.unit_tol} ",
                f"total_tol={self.total_tol} ",
                f"with_sign={self.with_sign}",
                ">",
            ]
        )

    def __repr__(self):
        return "".join(self._repr(threshold=5))

    def __str__(self):
        tokens = self._repr(threshold=1000)
        return "\n".join(
            f"{' ' if 0 < n < len(tokens) - 1 else ''}{token}"
            for n, token in enumerate(tokens)
        )
def assert_array_equal(a, b, unit_tol=1e-4, total_tol=1e-4, with_sign=True):
    """
    Raises an AssertionError if arrays are not considered equal.

    Same tolerances as array_equal(), but raises with a readable message
    showing both operands — nicer in a pytest context than a bare
    `assert array_equal(...)`.
    """
    if array_equal(
        a, b, unit_tol=unit_tol, total_tol=total_tol, with_sign=with_sign
    ):
        return
    # Build indented, labelled representations of both operands.
    str_a = indent(np.array2string(a), "   ").splitlines()
    str_b = indent(np.array2string(b), "   ").splitlines()
    str_a[0] = f"a: {str_a[0][3:]}"
    str_b[0] = f"b: {str_b[0][3:]}"
    raise AssertionError(
        dedent(
            f"""
            Arrays are not equal
            unit_tol: {unit_tol}
            total_tol: {total_tol}
            with_sign: {with_sign}
            """
        )
        + "\n".join(str_a + str_b)
    )
def get_pattern(name, n_samples):
    """Return ``[data, params]`` for a named synthetic clustering pattern.

    Parameters
    ----------
    name : str
        One of "noisy_circles", "noisy_moons", "varied", "blobs", "aniso"
        or "no_structure".
    n_samples : int
        Number of samples to generate.

    Returns
    -------
    list
        ``[(X, y), params]`` where ``params`` holds suggested clustering
        hyper-parameters for the pattern.

    Raises
    ------
    ValueError
        If ``name`` is not a known pattern. (Previously an unknown name
        fell through and raised an opaque NameError on the return line.)
    """
    np.random.seed(0)
    random_state = 170
    if name == "noisy_circles":
        data = datasets.make_circles(
            n_samples=n_samples, factor=0.5, noise=0.05
        )
        params = {
            "damping": 0.77,
            "preference": -240,
            "quantile": 0.2,
            "n_clusters": 2,
        }
    elif name == "noisy_moons":
        data = datasets.make_moons(n_samples=n_samples, noise=0.05)
        params = {"damping": 0.75, "preference": -220, "n_clusters": 2}
    elif name == "varied":
        data = datasets.make_blobs(
            n_samples=n_samples,
            cluster_std=[1.0, 2.5, 0.5],
            random_state=random_state,
        )
        params = {"eps": 0.18, "n_neighbors": 2}
    elif name == "blobs":
        data = datasets.make_blobs(n_samples=n_samples, random_state=8)
        params = {}
    elif name == "aniso":
        # Anisotropically distributed blobs: apply a linear transformation.
        X, y = datasets.make_blobs(
            n_samples=n_samples, random_state=random_state
        )
        transformation = [[0.6, -0.6], [-0.4, 0.8]]
        X_aniso = np.dot(X, transformation)
        data = (X_aniso, y)
        params = {"eps": 0.15, "n_neighbors": 2}
    elif name == "no_structure":
        data = np.random.rand(n_samples, 2), None
        params = {}
    else:
        raise ValueError(f"Unknown pattern name: {name!r}")
    return [data, params]
def normalize_clusters(a0, b0, n_clusters):
    """Relabel ``b0``'s clusters so their ids line up with ``a0``'s.

    For each cluster id ``i`` in ``a``, the label that ``b`` originally
    assigned to the first sample of that cluster is remapped to ``i``.
    A frozen snapshot ``c`` of the original ``b`` labels is consulted for
    lookups so earlier remappings do not corrupt later ones.

    NOTE(review): ``to_nparray`` may return the caller's array without
    copying, so the remapping can mutate ``b0`` in place — confirm that
    callers rely on (or at least tolerate) this.
    """
    a = to_nparray(a0)
    b = to_nparray(b0)
    # Snapshot of b's original labels; reads use c, writes go to b.
    c = deepcopy(b)
    for i in range(n_clusters):
        # Index of the first sample that a assigns to cluster i.
        (idx,) = np.where(a == i)
        a_to_b = c[idx[0]]
        b[c == a_to_b] = i
    return a, b
def as_type(type, *args):
    """Convert each array argument to the requested output type.

    ``None`` and scalar arguments pass through untouched. 2-dimensional
    inputs requested as dataframe-like types ('cudf', 'pandas', 'df_obj')
    are converted via CumlArray's dataframe output so they keep two
    dimensions. Returns a single value for one argument, otherwise a tuple.
    """
    converted = []
    for arg in args:
        if arg is None or np.isscalar(arg):
            converted.append(arg)
            continue
        # Make sure X with a single feature remains 2-dimensional.
        if type in ("cudf", "pandas", "df_obj") and len(arg.shape) > 1:
            mem_type = MemoryType.host if type == "pandas" else None
            converted.append(
                input_to_cuml_array(arg).array.to_output(
                    output_type="dataframe", output_mem_type=mem_type
                )
            )
        else:
            converted.append(input_to_cuml_array(arg).array.to_output(type))
    return converted[0] if len(converted) == 1 else tuple(converted)
def to_nparray(x):
    """Coerce scalars and various device/host containers to numpy."""
    if isinstance(x, Number):
        return np.asarray([x])
    if isinstance(x, pd.DataFrame):
        return x.values
    if isinstance(x, (cudf.DataFrame, cudf.Series)):
        return x.to_pandas().values
    if isinstance(x, DeviceNDArray):
        return x.copy_to_host()
    if isinstance(x, cp.ndarray):
        return cp.asnumpy(x)
    return np.asarray(x)
def clusters_equal(a0, b0, n_clusters, tol=1e-4):
    """True when the labelings agree after cluster-id normalization."""
    norm_a, norm_b = normalize_clusters(a0, b0, n_clusters)
    return array_equal(norm_a, norm_b, total_tol=tol)
def assert_dbscan_equal(ref, actual, X, core_indices, eps):
    """
    Compare two DBSCAN label arrays.

    Core points and noise points must be labelled identically; a border
    point's label is accepted when some core point within ``eps`` of it
    carries the same label.
    """
    core_set = set(core_indices)
    n_rows, _ = X.shape
    eps_sq = eps**2

    def sqnorm(v):
        return np.inner(v, v)

    for i in range(n_rows):
        la, lb = ref[i], actual[i]
        if i in core_set:  # core point: labels must match exactly
            assert (
                la == lb
            ), "Core point mismatch at #{}: " "{} (expected {})".format(
                i, lb, la
            )
        elif la == -1:  # noise point
            assert lb == -1, "Noise mislabelled at #{}: {}".format(i, lb)
        else:  # border point: find a same-labelled core neighbor
            connected = any(
                j in core_set
                and lb == actual[j]
                and sqnorm(X[i] - X[j]) <= eps_sq
                for j in range(n_rows)
            )
            assert connected, (
                "Border point not connected to cluster at #{}: "
                "{} (reference: {})".format(i, lb, la)
            )
    # Note: we can also do it in a rand score fashion by checking that pairs
    # correspond in both label arrays for core points, if we need to drop the
    # requirement of minimality for core points
def get_handle(use_handle, n_streams=0):
    """Return a ``(handle, stream)`` pair, or ``(None, None)`` if disabled."""
    if not use_handle:
        return None, None
    stream = Stream()
    handle = cuml.Handle(stream=stream, n_streams=n_streams)
    return handle, stream
def small_regression_dataset(datatype):
    """Small regression problem split 80/20, cast to ``datatype``."""
    X, y = make_regression(
        n_samples=1000, n_features=20, n_informative=10, random_state=10
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X.astype(datatype),
        y.astype(datatype),
        train_size=0.8,
        random_state=0,
    )
    return X_train, X_test, y_train, y_test
def small_classification_dataset(datatype):
    """Small binary classification problem split 80/20.

    Features are cast to ``datatype``; labels are always int32.
    """
    X, y = make_classification(
        n_samples=500,
        n_features=20,
        n_informative=10,
        n_classes=2,
        random_state=10,
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X.astype(datatype),
        y.astype(np.int32),
        train_size=0.8,
        random_state=0,
    )
    return X_train, X_test, y_train, y_test
def unit_param(*args, **kwargs):
    """pytest.param tagged with the ``unit`` mark."""
    return pytest.param(*args, marks=pytest.mark.unit, **kwargs)
def quality_param(*args, **kwargs):
    """pytest.param tagged with the ``quality`` mark."""
    return pytest.param(*args, marks=pytest.mark.quality, **kwargs)
def stress_param(*args, **kwargs):
    """pytest.param tagged with the ``stress`` mark."""
    return pytest.param(*args, marks=pytest.mark.stress, **kwargs)
class ClassEnumerator:
    """Helper class to automatically pick up every models classes in a module.
    Filters out classes not inheriting from cuml.Base.
    Parameters
    ----------
    module: python module (ex: cuml.linear_regression)
        The module for which to retrieve models.
    exclude_classes: list of classes (optional)
        Those classes will be filtered out from the retrieved models.
    custom_constructors: dictionary of {class_name: lambda}
        Custom constructors to use instead of the default one.
        ex: {'LogisticRegression': lambda: cuml.LogisticRegression(handle=1)}
    recursive: bool, default=False
        Instructs the class to recursively search submodules when True,
        otherwise only classes in the specified model will be enumerated
    """
    def __init__(
        self,
        module,
        exclude_classes=None,
        custom_constructors=None,
        recursive=False,
    ):
        self.module = module
        # Normalize optional arguments to empty containers.
        self.exclude_classes = exclude_classes or []
        self.custom_constructors = custom_constructors or []
        self.recursive = recursive
    def _get_classes(self):
        # Walk the module (and, when recursive, its submodules), keyed by
        # fully-qualified class name to de-duplicate across submodules.
        def recurse_module(module):
            classes = {}
            modules = []
            if self.recursive:
                modules = inspect.getmembers(module, inspect.ismodule)
            # Enumerate child modules only if they are a submodule of the
            # current one. i.e. `{parent_module}.{submodule}`
            for _, m in modules:
                if module.__name__ + "." in m.__name__:
                    classes.update(recurse_module(m))
            # Ensure we only get classes that are part of this module
            classes.update(
                {
                    (".".join((klass.__module__, klass.__qualname__))): klass
                    for name, klass in inspect.getmembers(
                        module, inspect.isclass
                    )
                    if module.__name__ + "."
                    in ".".join((klass.__module__, klass.__qualname__))
                }
            )
            return classes
        # Return (short_name, class) pairs for the final models dict.
        return [
            (val.__name__, val)
            for key, val in recurse_module(self.module).items()
        ]
    def get_models(self):
        """Picks up every models classes from self.module.
        Filters out classes not inheriting from cuml.Base.
        Returns
        -------
        models: dictionary of {class_name: class|class_constructor}
            Dictionary of models in the module, except when a
            custom_constructor is specified, in that case the value is the
            specified custom_constructor.
        """
        classes = self._get_classes()
        models = {
            name: cls
            for name, cls in classes
            if cls not in self.exclude_classes and issubclass(cls, Base)
        }
        # Custom constructors override the plain class objects.
        models.update(self.custom_constructors)
        return models
def get_classes_from_package(package, import_sub_packages=False):
    """
    Return every class in ``package`` deriving from ``cuml.Base``.

    Parameters
    ----------
    package : python module
        The python module to search.
    import_sub_packages : bool, default=False
        When True, first import every subpackage (any directory under the
        package containing ``__init__.py``) so its classes are visible.

    Returns
    -------
    dict mapping class name to class, as produced by ClassEnumerator.
    """
    if import_sub_packages:
        import importlib
        import os

        # Locate the package on disk, then import every subpackage found
        # beneath it so the enumerator can see their classes.
        root_dir = os.path.dirname(package.__file__)
        root_relative = os.path.dirname(root_dir)
        for root, _, files in os.walk(root_dir):
            if "__init__.py" in files:
                module_name = os.path.relpath(root, root_relative).replace(
                    os.sep, "."
                )
                importlib.import_module(module_name)
    return ClassEnumerator(module=package, recursive=True).get_models()
def generate_random_labels(random_generation_lambda, seed=1234, as_cupy=False):
    """
    Generates random labels to act as ground_truth and predictions for tests.

    Parameters
    ----------
    random_generation_lambda : lambda [numpy.random] -> ndarray
        Called twice with a seeded RandomState to produce y_true and y_pred.
    seed : int
        Seed for the numpy.random object.
    as_cupy : bool
        When True return cupy ndarrays, otherwise numba device arrays.

    Returns
    -------
    (y_true, y_pred, np_y_true, np_y_pred) : tuple
        Device copies followed by the original numpy arrays.
    """
    rng = np.random.RandomState(seed)  # makes it reproducible
    np_y_true = random_generation_lambda(rng)
    np_y_pred = random_generation_lambda(rng)
    if as_cupy:
        return cp.array(np_y_true), cp.array(np_y_pred), np_y_true, np_y_pred
    return (
        cuda.to_device(np_y_true),
        cuda.to_device(np_y_pred),
        np_y_true,
        np_y_pred,
    )
def score_labeling_with_handle(
    func, ground_truth, predictions, use_handle, dtype=np.int32
):
    """Test helper to standardize inputs between sklearn and our prims metrics.

    Using this function we can pass python lists as input of a test just like
    with sklearn as well as an option to use handle with our metrics.
    """
    gt = cp.array(ground_truth, dtype=dtype)
    preds = cp.array(predictions, dtype=dtype)
    # Only the handle is needed; the stream from get_handle is unused here.
    handle, _ = get_handle(use_handle)
    return func(gt, preds, handle=handle)
def get_number_positional_args(func, default=2):
    """Return the number of positional arguments of *func* without defaults.

    Falls back to *default* for callables that expose no ``__code__``
    attribute (e.g. C builtins).
    """
    code = getattr(func, "__code__", None)
    if code is None:
        return default
    defaults = func.__defaults__
    n_defaults = 0 if defaults is None else len(defaults)
    return code.co_argcount - n_defaults
def get_shap_values(
    model,
    explainer,
    background_dataset,
    explained_dataset,
    api_type="shap_values",
):
    """Get SHAP values from an explainer using the SHAP-style API.

    This function isolates all explainer calls in the test suite so that
    upstream API changes only need to be handled here.

    Parameters
    ----------
    model : estimator or callable
        The model to explain; forwarded to the explainer constructor.
    explainer : class
        Explainer class following the SHAP constructor convention
        ``explainer(model=..., data=...)``.
    background_dataset : array-like
        Background data passed to the explainer constructor.
    explained_dataset : array-like
        Dataset whose predictions are explained.
    api_type : {"shap_values", "__call__"}, default="shap_values"
        Which explainer API to invoke.

    Returns
    -------
    explainer, shap_values : tuple
        The constructed explainer instance and the computed SHAP values.

    Raises
    ------
    ValueError
        If ``api_type`` is not one of the supported values. (Previously an
        unsupported value fell through and raised UnboundLocalError on
        ``shap_values``.)
    """
    explainer = explainer(model=model, data=background_dataset)
    if api_type == "shap_values":
        shap_values = explainer.shap_values(explained_dataset)
    elif api_type == "__call__":
        shap_values = explainer(explained_dataset)
    else:
        raise ValueError(
            "api_type must be 'shap_values' or '__call__', got %r" % (api_type,)
        )
    return explainer, shap_values
def generate_inputs_from_categories(
    categories=None, n_samples=10, seed=5060, as_array=False
):
    """Build a random categorical dataframe (and its row-tuple view).

    When *categories* is None a default set is used; the "strings" column is
    numeric in the ``as_array`` case so the result can be converted to a CuPy
    array. Returns ``(cupy_array, rows)`` when ``as_array`` else
    ``(cudf_dataframe, rows)``.
    """
    if categories is None:
        integer_values = list(range(1000))
        if as_array:
            categories = {
                "strings": list(range(1000, 4000, 3)),
                "integers": integer_values,
            }
        else:
            categories = {
                "strings": ["Foo", "Bar", "Baz"],
                "integers": integer_values,
            }

    rng = np.random.RandomState(seed)
    sampled = {
        name: rng.choice(values, n_samples)
        for name, values in categories.items()
    }
    pandas_df = pd.DataFrame(sampled)
    ary = from_df_to_numpy(pandas_df)

    if as_array:
        return cp.array(ary), ary
    return cudf.DataFrame.from_pandas(pandas_df), ary
def assert_inverse_equal(ours, ref):
    """Assert *ours* equals *ref*, dispatching on the container type."""
    if not isinstance(ours, cp.ndarray):
        # Assumed to be a cudf/pandas-like frame: compare via pandas.
        pd.testing.assert_frame_equal(ours.to_pandas(), ref.to_pandas())
    else:
        cp.testing.assert_array_equal(ours, ref)
def from_df_to_numpy(df):
    """Convert a dataframe into a list of row tuples."""
    if isinstance(df, pd.DataFrame):
        columns = (df[name] for name in df.columns)
    else:
        # cudf path: pull host-side values column by column.
        columns = (df[name].values_host for name in df.columns)
    return list(zip(*columns))
def compare_svm(
    svm1,
    svm2,
    X,
    y,
    b_tol=None,
    coef_tol=None,
    report_summary=False,
    cmp_decision_func=False,
):
    """Compares two svm classifiers

    Parameters:
    -----------
    svm1 : svm classifier to be tested
    svm2 : svm classifier, the correct model
    b_tol : float
        tolerance while comparing the constant in the decision functions
    coef_tol: float
        tolerance used while comparing coef_ attribute for linear SVM
    report_summary : bool
        forwarded to svm_array_equal when comparing decision functions
    cmp_decision_func : bool
        whether to compare the decision function values of the two models

    Support vector machines have a decision function:
    F(x) = sum_{i=1}^{n_sv} d_i K(x_i, x) + b,
    where n_sv is the number of support vectors, K is the kernel function, x_i
    are the support vectors, d_i are the dual coefficients (more precisely
    d = alpha_i * y_i, where alpha_i is the dual coef), and b is the intercept.
    For linear svms K(x_i, x) = x_i * x, and we can simplify F by introducing
    w = sum_{i=1}^{n_sv} d_i x_i, the normal vector of the separating
    hyperplane:
    F(x) = w * x + b.
    Mathematically the solution of the optimization should be unique, which
    means w and b should be unique.
    There could be multiple set of vectors that lead to the same w, therefore
    comparing parameters d_k, n_sv or the support vector indices can lead to
    false positives.
    We can only evaluate w for linear models, for nonlinear models we can only
    test model accuracy and intercept.
    """
    n = X.shape[0]
    accuracy1 = svm1.score(X, y)
    accuracy2 = svm2.score(X, y)

    # We use at least 0.1% tolerance for accuracy comparison
    accuracy_tol_min = 0.001
    if accuracy2 < 1:
        # Set tolerance to include the 95% confidence interval of svm2's
        # accuracy. In practice this gives 0.9% tolerance for a 90% accurate
        # model (assuming n_test = 4000).
        accuracy_tol = 1.96 * np.sqrt(accuracy2 * (1 - accuracy2) / n)
        if accuracy_tol < accuracy_tol_min:
            accuracy_tol = accuracy_tol_min
    else:
        accuracy_tol = accuracy_tol_min

    assert accuracy1 >= accuracy2 - accuracy_tol

    if b_tol is None:
        b_tol = 100 * svm1.tol  # Using default tol=1e-3 leads to b_tol=0.1
    if coef_tol is None:
        # Default resolved up-front. Previously the default was only applied
        # inside the linear-kernel branch, so "coef_tol *= 10" below raised
        # TypeError for inaccurate models on small datasets.
        coef_tol = 1e-5
    if accuracy2 < 0.5:
        # Increase error margin for classifiers that are not accurate.
        # Although analytically the classifier should always be the same,
        # we fit only until we reach a certain numerical tolerance, and
        # therefore the resulting SVM's can be different. We increase the
        # tolerance in these cases.
        #
        # A good example is the gaussian dataset with linear classifier:
        # the classes are concentric blobs, and we cannot separate that with a
        # straight line. When we have a large number of data points, then
        # any separating hyperplane that goes through the center would be good.
        b_tol *= 10
        if n >= 250:
            coef_tol = 2  # allow any direction
        else:
            coef_tol *= 10

    # Compare model parameter b (intercept). In practice some models can have
    # some differences in the model parameters while still being within
    # the accuracy tolerance.
    #
    # We skip this test for multiclass (when intercept_ is an array). Apart
    # from the larger discrepancies in multiclass case, sklearn also uses a
    # different sign convention for intercept in that case.
    if (not is_array_like(svm2.intercept_)) or svm2.intercept_.shape[0] == 1:
        if abs(svm2.intercept_) > 1e-6:
            assert (
                abs((svm1.intercept_ - svm2.intercept_) / svm2.intercept_)
                <= b_tol
            )
        else:
            assert abs((svm1.intercept_ - svm2.intercept_)) <= b_tol

    # For linear kernels we can compare the normal vector of the separating
    # hyperplane w, which is stored in the coef_ attribute.
    if svm1.kernel == "linear":
        # Cosine similarity between the two normal vectors; 1 means aligned.
        cs = np.dot(svm1.coef_, svm2.coef_.T) / (
            np.linalg.norm(svm1.coef_) * np.linalg.norm(svm2.coef_)
        )
        assert cs > 1 - coef_tol

    if cmp_decision_func:
        if accuracy2 > 0.9 and svm1.kernel != "sigmoid":
            df1 = svm1.decision_function(X)
            df2 = svm2.decision_function(X)
            # For classification, the class is determined by
            # sign(decision function). We should not expect tight match for
            # the actual value of the function, therefore we set large
            # tolerance. Forward the caller's report_summary flag (it was
            # previously hard-coded to True, ignoring the parameter).
            assert svm_array_equal(
                df1,
                df2,
                tol=1e-1,
                relative_diff=True,
                report_summary=report_summary,
            )
        else:
            print(
                "Skipping decision function test due to low accuracy",
                accuracy2,
            )

    # Compare support_ (dataset indices of points that form the support
    # vectors) and ensure that some overlap (~1/8) between two exists
    support1 = set(svm1.support_)
    support2 = set(svm2.support_)
    intersection_len = len(support1.intersection(support2))
    average_len = (len(support1) + len(support2)) / 2
    assert intersection_len > average_len / 8
def compare_probabilistic_svm(
    svc1, svc2, X_test, y_test, tol=1e-3, brier_tol=1e-3
):
    """Compare the probability output from two support vector classifiers."""
    prob1 = svc1.predict_proba(X_test)
    prob2 = svc2.predict_proba(X_test)
    assert mean_squared_error(prob1, prob2) <= tol

    if svc1.n_classes_ == 2:
        # Brier score: lower is better, so svc1 may only be worse than svc2
        # by at most brier_tol.
        brier_delta = brier_score_loss(y_test, prob1[:, 1]) - brier_score_loss(
            y_test, prob2[:, 1]
        )
        assert brier_delta <= brier_tol
def create_synthetic_dataset(
    generator=skl_make_reg,
    n_samples=100,
    n_features=10,
    test_size=0.25,
    random_state_generator=None,
    random_state_train_test_split=None,
    dtype=np.float32,
    **kwargs,
):
    """Generate a synthetic dataset and return a train/test split in *dtype*.

    Extra keyword arguments are forwarded to *generator*.
    """
    X, y = generator(
        n_samples=n_samples,
        n_features=n_features,
        random_state=random_state_generator,
        **kwargs,
    )
    split = train_test_split(
        X, y, test_size=test_size, random_state=random_state_train_test_split
    )
    # Cast every split to the requested dtype (e.g. float32 for GPU work).
    X_train, X_test, y_train, y_test = (part.astype(dtype) for part in split)
    return X_train, X_test, y_train, y_test
def svm_array_equal(a, b, tol=1e-6, relative_diff=True, report_summary=False):
    """Compare two numeric arrays elementwise within *tol*.

    Parameters
    ----------
    a, b : ndarray
        Arrays to compare; *b* is treated as the reference.
    tol : float
        Maximum allowed (relative) difference per element.
    relative_diff : bool
        If True, differences are divided by |b| wherever |b| > tol.
    report_summary : bool
        If True and the arrays differ, print the largest differences and
        summary statistics.

    Returns
    -------
    equal : numpy bool
        Whether all elements agree within tolerance.
    """
    diff = np.abs(a - b)
    if relative_diff:
        idx = np.nonzero(abs(b) > tol)
        diff[idx] = diff[idx] / abs(b[idx])
    equal = np.all(diff <= tol)

    if not equal and report_summary:
        # Flatten before sorting. The previous code called argsort on the
        # un-flattened diff, which for 2-D inputs produced per-row index
        # arrays and crashed when used to index the ravelled arrays.
        a = a.ravel()
        b = b.ravel()
        diff = diff.ravel()
        idx = np.argsort(diff)
        print("Largest diffs")
        for i in idx[-5:]:
            if diff[i] > tol:
                print(diff[i], "at", i, "values", a[i], b[i])
        # "stddev" was previously misspelled "stddiyy" in the output.
        print(
            "Avgdiff:",
            np.mean(diff),
            "stddev:",
            np.std(diff),
            "avgval:",
            np.mean(b),
        )
    return equal
def normalized_shape(shape):
    """Normalize shape to tuple: wrap a bare int, pass sequences through."""
    if isinstance(shape, int):
        return (shape,)
    return shape
def squeezed_shape(shape):
    """Remove all trailing axes of length 1 from shape.

    Similar to, but not exactly like np.squeeze().
    """
    dims = list(shape)
    while dims and dims[-1] == 1:
        dims.pop()
    return tuple(dims)
def series_squeezed_shape(shape):
    """Remove all but one axes of length 1 from shape."""
    if not shape:
        return ()
    kept = tuple(d for d in normalized_shape(shape) if d != 1)
    # A shape of all ones collapses to a single length-1 axis.
    return kept if kept else (1,)
| 0 |
rapidsai_public_repos/cuml/python/cuml/testing | rapidsai_public_repos/cuml/python/cuml/testing/dask/__init__.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml/testing | rapidsai_public_repos/cuml/python/cuml/testing/dask/utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
from sklearn.datasets import fetch_20newsgroups
from cuml.dask.common import to_sparse_dask_array
from sklearn.feature_extraction.text import HashingVectorizer
import dask
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def load_text_corpus(client):
    """Load a 4-category subset of 20 newsgroups as distributed arrays.

    Downloads (or reads from the sklearn cache) the training split of the
    20 newsgroups dataset, hashes the documents into sparse float32
    features, and distributes them via the given Dask client.

    Parameters
    ----------
    client : distributed client used by to_sparse_dask_array to scatter
        the sparse feature matrix.

    Returns
    -------
    X : sparse dask array of hashed document features (float32)
    y : dask array of int32 category labels
    """
    categories = [
        "alt.atheism",
        "soc.religion.christian",
        "comp.graphics",
        "sci.med",
    ]
    # NOTE: fetch_20newsgroups may hit the network on first use.
    twenty_train = fetch_20newsgroups(
        subset="train", categories=categories, shuffle=True, random_state=42
    )

    # alternate_sign=False keeps counts non-negative; norm=None leaves raw
    # counts so downstream transformers control normalization.
    hv = HashingVectorizer(alternate_sign=False, norm=None)

    xformed = hv.fit_transform(twenty_train.data).astype(cp.float32)

    X = to_sparse_dask_array(xformed, client)

    y = dask.array.from_array(
        twenty_train.target, asarray=False, fancy=False
    ).astype(cp.int32)
    return X, y
| 0 |
rapidsai_public_repos/cuml/python/cuml/testing | rapidsai_public_repos/cuml/python/cuml/testing/plugins/quick_run_plugin.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import pytest
import _pytest.python
def pytest_addoption(parser):
    """Register the --quick_run command line option with pytest."""
    quick_run_group = parser.getgroup("Quick Run Plugin")
    help_text = (
        "Selecting this option will reduce the number of "
        "tests run by only running parameter combinations "
        "if one of the parameters has never been seen "
        "before. Useful for testing code correctness while "
        "not running all numeric tests for the algorithms."
    )
    quick_run_group.addoption(
        "--quick_run",
        default=False,
        action="store_true",
        help=help_text,
    )
# This hook must be run last after all others as some plugins may have skipped
# some tests
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(config, items):
    """When --quick_run is set, deselect parameterized test invocations
    whose parameter values have all been seen before within the same test
    function, keeping only combinations that introduce at least one new
    parameter value."""
    quick_run = config.getoption("--quick_run")

    if quick_run:
        # Tree of collection nodes: interior nodes are dicts keyed by node
        # name, leaves are lists of test Functions for one test definition.
        root_node = {}
        leafs = []

        def get_leaf(node_list: list) -> list:
            """
            Responsible for returning the leaf test node and building any
            interior nodes in the process

            Parameters
            ----------
            node_list : list List of strings for each pytest node returned from
                `listchain()`

            Returns
            -------
            list Returns the leaf node containing a list of all tests in the
                leaf
            """
            curr_node = root_node

            for n in node_list:
                # originalname groups parameterized variants of one test
                name = getattr(n, "originalname", n.name)

                # Add the interior node if it doesn't exist. Must be a function
                # to be a leaf
                if name not in curr_node:
                    if isinstance(n, _pytest.python.Function):
                        curr_node[name] = []
                        leafs.append(curr_node[name])
                    else:
                        curr_node[name] = {}

                curr_node = curr_node[name]

            return curr_node

        # Loop over all nodes and generate a tree structure from their layout
        for item in items:
            leaf = get_leaf(item.listchain())
            leaf.append(item)

        selected_items = []
        deselected_items = []

        def process_leaf_seeonce(leaf: typing.List[_pytest.python.Function]):
            # seen maps parameter position -> list of values already selected
            seen = {}

            # Returns True if this test's parameters are too similar to another
            # test already in the selected list
            def has_been_seen(cs: _pytest.python.CallSpec2):
                for key, val in enumerate(cs._idlist):
                    if key not in seen:
                        return False

                    if val not in seen[key]:
                        return False

                return True

            # Updates the seen dictionary with the parameters from this test
            def update_seen(cs: _pytest.python.CallSpec2):
                for key, val in enumerate(cs._idlist):
                    if key not in seen:
                        seen[key] = []

                    if val not in seen[key]:
                        seen[key].append(val)

            for f in leaf:
                # If this is going to be skipped, add to deselected. No need to
                # run it
                if f.get_closest_marker("skip") is not None:
                    deselected_items.append(f)
                    continue

                # If no callspec, this is the only function call. Must be run
                if not hasattr(f, "callspec"):
                    selected_items.append(f)
                    continue

                callspec = f.callspec

                # Check if this has been seen
                if has_been_seen(callspec):
                    deselected_items.append(f)
                else:
                    # Otherwise, add to seen and selected
                    selected_items.append(f)
                    update_seen(callspec)

        # Now looping over all leafs, see which ones we can process only once
        for leaf in leafs:
            process_leaf_seeonce(leaf)

        # Deselect the skipped nodes and shorten the items list
        config.hook.pytest_deselected(items=deselected_items)
        items[:] = selected_items
| 0 |
rapidsai_public_repos/cuml/python/cuml/testing | rapidsai_public_repos/cuml/python/cuml/testing/plugins/__init__.py | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/_tfidf_vectorizer.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions of this code are derived from the scikit-learn feature_extraction
# package, which has the following license:
#
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
#
from cuml.feature_extraction._vectorizers import CountVectorizer
from cuml.feature_extraction._tfidf import TfidfTransformer
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to :class:`CountVectorizer` followed by
    :class:`TfidfTransformer`.

    Parameters
    ----------
    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the input documents.
        If None, no stop words will be used. max_df can be set to a value
        to automatically detect and filter stop words based on intra corpus
        document frequency of terms.
    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        such that min_n <= n <= max_n will be used. For example an
        ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
        unigrams and bigrams, and ``(2, 2)`` means only bigrams.
    analyzer : string, {'word', 'char', 'char_wb'}, default='word'
        Whether the feature should be made of word n-gram or character
        n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries; n-grams at the edges of words are padded with space.
    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.
    vocabulary : cudf.Series, optional
        If not given, a vocabulary is determined from the input documents.
    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().
    delimiter : str, whitespace by default
        String used as a replacement for stop words if stop_words is not None.
        Typically the delimiting character between words is a good choice.
    norm : {'l1', 'l2'}, default='l2'
        Each output row will have unit norm, either:
        * 'l2': Sum of squares of vector elements is 1. The cosine similarity
          between two vectors is their dot product when l2 norm has been
          applied.
        * 'l1': Sum of absolute values of vector elements is 1.
    use_idf : bool, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : bool, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : bool, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array of shape (n_features)
        The inverse document frequency (IDF) vector; only defined
        if `use_idf` is True.
    vocabulary_ : cudf.Series[str]
        Array mapping from feature integer indices to feature name.
    stop_words_ : cudf.Series[str]
        Terms that were ignored because they either:
          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.

    This class is largely based on scikit-learn 0.23.1's TfIdfVectorizer code,
    which is provided under the BSD-3 license.
    """

    def __init__(
        self,
        input=None,
        encoding=None,
        decode_error=None,
        strip_accents=None,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        stop_words=None,
        token_pattern=None,
        ngram_range=(1, 1),
        analyzer="word",
        max_df=1.0,
        min_df=1,
        max_features=None,
        vocabulary=None,
        binary=False,
        dtype=cp.float32,
        delimiter=" ",
        norm="l2",
        use_idf=True,
        smooth_idf=True,
        sublinear_tf=False,
    ):
        super().__init__(
            input=input,
            encoding=encoding,
            decode_error=decode_error,
            strip_accents=strip_accents,
            lowercase=lowercase,
            preprocessor=preprocessor,
            tokenizer=tokenizer,
            analyzer=analyzer,
            stop_words=stop_words,
            token_pattern=token_pattern,
            ngram_range=ngram_range,
            max_df=max_df,
            min_df=min_df,
            max_features=max_features,
            vocabulary=vocabulary,
            binary=binary,
            dtype=dtype,
            delimiter=delimiter,
        )
        # Counting is delegated to CountVectorizer; weighting to this
        # internal transformer.
        self._tfidf = TfidfTransformer(
            norm=norm,
            use_idf=use_idf,
            smooth_idf=smooth_idf,
            sublinear_tf=sublinear_tf,
        )

    # Broadcast the TF-IDF parameters to the underlying transformer instance
    # for easy grid search and repr

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    @idf_.setter
    def idf_(self, value):
        if hasattr(self, "vocabulary_"):
            if len(self.vocabulary_) != len(value):
                # Fix: compare against the fitted vocabulary_ attribute.
                # Previously this read self.vocabulary (the constructor
                # argument), which may be None and raised TypeError instead
                # of the intended ValueError.
                raise ValueError(
                    "idf length = %d must be equal "
                    "to vocabulary size = %d"
                    % (len(value), len(self.vocabulary_))
                )
        self._tfidf.idf_ = value

    def fit(self, raw_documents):
        """Learn vocabulary and idf from training set.

        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
           A Series of string documents

        Returns
        -------
        self : object
            Fitted vectorizer.
        """
        X = super().fit_transform(raw_documents)
        self._tfidf.fit(X)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return document-term matrix.

        This is equivalent to fit followed by transform, but more efficiently
        implemented.

        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
           A Series of string documents
        y : None
            Ignored.

        Returns
        -------
        X : cupy csr array of shape (n_samples, n_features)
            Tf-idf-weighted document-term matrix.
        """
        X = super().fit_transform(raw_documents)
        self._tfidf.fit(X)
        # X is already a transformed view of raw_documents so
        # we set copy to False
        return self._tfidf.transform(X, copy=False)

    def transform(self, raw_documents):
        """Transform documents to document-term matrix.

        Uses the vocabulary and document frequencies (df) learned by fit (or
        fit_transform).

        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
           A Series of string documents

        Returns
        -------
        X : cupy csr array of shape (n_samples, n_features)
            Tf-idf-weighted document-term matrix.
        """
        X = super().transform(raw_documents)
        return self._tfidf.transform(X, copy=False)

    def get_feature_names(self):
        """
        Array mapping from feature integer indices to feature name.

        Returns
        -------
        feature_names : Series
            A list of feature names.
        """
        return super().get_feature_names()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/_tfidf.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml import Base
from cuml.internals.array import CumlArray
from cuml.common.sparsefuncs import csr_diag_mul
from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2
import cuml.internals
from cuml.common.exceptions import NotFittedError
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
def _sparse_document_frequency(X):
    """Count the number of non-zero values for each feature in sparse X."""
    if not cupyx.scipy.sparse.isspmatrix_csr(X):
        # Non-CSR (CSC) path: indptr delimits columns, so its diff gives the
        # per-feature nonzero count directly.
        return cp.diff(X.indptr)
    # CSR path: column indices identify features; count their occurrences.
    return cp.bincount(X.indices, minlength=X.shape[1])
def _get_dtype(X):
"""
Returns the valid dtype for tf-idf transformer
"""
import numpy as np
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else cp.float32
return dtype
class TfidfTransformer(Base):
"""
Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf for a term t of a document d
in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
n is the total number of documents in the document set and df(t) is the
document frequency of t; the document frequency is the number of documents
in the document set that contain the term t. The effect of adding "1" to
the idf in the equation above is that terms with zero idf, i.e., terms
that occur in all documents in a training set, will not be entirely
ignored.
(Note that the idf formula above differs from the standard textbook
notation that defines the idf as
idf(t) = log [ n / (df(t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Parameters
----------
norm : {'l1', 'l2'}, default='l2'
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine similarity
between two vectors is their dot product when l2 norm has been
applied.
* 'l1': Sum of absolute values of vector elements is 1.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
idf_ : array of shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
"""
def __init__(
self,
*,
norm="l2",
use_idf=True,
smooth_idf=True,
sublinear_tf=False,
handle=None,
verbose=False,
output_type=None,
):
super().__init__(
handle=handle, verbose=verbose, output_type=output_type
)
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def _set_doc_stats(self, X):
"""
We set the following document level statistics here:
n_samples
n_features
df(document frequency)
"""
# Should not have a cost if already sparse
output_dtype = _get_dtype(X)
X = self._convert_to_csr(X, output_dtype)
n_samples, n_features = X.shape
df = _sparse_document_frequency(X)
df = df.astype(output_dtype, copy=False)
self.__df = CumlArray(df)
self.__n_samples = n_samples
self.__n_features = n_features
return
def _set_idf_diag(self):
"""
Sets idf_diagonal sparse array
"""
# perform idf smoothing if required
df = self.__df.to_output("cupy") + int(self.smooth_idf)
n_samples = self.__n_samples + int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = cp.log(n_samples / df) + 1
self._idf_diag = cupyx.scipy.sparse.dia_matrix(
(idf, 0),
shape=(self.__n_features, self.__n_features),
dtype=df.dtype,
)
# Free up memory occupied by below
del self.__df
@cuml.internals.api_base_return_any_skipall
def fit(self, X) -> "TfidfTransformer":
"""Learn the idf vector (global term weights).
Parameters
----------
X : array-like of shape n_samples, n_features
A matrix of term/token counts.
"""
output_dtype = _get_dtype(X)
X = self._convert_to_csr(X, output_dtype)
if self.use_idf:
self._set_doc_stats(X)
self._set_idf_diag()
return self
@cuml.internals.api_base_return_any_skipall
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : array-like of (n_samples, n_features)
A matrix of term/token counts
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : array-like of shape (n_samples, n_features)
"""
if copy:
X = X.copy()
dtype = _get_dtype(X)
X = self._convert_to_csr(X, dtype)
if X.dtype != dtype:
X = X.astype(dtype)
n_samples, n_features = X.shape
if self.sublinear_tf:
cp.log(X.data, X.data)
X.data += 1
if self.use_idf:
self._check_is_idf_fitted()
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError(
"Input has n_features=%d while the model"
" has been trained with n_features=%d"
% (n_features, expected_n_features)
)
csr_diag_mul(X, self._idf_diag, inplace=True)
if self.norm:
if self.norm == "l1":
csr_row_normalize_l1(X, inplace=True)
elif self.norm == "l2":
csr_row_normalize_l2(X, inplace=True)
return X
@cuml.internals.api_base_return_any_skipall
def fit_transform(self, X, copy=True):
"""
Fit TfidfTransformer to X, then transform X.
Equivalent to fit(X).transform(X).
Parameters
----------
X : array-like of (n_samples, n_features)
A matrix of term/token counts
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : array-like of shape (n_samples, n_features)
"""
return self.fit(X).transform(X, copy=copy)
def _check_is_idf_fitted(self):
if not hasattr(self, "idf_"):
msg = (
"This TfidfTransformer instance is not fitted or the "
"value of use_idf is not consistent between "
".fit() and .transform()."
)
raise NotFittedError(msg)
def _convert_to_csr(self, X, dtype):
"""Convert array to CSR format if it not sparse nor CSR."""
if not cupyx.scipy.sparse.isspmatrix_csr(X):
if not cupyx.scipy.sparse.issparse(X):
X = cupyx.scipy.sparse.csr_matrix(X.astype(dtype))
else:
X = X.tocsr()
return X
    @property
    def idf_(self):
        """Inverse document frequency vector (defined only after fitting)."""
        # if _idf_diag is not set, this will raise an attribute error,
        # which means hasattr(self, "idf_") is False
        return self._idf_diag.data
    @idf_.setter
    def idf_(self, value):
        # Assigning idf_ directly rebuilds the (n_features x n_features)
        # diagonal matrix consumed by transform(); values are coerced to
        # float32 on the GPU.
        value = cp.asarray(value, dtype=cp.float32)
        n_features = value.shape[0]
        self._idf_diag = cupyx.scipy.sparse.dia_matrix(
            (value, 0), shape=(n_features, n_features), dtype=cp.float32
        )
def get_param_names(self):
return super().get_param_names() + [
"norm",
"use_idf",
"smooth_idf",
"sublinear_tf",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/_stop_words.py | # Portion of this code are derived from the scikit-learn feature_extraction
# package, which has the following license:
#
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
#
# This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
# NOTE(review): in addition to the Glasgow list, every single ASCII letter
# "a"-"z" is included at the end of this set — presumably so stray
# single-character tokens produced by the GPU tokenizers are dropped;
# verify against the vectorizers before relying on this.
ENGLISH_STOP_WORDS = frozenset([
    "about", "above", "across", "after", "afterwards", "again", "against",
    "all", "almost", "alone", "along", "already", "also", "although", "always",
    "am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
    "any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
    "around", "as", "at", "back", "be", "became", "because", "become",
    "becomes", "becoming", "been", "before", "beforehand", "behind", "being",
    "below", "beside", "besides", "between", "beyond", "bill", "both",
    "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
    "could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
    "down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
    "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
    "everything", "everywhere", "except", "few", "fifteen", "fifty", "fill",
    "find", "fire", "first", "five", "for", "former", "formerly", "forty",
    "found", "four", "from", "front", "full", "further", "get", "give", "go",
    "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
    "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
    "how", "however", "hundred", "ie", "if", "in", "inc", "indeed",
    "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
    "latterly", "least", "less", "ltd", "made", "many", "may", "me",
    "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
    "move", "much", "must", "my", "myself", "name", "namely", "neither",
    "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
    "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
    "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
    "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
    "please", "put", "rather", "re", "same", "see", "seem", "seemed",
    "seeming", "seems", "serious", "several", "she", "should", "show", "side",
    "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
    "something", "sometime", "sometimes", "somewhere", "still", "such",
    "system", "take", "ten", "than", "that", "the", "their", "them",
    "themselves", "then", "thence", "there", "thereafter", "thereby",
    "therefore", "therein", "thereupon", "these", "they", "thick", "thin",
    "third", "this", "those", "though", "three", "through", "throughout",
    "thru", "thus", "to", "together", "too", "top", "toward", "towards",
    "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
    "very", "via", "was", "we", "well", "were", "what", "whatever", "when",
    "whence", "whenever", "where", "whereafter", "whereas", "whereby",
    "wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
    "who", "whoever", "whole", "whom", "whose", "why", "will", "with",
    "within", "without", "would", "yet", "you", "your", "yours", "yourself",
    "yourselves", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
    "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/_vectorizers.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
import cuml.internals.logger as logger
from cudf.utils.dtypes import min_signed_type
from cuml.internals.type_utils import CUPY_SPARSE_DTYPES
import numbers
from cuml.internals.safe_imports import gpu_only_import
from functools import partial
from cuml.common.sparsefuncs import create_csr_matrix_from_count_df
from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2
from cuml.feature_extraction._stop_words import ENGLISH_STOP_WORDS
from cuml.common.exceptions import NotFittedError
from cuml.internals.safe_imports import gpu_only_import_from
Series = gpu_only_import_from("cudf", "Series")
cp = gpu_only_import("cupy")
cudf = gpu_only_import("cudf")
pd = cpu_only_import("pandas")
def _preprocess(
    doc,
    lower=False,
    remove_non_alphanumeric=False,
    delimiter=" ",
    keep_underscore_char=True,
    remove_single_token_len=True,
):
    """
    Chain together an optional series of text preprocessing steps to
    apply to a document.
    Parameters
    ----------
    doc: cudf.Series[str] or pd.Series[str]
        The string to preprocess
    lower: bool
        Whether to use str.lower to lowercase all of the text
    remove_non_alphanumeric: bool
        Whether or not to remove non-alphanumeric characters.
    delimiter: str
        Accepted for API symmetry with the vectorizers.
        NOTE(review): currently unused in this function; non-alphanumeric
        characters are always replaced with a literal space below.
    keep_underscore_char: bool
        Whether or not to keep the underscore character
    remove_single_token_len: bool
        Whether to drop tokens of length 1 (only applied when
        remove_non_alphanumeric is True, mirroring scikit-learn).
    Returns
    -------
    doc: cudf.Series[str]
        preprocessed string
    """
    # Accept host (pandas) input by moving it to the GPU first.
    if isinstance(doc, pd.Series):
        doc = Series(doc)
    if lower:
        doc = doc.str.lower()
    if remove_non_alphanumeric:
        if keep_underscore_char:
            # why: sklearn by default keeps `_` char along with alphanumerics
            # currently we dont have a easy way of removing
            # all chars but `_`
            # in cudf.Series[str] below works around it
            temp_string = "cumlSt"
            doc = doc.str.replace("_", temp_string, regex=False)
            doc = doc.str.filter_alphanum(" ", keep=True)
            doc = doc.str.replace(temp_string, "_", regex=False)
        else:
            doc = doc.str.filter_alphanum(" ", keep=True)
        # sklearn by default removes tokens of
        # length 1, if its remove alphanumerics
        if remove_single_token_len:
            doc = doc.str.filter_tokens(2)
    return doc
class _VectorizerMixin:
    """
    Provides common code for text vectorizers (tokenization logic).
    """
    def _remove_stop_words(self, doc):
        """
        Remove stop words only if needed.
        """
        # Stop-word removal only applies to the 'word' analyzer; for char
        # analyzers 'stop_words' is ignored (see _warn_for_unused_params).
        if self.analyzer == "word" and self.stop_words is not None:
            stop_words = Series(self._get_stop_words())
            doc = doc.str.replace_tokens(
                stop_words,
                replacements=self.delimiter,
                delimiter=self.delimiter,
            )
        return doc
    def build_preprocessor(self):
        """
        Return a function to preprocess the text before tokenization.
        If analyzer == 'word' and stop_words is not None, stop words are
        removed from the input documents after preprocessing.
        Returns
        -------
        preprocessor: callable
            A function to preprocess the text before tokenization.
        """
        if self.preprocessor is not None:
            preprocess = self.preprocessor
        else:
            # Non-alphanumeric stripping mimics sklearn's default word
            # token pattern; char analyzers keep punctuation.
            remove_non_alpha = self.analyzer == "word"
            preprocess = partial(
                _preprocess,
                lower=self.lowercase,
                remove_non_alphanumeric=remove_non_alpha,
                delimiter=self.delimiter,
            )
        return lambda doc: self._remove_stop_words(preprocess(doc))
    def _get_stop_words(self):
        """
        Build or fetch the effective stop words list.
        Returns
        -------
        stop_words: list or None
            A list of stop words.
        """
        if self.stop_words == "english":
            return list(ENGLISH_STOP_WORDS)
        elif isinstance(self.stop_words, str):
            raise ValueError("not a built-in stop list: %s" % self.stop_words)
        elif self.stop_words is None:
            return None
        else:  # assume it's a collection
            return list(self.stop_words)
    def get_char_ngrams(self, ngram_size, str_series, doc_id_sr):
        """
        Handles ngram generation for characters analyzers.
        When analyzer is 'char_wb', we generate ngrams within word boundaries,
        meaning we need to first tokenize and pad each token with a delimiter.
        """
        if self.analyzer == "char_wb" and ngram_size != 1:
            token_count = str_series.str.token_count(delimiter=self.delimiter)
            tokens = str_series.str.tokenize(self.delimiter)
            del str_series
            # Wrap every token with the delimiter on both sides so that
            # character ngrams never span a word boundary.
            padding = Series(self.delimiter).repeat(len(tokens))
            tokens = tokens.str.cat(padding)
            padding = padding.reset_index(drop=True)
            tokens = padding.str.cat(tokens)
            tokens = tokens.reset_index(drop=True)
            ngram_sr = tokens.str.character_ngrams(n=ngram_size)
            doc_id_df = cudf.DataFrame(
                {
                    "doc_id": doc_id_sr.repeat(token_count).reset_index(
                        drop=True
                    ),
                    # formula to count ngrams given number of letters per token:
                    "ngram_count": tokens.str.len() - (ngram_size - 1),
                }
            )
            del tokens
            ngram_count = doc_id_df.groupby("doc_id", sort=True).sum()[
                "ngram_count"
            ]
            return ngram_sr, ngram_count, token_count
        # ngram_size == 1 is identical for 'char' and 'char_wb': every
        # character is its own token, no padding needed.
        if ngram_size == 1:
            token_count = str_series.str.len()
            ngram_sr = str_series.str.character_tokenize()
            del str_series
        elif self.analyzer == "char":
            token_count = str_series.str.len()
            ngram_sr = str_series.str.character_ngrams(n=ngram_size)
            del str_series
        ngram_count = token_count - (ngram_size - 1)
        return ngram_sr, ngram_count, token_count
    def get_ngrams(self, str_series, ngram_size, doc_id_sr):
        """
        This returns the ngrams for the string series
        Parameters
        ----------
        str_series : (cudf.Series)
            String series to tokenize
        ngram_size : int
            Gram level to get (1 for unigram, 2 for bigram etc)
        doc_id_sr : cudf.Series
            Int series containing documents ids
        Returns
        -------
        tokenized_df : cudf.DataFrame
            Columns 'doc_id' and 'token' with one row per generated ngram.
        """
        if self.analyzer == "word":
            token_count_sr = str_series.str.token_count(self.delimiter)
            ngram_sr = str_series.str.ngrams_tokenize(
                n=ngram_size, separator=" ", delimiter=self.delimiter
            )
            # formula to count ngrams given number of tokens x per doc: x-(n-1)
            ngram_count = token_count_sr - (ngram_size - 1)
        else:
            ngram_sr, ngram_count, token_count_sr = self.get_char_ngrams(
                ngram_size, str_series, doc_id_sr
            )
        # Documents with no tokens produce no ngrams; drop them before
        # repeating doc ids so the repeat counts stay non-negative.
        not_empty_docs = token_count_sr > 0
        doc_id_sr = doc_id_sr[not_empty_docs]
        ngram_count = ngram_count[not_empty_docs]
        doc_id_sr = doc_id_sr.repeat(ngram_count).reset_index(drop=True)
        tokenized_df = cudf.DataFrame()
        tokenized_df["doc_id"] = doc_id_sr
        ngram_sr = ngram_sr.reset_index(drop=True)
        tokenized_df["token"] = ngram_sr
        return tokenized_df
    def _create_tokenized_df(self, docs):
        """
        Creates a tokenized DataFrame from a string Series.
        Each row describes the token string and the corresponding document id.
        """
        min_n, max_n = self.ngram_range
        doc_id = cp.arange(start=0, stop=len(docs), dtype=cp.int32)
        doc_id = Series(doc_id)
        # One pass per ngram size in the requested range; results are
        # concatenated into a single (doc_id, token) table.
        tokenized_df_ls = [
            self.get_ngrams(docs, n, doc_id) for n in range(min_n, max_n + 1)
        ]
        del docs
        tokenized_df = cudf.concat(tokenized_df_ls)
        tokenized_df = tokenized_df.reset_index(drop=True)
        return tokenized_df
    def _compute_empty_doc_ids(self, count_df, n_doc):
        """
        Compute empty docs ids using the remaining docs, given the total number
        of documents.
        """
        remaining_docs = count_df["doc_id"].unique()
        dtype = min_signed_type(n_doc)
        doc_ids = cudf.DataFrame(
            data={"all_ids": cp.arange(0, n_doc, dtype=dtype)}, dtype=dtype
        )
        # Index-aligned subtraction: rows present in remaining_docs give 0,
        # rows missing from it give null — the null rows mark empty docs.
        empty_docs = doc_ids - doc_ids.iloc[remaining_docs]
        empty_ids = empty_docs[empty_docs["all_ids"].isnull()].index.values
        return empty_ids
    def _validate_params(self):
        """
        Check validity of ngram_range parameter
        """
        min_n, max_m = self.ngram_range
        msg = ""
        if min_n < 1:
            msg += "lower boundary must be >= 1. "
        if min_n > max_m:
            msg += "lower boundary larger than the upper boundary. "
        if msg != "":
            msg = f"Invalid value for ngram_range={self.ngram_range} {msg}"
            raise ValueError(msg)
        # n_features only exists on HashingVectorizer.
        if hasattr(self, "n_features"):
            if not isinstance(self.n_features, numbers.Integral):
                raise TypeError(
                    f"n_features must be integral, got {self.n_features}\
                    ({type(self.n_features)})."
                )
    def _warn_for_unused_params(self):
        # Warn (rather than raise) to stay consistent with sklearn.
        if self.analyzer != "word" and self.stop_words is not None:
            logger.warn(
                "The parameter 'stop_words' will not be used"
                " since 'analyzer' != 'word'"
            )
    def _check_sklearn_params(self, analyzer, sklearn_params):
        """Reject sklearn-only constructor arguments that cuML ignores."""
        if callable(analyzer):
            raise ValueError(
                "cuML does not support callable analyzer,"
                " please refer to the cuML documentation for"
                " more information."
            )
        for key, vals in sklearn_params.items():
            if vals is not None:
                raise TypeError(
                    "The Scikit-learn variable",
                    key,
                    " is not supported in cuML,"
                    " please read the cuML documentation for"
                    " more information.",
                )
def _document_frequency(X):
"""
Count the number of non-zero values for each feature in X.
"""
doc_freq = X[["token", "doc_id"]].groupby(["token"], sort=True).count()
return doc_freq["doc_id"].values
def _term_frequency(X):
"""
Count the number of occurrences of each term in X.
"""
term_freq = X[["token", "count"]].groupby(["token"], sort=True).sum()
return term_freq["count"].values
class CountVectorizer(_VectorizerMixin):
    """
    Convert a collection of text documents to a matrix of token counts
    If you do not provide an a-priori dictionary then the number of features
    will be equal to the vocabulary size found by analyzing the data.
    Parameters
    ----------
    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the input documents.
        If None, no stop words will be used. max_df can be set to a value
        to automatically detect and filter stop words based on intra corpus
        document frequency of terms.
    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n such
        such that min_n <= n <= max_n will be used. For example an
        ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
        unigrams and bigrams, and ``(2, 2)`` means only bigrams.
    analyzer : string, {'word', 'char', 'char_wb'}
        Whether the feature should be made of word n-gram or character
        n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries; n-grams at the edges of words are padded with space.
    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.
    vocabulary : cudf.Series, optional
        If not given, a vocabulary is determined from the input documents.
    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().
    delimiter : str, whitespace by default
        String used as a replacement for stop words if stop_words is not None.
        Typically the delimiting character between words is a good choice.
    Attributes
    ----------
    vocabulary_ : cudf.Series[str]
        Array mapping from feature integer indices to feature name.
    stop_words_ : cudf.Series[str]
        Terms that were ignored because they either:
          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.
    """
    def __init__(
        self,
        input=None,
        encoding=None,
        decode_error=None,
        strip_accents=None,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        stop_words=None,
        token_pattern=None,
        ngram_range=(1, 1),
        analyzer="word",
        max_df=1.0,
        min_df=1,
        max_features=None,
        vocabulary=None,
        binary=False,
        dtype=cp.float32,
        delimiter=" ",
    ):
        # sklearn-only parameters (input, encoding, ...) are accepted for
        # signature compatibility but rejected below if set.
        self.preprocessor = preprocessor
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df or min_df")
        self.max_features = max_features
        if max_features is not None:
            if not isinstance(max_features, int) or max_features <= 0:
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features
                )
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype
        self.delimiter = delimiter
        # CuPy sparse matrices only support a restricted dtype set.
        if dtype not in CUPY_SPARSE_DTYPES:
            msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
            raise ValueError(msg)
        sklearn_params = {
            "input": input,
            "encoding": encoding,
            "decode_error": decode_error,
            "strip_accents": strip_accents,
            "tokenizer": tokenizer,
            "token_pattern": token_pattern,
        }
        self._check_sklearn_params(analyzer, sklearn_params)
    def _count_vocab(self, tokenized_df):
        """
        Count occurrences of tokens in each document.
        """
        # Transform string tokens into token indexes from 0 to len(vocab)
        # The indexes are based on lexicographical ordering.
        tokenized_df["token"] = tokenized_df["token"].astype("category")
        # NOTE(review): tokens absent from the vocabulary get a null code
        # here and are presumably dropped by the groupby below — verify
        # against the cudf version in use.
        tokenized_df["token"] = (
            tokenized_df["token"]
            .cat.set_categories(self.vocabulary_)
            ._column.codes
        )
        # Count of each token in each document
        count_df = (
            tokenized_df[["doc_id", "token"]]
            .groupby(["doc_id", "token"], sort=True)
            .size()
            .reset_index()
            .rename({0: "count"}, axis=1)
        )
        return count_df
    def _filter_and_renumber(self, df, keep_values, column):
        """
        Filter dataframe to keep only values from column matching
        keep_values.
        """
        # Recode the column against the kept values; anything filtered out
        # becomes null and is removed by the dropna below.
        df[column] = (
            df[column]
            .astype("category")
            .cat.set_categories(keep_values)
            ._column.codes
        )
        df = df.dropna(subset=column)
        return df
    def _limit_features(self, count_df, vocab, high, low, limit):
        """
        Remove too rare or too common features.
        Prune features that are non zero in more samples than high or less
        documents than low, modifying the vocabulary, and restricting it to
        at most the limit most frequent.
        Sets `self.vocabulary_` and `self.stop_words_` with the new values.
        """
        if high is None and low is None and limit is None:
            self.stop_words_ = None
            return count_df
        document_frequency = _document_frequency(count_df)
        mask = cp.ones(len(document_frequency), dtype=bool)
        if high is not None:
            mask &= document_frequency <= high
        if low is not None:
            mask &= document_frequency >= low
        if limit is not None and mask.sum() > limit:
            # Keep only the `limit` most frequent of the surviving terms.
            term_frequency = _term_frequency(count_df)
            mask_inds = (-term_frequency[mask]).argsort()[:limit]
            new_mask = cp.zeros(len(document_frequency), dtype=bool)
            new_mask[cp.where(mask)[0][mask_inds]] = True
            mask = new_mask
        keep_idx = cp.where(mask)[0].astype(cp.int32)
        keep_num = keep_idx.shape[0]
        if keep_num == 0:
            raise ValueError(
                "After pruning, no terms remain. Try a lower"
                " min_df or a higher max_df."
            )
        # Only renumber when something was actually pruned.
        if len(vocab) - keep_num != 0:
            count_df = self._filter_and_renumber(count_df, keep_idx, "token")
        self.stop_words_ = vocab[~mask].reset_index(drop=True)
        self.vocabulary_ = vocab[mask].reset_index(drop=True)
        return count_df
    def _preprocess(self, raw_documents):
        # Apply lowercasing / filtering / stop-word removal per the
        # constructor settings.
        preprocess = self.build_preprocessor()
        return preprocess(raw_documents)
    def fit(self, raw_documents, y=None):
        """
        Build a vocabulary of all tokens in the raw documents.
        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
            A Series of string documents
        y : None
            Ignored.
        Returns
        -------
        self
        """
        self.fit_transform(raw_documents)
        return self
    def fit_transform(self, raw_documents, y=None):
        """
        Build the vocabulary and return document-term matrix.
        Equivalent to ``self.fit(X).transform(X)`` but preprocess `X` only
        once.
        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
            A Series of string documents
        y : None
            Ignored.
        Returns
        -------
        X : cupy csr array of shape (n_samples, n_features)
            Document-term matrix.
        """
        self._warn_for_unused_params()
        self._validate_params()
        self._fixed_vocabulary = self.vocabulary is not None
        docs = self._preprocess(raw_documents)
        n_doc = len(docs)
        tokenized_df = self._create_tokenized_df(docs)
        if self._fixed_vocabulary:
            self.vocabulary_ = self.vocabulary
        else:
            # Vocabulary is the sorted set of distinct tokens seen.
            self.vocabulary_ = (
                tokenized_df["token"].drop_duplicates().sort_values()
            )
        count_df = self._count_vocab(tokenized_df)
        if not self._fixed_vocabulary:
            # Convert proportion thresholds to absolute document counts.
            max_doc_count = (
                self.max_df
                if isinstance(self.max_df, numbers.Integral)
                else self.max_df * n_doc
            )
            min_doc_count = (
                self.min_df
                if isinstance(self.min_df, numbers.Integral)
                else self.min_df * n_doc
            )
            if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to < documents than min_df"
                )
            count_df = self._limit_features(
                count_df,
                self.vocabulary_,
                max_doc_count,
                min_doc_count,
                self.max_features,
            )
        # Documents that lost all their tokens still need an (all-zero)
        # row in the output matrix.
        empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
        X = create_csr_matrix_from_count_df(
            count_df,
            empty_doc_ids,
            n_doc,
            len(self.vocabulary_),
            dtype=self.dtype,
        )
        if self.binary:
            X.data.fill(1)
        return X
    def transform(self, raw_documents):
        """
        Transform documents to document-term matrix.
        Extract token counts out of raw text documents using the vocabulary
        fitted with fit or the one provided to the constructor.
        Parameters
        ----------
        raw_documents : cudf.Series or pd.Series
            A Series of string documents
        Returns
        -------
        X : cupy csr array of shape (n_samples, n_features)
            Document-term matrix.
        """
        if not hasattr(self, "vocabulary_"):
            if self.vocabulary is not None:
                self.vocabulary_ = self.vocabulary
            else:
                raise NotFittedError()
        docs = self._preprocess(raw_documents)
        n_doc = len(docs)
        tokenized_df = self._create_tokenized_df(docs)
        count_df = self._count_vocab(tokenized_df)
        empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
        X = create_csr_matrix_from_count_df(
            count_df,
            empty_doc_ids,
            n_doc,
            len(self.vocabulary_),
            dtype=self.dtype,
        )
        if self.binary:
            X.data.fill(1)
        return X
    def inverse_transform(self, X):
        """
        Return terms per document with nonzero entries in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Document-term matrix.
        Returns
        -------
        X_inv : list of cudf.Series of shape (n_samples,)
            List of Series of terms.
        """
        vocab = Series(self.vocabulary_)
        # One row slice per document; `indices` holds the nonzero columns.
        return [vocab[X[i, :].indices] for i in range(X.shape[0])]
    def get_feature_names(self):
        """
        Array mapping from feature integer indices to feature name.
        Returns
        -------
        feature_names : Series
            A list of feature names.
        """
        return self.vocabulary_
class HashingVectorizer(_VectorizerMixin):
"""
Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a cupyx.scipy.sparse matrix
holding token occurrence counts (or binary occurrence information),
possibly normalized as token frequencies if norm='l1' or projected on the
euclidean unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory which is even more important
as GPU's that are often memory constrained
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as
there is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices
to string feature names) which can be a problem when trying to
introspect which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
stop_words : string {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
word n-grams or char n-grams to be extracted. All values of n such
such that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
analyzer : string, {'word', 'char', 'char_wb'}
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
delimiter : str, whitespace by default
String used as a replacement for stop words if `stop_words` is not
None. Typically the delimiting character between words is a good
choice.
Examples
--------
.. code-block:: python
>>> from cuml.feature_extraction.text import HashingVectorizer
>>> import pandas as pd
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = HashingVectorizer(n_features=2**4)
>>> X = vectorizer.fit_transform(pd.Series(corpus))
>>> print(X.shape)
(4, 16)
See Also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(
self,
input=None,
encoding=None,
decode_error=None,
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=None,
ngram_range=(1, 1),
analyzer="word",
n_features=(2**20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=cp.float32,
delimiter=" ",
):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
if self.norm not in ("l1", "l2", None):
raise ValueError(f"{self.norm} is not a supported norm")
sklearn_params = {
"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern,
}
self._check_sklearn_params(analyzer, sklearn_params)
def partial_fit(self, X, y=None):
"""
Does nothing: This transformer is stateless
This method is just there to mark the fact that this transformer
can work in a streaming setup.
Parameters
----------
X : cudf.Series(A Series of string documents).
"""
return self
def fit(self, X, y=None):
"""
This method only checks the input type and the model parameter.
It does not do anything meaningful as this transformer is stateless
Parameters
----------
X : cudf.Series or pd.Series
A Series of string documents
"""
self._warn_for_unused_params()
self._validate_params()
return self
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def _count_hash(self, tokenized_df):
"""
Count occurrences of tokens in each document.
"""
# Transform string tokens into token indexes from 0 to n_features
tokenized_df["token"] = tokenized_df["token"].hash_values()
if self.alternate_sign:
# below logic is equivalent to: value *= ((h >= 0) * 2) - 1
tokenized_df["value"] = ((tokenized_df["token"] >= 0) * 2) - 1
tokenized_df["token"] = (
tokenized_df["token"].abs() % self.n_features
)
count_ser = tokenized_df.groupby(
["doc_id", "token"], sort=True
).value.sum()
count_ser.name = "count"
else:
tokenized_df["token"] = (
tokenized_df["token"].abs() % self.n_features
)
count_ser = tokenized_df.groupby(
["doc_id", "token"], sort=True
).size()
count_ser.name = "count"
count_df = count_ser.reset_index(drop=False)
del count_ser, tokenized_df
return count_df
def fit_transform(self, X, y=None):
"""
Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse CuPy CSR matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def transform(self, raw_documents):
    """
    Transform documents to a document-term matrix.

    Documents are preprocessed, tokenized, and hashed into a fixed
    number of feature columns; no fitted vocabulary is required.

    Parameters
    ----------
    raw_documents : cudf.Series or pd.Series
        A Series of string documents

    Returns
    -------
    X : sparse CuPy CSR matrix of shape (n_samples, n_features)
        Document-term matrix.
    """
    docs = self._preprocess(raw_documents)
    del raw_documents

    n_doc = len(docs)

    # Intermediate frames are deleted eagerly to keep GPU memory low.
    tokenized = self._create_tokenized_df(docs)
    del docs

    counts = self._count_hash(tokenized)
    del tokenized

    # Documents with no tokens still need an (empty) row in the output.
    empty_ids = self._compute_empty_doc_ids(counts, n_doc)
    X = create_csr_matrix_from_count_df(
        counts, empty_ids, n_doc, self.n_features, dtype=self.dtype
    )

    if self.binary:
        X.data.fill(1)
    if self.norm == "l1":
        csr_row_normalize_l1(X, inplace=True)
    elif self.norm == "l2":
        csr_row_normalize_l2(X, inplace=True)
    return X
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/text.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The following imports are needed so that we can import those classes
from cuml.feature_extraction.text just like scikit-learn. Do not remove.
"""
from cuml.feature_extraction._tfidf import (
TfidfTransformer,
) # noqa # pylint: disable=unused-import
from cuml.feature_extraction._tfidf_vectorizer import (
TfidfVectorizer,
) # noqa # pylint: disable=unused-import
from cuml.feature_extraction._vectorizers import (
CountVectorizer,
) # noqa # pylint: disable=unused-import
from cuml.feature_extraction._vectorizers import (
HashingVectorizer,
) # noqa # pylint: disable=unused-import
__all__ = [
"CountVectorizer",
"HashingVectorizer",
"TfidfTransformer",
"TfidfVectorizer",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/feature_extraction/__init__.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import text
__all__ = ["text"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/dask/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask import cluster
from cuml.dask import common
from cuml.dask import datasets
from cuml.dask import decomposition
from cuml.dask import ensemble
from cuml.dask import feature_extraction
from cuml.dask import linear_model
from cuml.dask import manifold
from cuml.dask import metrics
from cuml.dask import naive_bayes
from cuml.dask import neighbors
from cuml.dask import preprocessing
from cuml.dask import solvers
__all__ = [
"cluster",
"common",
"datasets",
"decomposition",
"ensemble",
"feature_extraction",
"linear_model",
"manifold",
"metrics",
"naive_bayes",
"neighbors",
"preprocessing",
"solvers",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/preprocessing/LabelEncoder.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing import LabelEncoder as LE
from cuml.common.exceptions import NotFittedError
from dask_cudf.core import Series as daskSeries
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedInverseTransformMixin
from toolz import first
from collections.abc import Sequence
from cuml.internals.safe_imports import gpu_only_import_from
dcDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
class LabelEncoder(
    BaseEstimator, DelayedTransformMixin, DelayedInverseTransformMixin
):
    """
    A cuDF-based implementation of ordinal label encoding

    Parameters
    ----------
    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform or inverse transform, the resulting encoding will be null.

    Examples
    --------
    Converting a categorical implementation to a numerical one

    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> import cudf
        >>> import dask_cudf
        >>> from cuml.dask.preprocessing import LabelEncoder

        >>> import pandas as pd
        >>> pd.set_option('display.max_colwidth', 2000)

        >>> cluster = LocalCUDACluster(threads_per_worker=1)
        >>> client = Client(cluster)
        >>> df = cudf.DataFrame({'num_col':[10, 20, 30, 30, 30],
        ...                      'cat_col':['a','b','c','a','a']})
        >>> ddf = dask_cudf.from_cudf(df, npartitions=2)

        >>> # There are two functionally equivalent ways to do this
        >>> le = LabelEncoder()
        >>> le.fit(ddf.cat_col)  # le = le.fit(data.category) also works
        <cuml.dask.preprocessing.LabelEncoder.LabelEncoder object at 0x...>
        >>> encoded = le.transform(ddf.cat_col)
        >>> print(encoded.compute())
        0    0
        1    1
        2    2
        3    0
        4    0
        dtype: uint8

        >>> # This method is preferred
        >>> le = LabelEncoder()
        >>> encoded = le.fit_transform(ddf.cat_col)
        >>> print(encoded.compute())
        0    0
        1    1
        2    2
        3    0
        4    0
        dtype: uint8

        >>> # We can assign this to a new column
        >>> ddf = ddf.assign(encoded=encoded.values)
        >>> print(ddf.compute())
           num_col cat_col  encoded
        0       10       a        0
        1       20       b        1
        2       30       c        2
        3       30       a        0
        4       30       a        0

        >>> # We can also encode more data
        >>> test_data = cudf.Series(['c', 'a'])
        >>> encoded = le.transform(dask_cudf.from_cudf(test_data,
        ...                                            npartitions=2))
        >>> print(encoded.compute())
        0    2
        1    0
        dtype: uint8

        >>> # After train, ordinal label can be inverse_transform() back to
        >>> # string labels
        >>> ord_label = cudf.Series([0, 0, 1, 2, 1])
        >>> ord_label = le.inverse_transform(
        ...     dask_cudf.from_cudf(ord_label,npartitions=2))
        >>> print(ord_label.compute())
        0    a
        1    a
        2    b
        0    c
        1    b
        dtype: object

        >>> client.close()
        >>> cluster.close()
    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        super().__init__(client=client, verbose=verbose, **kwargs)

    def _check_is_fitted(self):
        """Raise NotFittedError if ``fit`` has not been called yet."""
        if self._get_internal_model() is None:
            msg = (
                "This LabelEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator."
            )
            raise NotFittedError(msg)

    def fit(self, y):
        """
        Fit a LabelEncoder instance to a set of categories

        Parameters
        ----------
        y : dask_cudf.Series
            Series containing the categories to be encoded. Its elements
            may or may not be unique

        Returns
        -------
        self : LabelEncoder
            A fitted instance of itself to allow method chaining

        Notes
        -----
        Number of unique classes will be collected at the client. It'll
        consume memory proportional to the number of unique classes.
        """
        # Unique classes are gathered to the client so the single-GPU
        # LabelEncoder can be fit with a fixed category set.
        _classes = y.unique().compute().sort_values(ignore_index=True)
        el = first(y) if isinstance(y, Sequence) else y
        self.datatype = (
            "cudf" if isinstance(el, (dcDataFrame, daskSeries)) else "cupy"
        )
        self._set_internal_model(LE(**self.kwargs).fit(y, _classes=_classes))
        return self

    def fit_transform(self, y, delayed=True):
        """
        Simultaneously fit and transform an input

        This is functionally equivalent to (but faster than)
        `LabelEncoder().fit(y).transform(y)`
        """
        return self.fit(y).transform(y, delayed=delayed)

    def transform(self, y, delayed=True):
        """
        Transform an input into its categorical keys.

        This is intended for use with small inputs relative to the size of
        the dataset. For fitting and transforming an entire dataset, prefer
        `fit_transform`.

        Parameters
        ----------
        y : dask_cudf.Series
            Input keys to be transformed. Its values should match the
            categories given to `fit`

        Returns
        -------
        encoded : dask_cudf.Series
            The ordinally encoded input series

        Raises
        ------
        KeyError
            if a category appears that was not seen in `fit`
        """
        self._check_is_fitted()
        return self._transform(
            y,
            delayed=delayed,
            output_dtype="int32",
            output_collection_type="cudf",
        )

    def inverse_transform(self, y, delayed=True):
        """
        Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : dask_cudf Series
            The string representation of the categories.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        X_tr : dask_cudf.Series
            Distributed object containing the inverse transformed array.
        """
        self._check_is_fitted()
        return self._inverse_transform(
            y, delayed=delayed, output_collection_type="cudf"
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/preprocessing/label.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.label import LabelBinarizer as LB
from cuml.dask.common.input_utils import _extract_partitions
from cuml.dask.common.base import BaseEstimator
from cuml.common import rmm_cupy_ary
import dask
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
class LabelBinarizer(BaseEstimator):
    """
    A distributed version of LabelBinarizer for one-hot encoding
    a collection of labels.

    Examples
    --------
    Create an array with labels and dummy encode them

    .. code-block:: python

        >>> import cupy as cp
        >>> import cupyx
        >>> from cuml.dask.preprocessing import LabelBinarizer
        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> import dask

        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)
        >>> labels = cp.asarray([0, 5, 10, 7, 2, 4, 1, 0, 0, 4, 3, 2, 1],
        ...                     dtype=cp.int32)
        >>> labels = dask.array.from_array(labels)

        >>> lb = LabelBinarizer()
        >>> encoded = lb.fit_transform(labels)
        >>> print(encoded.compute())
        [[1 0 0 0 0 0 0 0]
         [0 0 0 0 0 1 0 0]
         [0 0 0 0 0 0 0 1]
         [0 0 0 0 0 0 1 0]
         [0 0 1 0 0 0 0 0]
         [0 0 0 0 1 0 0 0]
         [0 1 0 0 0 0 0 0]
         [1 0 0 0 0 0 0 0]
         [1 0 0 0 0 0 0 0]
         [0 0 0 0 1 0 0 0]
         [0 0 0 1 0 0 0 0]
         [0 0 1 0 0 0 0 0]
         [0 1 0 0 0 0 0 0]]
        >>> decoded = lb.inverse_transform(encoded)
        >>> print(decoded.compute())
        [ 0  5 10  7  2  4  1  0  0  4  3  2  1]
        >>> client.close()
        >>> cluster.close()
    """

    def __init__(self, *, client=None, **kwargs):
        """
        Initialize new LabelBinarizer instance

        Parameters
        ----------
        client : dask.Client optional client to use
        kwargs : dict of arguments to proxy to underlying single-process
            LabelBinarizer
        """
        # NOTE: this string used to sit *after* the super().__init__ call,
        # where it was a dead statement rather than a docstring.
        super().__init__(client=client, **kwargs)

        # Sparse output will be added once sparse CuPy arrays are supported
        # by Dask.Array: https://github.com/rapidsai/cuml/issues/1665
        if (
            "sparse_output" in self.kwargs
            and self.kwargs["sparse_output"] is True
        ):
            raise ValueError(
                "Sparse output not yet supported in distributed mode"
            )

    @staticmethod
    def _func_create_model(**kwargs):
        # Worker-side factory for the single-GPU LabelBinarizer.
        return LB(**kwargs)

    @staticmethod
    def _func_unique_classes(y):
        # Worker-side task: unique labels of one partition.
        return rmm_cupy_ary(cp.unique, y)

    @staticmethod
    def _func_xform(model, y):
        # Worker-side task: one-hot encode one partition.
        xform_in = rmm_cupy_ary(cp.asarray, y, dtype=y.dtype)
        return model.transform(xform_in)

    @staticmethod
    def _func_inv_xform(model, y, threshold):
        # Worker-side task: invert the encoding of one partition.
        y = rmm_cupy_ary(cp.asarray, y, dtype=y.dtype)
        return model.inverse_transform(y, threshold)

    def fit(self, y):
        """Fit label binarizer

        Parameters
        ----------
        y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]
            chunked by row.
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.

        Returns
        -------
        self : returns an instance of self.
        """
        # Take the unique classes and broadcast them all around the cluster.
        futures = self.client.sync(_extract_partitions, y)

        # Per-partition uniques are computed on the workers...
        unique = [
            self.client.submit(LabelBinarizer._func_unique_classes, f)
            for w, f in futures
        ]

        # ...then gathered (sync=True blocks until done) and reduced to the
        # global class set on the client.
        classes = self.client.compute(unique, sync=True)
        classes = rmm_cupy_ary(
            cp.unique, rmm_cupy_ary(cp.stack, classes, axis=0)
        )

        self._set_internal_model(LB(**self.kwargs).fit(classes))
        return self

    def fit_transform(self, y):
        """
        Fit the label encoder and return transformed labels

        Parameters
        ----------
        y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]
            target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.

        Returns
        -------
        arr : Dask.Array backed by CuPy arrays containing encoded labels
        """
        return self.fit(y).transform(y)

    def transform(self, y):
        """
        Transform and return encoded labels

        Parameters
        ----------
        y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]

        Returns
        -------
        arr : Dask.Array backed by CuPy arrays containing encoded labels
        """
        parts = self.client.sync(_extract_partitions, y)
        internal_model = self._get_internal_model()

        xform_func = dask.delayed(LabelBinarizer._func_xform)
        meta = rmm_cupy_ary(cp.zeros, 1)
        if internal_model.sparse_output:
            meta = cupyx.scipy.sparse.csr_matrix(meta)

        # Row counts per partition are unknown up front, hence the cp.nan
        # shape and allow_unknown_chunksizes below.
        f = [
            dask.array.from_delayed(
                xform_func(internal_model, part),
                meta=meta,
                dtype=cp.float32,
                shape=(cp.nan, len(self.classes_)),
            )
            for w, part in parts
        ]

        arr = dask.array.concatenate(f, axis=0, allow_unknown_chunksizes=True)
        return arr

    def inverse_transform(self, y, threshold=None):
        """
        Invert a set of encoded labels back to original labels

        Parameters
        ----------
        y : Dask.Array of shape [n_samples, n_classes] containing encoded
            labels

        threshold : float This value is currently ignored

        Returns
        -------
        arr : Dask.Array backed by CuPy arrays containing original labels
        """
        parts = self.client.sync(_extract_partitions, y)
        inv_func = dask.delayed(LabelBinarizer._func_inv_xform)

        dtype = self.classes_.dtype
        meta = rmm_cupy_ary(cp.zeros, 1, dtype=dtype)

        internal_model = self._get_internal_model()

        # As in transform, per-partition row counts are unknown (cp.nan).
        f = [
            dask.array.from_delayed(
                inv_func(internal_model, part, threshold),
                dtype=dtype,
                shape=(cp.nan,),
                meta=meta,
            )
            for w, part in parts
        ]

        arr = dask.array.concatenate(f, axis=0, allow_unknown_chunksizes=True)
        return arr
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/preprocessing/encoders.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Sequence
from cuml.common import with_cupy_rmm
from cuml.dask.common.base import (
BaseEstimator,
DelayedInverseTransformMixin,
DelayedTransformMixin,
)
from cuml.internals.safe_imports import gpu_only_import_from, gpu_only_import
from dask_cudf.core import Series as daskSeries
from toolz import first
dask_cudf = gpu_only_import("dask_cudf")
dcDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
class DelayedFitTransformMixin:
    """Adds a ``fit_transform`` convenience method to distributed encoders."""

    def fit_transform(self, X, delayed=True):
        """Fit the encoder to X, then transform X.

        Equivalent to fit(X).transform(X).

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            The data to encode.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        out : Dask cuDF DataFrame or CuPy backed Dask Array
            Distributed object containing the transformed data
        """
        fitted = self.fit(X)
        return fitted.transform(X, delayed=delayed)
class OneHotEncoder(
    BaseEstimator,
    DelayedTransformMixin,
    DelayedInverseTransformMixin,
    DelayedFitTransformMixin,
):
    """
    Encode categorical features as a one-hot numeric array.

    The input to this transformer should be a dask_cuDF.DataFrame or cupy
    dask.Array, denoting the values taken on by categorical features.
    The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
    encoding scheme. This creates a binary column for each category and
    returns a sparse matrix or dense array (depending on the ``sparse``
    parameter).

    By default, the encoder derives the categories based on the unique values
    in each feature. Alternatively, you can also specify the `categories`
    manually.

    Parameters
    ----------
    categories : 'auto', cupy.ndarray or cudf.DataFrame, default='auto'
        Categories (unique values) per feature. All categories are expected to
        fit on one GPU.

        - 'auto' : Determine categories automatically from the training data.

        - DataFrame/ndarray : ``categories[col]`` holds the categories expected
          in the feature col.

    drop : 'first', None or a dict, default=None
        Specifies a methodology to use to drop one of the categories per
        feature. This is useful in situations where perfectly collinear
        features cause problems, such as when feeding the resulting data
        into a neural network or an unregularized regression.

        - None : retain all features (the default).

        - 'first' : drop the first category in each feature. If only one
          category is present, the feature will be dropped entirely.

        - Dict : ``drop[col]`` is the category in feature col that
          should be dropped.

    sparse : bool, default=False
        This feature was deactivated and will give an exception when True.
        The reason is because sparse matrix are not fully supported by cupy
        yet, causing incorrect values when computing one hot encodings.
        See https://github.com/cupy/cupy/issues/3223

    dtype : number type, default=np.float
        Desired datatype of transform's output.

    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform (default is to raise). When this parameter
        is set to 'ignore' and an unknown category is encountered during
        transform, the resulting one-hot encoded columns for this feature
        will be all zeros. In the inverse transform, an unknown category
        will be denoted as None.
    """

    @with_cupy_rmm
    def fit(self, X):
        """Fit a multi-node multi-gpu OneHotEncoder to X.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            The data to determine the categories of each feature.

        Returns
        -------
        self
        """
        # Lazy import avoids pulling in the MG implementation at module load.
        from cuml.preprocessing.onehotencoder_mg import OneHotEncoderMG

        # Record the input container type so inverse_transform can return
        # the same collection type the caller passed in.
        el = first(X) if isinstance(X, Sequence) else X
        self.datatype = (
            "cudf" if isinstance(el, (dcDataFrame, daskSeries)) else "cupy"
        )

        self._set_internal_model(OneHotEncoderMG(**self.kwargs).fit(X))

        return self

    @with_cupy_rmm
    def transform(self, X, delayed=True):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            The data to encode.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        out : Dask cuDF DataFrame or CuPy backed Dask Array
            Distributed object containing the transformed input.
        """
        # The encoded output is always a CuPy-backed array, regardless of
        # the input container type.
        return self._transform(
            X,
            n_dims=2,
            delayed=delayed,
            output_dtype=self._get_internal_model().dtype,
            output_collection_type="cupy",
        )

    @with_cupy_rmm
    def inverse_transform(self, X, delayed=True):
        """Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the one-hot
        encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : CuPy backed Dask Array, shape [n_samples, n_encoded_features]
            The transformed data.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        X_tr : Dask cuDF DataFrame or CuPy backed Dask Array
            Distributed object containing the inverse transformed array.
        """
        # Unlike transform, this returns the container type remembered
        # from fit (self.datatype).
        dtype = self._get_internal_model().dtype
        return self._inverse_transform(
            X,
            n_dims=2,
            delayed=delayed,
            output_dtype=dtype,
            output_collection_type=self.datatype,
        )
class OrdinalEncoder(
    BaseEstimator,
    DelayedTransformMixin,
    DelayedInverseTransformMixin,
    DelayedFitTransformMixin,
):
    """Encode categorical features as an integer array.

    The input to this transformer should be an :py:class:`dask_cudf.DataFrame` or a
    :py:class:`dask.array.Array` backed by cupy, denoting the unique values taken on by
    categorical (discrete) features. The features are converted to ordinal
    integers. This results in a single column of integers (0 to n_categories - 1) per
    feature.

    Parameters
    ----------
    categories : 'auto', :py:class:`cupy.ndarray` or :py:class:`cudf.DataFrame`, \
        default='auto'
        Categories (unique values) per feature. All categories are expected to
        fit on one GPU.

        - 'auto' : Determine categories automatically from the training data.

        - DataFrame/ndarray : ``categories[col]`` holds the categories expected
          in the feature col.

    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise an error or ignore if an unknown categorical feature is
        present during transform (default is to raise). When this parameter is set
        to 'ignore' and an unknown category is encountered during transform, the
        resulting encoded value would be null when output type is cudf
        dataframe.

    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`. See
        :ref:`verbosity-levels` for more info.
    """

    @with_cupy_rmm
    def fit(self, X):
        """Fit OrdinalEncoder to X.

        Parameters
        ----------
        X : :py:class:`dask_cudf.DataFrame` or a CuPy backed :py:class:`dask.array.Array`.
            shape = (n_samples, n_features) The data to determine the categories of each
            feature.

        Returns
        -------
        self
        """
        # Lazy import avoids pulling in the MG implementation at module load.
        from cuml.preprocessing.ordinalencoder_mg import OrdinalEncoderMG

        # Record the input container type so transform/inverse_transform
        # can return the same collection type the caller passed in.
        el = first(X) if isinstance(X, Sequence) else X
        self.datatype = (
            "cudf" if isinstance(el, (dcDataFrame, daskSeries)) else "cupy"
        )

        self._set_internal_model(OrdinalEncoderMG(**self.kwargs).fit(X))

        return self

    @with_cupy_rmm
    def transform(self, X, delayed=True):
        """Transform X using ordinal encoding.

        Parameters
        ----------
        X : :py:class:`dask_cudf.DataFrame` or cupy backed dask array. The data to
            encode.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        X_out :
            Transformed input.
        """
        return self._transform(
            X,
            n_dims=2,
            delayed=delayed,
            output_dtype=self._get_internal_model().dtype,
            output_collection_type=self.datatype,
        )

    @with_cupy_rmm
    def inverse_transform(self, X, delayed=True):
        """Convert the data back to the original representation.

        Parameters
        ----------
        X : :py:class:`dask_cudf.DataFrame` or cupy backed dask array.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        X_tr :
            Distributed object containing the inverse transformed array.
        """
        dtype = self._get_internal_model().dtype
        return self._inverse_transform(
            X,
            n_dims=2,
            delayed=delayed,
            output_dtype=dtype,
            output_collection_type=self.datatype,
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/preprocessing/__init__.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.preprocessing.encoders import OneHotEncoder, OrdinalEncoder
from cuml.dask.preprocessing.label import LabelBinarizer
from cuml.dask.preprocessing.LabelEncoder import LabelEncoder
__all__ = [
"LabelBinarizer",
"OneHotEncoder",
"OrdinalEncoder",
"LabelEncoder",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/decomposition/tsvd.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.decomposition.base import BaseDecomposition
from cuml.dask.decomposition.base import DecompositionSyncFitMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedInverseTransformMixin
class TruncatedSVD(
    BaseDecomposition,
    DelayedTransformMixin,
    DelayedInverseTransformMixin,
    DecompositionSyncFitMixin,
):
    """
    Multi-node multi-GPU Truncated Singular Value Decomposition.

    Examples
    --------
    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client, wait
        >>> import cupy as cp
        >>> from cuml.dask.decomposition import TruncatedSVD
        >>> from cuml.dask.datasets import make_blobs

        >>> cluster = LocalCUDACluster(threads_per_worker=1)
        >>> client = Client(cluster)

        >>> nrows = 6
        >>> ncols = 3
        >>> n_parts = 2

        >>> X_cudf, _ = make_blobs(n_samples=nrows, n_features=ncols,
        ...                        centers=1, n_parts=n_parts,
        ...                        cluster_std=1.8, random_state=10,
        ...                        dtype=cp.float32)

        >>> in_blobs = X_cudf.compute()
        >>> print(in_blobs)  # doctest: +SKIP
        [[ 6.953966    6.2313757   0.84974563]
         [10.012338    3.4641726   3.0827546 ]
         [ 9.537406    4.0504313   3.2793145 ]
         [ 8.32713     2.957846    1.8215517 ]
         [ 5.7044296   1.855514    3.7996366 ]
         [10.089077    2.1995444   2.2072687 ]]

        >>> cumlModel = TruncatedSVD(n_components = 1)
        >>> XT = cumlModel.fit_transform(X_cudf)
        >>> result = XT.compute()
        >>> print(result)  # doctest: +SKIP
        [[ 8.699628   0.         0.       ]
         [11.018815   0.         0.       ]
         [10.8554535  0.         0.       ]
         [ 9.000192   0.         0.       ]
         [ 6.7628784  0.         0.       ]
         [10.40526    0.         0.       ]]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    n_components : int (default = 1)
        The number of top K singular vectors / values you want.
        Must be <= number(columns).
    svd_solver : 'full', 'jacobi'
        Only Full algorithm is supported since it's significantly faster on GPU
        then the other solvers including randomized SVD.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    components_ : array
        The top K components (VT.T[:,:n_components]) in U, S, VT = svd(X)
    explained_variance_ : array
        How much each component explains the variance in the data given by S**2
    explained_variance_ratio_ : array
        How much in % the variance is explained given by S**2/sum(S**2)
    singular_values_ : array
        The top K singular values. Remember all singular values >= 0
    """

    def __init__(self, *, client=None, **kwargs):
        """
        Constructor for distributed TruncatedSVD model
        """
        # BaseDecomposition handles worker-side model creation via
        # the _create_tsvd factory below.
        super().__init__(
            model_func=TruncatedSVD._create_tsvd, client=client, **kwargs
        )

    def fit(self, X, _transform=False):
        """
        Fit the model with X.

        Parameters
        ----------
        X : dask cuDF input

        Returns
        -------
        self, or the transformed data when ``_transform=True``.
        """

        # `_transform=True` here as tSVD currently needs to
        # call `fit_transform` to be able to build
        # `explained_variance_`
        out = self._fit(X, _transform=True)
        if _transform:
            return out
        return self

    def fit_transform(self, X):
        """
        Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : dask cuDF

        Returns
        -------
        X_new : dask cuDF
        """
        return self.fit(X, _transform=True)

    def transform(self, X, delayed=True):
        """
        Apply dimensionality reduction to `X`.

        `X` is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : dask cuDF

        Returns
        -------
        X_new : dask cuDF
        """
        return self._transform(X, n_dims=2, delayed=delayed)

    def inverse_transform(self, X, delayed=True):
        """
        Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : dask cuDF

        Returns
        -------
        X_original : dask cuDF
        """
        return self._inverse_transform(X, n_dims=2, delayed=delayed)

    def get_param_names(self):
        # Parameters are whatever kwargs were proxied to the single-GPU model.
        return list(self.kwargs.keys())

    @staticmethod
    @mnmg_import
    def _create_tsvd(handle, datatype, **kwargs):
        # Worker-side factory: builds the single-GPU multi-GPU-aware TSVD.
        from cuml.decomposition.tsvd_mg import TSVDMG as cumlTSVD

        return cumlTSVD(handle=handle, output_type=datatype, **kwargs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/decomposition/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.decomposition.pca import PCA
from cuml.dask.decomposition.tsvd import TruncatedSVD
__all__ = ["PCA", "TruncatedSVD"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/decomposition/base.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common import raise_exception_from_futures
from raft_dask.common.comms import get_raft_comm_state
from raft_dask.common.comms import Comms
from cuml.dask.common.input_utils import to_output
from cuml.dask.common import parts_to_ranks
from cuml.dask.common.part_utils import flatten_grouped_results
from dask.distributed import wait, get_worker
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.input_utils import DistributedDataHandler
class BaseDecomposition(BaseEstimator):
    """Common base for distributed (MNMG) decomposition estimators."""

    def __init__(self, *, model_func, client=None, verbose=False, **kwargs):
        """Store the worker-side model factory and initialize the base.

        Parameters
        ----------
        model_func : callable
            Factory building the single-GPU model on each worker.
        client : dask.distributed.Client, optional
            Dask client to use; the default client is used when None.
        verbose : int or bool, default=False
            Logging verbosity.
        """
        super().__init__(client=client, verbose=verbose, **kwargs)
        self._model_func = model_func
class DecompositionSyncFitMixin(object):
    """Mixin providing a synchronous multi-GPU fit for decomposition models.

    NOTE(review): assumes the concrete class also inherits from
    ``BaseEstimator`` (uses ``self.client``, ``self.kwargs`` and
    ``self._set_internal_model``) — confirm against subclasses.
    """

    @staticmethod
    def _func_fit(m, dfs, M, N, partsToRanks, rank, _transform):
        # Runs on a worker: fit the local model `m` on its partitions `dfs`.
        return m.fit(dfs, M, N, partsToRanks, rank, _transform)

    def _fit(self, X, _transform=False):
        """
        Fit the model with X.

        Parameters
        ----------
        X : dask cuDF input
        _transform : bool, default=False
            When True, also transform X during the fit and return the
            transformed output instead of ``self``.
        """
        n_cols = X.shape[1]

        data = DistributedDataHandler.create(data=X, client=self.client)
        self.datatype = data.datatype

        # The tsqr solver needs point-to-point communication between
        # workers; the other solvers only use collectives.
        if "svd_solver" in self.kwargs and self.kwargs["svd_solver"] == "tsqr":
            comms = Comms(comms_p2p=True)
        else:
            comms = Comms(comms_p2p=False)

        comms.init(workers=data.workers)

        data.calculate_parts_to_sizes(comms)

        worker_info = comms.worker_info(comms.worker_addresses)
        parts_to_sizes, _ = parts_to_ranks(
            self.client, worker_info, data.gpu_futures
        )

        total_rows = data.total_rows

        # One local model per rank, created on the worker owning that rank.
        # NOTE(review): `data.worker_info` is presumably populated by
        # calculate_parts_to_sizes() above — confirm in
        # DistributedDataHandler.
        models = dict(
            [
                (
                    data.worker_info[wf[0]]["rank"],
                    self.client.submit(
                        self._create_model,
                        comms.sessionId,
                        self._model_func,
                        self.datatype,
                        **self.kwargs,
                        pure=False,
                        workers=[wf[0]],
                    ),
                )
                for idx, wf in enumerate(data.worker_to_parts.items())
            ]
        )

        # Kick off the synchronized fit on every worker.
        pca_fit = dict(
            [
                (
                    wf[0],
                    self.client.submit(
                        DecompositionSyncFitMixin._func_fit,
                        models[data.worker_info[wf[0]]["rank"]],
                        wf[1],
                        total_rows,
                        n_cols,
                        parts_to_sizes,
                        data.worker_info[wf[0]]["rank"],
                        _transform,
                        pure=False,
                        workers=[wf[0]],
                    ),
                )
                for idx, wf in enumerate(data.worker_to_parts.items())
            ]
        )

        wait(list(pca_fit.values()))
        raise_exception_from_futures(list(pca_fit.values()))

        comms.destroy()

        # After the synchronized fit, every rank holds the fitted state;
        # keep the first rank's model as the internal single-GPU model.
        self._set_internal_model(list(models.values())[0])

        if _transform:
            # Gather the transformed partitions in original input order and
            # wrap them back into the input datatype.
            out_futures = flatten_grouped_results(
                self.client, data.gpu_futures, pca_fit
            )
            return to_output(out_futures, self.datatype)

        return self

    @staticmethod
    def _create_model(sessionId, model_func, datatype, **kwargs):
        # Runs on a worker: bind the RAFT comms handle for this session and
        # build the local single-GPU model with it.
        dask_worker = get_worker()
        handle = get_raft_comm_state(sessionId, dask_worker)["handle"]
        return model_func(handle, datatype, **kwargs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/decomposition/pca.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.decomposition.base import BaseDecomposition
from cuml.dask.decomposition.base import DecompositionSyncFitMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedInverseTransformMixin
class PCA(
    BaseDecomposition,
    DelayedTransformMixin,
    DelayedInverseTransformMixin,
    DecompositionSyncFitMixin,
):
    """
    PCA (Principal Component Analysis) is a fundamental dimensionality
    reduction technique used to combine features in X in linear combinations
    such that each new component captures the most information or variance of
    the data. N_components is usually small, say at 3, where it can be used for
    data visualization, data compression and exploratory analysis.

    cuML's multi-node multi-gpu (MNMG) PCA expects a dask-cuDF object as input
    and provides 2 algorithms, Full and Jacobi. Full (default) uses a full
    eigendecomposition then selects the top K eigenvectors. The Jacobi
    algorithm can be much faster as it iteratively tries to correct the top K
    eigenvectors, but might be less accurate.

    Examples
    --------
    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client, wait
        >>> import cupy as cp
        >>> from cuml.dask.decomposition import PCA
        >>> from cuml.dask.datasets import make_blobs

        >>> cluster = LocalCUDACluster(threads_per_worker=1)
        >>> client = Client(cluster)

        >>> nrows = 6
        >>> ncols = 3
        >>> n_parts = 2

        >>> X_cudf, _ = make_blobs(n_samples=nrows, n_features=ncols,
        ...                        centers=1, n_parts=n_parts,
        ...                        cluster_std=0.01, random_state=10,
        ...                        dtype=cp.float32)
        >>> blobs = X_cudf.compute()
        >>> print(blobs) # doctest: +SKIP
        [[8.688037  3.122401  1.2581943]
        [8.705028  3.1070278 1.2705998]
        [8.70239   3.1102846 1.2716919]
        [8.695665  3.1042147 1.2635932]
        [8.681095  3.0980906 1.2745825]
        [8.705454  3.100002  1.2657361]]

        >>> cumlModel = PCA(n_components = 1, whiten=False)
        >>> XT = cumlModel.fit_transform(X_cudf)
        >>> print(XT.compute()) # doctest: +SKIP
        [[-1.7516235e-02]
        [ 7.8094802e-03]
        [ 4.2757220e-03]
        [-6.7228684e-05]
        [-5.0618490e-03]
        [ 1.0557819e-02]]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    n_components : int (default = 1)
        The number of top K singular vectors / values you want.
        Must be <= number(columns).
    svd_solver : 'full', 'jacobi', 'auto'
        'full': Run exact full SVD and select the components by postprocessing
        'jacobi': Iteratively compute SVD of the covariance matrix
        'auto': For compatibility with Scikit-learn. Alias for 'jacobi'.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    whiten : boolean (default = False)
        If True, de-correlates the components. This is done by dividing them by
        the corresponding singular values then multiplying by sqrt(n_samples).
        Whitening allows each component to have unit variance and removes
        multi-collinearity. It might be beneficial for downstream
        tasks like LinearRegression where correlated features cause problems.

    Attributes
    ----------
    components_ : array
        The top K components (VT.T[:,:n_components]) in U, S, VT = svd(X)
    explained_variance_ : array
        How much each component explains the variance in the data given by S**2
    explained_variance_ratio_ : array
        How much in % the variance is explained given by S**2/sum(S**2)
    singular_values_ : array
        The top K singular values. Remember all singular values >= 0
    mean_ : array
        The column wise mean of X. Used to mean - center the data first.
    noise_variance_ : float
        From Bishop 1999's Textbook. Used in later tasks like calculating the
        estimated covariance of X.

    Notes
    -----
    PCA considers linear combinations of features, specifically those that
    maximize global variance structure. This means PCA is fantastic for global
    structure analyses, but weak for local relationships. Consider UMAP or
    T-SNE for a locally important embedding.

    **Applications of PCA**

    PCA is used extensively in practice for data visualization and data
    compression. It has been used to visualize extremely large word
    embeddings like Word2Vec and GloVe in 2 or 3 dimensions, large
    datasets of everyday objects and images, and used to distinguish
    between cancerous cells from healthy cells.

    For additional docs, see `scikitlearn's PCA
    <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        # All remaining kwargs (n_components, svd_solver, whiten, ...) are
        # forwarded unchanged to the single-GPU PCA built on each worker.
        super().__init__(
            model_func=PCA._create_pca,
            client=client,
            verbose=verbose,
            **kwargs,
        )

    def fit(self, X):
        """
        Fit the model with X.

        Parameters
        ----------
        X : dask cuDF input

        Returns
        -------
        self : the fitted estimator
        """
        # Synchronous distributed fit provided by DecompositionSyncFitMixin.
        self._fit(X)
        return self

    def fit_transform(self, X):
        """
        Fit the model with X and apply the dimensionality reduction on X.

        Parameters
        ----------
        X : dask cuDF

        Returns
        -------
        X_new : dask cuDF
        """
        return self.fit(X).transform(X)

    def transform(self, X, delayed=True):
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : dask cuDF
        delayed : bool, default=True
            Whether to return a lazy (delayed) result.

        Returns
        -------
        X_new : dask cuDF
        """
        return self._transform(X, n_dims=2, delayed=delayed)

    def inverse_transform(self, X, delayed=True):
        """
        Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : dask cuDF
        delayed : bool, default=True
            Whether to return a lazy (delayed) result.

        Returns
        -------
        X_original : dask cuDF
        """
        return self._inverse_transform(X, n_dims=2, delayed=delayed)

    def get_param_names(self):
        # Hyperparameters are whatever was forwarded to the worker models.
        return list(self.kwargs.keys())

    @staticmethod
    @mnmg_import
    def _create_pca(handle, datatype, **kwargs):
        # Worker-side factory: imported lazily so the MG extension is only
        # required on workers.
        from cuml.decomposition.pca_mg import PCAMG as cumlPCA

        return cumlPCA(handle=handle, output_type=datatype, **kwargs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/metrics/confusion_matrix.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.label import make_monotonic
from cuml.dask.metrics.utils import sorted_unique_labels
from cuml.internals.memory_utils import with_cupy_rmm
from cuml.dask.common.utils import get_client
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@with_cupy_rmm
def _local_cm(inputs, labels, use_sample_weight):
    """Compute a dense confusion matrix for a single partition.

    ``inputs`` is ``(y_true, y_pred)`` or ``(y_true, y_pred, sample_weight)``
    depending on ``use_sample_weight``. Labels are remapped onto the
    monotonic range ``[0, n_labels)``; samples whose true or predicted
    label is not in ``labels`` are discarded.
    """
    if use_sample_weight:
        y_true, y_pred, sample_weight = inputs
    else:
        y_true, y_pred = inputs
        sample_weight = cp.ones(y_true.shape[0], dtype=y_true.dtype)

    # Remap labels so they directly index rows/columns of the matrix.
    y_true, _ = make_monotonic(y_true, labels, copy=True)
    y_pred, _ = make_monotonic(y_pred, labels, copy=True)

    n_labels = labels.size

    # Keep only samples whose true and predicted labels are in `labels`.
    keep = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[keep]
    y_true = y_true[keep]
    sample_weight = sample_weight[keep]

    local_cm = cupyx.scipy.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)),
        shape=(n_labels, n_labels),
        dtype=cp.float64,
    ).toarray()

    return cp.nan_to_num(local_cm)
@with_cupy_rmm
def confusion_matrix(
    y_true,
    y_pred,
    labels=None,
    normalize=None,
    sample_weight=None,
    client=None,
):
    """Compute confusion matrix to evaluate the accuracy of a classification.

    Parameters
    ----------
    y_true : dask.Array (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : dask.Array (device or host) shape = (n_samples,)
        or (n_samples, n_outputs)
        Estimated target values.
    labels : array-like (device or host) shape = (n_classes,), optional
        List of labels to index the matrix. This may be used to reorder or
        select a subset of labels. If None is given, those that appear at least
        once in y_true or y_pred are used in sorted order.
    sample_weight : dask.Array (device or host) shape = (n_samples,), optional
        Sample weights.
    normalize : string in ['true', 'pred', 'all']
        Normalizes confusion matrix over the true (rows), predicted (columns)
        conditions or all the population. If None, confusion matrix will not be
        normalized.
    client : dask.distributed.Client, optional
        Dask client to use. Will use the default client if None.

    Returns
    -------
    C : array-like (device or host) shape = (n_classes, n_classes)
        Confusion matrix.

    Raises
    ------
    ValueError
        If `normalize` is not one of 'true', 'pred', 'all' or None.
    """
    client = get_client(client)

    if labels is None:
        labels = sorted_unique_labels(y_true, y_pred)

    # Validate before submitting any distributed work.
    if normalize not in ["true", "pred", "all", None]:
        msg = (
            "normalize must be one of "
            f"{{'true', 'pred', 'all', None}}, got {normalize}."
        )
        raise ValueError(msg)

    use_sample_weight = bool(sample_weight is not None)
    dask_arrays = (
        [y_true, y_pred, sample_weight]
        if use_sample_weight
        else [y_true, y_pred]
    )

    # run cm computation on each partition.
    # NOTE(review): calling .result() inside the comprehension blocks on
    # each partition in turn (sequential gather) — confirm this is the
    # intended trade-off vs. client.gather on all futures at once.
    data = DistributedDataHandler.create(dask_arrays, client=client)
    cms = [
        client.submit(
            _local_cm, p, labels, use_sample_weight, workers=[w]
        ).result()
        for w, p in data.gpu_futures
    ]

    # reduce each partition's result into one cupy matrix
    cm = sum(cms)

    # Normalization can divide by zero for empty rows/columns; silence the
    # warnings and map the resulting NaNs to zero afterwards.
    with np.errstate(all="ignore"):
        if normalize == "true":
            cm = cm / cm.sum(axis=1, keepdims=True)
        elif normalize == "pred":
            cm = cm / cm.sum(axis=0, keepdims=True)
        elif normalize == "all":
            cm = cm / cm.sum()
        cm = np.nan_to_num(cm)

    return cm
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/metrics/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
import warnings
if has_dask():
from cuml.dask.metrics.confusion_matrix import confusion_matrix
else:
warnings.warn(
"Dask not found. All Dask-based multi-GPU operation is disabled."
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/metrics/utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def sorted_unique_labels(*ys):
    """Extract an ordered array of unique labels from one or more dask arrays
    of labels."""
    # Reduce each dask array to its unique values first (per-block, then
    # across blocks), then merge all arrays into one sorted unique set.
    per_array = [
        cp.unique(y.map_blocks(lambda b: cp.unique(b)).compute()) for y in ys
    ]
    return cp.unique(cp.concatenate(per_array))
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/naive_bayes/naive_bayes.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.naive_bayes import MultinomialNB as MNB
from cuml.common import rmm_cupy_ary
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.func import tree_reduce
from cuml.dask.common.func import reduce
from cuml.dask.common.utils import wait_and_raise_from_futures
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import BaseEstimator
from cuml.common import with_cupy_rmm
import dask.array
from toolz import first
import dask
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
class MultinomialNB(BaseEstimator, DelayedPredictionMixin):
    """
    Distributed Naive Bayes classifier for multinomial models

    Examples
    --------
    Load the 20 newsgroups dataset from Scikit-learn and train a
    Naive Bayes classifier.

    .. code-block:: python

        >>> import cupy as cp
        >>> from sklearn.datasets import fetch_20newsgroups
        >>> from sklearn.feature_extraction.text import CountVectorizer

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> import dask
        >>> from cuml.dask.common import to_sparse_dask_array
        >>> from cuml.dask.naive_bayes import MultinomialNB

        >>> # Create a local CUDA cluster
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)

        >>> # Load corpus
        >>> twenty_train = fetch_20newsgroups(subset='train',
        ...                                   shuffle=True, random_state=42)

        >>> cv = CountVectorizer()
        >>> xformed = cv.fit_transform(twenty_train.data).astype(cp.float32)
        >>> X = to_sparse_dask_array(xformed, client)

        >>> y = dask.array.from_array(twenty_train.target, asarray=False,
        ...                           fancy=False).astype(cp.int32)

        >>> # Train model
        >>> model = MultinomialNB()
        >>> model.fit(X, y)
        <cuml.dask.naive_bayes.naive_bayes.MultinomialNB object at 0x...>

        >>> # Compute accuracy on training set
        >>> model.score(X, y)
        array(0.924...)
        >>> client.close()
        >>> cluster.close()
    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        """
        Create new multinomial distributed Naive Bayes classifier instance

        Parameters
        -----------

        client : dask.distributed.Client optional Dask client to use
        """
        super().__init__(client=client, verbose=verbose, **kwargs)

        # This estimator always operates on CuPy-backed dask arrays.
        self.datatype = "cupy"

        # Make any potential model args available and catch any potential
        # ValueErrors before distributed training begins.
        self._set_internal_model(MNB(**kwargs))

    @staticmethod
    @with_cupy_rmm
    def _fit(Xy, classes, kwargs):
        # Worker-side: fit a local single-GPU model on one (X, y) partition.
        X, y = Xy

        model = MNB(**kwargs)
        model.partial_fit(X, y, classes=classes)

        return model

    @staticmethod
    def _unique(x):
        # Unique labels of a single block, allocated through RMM.
        return rmm_cupy_ary(cp.unique, x)

    @staticmethod
    def _merge_counts_to_model(models):
        # Reduce step: accumulate the per-partition sufficient statistics
        # (feature and class counts) into the first model.
        modela = first(models)

        for model in models[1:]:
            modela.feature_count_ += model.feature_count_
            modela.class_count_ += model.class_count_
        return modela

    @staticmethod
    def _update_log_probs(model):
        # Finalize the merged model: recompute log priors/likelihoods from
        # the accumulated counts.
        model.update_log_probs()
        return model

    @with_cupy_rmm
    def fit(self, X, y, classes=None):
        """
        Fit distributed Naive Bayes classifier model

        Parameters
        ----------

        X : dask.Array with blocks containing dense or sparse cupy arrays
        y : dask.Array with blocks containing cupy.ndarray
        classes : array-like containing unique class labels

        Returns
        -------

        cuml.dask.naive_bayes.MultinomialNB current model instance

        Raises
        ------
        ValueError
            If X or y is not a dask.Array, or X is chunked along columns.
        """

        # Only Dask.Array supported for now
        if not isinstance(X, dask.array.core.Array):
            raise ValueError("Only dask.Array is supported for X")

        if not isinstance(y, dask.array.core.Array):
            raise ValueError("Only dask.Array is supported for y")

        if len(X.chunks[1]) != 1:
            raise ValueError(
                "X must be chunked by row only. "
                "Multi-dimensional chunking is not supported"
            )

        futures = DistributedDataHandler.create([X, y], self.client)

        # Derive the global class set from the data when not supplied.
        classes = (
            self._unique(y.map_blocks(MultinomialNB._unique).compute())
            if classes is None
            else classes
        )

        models = [
            self.client.submit(
                self._fit, part, classes, self.kwargs, pure=False
            )
            for w, part in futures.gpu_futures
        ]

        # Merge per-partition counts, then recompute the log probabilities
        # once on the fully merged model.
        models = reduce(
            models, self._merge_counts_to_model, client=self.client
        )

        models = self.client.submit(self._update_log_probs, models, pure=False)

        wait_and_raise_from_futures([models])

        self._set_internal_model(models)

        return self

    @staticmethod
    def _get_part(parts, idx):
        # Select one partition from a list of partitions.
        return parts[idx]

    @staticmethod
    def _get_size(arrs):
        # Number of rows in a partition.
        return arrs.shape[0]

    def predict(self, X):
        # TODO: Once cupy sparse arrays are fully supported underneath Dask
        # arrays, and Naive Bayes is refactored to use CumlArray, this can
        # extend DelayedPredictionMixin.
        # Ref: https://github.com/rapidsai/cuml/issues/1834
        # Ref: https://github.com/rapidsai/cuml/issues/1387
        """
        Use distributed Naive Bayes model to predict the classes for a
        given set of data samples.

        Parameters
        ----------

        X : dask.Array with blocks containing dense or sparse cupy arrays

        Returns
        -------

        dask.Array containing predicted classes

        Raises
        ------
        ValueError
            If X is not a dask.Array.
        """
        if not isinstance(X, dask.array.core.Array):
            raise ValueError("Only dask.Array is supported for X")

        return self._predict(X, delayed=True, output_dtype=cp.int32)

    def score(self, X, y):
        """
        Compute accuracy score

        Parameters
        ----------

        X : Dask.Array
            Features to predict. Note- it is assumed that chunk sizes and
            shape of X are known. This can be done for a fully delayed
            Array by calling X.compute_chunks_sizes()
        y : Dask.Array
            Labels to use for computing accuracy. Note- it is assumed that
            chunk sizes and shape of X are known. This can be done for a fully
            delayed Array by calling X.compute_chunks_sizes()
        Returns
        -------

        score : float the resulting accuracy score
        """

        y_hat = self.predict(X)

        @dask.delayed
        def _count_accurate_predictions(y_hat, y):
            # Per-partition count of correct predictions.
            y_hat = rmm_cupy_ary(cp.asarray, y_hat, dtype=y_hat.dtype)
            y = rmm_cupy_ary(cp.asarray, y, dtype=y.dtype)
            return y.shape[0] - cp.count_nonzero(y - y_hat)

        delayed_parts = zip(y_hat.to_delayed(), y.to_delayed())

        accuracy_parts = [
            _count_accurate_predictions(*p) for p in delayed_parts
        ]

        # Tree-reduce the per-partition counts, then normalize by the total
        # number of samples.
        reduced = first(dask.compute(tree_reduce(accuracy_parts)))

        return reduced / X.shape[0]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/naive_bayes/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.naive_bayes.naive_bayes import MultinomialNB
__all__ = ["MultinomialNB"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/datasets/blobs.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
import dask.array as da
from cuml.datasets.blobs import _get_centers
from cuml.datasets.blobs import make_blobs as sg_make_blobs
from cuml.common import with_cupy_rmm
from cuml.datasets.utils import _create_rs_generator
from cuml.dask.datasets.utils import _get_X
from cuml.dask.datasets.utils import _get_labels
from cuml.dask.datasets.utils import _create_delayed
from cuml.dask.common.utils import get_client
import math
def _create_local_data(
    m, n, centers, cluster_std, shuffle, random_state, order, dtype
):
    """Generate one partition's (X, y) blobs on a single worker by
    delegating to the single-GPU ``make_blobs``."""
    return sg_make_blobs(
        m,
        n,
        centers=centers,
        cluster_std=cluster_std,
        random_state=random_state,
        shuffle=shuffle,
        order=order,
        dtype=dtype,
    )
@with_cupy_rmm
def make_blobs(
    n_samples=100,
    n_features=2,
    centers=None,
    cluster_std=1.0,
    n_parts=None,
    center_box=(-10, 10),
    shuffle=True,
    random_state=None,
    return_centers=False,
    verbose=False,
    order="F",
    dtype="float32",
    client=None,
    workers=None,
):
    """
    Makes labeled Dask-Cupy arrays containing blobs
    for a randomly generated set of centroids.

    This function calls `make_blobs` from `cuml.datasets` on each Dask worker
    and aggregates them into a single Dask Dataframe.

    For more information on Scikit-learn's `make_blobs
    <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html>`_.

    Parameters
    ----------
    n_samples : int
        number of rows
    n_features : int
        number of features
    centers : int or array of shape [n_centers, n_features],
        optional (default=None) The number of centers to generate, or the fixed
        center locations. If n_samples is an int and centers is None, 3 centers
        are generated. If n_samples is array-like, centers must be either None
        or an array of length equal to the length of n_samples.
    cluster_std : float (default = 1.0)
        standard deviation of points around centroid
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)
    center_box : tuple (int, int) (default = (-10, 10))
        the bounding box which constrains all the centroids
    random_state : int (default = None)
        sets random seed (or use None to reinitialize each time)
    return_centers : bool, optional (default=False)
        If True, then return the centers of each cluster
    verbose : int or boolean (default = False)
        Logging level.
    shuffle : bool (default=False)
        Shuffles the samples on each worker.
    order: str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    client : dask.distributed.Client (optional)
        Dask client to use
    workers : optional, list of strings
        Dask addresses of workers to use for computation.
        If None, all available Dask workers will be used.
        (e.g. : `workers = list(client.scheduler_info()['workers'].keys())`)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The input samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The output values.
    centers : dask.array backed by CuPy array of shape
        [n_centers, n_features], optional
        The centers of the underlying blobs. It is returned only if
        return_centers is True.

    Examples
    --------
    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> from cuml.dask.datasets import make_blobs

        >>> cluster = LocalCUDACluster(threads_per_worker=1)
        >>> client = Client(cluster)

        >>> workers = list(client.scheduler_info()['workers'].keys())
        >>> X, y = make_blobs(1000, 10, centers=42, cluster_std=0.1,
        ...                   workers=workers)

        >>> client.close()
        >>> cluster.close()
    """

    client = get_client(client=client)

    generator = _create_rs_generator(random_state=random_state)

    if workers is None:
        workers = list(client.scheduler_info()["workers"].keys())

    n_parts = n_parts if n_parts is not None else len(workers)
    # Round-robin partitions over workers when n_parts > len(workers).
    parts_workers = (workers * n_parts)[:n_parts]

    centers, n_centers = _get_centers(
        generator, centers, center_box, n_samples, n_features, dtype
    )

    # Split rows as evenly as possible; the last partition absorbs the
    # remainder.
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += n_samples % n_parts

    worker_rows = tuple(worker_rows)

    logger.debug(
        "Generating %d samples across %d partitions on "
        "%d workers (total=%d samples)"
        % (
            math.ceil(n_samples / len(workers)),
            n_parts,
            len(workers),
            n_samples,
        )
    )

    # One independent seed per partition so partitions differ but the whole
    # dataset is reproducible for a fixed random_state.
    seeds = generator.randint(n_samples, size=len(parts_workers))
    parts = [
        client.submit(
            _create_local_data,
            part_rows,
            n_features,
            centers,
            cluster_std,
            shuffle,
            int(seeds[idx]),
            order,
            dtype,
            pure=False,
            workers=[parts_workers[idx]],
        )
        for idx, part_rows in enumerate(worker_rows)
    ]

    # Split each worker's (X, y) tuple into separate feature/label futures.
    X = [client.submit(_get_X, f, pure=False) for idx, f in enumerate(parts)]
    y = [
        client.submit(_get_labels, f, pure=False)
        for idx, f in enumerate(parts)
    ]

    X_del = _create_delayed(X, dtype, worker_rows, n_features)
    y_del = _create_delayed(y, dtype, worker_rows)

    X_final = da.concatenate(X_del, axis=0)
    y_final = da.concatenate(y_del, axis=0)

    if return_centers:
        return X_final, y_final, centers
    else:
        return X_final, y_final
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/datasets/regression.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.utils import get_client
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.common import with_cupy_rmm
from cuml.dask.datasets.utils import _create_delayed
from cuml.dask.datasets.utils import _get_labels
from cuml.dask.datasets.utils import _get_X
from cuml.internals.safe_imports import gpu_only_import
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
def _create_rs_generator(random_state):
if hasattr(random_state, "__module__"):
rs_type = random_state.__module__ + "." + type(random_state).__name__
else:
rs_type = type(random_state).__name__
rs = None
if rs_type == "NoneType" or rs_type == "int":
rs = da.random.RandomState(
seed=random_state, RandomState=cp.random.RandomState
)
elif rs_type == "cupy.random.generator.RandomState":
rs = da.random.RandomState(RandomState=random_state)
elif rs_type == "dask.array.random.RandomState":
rs = random_state
else:
raise ValueError(
"random_state type must be int, CuPy RandomState \
or Dask RandomState"
)
return rs
def _dask_f_order_standard_normal(nrows, ncols, dtype, seed):
    """Draw an (nrows, ncols) F-ordered standard-normal block on a worker."""
    rng = cp.random.RandomState(seed=seed)
    flat = rng.standard_normal(nrows * ncols, dtype=dtype)
    return flat.reshape((nrows, ncols), order="F")
def _f_order_standard_normal(client, rs, chunksizes, ncols, dtype):
    """Build a row-concatenated dask array of F-ordered standard-normal
    chunks, one submitted task per chunk."""
    workers = list(client.has_what().keys())

    n_chunks = len(chunksizes)
    # Round-robin chunks over the available workers.
    chunks_workers = (workers * n_chunks)[:n_chunks]

    # One deterministic seed per chunk, derived from the shared RandomState.
    chunk_seeds = rs.permutation(n_chunks)

    futures = [
        client.submit(
            _dask_f_order_standard_normal,
            rows,
            ncols,
            dtype,
            chunk_seeds[idx],
            workers=[chunks_workers[idx]],
            pure=False,
        )
        for idx, rows in enumerate(chunksizes)
    ]

    delayed_chunks = _create_delayed(futures, dtype, chunksizes, ncols)
    return da.concatenate(delayed_chunks, axis=0)
def _dask_data_from_multivariate_normal(
    seed, covar, n_samples, n_features, dtype
):
    """Draw `n_samples` zero-mean points with covariance `covar` on a worker.

    Parameters
    ----------
    seed : int
        Seed for the worker-local RNG. Previously this argument was
        accepted but never used (the RandomState was constructed without
        it), so the generated partitions were not reproducible even though
        the caller supplies one deterministic seed per chunk.
    covar : cupy.ndarray of shape (n_features, n_features)
        Covariance matrix shared by all partitions.
    n_samples : int
        Number of rows to generate for this partition.
    n_features : int
        Dimensionality of each sample.
    dtype : data-type
        Output dtype of the generated samples.

    Returns
    -------
    cupy.ndarray of shape (n_samples, n_features)
    """
    mean = cp.zeros(n_features)
    # Bug fix: seed the local RandomState (matches
    # _dask_f_order_standard_normal) so output is deterministic given the
    # per-chunk seeds produced by the caller.
    local_rs = cp.random.RandomState(seed=seed)
    return local_rs.multivariate_normal(mean, covar, n_samples, dtype=dtype)
def _data_from_multivariate_normal(
    client, rs, covar, chunksizes, n_features, dtype
):
    """Build a row-concatenated dask array of multivariate-normal chunks
    sharing the covariance matrix `covar`, one submitted task per chunk."""
    workers = list(client.has_what().keys())

    n_chunks = len(chunksizes)
    # Round-robin chunks over the available workers.
    chunks_workers = (workers * n_chunks)[:n_chunks]

    # One deterministic seed per chunk, derived from the shared RandomState.
    chunk_seeds = rs.permutation(n_chunks)

    parts = [
        client.submit(
            _dask_data_from_multivariate_normal,
            chunk_seeds[idx],
            covar,
            rows,
            n_features,
            dtype,
            workers=[chunks_workers[idx]],
            pure=False,
        )
        for idx, rows in enumerate(chunksizes)
    ]

    delayed_parts = _create_delayed(parts, dtype, chunksizes, n_features)
    return da.concatenate(delayed_parts, axis=0)
def _dask_shuffle(part, n_samples, seed, features_indices):
    """Shuffle one (X, y) partition in place: permute the rows of X and y
    with the same seeded permutation, then reorder the columns of X."""
    X, y = part
    local_rs = cp.random.RandomState(seed=seed)
    row_order = local_rs.permutation(n_samples)

    X[...] = X[row_order, :]
    X[...] = X[:, features_indices]
    y[...] = y[row_order, :]

    return X, y
def _shuffle(
    client,
    rs,
    X,
    y,
    chunksizes,
    n_features,
    features_indices,
    n_targets,
    dtype,
):
    """Shuffle samples (per chunk) and features (globally) of X and y.

    Each partition is permuted on its own worker with a distinct seed
    drawn from ``rs``; the shuffled parts are then reassembled into a
    pair of dask arrays with the original chunking.
    """
    data_ddh = DistributedDataHandler.create(data=(X, y), client=client)
    chunk_seeds = rs.permutation(len(chunksizes))

    shuffled = []
    for idx, (worker, part) in enumerate(data_ddh.gpu_futures):
        shuffled.append(
            client.submit(
                _dask_shuffle,
                part,
                chunksizes[idx],
                chunk_seeds[idx],
                features_indices,
                workers=[worker],
                pure=False,
            )
        )

    # Split each shuffled (X, y) future back into its components
    X_shuffled = [client.submit(_get_X, f, pure=False) for f in shuffled]
    y_shuffled = [client.submit(_get_labels, f, pure=False) for f in shuffled]

    X_dela = _create_delayed(X_shuffled, dtype, chunksizes, n_features)
    y_dela = _create_delayed(y_shuffled, dtype, chunksizes, n_targets)
    return da.concatenate(X_dela, axis=0), da.concatenate(y_dela, axis=0)
def _convert_to_order(client, X, chunksizes, order, n_features, dtype):
    """Re-lay out every partition of X in the requested memory order.

    Each partition is converted on the worker that already holds it;
    ``copy=False`` lets CuPy skip the copy when the layout already
    matches.
    """
    handler = DistributedDataHandler.create(data=X, client=client)
    converted = []
    for worker, part in handler.gpu_futures:
        converted.append(
            client.submit(
                cp.array, part, copy=False, order=order, workers=[worker]
            )
        )
    delayed_parts = _create_delayed(converted, dtype, chunksizes, n_features)
    return da.concatenate(delayed_parts, axis=0)
def _generate_chunks_for_qr(total_size, min_size, n_parts):
n_total_per_part = max(1, int(total_size / n_parts))
if n_total_per_part > min_size:
min_size = n_total_per_part
n_partitions = int(max(1, total_size / min_size))
rest = total_size % (n_partitions * min_size)
chunks_list = [min_size for i in range(n_partitions - 1)]
chunks_list.append(min_size + rest)
return tuple(chunks_list)
def _generate_singular_values(
    n, effective_rank, tail_strength, n_samples_per_part, dtype="float32"
):
    """Build a bell-shaped singular-value profile of length ``n``.

    The profile mixes a fast-decaying low-rank "signal" component with
    a slowly decaying noisy "tail" component, weighted by
    ``tail_strength``. ``n_samples_per_part`` is accepted for
    call-site compatibility but is not used in the computation.
    """
    # Index of each singular value
    indices = cp.arange(n, dtype=dtype)
    scaled = indices / effective_rank
    signal = (1 - tail_strength) * cp.exp(-1.0 * scaled**2)
    noisy_tail = tail_strength * cp.exp(-0.1 * scaled)
    return signal + noisy_tail
def _dask_make_low_rank_covariance(
    n_features,
    effective_rank,
    tail_strength,
    seed,
    n_parts,
    n_samples_per_part,
    dtype,
):
    """
    This approach is a faster approach than making X as a full low
    rank matrix. Here, we take advantage of the fact that with
    SVD, X * X^T = V * S^2 * V^T. This means that we can
    generate a covariance matrix by generating only the right
    eigen-vector and the squared, low-rank singular values.

    With a memory usage of only O(n_features ^ 2) in this case, we pass
    this covariance matrix to workers to generate each part of X
    embarrassingly parallel from a multi-variate normal with mean 0
    and generated covariance.

    Runs on a single worker; returns a (n_features, n_features) CuPy
    covariance matrix.
    """
    local_rs = cp.random.RandomState(seed=seed)
    m2 = local_rs.standard_normal((n_features, n_features), dtype=dtype)
    # Orthonormal basis for the right singular vectors
    v, _ = cp.linalg.qr(m2)
    # Fix: propagate the requested dtype to the singular-value profile
    # (previously it always defaulted to float32 regardless of `dtype`).
    s = _generate_singular_values(
        n_features,
        effective_rank,
        tail_strength,
        n_samples_per_part,
        dtype=dtype,
    )
    # V * S^2 * V^T without forming S explicitly
    v *= s**2
    return cp.dot(v, cp.transpose(v))
def _make_low_rank_covariance(
    client,
    n_features,
    effective_rank,
    tail_strength,
    seed,
    n_parts,
    n_samples_per_part,
    dtype,
):
    """Submit low-rank covariance generation to the cluster.

    Returns a future resolving to a (n_features, n_features) CuPy
    covariance matrix built on a single worker.
    """
    task_args = (
        n_features,
        effective_rank,
        tail_strength,
        seed,
        n_parts,
        n_samples_per_part,
        dtype,
    )
    return client.submit(_dask_make_low_rank_covariance, *task_args)
def make_low_rank_matrix(
    n_samples=100,
    n_features=100,
    effective_rank=10,
    tail_strength=0.5,
    random_state=None,
    n_parts=1,
    n_samples_per_part=None,
    dtype="float32",
):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, CuPy RandomState instance, Dask RandomState instance \
            or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
    n_parts : int, optional (default=1)
        The number of parts of work.
    n_samples_per_part : int, optional (default=None)
        Row-chunk size of the output; computed as ``n_samples / n_parts``
        when None.
    dtype: str, optional (default='float32')
        dtype of generated data

    Returns
    -------
    X : Dask-CuPy array of shape [n_samples, n_features]
        The matrix.
    """
    rs = _create_rs_generator(random_state)
    n = min(n_samples, n_features)

    # Random (ortho normal) vectors: QR of a tall-skinny gaussian block
    # gives the left factor u; chunks are sized so dask's tall-skinny QR
    # applies.
    m1 = rs.standard_normal(
        (n_samples, n),
        chunks=(_generate_chunks_for_qr(n_samples, n, n_parts), -1),
        dtype=dtype,
    )
    u, _ = da.linalg.qr(m1)

    # Right factor v from the QR of a (n, n_features) gaussian block
    # (chunked along columns, so QR runs on its transpose layout).
    m2 = rs.standard_normal(
        (n, n_features),
        chunks=(-1, _generate_chunks_for_qr(n_features, n, n_parts)),
        dtype=dtype,
    )
    v, _ = da.linalg.qr(m2)

    # For final multiplication: rechunk both factors to the row-chunking
    # the caller expects for the product u @ v.
    if n_samples_per_part is None:
        n_samples_per_part = max(1, int(n_samples / n_parts))
    u = u.rechunk({0: n_samples_per_part, 1: -1})
    v = v.rechunk({0: n_samples_per_part, 1: -1})

    # Scale u's columns by the bell-shaped singular-value profile.
    # NOTE(review): `dtype` is not forwarded here, so the profile is
    # always float32 — confirm whether this matters for float64 output.
    local_s = _generate_singular_values(
        n, effective_rank, tail_strength, n_samples_per_part
    )
    s = da.from_array(local_s, chunks=(int(n_samples_per_part),))
    u *= s
    return da.dot(u, v)
@with_cupy_rmm
def make_regression(
    n_samples=100,
    n_features=100,
    n_informative=10,
    n_targets=1,
    bias=0.0,
    effective_rank=None,
    tail_strength=0.5,
    noise=0.0,
    shuffle=False,
    coef=False,
    random_state=None,
    n_parts=1,
    n_samples_per_part=None,
    order="F",
    dtype="float32",
    client=None,
    use_full_low_rank=True,
):
    """
    Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile.

    The output is generated by applying a (potentially biased) random linear
    regression model with "n_informative" nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        if not None:
            The approximate number of singular vectors required to explain
            most of the input data by linear combinations. Using this kind
            of singular spectrum in the input allows the generator to
            reproduce the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if "effective_rank" is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=False)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, CuPy RandomState instance, Dask RandomState instance \
            or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
    n_parts : int, optional (default=1)
        The number of parts of work.
    order : str, optional (default='F')
        Row-major or Col-major
    dtype: str, optional (default='float32')
        dtype of generated data
    use_full_low_rank : boolean (default=True)
        Whether to use the entire dataset to generate the low rank matrix.
        If False, it creates a low rank covariance and uses the
        corresponding covariance to generate a multivariate normal
        distribution on the remaining chunks

    Returns
    -------
    X : Dask-CuPy array of shape [n_samples, n_features]
        The input samples.
    y : Dask-CuPy array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : Dask-CuPy array of shape [n_features] \
            or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.

    Notes
    -----
    Known Performance Limitations:
     1. When `effective_rank` is set and `use_full_low_rank` is True, \
        we cannot generate order `F` by construction, and an explicit \
        transpose is performed on each part. This may cause memory to spike \
        (other parameters make order `F` by construction)
     2. When `n_targets > 1` and `order = 'F'` as above, we have to \
        explicitly transpose the `y` array. If `coef = True`, then we also \
        explicitly transpose the `ground_truth` array
     3. When `shuffle = True` and `order = F`, there are memory spikes to \
        shuffle the `F` order arrays

    .. note:: If out-of-memory errors are encountered in any of the above
              configurations, try increasing the `n_parts` parameter.
    """
    client = get_client(client=client)

    n_informative = min(n_features, n_informative)
    rs = _create_rs_generator(random_state)

    # Row-chunking: even chunks, with the remainder rows folded into the
    # last chunk so the sizes always sum to n_samples.
    if n_samples_per_part is None:
        n_samples_per_part = max(1, int(n_samples / n_parts))
    data_chunksizes = [n_samples_per_part] * n_parts
    data_chunksizes[-1] += n_samples % n_parts
    data_chunksizes = tuple(data_chunksizes)

    if effective_rank is None:
        # Randomly generate a well conditioned input set
        # NOTE(review): an `order` other than "F"/"C" leaves X unbound
        # here and fails later with NameError — consider validating.
        if order == "F":
            X = _f_order_standard_normal(
                client, rs, data_chunksizes, n_features, dtype
            )

        elif order == "C":
            X = rs.standard_normal(
                (n_samples, n_features),
                chunks=(data_chunksizes, -1),
                dtype=dtype,
            )

    else:
        # Randomly generate a low rank, fat tail input set
        if use_full_low_rank:
            X = make_low_rank_matrix(
                n_samples=n_samples,
                n_features=n_features,
                effective_rank=effective_rank,
                tail_strength=tail_strength,
                random_state=rs,
                n_parts=n_parts,
                n_samples_per_part=n_samples_per_part,
                dtype=dtype,
            )
            X = X.rechunk({0: data_chunksizes, 1: -1})
        else:
            # Cheaper path: build only the (n_features, n_features)
            # low-rank covariance once, then sample each chunk from a
            # multivariate normal with that covariance.
            seed = int(rs.randint(n_samples).compute())
            covar = _make_low_rank_covariance(
                client,
                n_features,
                effective_rank,
                tail_strength,
                seed,
                n_parts,
                n_samples_per_part,
                dtype,
            )
            X = _data_from_multivariate_normal(
                client, rs, covar, data_chunksizes, n_features, dtype
            )

        # Low-rank generation cannot honor `order` by construction, so
        # convert each part explicitly (see Notes in the docstring).
        X = _convert_to_order(
            client, X, data_chunksizes, order, n_features, dtype
        )

    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net)
    ground_truth = 100.0 * rs.standard_normal(
        (n_informative, n_targets),
        chunks=(n_samples_per_part, -1),
        dtype=dtype,
    )

    y = da.dot(X[:, :n_informative], ground_truth) + bias

    # Pad the coefficient vector with zeros for the uninformative features
    if n_informative != n_features:
        zeroes = 0.0 * rs.standard_normal(
            (n_features - n_informative, n_targets), dtype=dtype
        )
        ground_truth = da.concatenate([ground_truth, zeroes], axis=0)

    ground_truth = ground_truth.rechunk(-1)

    # Add noise
    if noise > 0.0:
        y += rs.normal(scale=noise, size=y.shape, dtype=dtype)

    # Randomly permute samples and features
    if shuffle:
        # NOTE(review): this uses NumPy's global RNG rather than the
        # user-supplied random_state, so the feature permutation is not
        # reproducible via `random_state` alone — confirm intent.
        features_indices = np.random.permutation(n_features)
        X, y = _shuffle(
            client,
            rs,
            X,
            y,
            data_chunksizes,
            n_features,
            features_indices,
            n_targets,
            dtype,
        )

        # Keep the coefficients aligned with the permuted feature columns
        ground_truth = ground_truth[features_indices, :]

    # Collapse (n_samples, 1) targets to a flat vector
    y = da.squeeze(y)

    if order == "F" and n_targets > 1:
        # Multi-target y (and optionally the coefficients) must be
        # transposed explicitly to honor F order (see docstring Notes).
        y = _convert_to_order(client, y, y.chunks[0], order, n_targets, dtype)
        if coef:
            ground_truth = _convert_to_order(
                client,
                ground_truth,
                ground_truth.chunks[0],
                order,
                n_targets,
                dtype,
            )

    if coef:
        ground_truth = da.squeeze(ground_truth)
        return X, y, ground_truth
    else:
        return X, y
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/datasets/classification.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
from cuml.datasets.classification import _generate_hypercube
from cuml.datasets.classification import (
make_classification as sg_make_classification,
)
from cuml.datasets.utils import _create_rs_generator
from cuml.dask.datasets.utils import _get_X
from cuml.dask.datasets.utils import _get_labels
from cuml.dask.datasets.utils import _create_delayed
from cuml.dask.common.utils import get_client
from cuml.common import with_cupy_rmm
import dask.array as da
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def _create_covariance(dims, seed, dtype="float32"):
    """Draw a random tensor with entries uniform in [-1, 1).

    Runs on a worker; a seeded local RandomState keeps the result
    reproducible for a given seed.
    """
    rng = cp.random.RandomState(seed=seed)
    uniform = rng.rand(*dims, dtype=dtype)
    return 2 * uniform - 1
@with_cupy_rmm
def make_classification(
    n_samples=100,
    n_features=20,
    n_informative=2,
    n_redundant=2,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=2,
    weights=None,
    flip_y=0.01,
    class_sep=1.0,
    hypercube=True,
    shift=0.0,
    scale=1.0,
    shuffle=True,
    random_state=None,
    order="F",
    dtype="float32",
    n_parts=None,
    client=None,
):
    """
    Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of an `n_informative`-dimensional hypercube with sides of
    length :py:`2 * class_sep` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.

    Without shuffling, `X` horizontally stacks features in the following
    order: the primary `n_informative` features, followed by `n_redundant`
    linear combinations of the informative features, followed by `n_repeated`
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    :py:`X[:, :n_informative + n_redundant + n_repeated]`.

    Examples
    --------
    .. code-block:: python

        >>> from dask.distributed import Client
        >>> from dask_cuda import LocalCUDACluster
        >>> from cuml.dask.datasets.classification import make_classification
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)
        >>> X, y = make_classification(n_samples=10, n_features=4,
        ...                            random_state=1, n_informative=2,
        ...                            n_classes=2)
        >>> print(X.compute()) # doctest: +SKIP
        [[-1.1273878   1.2844919  -0.32349187  0.1595734 ]
        [ 0.80521786 -0.65946865 -0.40753683  0.15538901]
        [ 1.0404129  -1.481386    1.4241115   1.2664981 ]
        [-0.92821544 -0.6805706  -0.26001272  0.36004275]
        [-1.0392245  -1.1977317   0.16345565 -0.21848428]
        [ 1.2273135  -0.529214    2.4799604   0.44108105]
        [-1.9163864  -0.39505136 -1.9588828  -1.8881643 ]
        [-0.9788184  -0.89851004 -0.08339313  0.1130247 ]
        [-1.0549078  -0.8993015  -0.11921967  0.04821599]
        [-1.8388828  -1.4063598  -0.02838472 -1.0874642 ]]
        >>> print(y.compute()) # doctest: +SKIP
        [1 0 0 0 0 1 0 0 0 0]
        >>> client.close()
        >>> cluster.close()

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features,
        `n_repeated` duplicated features and
        :py:`n_features-n_informative-n_redundant-n_repeated` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape :py:`(n_classes,)` or :py:`(n_classes - 1,)`\
        , (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if :py:`len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of
        `weights` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order: str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    n_parts : int (default = None)
        number of partitions to generate (this can be greater
        than the number of workers)

    Returns
    -------
    X : dask.array backed by CuPy array of shape [n_samples, n_features]
        The generated samples.
    y : dask.array backed by CuPy array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    How we extended the dask MNMG version from the single GPU version:

    1. We generate centroids of shape ``(n_centroids, n_informative)``
    2. We generate an informative covariance of shape \
        ``(n_centroids, n_informative, n_informative)``
    3. We generate a redundant covariance of shape \
        ``(n_informative, n_redundant)``
    4. We generate the indices for the repeated features \
    We pass along the references to the futures of the above arrays \
        with each part to the single GPU \
        `cuml.datasets.classification.make_classification` so that each \
        part (and worker) has access to the correct values to generate \
        data from the same covariances
    """
    client = get_client(client=client)

    rs = _create_rs_generator(random_state)

    workers = list(client.scheduler_info()["workers"].keys())

    # Default to one partition per worker; assign parts round-robin
    n_parts = n_parts if n_parts is not None else len(workers)
    parts_workers = (workers * n_parts)[:n_parts]

    n_clusters = n_classes * n_clusters_per_class

    # create centroids (shared by every part so clusters agree globally)
    centroids = cp.array(
        _generate_hypercube(n_clusters, n_informative, rs)
    ).astype(dtype, copy=False)

    # Shared covariances are built once on a worker and the futures are
    # handed to every part, so all parts sample from the same model.
    covariance_seeds = rs.randint(n_features, size=2)
    informative_covariance = client.submit(
        _create_covariance,
        (n_clusters, n_informative, n_informative),
        int(covariance_seeds[0]),
        pure=False,
    )

    redundant_covariance = client.submit(
        _create_covariance,
        (n_informative, n_redundant),
        int(covariance_seeds[1]),
        pure=False,
    )

    # repeated indices: columns drawn (with replacement) from the
    # informative + redundant block
    n = n_informative + n_redundant
    repeated_indices = (
        (n - 1) * rs.rand(n_repeated, dtype=dtype) + 0.5
    ).astype(np.intp)

    # scale and shift: draw random values once when not user-specified
    if shift is None:
        shift = (2 * rs.rand(n_features, dtype=dtype) - 1) * class_sep

    if scale is None:
        scale = 1 + 100 * rs.rand(n_features, dtype=dtype)

    # Create arrays on each worker (gpu): even row split, remainder rows
    # folded into the last part
    rows_per_part = max(1, int(n_samples / n_parts))

    worker_rows = [rows_per_part] * n_parts

    worker_rows[-1] += n_samples % n_parts

    worker_rows = tuple(worker_rows)

    # Each part gets its own seed so the parts differ but stay
    # reproducible for a fixed random_state
    part_seeds = rs.permutation(n_parts)
    parts = [
        client.submit(
            sg_make_classification,
            worker_rows[i],
            n_features,
            n_informative,
            n_redundant,
            n_repeated,
            n_classes,
            n_clusters_per_class,
            weights,
            flip_y,
            class_sep,
            hypercube,
            shift,
            scale,
            shuffle,
            int(part_seeds[i]),
            order,
            dtype,
            centroids,
            informative_covariance,
            redundant_covariance,
            repeated_indices,
            pure=False,
            workers=[parts_workers[i]],
        )
        for i in range(len(parts_workers))
    ]

    # Split the (X, y) tuples returned by each part
    X_parts = [
        client.submit(_get_X, f, pure=False) for idx, f in enumerate(parts)
    ]
    y_parts = [
        client.submit(_get_labels, f, pure=False)
        for idx, f in enumerate(parts)
    ]

    X_dela = _create_delayed(X_parts, dtype, worker_rows, n_features)
    y_dela = _create_delayed(y_parts, np.int64, worker_rows)

    X = da.concatenate(X_dela)
    y = da.concatenate(y_dela)

    return X, y
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/datasets/__init__.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.datasets.blobs import make_blobs
from cuml.dask.datasets.regression import make_regression
from cuml.dask.datasets.classification import make_classification
# Public API of cuml.dask.datasets
__all__ = [
    "make_blobs",
    "make_regression",
    "make_classification",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/datasets/utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.array as da
import dask.delayed
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def _get_X(t):
return t[0]
def _get_labels(t):
return t[1]
def _dask_array_from_delayed(part, dtype, nrows, ncols=None):
    """Wrap a single future/delayed chunk as a dask array.

    When ``ncols`` is None the result is a flat 1-D array of length
    ``nrows``; otherwise it has shape ``(nrows, ncols)``. ``ncols=1``
    deliberately yields a 2-D ``(nrows, 1)`` array rather than a flat
    one.
    """
    if ncols:
        shape = (nrows, ncols)
    else:
        shape = (nrows,)
    return da.from_delayed(
        dask.delayed(part), shape=shape, meta=cp.zeros((1)), dtype=dtype
    )
def _create_delayed(parts, dtype, rows_per_part, ncols=None):
    """
    This function takes a list of GPU futures and returns
    a list of delayed dask arrays, with each part having
    a corresponding dask.array in the list
    """
    delayed_arrays = []
    for idx, part in enumerate(parts):
        delayed_arrays.append(
            _dask_array_from_delayed(part, dtype, rows_per_part[idx], ncols)
        )
    return delayed_arrays
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/feature_extraction/__init__.py | #
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.feature_extraction import text
__all__ = ["text"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask/feature_extraction | rapidsai_public_repos/cuml/python/cuml/dask/feature_extraction/text/tfidf_transformer.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask
from toolz import first
import dask.array
from cuml.common import with_cupy_rmm
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.utils import wait_and_raise_from_futures
from cuml.dask.common.func import reduce
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.feature_extraction.text import TfidfTransformer as s_TfidfTransformer
class TfidfTransformer(BaseEstimator, DelayedTransformMixin):

    """
    Distributed TF-IDF  transformer

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> from sklearn.datasets import fetch_20newsgroups
        >>> from sklearn.feature_extraction.text import CountVectorizer
        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> from cuml.dask.common import to_sparse_dask_array
        >>> from cuml.dask.naive_bayes import MultinomialNB
        >>> import dask
        >>> from cuml.dask.feature_extraction.text import TfidfTransformer

        >>> # Create a local CUDA cluster
        >>> cluster = LocalCUDACluster()
        >>> client = Client(cluster)

        >>> # Load corpus
        >>> twenty_train = fetch_20newsgroups(subset='train',
        ...                           shuffle=True, random_state=42)
        >>> cv = CountVectorizer()
        >>> xformed = cv.fit_transform(twenty_train.data).astype(cp.float32)
        >>> X = to_sparse_dask_array(xformed, client)

        >>> y = dask.array.from_array(twenty_train.target, asarray=False,
        ...                       fancy=False).astype(cp.int32)

        >>> multi_gpu_transformer = TfidfTransformer()
        >>> X_transformed = multi_gpu_transformer.fit_transform(X)
        >>> X_transformed.compute_chunk_sizes()
        dask.array<...>

        >>> model = MultinomialNB()
        >>> model.fit(X_transformed, y)
        <cuml.dask.naive_bayes.naive_bayes.MultinomialNB object at 0x...>
        >>> result = model.score(X_transformed, y)
        >>> print(result) # doctest: +SKIP
        array(0.93264981)
        >>> client.close()
        >>> cluster.close()

    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        """
        Create new distributed TF-IDF transformer instance

        Parameters
        -----------

        client : dask.distributed.Client optional Dask client to use
        """
        super().__init__(client=client, verbose=verbose, **kwargs)

        self.datatype = "cupy"

        # Make any potential model args available and catch any potential
        # ValueErrors before distributed training begins.
        self._set_internal_model(s_TfidfTransformer(**kwargs))

    @staticmethod
    @with_cupy_rmm
    def _set_doc_stats(X, kwargs):
        # Runs on a worker: build a local single-GPU transformer and
        # record this partition's document statistics (document
        # frequencies and sample count) for the global idf reduction.
        model = s_TfidfTransformer(**kwargs)

        # Below is only required if we have to set stats
        if model.use_idf:
            model._set_doc_stats(X)

        return model

    @staticmethod
    def _merge_stats_to_model(models):
        # Reduction step: accumulate every partition's document stats
        # into the first model.
        # NOTE(review): `__n_samples`/`__df` are name-mangled here to
        # `_TfidfTransformer__n_samples`/`_TfidfTransformer__df`; this
        # only lines up with the single-GPU model's private attributes
        # because that class is also literally named `TfidfTransformer`
        # — confirm if either class is ever renamed.
        modela = first(models)

        if modela.use_idf:
            for model in models[1:]:
                modela.__n_samples += model.__n_samples
                modela.__df += model.__df
        return modela

    @staticmethod
    def _set_idf_diag(model):
        # Finalize: convert the merged document frequencies into the
        # idf diagonal used at transform time.
        model._set_idf_diag()
        return model

    @with_cupy_rmm
    def fit(self, X, y=None):
        """
        Fit distributed TFIDF Transformer

        Parameters
        ----------

        X : dask.Array with blocks containing dense or sparse cupy arrays

        Returns
        -------

        cuml.dask.feature_extraction.text.TfidfTransformer instance
        """

        # Only Dask.Array supported for now
        if not isinstance(X, dask.array.core.Array):
            raise ValueError("Only dask.Array is supported for X")

        if len(X.chunks[1]) != 1:
            raise ValueError(
                "X must be chunked by row only. "
                "Multi-dimensional chunking is not supported"
            )

        # We don't do anything if we don't need idf
        if not self.internal_model.use_idf:
            return self

        futures = DistributedDataHandler.create(X, self.client)

        # Per-partition stats, computed where the data lives
        models = [
            self.client.submit(
                self._set_doc_stats, part, self.kwargs, pure=False
            )
            for w, part in futures.gpu_futures
        ]

        # Tree-reduce partition stats into a single fitted model
        models = reduce(models, self._merge_stats_to_model, client=self.client)

        wait_and_raise_from_futures([models])

        models = self.client.submit(self._set_idf_diag, models, pure=False)

        wait_and_raise_from_futures([models])

        self._set_internal_model(models)

        return self

    @staticmethod
    def _get_part(parts, idx):
        # Select one partition from a list of parts.
        return parts[idx]

    @staticmethod
    def _get_size(arrs):
        # Number of rows in a partition.
        return arrs.shape[0]

    def fit_transform(self, X, y=None):
        """
        Fit distributed TFIDFTransformer and then transform
        the given set of data samples.

        Parameters
        ----------

        X : dask.Array with blocks containing dense or sparse cupy arrays

        Returns
        -------

        dask.Array with blocks containing transformed sparse cupy arrays

        """
        return self.fit(X).transform(X)

    def transform(self, X, y=None):
        """
        Use distributed TFIDFTransformer to transform the
        given set of data samples.

        Parameters
        ----------

        X : dask.Array with blocks containing dense or sparse cupy arrays

        Returns
        -------

        dask.Array with blocks containing transformed sparse cupy arrays

        """
        if not isinstance(X, dask.array.core.Array):
            raise ValueError("Only dask.Array is supported for X")
        return self._transform(
            X, n_dims=2, delayed=True, output_collection_type="cupy"
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask/feature_extraction | rapidsai_public_repos/cuml/python/cuml/dask/feature_extraction/text/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.feature_extraction.text.tfidf_transformer import (
TfidfTransformer,
)
__all__ = ["TfidfTransformer"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/linear_regression.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import SyncFitMixinLinearModel
from raft_dask.common.comms import get_raft_comm_state
from dask.distributed import get_worker
class LinearRegression(
    BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin
):
    """
    LinearRegression is a simple machine learning model where the response y is
    modelled by a linear combination of the predictors in X.

    cuML's dask Linear Regression (multi-node multi-gpu) expects dask cuDF
    DataFrame and provides an eigendecomposition-based algorithm, Eig, to
    fit a linear model.
    (SVD, which is more stable than eig, will be added in an upcoming version.)
    Eig algorithm is usually preferred when the X is a tall and skinny matrix.
    As the number of features in X increases, the accuracy of Eig algorithm
    drops.

    This is an experimental implementation of dask Linear Regression. It
    supports input X that has more than one column. Single column input
    X will be supported after SVD algorithm is added in an upcoming version.

    Parameters
    ----------
    algorithm : 'eig'
        Eig uses a eigendecomposition of the covariance matrix, and is much
        faster.
        SVD is slower, but guaranteed to be stable.
    fit_intercept : boolean (default = True)
        LinearRegression adds an additional term c to correct for the global
        mean of y, modeling the response as "x * beta + c".
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by its
        L2 norm.
        If False, no scaling will be done.

    Attributes
    ----------
    coef_ : cuDF series, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.
    """
    def __init__(self, *, client=None, verbose=False, **kwargs):
        # All model hyperparameters are captured via **kwargs by
        # BaseEstimator and forwarded to the per-worker model at fit time.
        super().__init__(client=client, verbose=verbose, **kwargs)
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Features for regression
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
            Labels (outcome values)
        """
        # Synchronous multi-worker fit; keep the first of the returned
        # per-worker models as the internal model used for prediction.
        models = self._fit(
            model_func=LinearRegression._create_model, data=(X, y)
        )
        self._set_internal_model(models[0])
        return self
    def predict(self, X, delayed=True):
        """
        Make predictions for X and returns a dask collection.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.

        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
        """
        # Delegates to DelayedPredictionMixin; `delayed` selects lazy vs.
        # eager execution of the distributed prediction.
        return self._predict(X, delayed=delayed)
    def get_param_names(self):
        # Hyperparameters are exactly the keyword arguments captured by
        # BaseEstimator at construction time.
        return list(self.kwargs.keys())
    @staticmethod
    @mnmg_import
    def _create_model(sessionId, datatype, **kwargs):
        # Runs on a dask worker: build the single-GPU multi-GPU-aware (MG)
        # model bound to the RAFT comms handle of this session so the
        # workers can cooperate during fit.
        from cuml.linear_model.linear_regression_mg import LinearRegressionMG
        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        return LinearRegressionMG(
            handle=handle, output_type=datatype, **kwargs
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/ridge.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import SyncFitMixinLinearModel
from raft_dask.common.comms import get_raft_comm_state
from dask.distributed import get_worker
class Ridge(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin):
    """
    Ridge extends LinearRegression by providing L2 regularization on the
    coefficients when predicting response y with a linear combination of the
    predictors in X. It can reduce the variance of the predictors, and improves
    the conditioning of the problem.

    cuML's dask Ridge (multi-node multi-gpu) expects dask cuDF
    DataFrame and provides an eigendecomposition-based algorithm, Eig, to
    fit a linear model.
    (SVD, which is more stable than eig, will be added in an upcoming version)
    Eig algorithm is usually preferred when the X is a tall and skinny matrix.
    As the number of features in X increases, the accuracy of Eig algorithm
    drops.

    This is an experimental implementation of dask Ridge Regression. It
    supports input X that has more than one column. Single column input
    X will be supported after SVD algorithm is added in an upcoming version.

    Parameters
    ----------
    alpha : float (default = 1.0)
        Regularization strength - must be a positive float. Larger values
        specify stronger regularization. Array input will be supported later.
    solver : {'eig'}
        Eig uses a eigendecomposition of the covariance matrix, and is much
        faster.
        Other solvers will be supported in the future.
    fit_intercept : boolean (default = True)
        If True, Ridge adds an additional term c to correct for the global
        mean of y, modeling the response as "x * beta + c".
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by its L2
        norm.
        If False, no scaling will be done.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.
    """
    def __init__(self, *, client=None, verbose=False, **kwargs):
        super().__init__(client=client, verbose=verbose, **kwargs)
        # Populated once fit() completes; kept for API parity with the
        # single-GPU estimator.
        self.coef_ = None
        self.intercept_ = None
        # NOTE(review): _model_fit and _consec_call are initialized here but
        # not referenced anywhere else in this class — confirm they are
        # still needed before removing.
        self._model_fit = False
        self._consec_call = 0
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Features for regression
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
            Labels (outcome values)
        """
        # Synchronous multi-worker fit; keep the first of the returned
        # per-worker models as the internal model used for prediction.
        models = self._fit(model_func=Ridge._create_model, data=(X, y))
        self._set_internal_model(models[0])
        return self
    def predict(self, X, delayed=True):
        """
        Make predictions for X and returns a dask collection.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.

        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
        """
        # Delegates to DelayedPredictionMixin; `delayed` selects lazy vs.
        # eager execution of the distributed prediction.
        return self._predict(X, delayed=delayed)
    def get_param_names(self):
        # Hyperparameters are exactly the keyword arguments captured by
        # BaseEstimator at construction time.
        return list(self.kwargs.keys())
    @staticmethod
    @mnmg_import
    def _create_model(sessionId, datatype, **kwargs):
        # Runs on a dask worker: build the single-GPU MG Ridge bound to the
        # RAFT comms handle of this session so the workers can cooperate.
        from cuml.linear_model.ridge_mg import RidgeMG
        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        return RidgeMG(handle=handle, output_type=datatype, **kwargs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/lasso.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.solvers import CD
from cuml.dask.common.base import BaseEstimator
class Lasso(BaseEstimator):
    """
    Lasso extends LinearRegression by providing L1 regularization on the
    coefficients when predicting response y with a linear combination of the
    predictors in X. It can zero some of the coefficients for feature
    selection and improves the conditioning of the problem.

    cuML's Lasso accepts an array-like object or cuDF DataFrame and
    uses coordinate descent to fit a linear model.

    Parameters
    ----------
    alpha : float (default = 1.0)
        Constant that multiplies the L1 term.
        alpha = 0 is equivalent to an ordinary least square, solved by the
        LinearRegression class.
        For numerical reasons, using alpha = 0 with the Lasso class is not
        advised.
        Given this, you should use the LinearRegression class.
    fit_intercept : boolean (default = True)
        If True, Lasso tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by its L2
        norm.
        If False, no scaling will be done.
    max_iter : int (default = 1000)
        The maximum number of iterations
    tol : float (default = 1e-3)
        The tolerance for the optimization: if the updates are smaller than
        tol, the optimization code checks the dual gap for optimality and
        continues until it is smaller than tol.
    selection : {'cyclic', 'random'} (default='cyclic')
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default.
        This (setting to 'random') often leads to significantly faster
        convergence especially when tol is higher than 1e-4.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.

    For additional docs, see `scikitlearn's Lasso
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html>`_.
    """
    def __init__(self, *, client=None, **kwargs):
        # NOTE(review): kwargs (possibly including 'selection') are forwarded
        # to super().__init__ before 'selection' is stripped below — confirm
        # BaseEstimator tolerates the extra key.
        super().__init__(client=client, **kwargs)
        # Map the scikit-learn style 'selection' parameter onto the CD
        # solver's 'shuffle' flag: 'random' -> True, 'cyclic' -> False.
        kwargs["shuffle"] = False
        if "selection" in kwargs:
            if kwargs["selection"] == "random":
                kwargs["shuffle"] = True
            del kwargs["selection"]
        self.solver = CD(client=client, **kwargs)
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
        y : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense vector (floats or doubles) of shape (n_samples, 1).
        """
        # All fitting is delegated to the distributed coordinate-descent
        # solver built in __init__.
        self.solver.fit(X, y)
        return self
    def predict(self, X, delayed=True):
        """
        Predicts the y for X.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.

        Returns
        -------
        y : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense vector (floats or doubles) of shape (n_samples, 1).
        """
        return self.solver.predict(X, delayed=delayed)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/__init__.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
import warnings
# The dask-backed estimators can only be imported when dask is installed;
# when it is missing, importing this package degrades to a warning instead
# of raising, so the rest of cuml remains usable.
if has_dask():
    from cuml.dask.linear_model.linear_regression import LinearRegression
    from cuml.dask.linear_model.ridge import Ridge
    from cuml.dask.linear_model.lasso import Lasso
    from cuml.dask.linear_model.elastic_net import ElasticNet
    from cuml.dask.linear_model.logistic_regression import LogisticRegression
else:
    warnings.warn(
        "Dask not found. All Dask-based multi-GPU operation is disabled."
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/elastic_net.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.solvers import CD
from cuml.dask.common.base import BaseEstimator
class ElasticNet(BaseEstimator):
    """
    ElasticNet extends LinearRegression with combined L1 and L2 regularizations
    on the coefficients when predicting response y with a linear combination of
    the predictors in X. It can reduce the variance of the predictors, force
    some coefficients to be small, and improves the conditioning of the
    problem.

    cuML's ElasticNet accepts an array-like object or cuDF DataFrame and uses
    coordinate descent to fit a linear model.

    Parameters
    ----------
    alpha : float (default = 1.0)
        Constant that multiplies the L1 term.
        alpha = 0 is equivalent to an ordinary least square, solved by the
        LinearRegression object.
        For numerical reasons, using alpha = 0 with the Lasso object is not
        advised.
        Given this, you should use the LinearRegression object.
    l1_ratio: float (default = 0.5)
        The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it is
        an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    fit_intercept : boolean (default = True)
        If True, ElasticNet tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by its L2
        norm.
        If False, no scaling will be done.
    max_iter : int (default = 1000)
        The maximum number of iterations
    tol : float (default = 1e-3)
        The tolerance for the optimization: if the updates are smaller than
        tol, the optimization code checks the dual gap for optimality and
        continues until it is smaller than tol.
    selection : {'cyclic', 'random'} (default='cyclic')
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default.
        This (setting to 'random') often leads to significantly faster
        convergence especially when tol is higher than 1e-4.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.

    For additional docs, see `scikitlearn's ElasticNet
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html>`_.
    """
    def __init__(self, *, client=None, **kwargs):
        super().__init__(client=client, **kwargs)
        # Map the scikit-learn style 'selection' parameter onto the CD
        # solver's 'shuffle' flag: 'random' -> True, 'cyclic' -> False.
        kwargs["shuffle"] = False
        if kwargs.pop("selection", "cyclic") == "random":
            kwargs["shuffle"] = True
        self.solver = CD(client=client, **kwargs)
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
        y : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense vector (floats or doubles) of shape (n_samples, 1).
        """
        # All fitting is delegated to the distributed coordinate-descent
        # solver built in __init__.
        self.solver.fit(X, y)
        return self
    def predict(self, X, delayed=True):
        """
        Predicts the y for X.

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.

        Returns
        -------
        y : Dask cuDF DataFrame or CuPy backed Dask Array
            Dense vector (floats or doubles) of shape (n_samples, 1).
        """
        # Bug fix: forward `delayed` to the solver so the documented
        # lazy/eager behavior takes effect. Previously the argument was
        # dropped (always defaulting to lazy), inconsistent with
        # Lasso.predict which forwards it.
        return self.solver.predict(X, delayed=delayed)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/linear_model/logistic_regression.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import SyncFitMixinLinearModel
from cuml.dask.linear_model import LinearRegression
from raft_dask.common.comms import get_raft_comm_state
from dask.distributed import get_worker
from cuml.dask.common import parts_to_ranks
from cuml.dask.common.input_utils import DistributedDataHandler, concatenate
from raft_dask.common.comms import Comms
from cuml.dask.common.utils import wait_and_raise_from_futures
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
class LogisticRegression(LinearRegression):
    """
    LogisticRegression is a linear model that is used to model probability of
    occurrence of certain events, for example probability of success or fail of
    an event.

    cuML's dask Logistic Regression (multi-node multi-gpu) expects dask cuDF
    DataFrame and provides the L-BFGS algorithm to fit the logistic model. It
    currently supports single class, l2 regularization, and sigmoid loss.

    Note that, just like in Scikit-learn, the bias will not be regularized.

    Examples
    --------

    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> import dask_cudf
        >>> import cudf
        >>> import numpy as np
        >>> from cuml.dask.linear_model import LogisticRegression
        >>> cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES="0,1")
        >>> client = Client(cluster)
        >>> X = cudf.DataFrame()
        >>> X['col1'] = np.array([1,1,2,2], dtype = np.float32)
        >>> X['col2'] = np.array([1,2,2,3], dtype = np.float32)
        >>> y = cudf.Series(np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32))
        >>> X_ddf = dask_cudf.from_cudf(X, npartitions=2)
        >>> y_ddf = dask_cudf.from_cudf(y, npartitions=2)
        >>> reg = LogisticRegression()
        >>> reg.fit(X_ddf, y_ddf)
        LogisticRegression()
        >>> print(reg.coef_)
                 0         1
        0  0.69861  0.570058
        >>> print(reg.intercept_)
        0   -2.188068
        dtype: float32
        >>> X_new = cudf.DataFrame()
        >>> X_new['col1'] = np.array([1,5], dtype = np.float32)
        >>> X_new['col2'] = np.array([2,5], dtype = np.float32)
        >>> X_new_ddf = dask_cudf.from_cudf(X_new, npartitions=2)
        >>> preds = reg.predict(X_new_ddf)
        >>> print(preds.compute())
        0    0.0
        1    1.0
        dtype: float32

    Parameters
    ----------
    tol : float (default = 1e-4)
        Tolerance for stopping criteria.
        The exact stopping conditions depend on the L-BFGS solver.
        Check the solver's documentation for more details:

        * :class:`Quasi-Newton (L-BFGS)<cuml.QN>`
    C : float (default = 1.0)
        Inverse of regularization strength; must be a positive float.
    fit_intercept : boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    max_iter : int (default = 1000)
        Maximum number of iterations taken for the solvers to converge.
    linesearch_max_iter : int (default = 50)
        Max number of linesearch iterations per outer iteration used in the
        solver.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    coef_: dev array, dim (n_classes, n_features) or (n_classes, n_features+1)
        The estimated coefficients for the linear regression model.
    intercept_: device array (n_classes, 1)
        The independent term. If `fit_intercept` is False, will be 0.

    Notes
    -----
    cuML's LogisticRegression uses a different solver that the equivalent
    Scikit-learn, except when there is no penalty and `solver=lbfgs` is
    used in Scikit-learn. This can cause (smaller) differences in the
    coefficients and predictions of the model, similar to
    using different solvers in Scikit-learn.

    For additional information, see `Scikit-learn's LogisticRegression
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_.
    """
    def __init__(self, **kwargs):
        # All hyperparameters are forwarded to LinearRegression /
        # BaseEstimator, which capture them in self.kwargs.
        super().__init__(**kwargs)
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Features for regression
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
            Labels (outcome values)
        """
        # Synchronous multi-worker fit; keep the first of the returned
        # per-worker models as the internal model used for prediction.
        models = self._fit(
            model_func=LogisticRegression._create_model, data=(X, y)
        )
        self._set_internal_model(models[0])
        return self
    @staticmethod
    @mnmg_import
    def _create_model(sessionId, datatype, **kwargs):
        # Runs on a dask worker: build the single-GPU MG model bound to the
        # RAFT comms handle of this session so the workers can cooperate.
        from cuml.linear_model.logistic_regression_mg import (
            LogisticRegressionMG,
        )
        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        # NOTE(review): unlike LinearRegression._create_model, `datatype` is
        # not forwarded as output_type here — confirm this is intentional.
        return LogisticRegressionMG(handle=handle, **kwargs)
    @staticmethod
    def _func_fit(f, data, n_rows, n_cols, partsToSizes, rank):
        # Concatenate all local partitions so the MG model is fed a single
        # (X, y) chunk per worker.
        inp_X = concatenate([X for X, _ in data])
        inp_y = concatenate([y for _, y in data])
        # partsToSizes holds (rank, size) pairs, one per partition; collapse
        # them into a single aggregate row count per rank to match the
        # concatenation above.
        n_ranks = max([p[0] for p in partsToSizes]) + 1
        aggregated_partsToSizes = [[i, 0] for i in range(n_ranks)]
        for p in partsToSizes:
            aggregated_partsToSizes[p[0]][1] += p[1]
        return f.fit(
            [(inp_X, inp_y)], n_rows, n_cols, aggregated_partsToSizes, rank
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/ensemble/randomforestclassifier.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask
from dask.distributed import default_client
from cuml.dask.ensemble.base import BaseRandomForestModel
from cuml.dask.common.base import (
DelayedPredictionMixin,
DelayedPredictionProbaMixin,
)
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.ensemble import RandomForestClassifier as cuRFC
from cuml.dask.common.base import BaseEstimator
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
class RandomForestClassifier(
BaseRandomForestModel,
DelayedPredictionMixin,
DelayedPredictionProbaMixin,
BaseEstimator,
):
"""
Experimental API implementing a multi-GPU Random Forest classifier
model which fits multiple decision tree classifiers in an
ensemble. This uses Dask to partition data over multiple GPUs
(possibly on different nodes).
Currently, this API makes the following assumptions:
* The set of Dask workers used between instantiation, fit, \
and predict are all consistent
* Training data comes in the form of cuDF dataframes or Dask Arrays \
distributed so that each worker has at least one partition.
* The get_summary_text and get_detailed_text functions provides the \
text representation of the forest on the worker.
Future versions of the API will support more flexible data
distribution and additional input types.
The distributed algorithm uses an *embarrassingly-parallel*
approach. For a forest with `N` trees being built on `w` workers, each
worker simply builds `N/w` trees on the data it has available
locally. In many cases, partitioning the data so that each worker
builds trees on a subset of the total dataset works well, but
it generally requires the data to be well-shuffled in advance.
Alternatively, callers can replicate all of the data across
workers so that ``rf.fit`` receives `w` partitions, each containing the
same data. This would produce results approximately identical to
single-GPU fitting.
Please check the single-GPU implementation of Random Forest
classifier for more information about the underlying algorithm.
Parameters
----------
n_estimators : int (default = 100)
total number of trees in the forest (not per-worker)
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
split_criterion : int or string (default = ``0`` (``'gini'``))
The criterion used to split nodes.\n
* ``0`` or ``'gini'`` for gini impurity
* ``1`` or ``'entropy'`` for information gain (entropy)
* ``2`` or ``'mse'`` for mean squared error
* ``4`` or ``'poisson'`` for poisson half deviance
* ``5`` or ``'gamma'`` for gamma half deviance
* ``6`` or ``'inverse_gaussian'`` for inverse gaussian deviance
``2``, ``'mse'``, ``4``, ``'poisson'``, ``5``, ``'gamma'``, ``6``,
``'inverse_gaussian'`` not valid for classification
bootstrap : boolean (default = True)
Control bootstrapping.\n
* If ``True``, each tree in the forest is built on a bootstrapped
sample with replacement.
* If ``False``, the whole dataset is used to build each tree.
max_samples : float (default = 1.0)
Ratio of dataset rows used while fitting each tree.
max_depth : int (default = 16)
Maximum tree depth. Must be greater than 0.
Unlimited depth (i.e, until leaves are pure)
is not supported.\n
.. note:: This default differs from scikit-learn's
random forest, which defaults to unlimited depth.
max_leaves : int (default = -1)
Maximum leaf nodes per tree. Soft constraint. Unlimited, If ``-1``.
max_features : float (default = 'auto')
Ratio of number of features (columns) to consider
per node split.\n
* If type ``int`` then ``max_features`` is the absolute count of
features to be used.
* If type ``float`` then ``max_features`` is a fraction.
* If ``'auto'`` then ``max_features=n_features = 1.0``.
* If ``'sqrt'`` then ``max_features=1/sqrt(n_features)``.
* If ``'log2'`` then ``max_features=log2(n_features)/n_features``.
* If ``None``, then ``max_features = 1.0``.
n_bins : int (default = 128)
Maximum number of bins used by the split algorithm per feature.
min_samples_leaf : int or float (default = 1)
The minimum number of samples (rows) in each leaf node.\n
* If type ``int``, then ``min_samples_leaf`` represents the minimum
number.
* If ``float``, then ``min_samples_leaf`` represents a fraction
and ``ceil(min_samples_leaf * n_rows)`` is the minimum number of
samples for each leaf node.
min_samples_split : int or float (default = 2)
The minimum number of samples required to split an internal
node.\n
* If type ``int``, then ``min_samples_split`` represents the minimum
number.
* If type ``float``, then ``min_samples_split`` represents a fraction
and ``ceil(min_samples_split * n_rows)`` is the minimum number of
samples for each split.
n_streams : int (default = 4 )
Number of parallel streams used for forest building
workers : optional, list of strings
Dask addresses of workers to use for computation.
If None, all available Dask workers will be used.
random_state : int (default = None)
Seed for the random number generator. Unseeded by default.
ignore_empty_partitions: Boolean (default = False)
Specify behavior when a worker does not hold any data
while splitting. When True, it returns the results from workers
with data (the number of trained estimators will be less than
n_estimators) When False, throws a RuntimeError.
This is an experimental parameter, and may be removed
in the future.
Examples
--------
For usage examples, please see the RAPIDS notebooks repository:
https://github.com/rapidsai/cuml/blob/main/notebooks/random_forest_mnmg_demo.ipynb
"""
def __init__(
self,
*,
workers=None,
client=None,
verbose=False,
n_estimators=100,
random_state=None,
ignore_empty_partitions=False,
**kwargs,
):
super().__init__(client=client, verbose=verbose, **kwargs)
self._create_model(
model_func=RandomForestClassifier._construct_rf,
client=client,
workers=workers,
n_estimators=n_estimators,
base_seed=random_state,
ignore_empty_partitions=ignore_empty_partitions,
**kwargs,
)
@staticmethod
def _construct_rf(n_estimators, random_state, **kwargs):
return cuRFC(
n_estimators=n_estimators, random_state=random_state, **kwargs
)
@staticmethod
def _predict_model_on_cpu(model, X, convert_dtype):
return model._predict_model_on_cpu(X, convert_dtype=convert_dtype)
def get_summary_text(self):
"""
Obtain the text summary of the random forest model
"""
return self._get_summary_text()
def get_detailed_text(self):
"""
Obtain the detailed information for the random forest model, as text
"""
return self._get_detailed_text()
def get_json(self):
"""
Export the Random Forest model as a JSON string
"""
return self._get_json()
def fit(self, X, y, convert_dtype=False, broadcast_data=False):
"""
Fit the input data with a Random Forest classifier
IMPORTANT: X is expected to be partitioned with at least one partition
on each Dask worker being used by the forest (self.workers).
If a worker has multiple data partitions, they will be concatenated
before fitting, which will lead to additional memory usage. To minimize
memory consumption, ensure that each worker has exactly one partition.
When persisting data, you can use
`cuml.dask.common.utils.persist_across_workers` to simplify this:
.. code-block:: python
X_dask_cudf = dask_cudf.from_cudf(X_cudf, npartitions=n_workers)
y_dask_cudf = dask_cudf.from_cudf(y_cudf, npartitions=n_workers)
X_dask_cudf, y_dask_cudf = persist_across_workers(dask_client,
[X_dask_cudf,
y_dask_cudf])
This is equivalent to calling `persist` with the data and workers:
.. code-block:: python
X_dask_cudf, y_dask_cudf = dask_client.persist([X_dask_cudf,
y_dask_cudf],
workers={
X_dask_cudf:workers,
y_dask_cudf:workers
})
Parameters
----------
X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
Distributed dense matrix (floats or doubles) of shape
(n_samples, n_features).
y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
Labels of training examples.
**y must be partitioned the same way as X**
convert_dtype : bool, optional (default = False)
When set to True, the fit method will, when necessary, convert
y to be of dtype int32. This will increase memory used for
the method.
broadcast_data : bool, optional (default = False)
When set to True, the whole dataset is broadcasted
to train the workers, otherwise each worker
is trained on its partition
"""
self.unique_classes = cp.asarray(
y.unique().compute().sort_values(ignore_index=True)
)
self.num_classes = len(self.unique_classes)
self._set_internal_model(None)
self._fit(
model=self.rfs,
dataset=(X, y),
convert_dtype=convert_dtype,
broadcast_data=broadcast_data,
)
return self
    def predict(
        self,
        X,
        algo="auto",
        threshold=0.5,
        convert_dtype=True,
        predict_model="GPU",
        fil_sparse_format="auto",
        delayed=True,
        broadcast_data=False,
    ):
        """
        Predicts the labels for X.
        GPU-based prediction in a multi-node, multi-GPU context works
        by sending the sub-forest from each worker to the client,
        concatenating these into one forest with the full
        `n_estimators` set of trees, and sending this combined forest to
        the workers, which will each infer on their local set of data.
        Within the worker, this uses the cuML Forest Inference Library
        (cuml.fil) for high-throughput prediction.
        This allows inference to scale to large datasets, but the forest
        transmission incurs overheads for very large trees. For inference
        on small datasets, this overhead may dominate prediction time.
        The 'CPU' fallback method works with sub-forests in-place,
        broadcasting the datasets to all workers and combining predictions
        via a voting method at the end. This method is slower
        on a per-row basis but may be faster for problems with many trees
        and few rows.
        In the 0.15 cuML release, inference will be updated with much
        faster tree transfer. Preliminary builds with this updated approach
        will be available from rapids.ai
        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.
             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. (Default)
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage
        threshold : float (default = 0.5)
            Threshold used for classification. Optional and required only
            while performing the predict operation on the GPU, that is for,
            predict_model='GPU'.
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.
        predict_model : String (default = 'GPU')
            'GPU' to predict using the GPU, 'CPU' otherwise. The GPU can only
            be used if the model was trained on float32 data and `X` is float32
            or convert_dtype is set to True.
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.
             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.  It is not required  while using
            predict_model='CPU'.
        broadcast_data : bool (default = False)
            If broadcast_data=False, the trees are merged in a single model
            before the workers perform inference on their share of the
            prediction workload. When broadcast_data=True, trees aren't merged.
            Instead each of the workers infer the whole prediction work
            from trees at disposal. The results are reduced on the client.
            May be advantageous when the model is larger than the data used
            for inference.
        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
        """
        if predict_model == "CPU":
            # CPU path: per-worker sub-forests predict in place; votes are
            # combined on the client (no FIL, no tree transfer).
            preds = self.predict_model_on_cpu(X=X, convert_dtype=convert_dtype)
        else:
            if broadcast_data:
                # Keep trees where they are and ship the data to every
                # worker instead; per-worker votes are reduced client-side.
                preds = self.partial_inference(
                    X,
                    algo=algo,
                    convert_dtype=convert_dtype,
                    fil_sparse_format=fil_sparse_format,
                    delayed=delayed,
                )
            else:
                # Default GPU path: merge all trees into one FIL model, then
                # each worker predicts on its own data partitions.
                preds = self._predict_using_fil(
                    X,
                    algo=algo,
                    threshold=threshold,
                    convert_dtype=convert_dtype,
                    fil_sparse_format=fil_sparse_format,
                    delayed=delayed,
                )
        return preds
def partial_inference(self, X, delayed, **kwargs):
partial_infs = self._partial_inference(
X=X, op_type="classification", delayed=delayed, **kwargs
)
def reduce(partial_infs, workers_weights, unique_classes):
votes = dask.array.average(
partial_infs, axis=1, weights=workers_weights
)
merged_votes = votes.compute()
pred_class_indices = merged_votes.argmax(axis=1)
pred_class = unique_classes[pred_class_indices]
return pred_class
datatype = (
"daskArray" if isinstance(X, dask.array.Array) else "daskDataframe"
)
return self.apply_reduction(reduce, partial_infs, datatype, delayed)
def predict_using_fil(self, X, delayed, **kwargs):
if self._get_internal_model() is None:
self._set_internal_model(self._concat_treelite_models())
return self._predict_using_fil(X=X, delayed=delayed, **kwargs)
"""
TODO : Update function names used for CPU predict.
Cuml issue #1854 has been created to track this.
"""
def predict_model_on_cpu(self, X, convert_dtype=True):
"""
Predicts the labels for X.
Parameters
----------
X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
Distributed dense matrix (floats or doubles) of shape
(n_samples, n_features).
convert_dtype : bool, optional (default = True)
When set to True, the predict method will, when necessary, convert
the input to the data type which was used to train the model. This
will increase memory used for the method.
Returns
-------
y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
"""
c = default_client()
workers = self.workers
X_Scattered = c.scatter(X)
futures = list()
for n, w in enumerate(workers):
futures.append(
c.submit(
RandomForestClassifier._predict_model_on_cpu,
self.rfs[w],
X_Scattered,
convert_dtype,
workers=[w],
)
)
rslts = self.client.gather(futures, errors="raise")
indexes = np.zeros(len(futures), dtype=np.int32)
pred = list()
for i in range(len(X)):
classes = dict()
max_class = -1
max_val = 0
for d in range(len(rslts)):
for j in range(self.n_estimators_per_worker[d]):
sub_ind = indexes[d] + j
cls = rslts[d][sub_ind]
if cls not in classes.keys():
classes[cls] = 1
else:
classes[cls] = classes[cls] + 1
if classes[cls] > max_val:
max_val = classes[cls]
max_class = cls
indexes[d] = indexes[d] + self.n_estimators_per_worker[d]
pred.append(max_class)
return pred
    def predict_proba(self, X, delayed=True, **kwargs):
        """
        Predicts the probability of each class for X.
        See documentation of `predict` for notes on performance.
        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        predict_model : String (default = 'GPU')
            'GPU' to predict using the GPU, 'CPU' otherwise. The 'GPU' can only
            be used if the model was trained on float32 data and `X` is float32
            or convert_dtype is set to True. Also the 'GPU' should only be
            used for classification problems.
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.
             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. (Default)
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage
        threshold : float (default = 0.5)
            Threshold used for classification. Optional and required only
            while performing the predict operation on the GPU.
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.
             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'
        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_classes)
        """
        # Probabilities require the merged FIL model; build it lazily from
        # the per-worker sub-forests on first use.
        if self._get_internal_model() is None:
            self._set_internal_model(self._concat_treelite_models())
        # Inspect the input only to mirror its collection type in the output.
        data = DistributedDataHandler.create(X, client=self.client)
        return self._predict_proba(
            X, delayed, output_collection_type=data.datatype, **kwargs
        )
def get_params(self, deep=True):
"""
Returns the value of all parameters
required to configure this estimator as a dictionary.
Parameters
----------
deep : boolean (default = True)
"""
return self._get_params(deep)
def set_params(self, **params):
"""
Sets the value of parameters required to
configure this estimator, it functions similar to
the sklearn set_params.
Parameters
----------
params : dict of new params.
"""
return self._set_params(**params)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/ensemble/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
import warnings
# Re-export the multi-GPU ensemble estimators only when Dask is importable;
# otherwise the package degrades gracefully and emits a warning instead of
# failing at import time.
if has_dask():
    from cuml.dask.ensemble.randomforestclassifier import (
        RandomForestClassifier,
    )
    from cuml.dask.ensemble.randomforestregressor import RandomForestRegressor
else:
    warnings.warn(
        "Dask not found. All Dask-based multi-GPU operation is disabled."
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/ensemble/base.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.fil.fil import TreeliteModel
from cuml.dask.common.utils import get_client, wait_and_raise_from_futures
from cuml.dask.common.input_utils import DistributedDataHandler, concatenate
from dask.distributed import Future
from collections.abc import Iterable
from cuml import using_output_type
import warnings
from cuml.internals.safe_imports import gpu_only_import
import dask
import json
import math
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
class BaseRandomForestModel(object):
    """
    BaseRandomForestModel defines functions used in both Random Forest
    Classifier and Regressor for Multi Node and Multi GPU models. The common
    functions are defined here and called from the main Random Forest Multi
    Node Multi GPU APIs. The functions defined here are not meant to be used
    as a part of the public API.
    """

    def _create_model(
        self,
        model_func,
        client,
        workers,
        n_estimators,
        base_seed,
        ignore_empty_partitions,
        **kwargs,
    ):
        """
        Instantiate one single-GPU RF estimator per Dask worker, splitting
        the requested ``n_estimators`` across workers and deriving a distinct
        seed for each worker's sub-forest.
        """
        self.client = get_client(client)
        if workers is None:
            # Default to all workers
            workers = list(self.client.scheduler_info()["workers"].keys())
        self.workers = workers
        self._set_internal_model(None)
        self.active_workers = list()
        self.ignore_empty_partitions = ignore_empty_partitions
        self.n_estimators = n_estimators
        self.n_estimators_per_worker = self._estimators_per_worker(
            n_estimators
        )
        if base_seed is None:
            base_seed = 0
        # Offset each worker's seed by the cumulative number of estimators
        # assigned to preceding workers so per-tree seeds never collide.
        seeds = [base_seed]
        for i in range(1, len(self.n_estimators_per_worker)):
            sd = self.n_estimators_per_worker[i - 1] + seeds[i - 1]
            seeds.append(sd)
        self.rfs = {
            worker: self.client.submit(
                model_func,
                n_estimators=self.n_estimators_per_worker[n],
                random_state=seeds[n],
                **kwargs,
                pure=False,
                workers=[worker],
            )
            for n, worker in enumerate(self.workers)
        }
        wait_and_raise_from_futures(list(self.rfs.values()))

    def _estimators_per_worker(self, n_estimators):
        """
        Split ``n_estimators`` as evenly as possible across workers; the
        first ``n_estimators % n_workers`` workers receive one extra tree.
        """
        n_workers = len(self.workers)
        if n_estimators < n_workers:
            raise ValueError(
                "n_estimators cannot be lower than number of dask workers."
            )
        n_est_per_worker = math.floor(n_estimators / n_workers)
        n_estimators_per_worker = [n_est_per_worker for i in range(n_workers)]
        remaining_est = n_estimators - (n_est_per_worker * n_workers)
        for i in range(remaining_est):
            n_estimators_per_worker[i] = n_estimators_per_worker[i] + 1
        return n_estimators_per_worker

    def _fit(self, model, dataset, convert_dtype, broadcast_data):
        """
        Train each worker's sub-forest on its local partitions, or on the
        whole dataset when ``broadcast_data`` is True.
        """
        data = DistributedDataHandler.create(dataset, client=self.client)
        self.active_workers = data.workers
        self.datatype = data.datatype
        if self.datatype == "cudf":
            has_float64 = (dataset[0].dtypes == np.float64).any()
        else:
            has_float64 = dataset[0].dtype == np.float64
        if has_float64:
            raise TypeError("To use Dask RF data should have dtype float32.")
        labels = self.client.persist(dataset[1])
        if self.datatype == "cudf":
            self.num_classes = len(labels.unique())
        else:
            self.num_classes = len(dask.array.unique(labels).compute())
        combined_data = (
            list(map(lambda x: x[1], data.gpu_futures))
            if broadcast_data
            else None
        )
        futures = list()
        for idx, (worker, worker_data) in enumerate(
            data.worker_to_parts.items()
        ):
            futures.append(
                self.client.submit(
                    _func_fit,
                    model[worker],
                    combined_data if broadcast_data else worker_data,
                    convert_dtype,
                    workers=[worker],
                    pure=False,
                )
            )
        # Record how many estimators each data-holding worker contributes;
        # used by apply_reduction to weight per-worker predictions.
        self.n_active_estimators_per_worker = []
        for worker in data.worker_to_parts.keys():
            n = self.workers.index(worker)
            n_est = self.n_estimators_per_worker[n]
            self.n_active_estimators_per_worker.append(n_est)
        if len(self.workers) > len(self.active_workers):
            if self.ignore_empty_partitions:
                curent_estimators = (
                    self.n_estimators
                    / len(self.workers)
                    * len(self.active_workers)
                )
                warn_text = (
                    f"Data was not split among all workers "
                    f"using only {self.active_workers} workers to fit."
                    f"This will only train {curent_estimators}"
                    f" estimators instead of the requested "
                    f"{self.n_estimators}"
                )
                warnings.warn(warn_text)
            else:
                raise ValueError(
                    "Data was not split among all workers. "
                    "Re-run the code or "
                    "use ignore_empty_partitions=True"
                    " while creating model"
                )
        wait_and_raise_from_futures(futures)
        return self

    def _concat_treelite_models(self):
        """
        Convert the cuML Random Forest model present in different workers to
        the treelite format and then concatenate the different treelite models
        to create a single model. The concatenated model is then converted to
        bytes format.
        """
        model_serialized_futures = list()
        for w in self.active_workers:
            model_serialized_futures.append(
                dask.delayed(_get_serialized_model)(self.rfs[w])
            )
        mod_bytes = self.client.compute(model_serialized_futures, sync=True)
        # Use the last active worker's local model as the base into which all
        # serialized sub-forests are concatenated (explicit instead of the
        # previous reliance on the for-loop variable leaking out of scope).
        last_worker = self.active_workers[-1]
        model = self.rfs[last_worker].result()
        all_tl_mod_handles = [
            model._tl_handle_from_bytes(indiv_worker_model_bytes)
            for indiv_worker_model_bytes in mod_bytes
        ]
        model._concatenate_treelite_handle(all_tl_mod_handles)
        # Free the intermediate treelite handles once concatenated.
        for tl_handle in all_tl_mod_handles:
            TreeliteModel.free_treelite_model(tl_handle)
        return model

    def _partial_inference(self, X, op_type, delayed, **kwargs):
        """
        Broadcast the whole dataset to every active worker and let each one
        predict with only its local sub-forest. Returns a delayed dask array
        with one block of predictions per worker; the caller reduces it via
        ``apply_reduction``.
        """
        data = DistributedDataHandler.create(X, client=self.client)
        combined_data = list(map(lambda x: x[1], data.gpu_futures))
        func = (
            _func_predict_partial
            if op_type == "regression"
            else _func_predict_proba_partial
        )
        partial_infs = list()
        for worker in self.active_workers:
            partial_infs.append(
                self.client.submit(
                    func,
                    self.rfs[worker],
                    combined_data,
                    **kwargs,
                    workers=[worker],
                    pure=False,
                )
            )
        partial_infs = dask.delayed(dask.array.concatenate)(
            partial_infs, axis=1, allow_unknown_chunksizes=True
        )
        return partial_infs

    def _predict_using_fil(self, X, delayed, **kwargs):
        """
        Merge the per-worker sub-forests into one FIL model (lazily, on
        first use) and run distributed prediction with it.
        """
        # Single check (the original duplicated this None-check back to back).
        if self._get_internal_model() is None:
            self._set_internal_model(self._concat_treelite_models())
        data = DistributedDataHandler.create(X, client=self.client)
        return self._predict(
            X, delayed=delayed, output_collection_type=data.datatype, **kwargs
        )

    def _get_params(self, deep):
        """Gather the ``get_params()`` result of every worker's sub-model."""
        model_params = list()
        for worker in self.workers:
            model_params.append(
                self.client.submit(
                    _func_get_params, self.rfs[worker], deep, workers=[worker]
                )
            )
        params_of_each_model = self.client.gather(model_params, errors="raise")
        return params_of_each_model

    def _set_params(self, **params):
        """Apply ``params`` to every worker's sub-model."""
        model_params = list()
        for worker in self.workers:
            model_params.append(
                self.client.submit(
                    _func_set_params,
                    self.rfs[worker],
                    **params,
                    workers=[worker],
                )
            )
        wait_and_raise_from_futures(model_params)
        return self

    def _get_summary_text(self):
        """
        Obtain the summary of the forest as text (per-worker summaries,
        newline-joined).
        """
        futures = list()
        for w in self.workers:
            futures.append(
                self.client.submit(
                    _get_summary_text_func,
                    self.rfs[w],
                    workers=[w],
                )
            )
        all_dump = self.client.gather(futures, errors="raise")
        return "\n".join(all_dump)

    def _get_detailed_text(self):
        """
        Obtain the detailed information of the forest as text (per-worker
        dumps, newline-joined).
        """
        futures = list()
        for w in self.workers:
            futures.append(
                self.client.submit(
                    _get_detailed_text_func,
                    self.rfs[w],
                    workers=[w],
                )
            )
        all_dump = self.client.gather(futures, errors="raise")
        return "\n".join(all_dump)

    def _get_json(self):
        """
        Export the Random Forest model as a JSON string
        """
        dump = list()
        for w in self.workers:
            dump.append(
                self.client.submit(
                    _get_json_func,
                    self.rfs[w],
                    workers=[w],
                )
            )
        all_dump = self.client.gather(dump, errors="raise")
        # Merge the per-worker JSON arrays into one array covering the
        # whole forest.
        combined_dump = []
        for e in all_dump:
            obj = json.loads(e)
            combined_dump.extend(obj)
        return json.dumps(combined_dump)

    def get_combined_model(self):
        """
        Return single-GPU model for serialization.
        Returns
        -------
        model : Trained single-GPU model or None if the model has not
           yet been trained.
        """
        # set internal model if it hasn't been accessed before
        if self._get_internal_model() is None:
            self._set_internal_model(self._concat_treelite_models())
        internal_model = self._check_internal_model(self._get_internal_model())
        if isinstance(self.internal_model, Iterable):
            # This function needs to return a single instance of cuml.Base,
            # even if the class is just a composite.
            raise ValueError(
                "Expected a single instance of cuml.Base "
                "but got %s instead." % type(self.internal_model)
            )
        elif isinstance(self.internal_model, Future):
            internal_model = self.internal_model.result()
        return internal_model

    def apply_reduction(self, reduce, partial_infs, datatype, delayed):
        """
        Reduces the partial inferences to obtain the final result. The workers
        didn't have the same number of trees to form their predictions. To
        correct for this worker's predictions are weighted differently during
        reduction.
        """
        workers_weights = np.array(self.n_active_estimators_per_worker)
        workers_weights = workers_weights[workers_weights != 0]
        # Normalize to each worker's fraction of the trained estimators.
        workers_weights = workers_weights / workers_weights.sum()
        workers_weights = cp.array(workers_weights)
        # unique_classes only exists on classifiers; regressors pass None.
        unique_classes = (
            None
            if not hasattr(self, "unique_classes")
            else self.unique_classes
        )
        delayed_local_array = dask.delayed(reduce)(
            partial_infs, workers_weights, unique_classes
        )
        delayed_res = dask.array.from_delayed(
            delayed_local_array, shape=(np.nan, np.nan), dtype=np.float32
        )
        if delayed:
            return delayed_res
        else:
            return delayed_res.persist()
def _func_fit(model, input_data, convert_dtype):
    """Worker-side fit: concatenate this worker's partitions and train."""
    features = concatenate([part[0] for part in input_data])
    targets = concatenate([part[1] for part in input_data])
    return model.fit(features, targets, convert_dtype)
def _func_predict_partial(model, input_data, **kwargs):
    """
    Whole dataset inference with part of the model (trees at disposal locally).
    Transfer dataset instead of model. Interesting when model is larger
    than dataset.
    """
    full_dataset = concatenate(input_data)
    with using_output_type("cupy"):
        local_pred = model.predict(full_dataset, **kwargs)
    # Add a worker axis so per-worker blocks can be concatenated on axis=1.
    return cp.expand_dims(local_pred, axis=1)
def _func_predict_proba_partial(model, input_data, **kwargs):
    """
    Whole dataset inference with part of the model (trees at disposal locally).
    Transfer dataset instead of model. Interesting when model is larger
    than dataset.
    """
    full_dataset = concatenate(input_data)
    with using_output_type("cupy"):
        local_proba = model.predict_proba(full_dataset, **kwargs)
    # Add a worker axis so per-worker blocks can be concatenated on axis=1.
    return cp.expand_dims(local_proba, axis=1)
def _get_summary_text_func(model):
    """Worker-side helper: return the local sub-forest's text summary."""
    return model.get_summary_text()
def _get_detailed_text_func(model):
    """Worker-side helper: return the local sub-forest's detailed text dump."""
    return model.get_detailed_text()
def _get_json_func(model):
    """Worker-side helper: return the local sub-forest as a JSON string."""
    return model.get_json()
def _func_get_params(model, deep):
    """Worker-side helper: fetch the local sub-forest's parameters."""
    return model.get_params(deep)
def _func_set_params(model, **params):
    """Worker-side helper: apply ``params`` to the local sub-forest."""
    return model.set_params(**params)
def _get_serialized_model(model):
    """Worker-side helper: serialize the local sub-forest to bytes
    (consumed by ``_concat_treelite_models`` via ``_tl_handle_from_bytes``)."""
    return model._get_serialized_model()
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/ensemble/randomforestregressor.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.ensemble import RandomForestRegressor as cuRFR
from cuml.dask.ensemble.base import BaseRandomForestModel
from cuml.dask.common.base import BaseEstimator
import dask
class RandomForestRegressor(
BaseRandomForestModel, DelayedPredictionMixin, BaseEstimator
):
"""
    Experimental API implementing a multi-GPU Random Forest regressor
    model which fits multiple decision tree regressors in an
ensemble. This uses Dask to partition data over multiple GPUs
(possibly on different nodes).
Currently, this API makes the following assumptions:
* The set of Dask workers used between instantiation, fit,
and predict are all consistent
* Training data comes in the form of cuDF dataframes or Dask Arrays
distributed so that each worker has at least one partition.
* The get_summary_text and get_detailed_text functions provides the \
text representation of the forest on the worker.
Future versions of the API will support more flexible data
distribution and additional input types. User-facing APIs are
expected to change in upcoming versions.
The distributed algorithm uses an *embarrassingly-parallel*
approach. For a forest with `N` trees being built on `w` workers, each
worker simply builds `N/w` trees on the data it has available
locally. In many cases, partitioning the data so that each worker
builds trees on a subset of the total dataset works well, but
it generally requires the data to be well-shuffled in advance.
Alternatively, callers can replicate all of the data across
workers so that ``rf.fit`` receives `w` partitions, each containing the
same data. This would produce results approximately identical to
single-GPU fitting.
    Please check the single-GPU implementation of Random Forest
    regressor for more information about the underlying algorithm.
Parameters
----------
n_estimators : int (default = 100)
total number of trees in the forest (not per-worker)
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
split_criterion : int or string (default = ``2`` (``'mse'``))
The criterion used to split nodes.\n
* ``0`` or ``'gini'`` for gini impurity
* ``1`` or ``'entropy'`` for information gain (entropy)
* ``2`` or ``'mse'`` for mean squared error
* ``4`` or ``'poisson'`` for poisson half deviance
* ``5`` or ``'gamma'`` for gamma half deviance
* ``6`` or ``'inverse_gaussian'`` for inverse gaussian deviance
``0``, ``'gini'``, ``1``, ``'entropy'`` not valid for regression
bootstrap : boolean (default = True)
Control bootstrapping.\n
* If ``True``, each tree in the forest is built on a bootstrapped
sample with replacement.
* If ``False``, the whole dataset is used to build each tree.
max_samples : float (default = 1.0)
Ratio of dataset rows used while fitting each tree.
max_depth : int (default = 16)
Maximum tree depth. Must be greater than 0.
Unlimited depth (i.e, until leaves are pure)
is not supported.\n
.. note:: This default differs from scikit-learn's
random forest, which defaults to unlimited depth.
max_leaves : int (default = -1)
Maximum leaf nodes per tree. Soft constraint. Unlimited, If ``-1``.
max_features : float (default = 'auto')
Ratio of number of features (columns) to consider
per node split.\n
* If type ``int`` then ``max_features`` is the absolute count of
features to be used.
* If type ``float`` then ``max_features`` is a fraction.
* If ``'auto'`` then ``max_features=n_features = 1.0``.
* If ``'sqrt'`` then ``max_features=1/sqrt(n_features)``.
* If ``'log2'`` then ``max_features=log2(n_features)/n_features``.
* If ``None``, then ``max_features = 1.0``.
n_bins : int (default = 128)
Maximum number of bins used by the split algorithm per feature.
min_samples_leaf : int or float (default = 1)
The minimum number of samples (rows) in each leaf node.\n
* If type ``int``, then ``min_samples_leaf`` represents the minimum
number.
* If ``float``, then ``min_samples_leaf`` represents a fraction and
``ceil(min_samples_leaf * n_rows)`` is the minimum number of
samples for each leaf node.
min_samples_split : int or float (default = 2)
The minimum number of samples required to split an internal node.\n
* If type ``int``, then ``min_samples_split`` represents the minimum
number.
* If type ``float``, then ``min_samples_split`` represents a fraction
and ``ceil(min_samples_split * n_rows)`` is the minimum number of
samples for each split.
accuracy_metric : string (default = 'r2')
Decides the metric used to evaluate the performance of the model.
In the 0.16 release, the default scoring metric was changed
from mean squared error to r-squared.\n
* for r-squared : ``'r2'``
* for median of abs error : ``'median_ae'``
* for mean of abs error : ``'mean_ae'``
* for mean square error' : ``'mse'``
n_streams : int (default = 4 )
Number of parallel streams used for forest building
workers : optional, list of strings
Dask addresses of workers to use for computation.
If None, all available Dask workers will be used.
random_state : int (default = None)
Seed for the random number generator. Unseeded by default.
ignore_empty_partitions: Boolean (default = False)
Specify behavior when a worker does not hold any data
while splitting. When True, it returns the results from workers
with data (the number of trained estimators will be less than
n_estimators) When False, throws a RuntimeError.
This is an experimental parameter, and may be removed
in the future.
"""
    def __init__(
        self,
        *,
        workers=None,
        client=None,
        verbose=False,
        n_estimators=100,
        random_state=None,
        ignore_empty_partitions=False,
        **kwargs,
    ):
        # See the class docstring for parameter documentation.
        super().__init__(client=client, verbose=verbose, **kwargs)
        # Build one single-GPU RandomForestRegressor per Dask worker; the
        # total n_estimators is split across workers by _create_model.
        self._create_model(
            model_func=RandomForestRegressor._construct_rf,
            client=client,
            workers=workers,
            n_estimators=n_estimators,
            base_seed=random_state,
            ignore_empty_partitions=ignore_empty_partitions,
            **kwargs,
        )
    @staticmethod
    def _construct_rf(n_estimators, random_state, **kwargs):
        """Build the worker-local single-GPU regressor (runs on a worker)."""
        return cuRFR(
            n_estimators=n_estimators, random_state=random_state, **kwargs
        )
    @staticmethod
    def _predict_model_on_cpu(model, X, convert_dtype):
        """Worker-side helper: CPU predict with the local sub-forest."""
        return model._predict_model_on_cpu(X, convert_dtype=convert_dtype)
    def get_summary_text(self):
        """
        Obtain the text summary of the random forest model
        (per-worker summaries, newline-joined).
        """
        return self._get_summary_text()
    def get_detailed_text(self):
        """
        Obtain the detailed information for the random forest model, as text
        (per-worker dumps, newline-joined).
        """
        return self._get_detailed_text()
    def get_json(self):
        """
        Export the Random Forest model as a JSON string
        (the per-worker JSON arrays merged into a single array).
        """
        return self._get_json()
def fit(self, X, y, convert_dtype=False, broadcast_data=False):
"""
Fit the input data with a Random Forest regression model
IMPORTANT: X is expected to be partitioned with at least one partition
on each Dask worker being used by the forest (self.workers).
When persisting data, you can use
`cuml.dask.common.utils.persist_across_workers` to simplify this:
.. code-block:: python
X_dask_cudf = dask_cudf.from_cudf(X_cudf, npartitions=n_workers)
y_dask_cudf = dask_cudf.from_cudf(y_cudf, npartitions=n_workers)
X_dask_cudf, y_dask_cudf = persist_across_workers(dask_client,
[X_dask_cudf,
y_dask_cudf])
This is equivalent to calling `persist` with the data and workers):
.. code-block:: python
X_dask_cudf, y_dask_cudf = dask_client.persist([X_dask_cudf,
y_dask_cudf],
workers={
X_dask_cudf:workers,
y_dask_cudf:workers
})
Parameters
----------
X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
Distributed dense matrix (floats or doubles) of shape
(n_samples, n_features).
y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
Labels of training examples.
**y must be partitioned the same way as X**
convert_dtype : bool, optional (default = False)
When set to True, the fit method will, when necessary, convert
y to be the same data type as X if they differ. This will increase
memory used for the method.
broadcast_data : bool, optional (default = False)
When set to True, the whole dataset is broadcasted
to train the workers, otherwise each worker
is trained on its partition
"""
self.internal_model = None
self._fit(
model=self.rfs,
dataset=(X, y),
convert_dtype=convert_dtype,
broadcast_data=broadcast_data,
)
return self
    def predict(
        self,
        X,
        predict_model="GPU",
        algo="auto",
        convert_dtype=True,
        fil_sparse_format="auto",
        delayed=True,
        broadcast_data=False,
    ):
        """
        Predicts the regressor outputs for X.
        GPU-based prediction in a multi-node, multi-GPU context works
        by sending the sub-forest from each worker to the client,
        concatenating these into one forest with the full
        `n_estimators` set of trees, and sending this combined forest to
        the workers, which will each infer on their local set of data.
        This allows inference to scale to large datasets, but the forest
        transmission incurs overheads for very large trees. For inference
        on small datasets, this overhead may dominate prediction time.
        Within the worker, this uses the cuML Forest Inference Library
        (cuml.fil) for high-throughput prediction.
        The 'CPU' fallback method works with sub-forests in-place,
        broadcasting the datasets to all workers and combining predictions
        via an averaging method at the end. This method is slower
        on a per-row basis but may be faster for problems with many trees
        and few rows.
        In the 0.15 cuML release, inference will be updated with much
        faster tree transfer.
        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.
             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. (Default)
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.
        predict_model : String (default = 'GPU')
            'GPU' to predict using the GPU, 'CPU' otherwise. The GPU can only
            be used if the model was trained on float32 data and `X` is float32
            or convert_dtype is set to True.
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.
             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.
        broadcast_data : bool (default = False)
            If broadcast_data=False, the trees are merged in a single model
            before the workers perform inference on their share of the
            prediction workload. When broadcast_data=True, trees aren't merged.
            Instead each of the workers infer the whole prediction work
            from trees at disposal. The results are reduced on the client.
            May be advantageous when the model is larger than the data used
            for inference.
        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
        """
        if predict_model == "CPU":
            # CPU path: per-worker sub-forests predict in place; results are
            # averaged on the client (no FIL, no tree transfer).
            preds = self.predict_model_on_cpu(X, convert_dtype=convert_dtype)
        else:
            if broadcast_data:
                # Keep trees where they are and ship the data to every
                # worker; per-worker results are reduced client-side.
                preds = self.partial_inference(
                    X,
                    algo=algo,
                    convert_dtype=convert_dtype,
                    fil_sparse_format=fil_sparse_format,
                    delayed=delayed,
                )
            else:
                # Default GPU path: merge all trees into one FIL model, then
                # each worker predicts on its own data partitions.
                preds = self._predict_using_fil(
                    X,
                    algo=algo,
                    convert_dtype=convert_dtype,
                    fil_sparse_format=fil_sparse_format,
                    delayed=delayed,
                )
        return preds
def partial_inference(self, X, delayed, **kwargs):
partial_infs = self._partial_inference(
X=X, op_type="regression", delayed=delayed, **kwargs
)
def reduce(partial_infs, workers_weights, unique_classes=None):
regressions = dask.array.average(
partial_infs, axis=1, weights=workers_weights
)
merged_regressions = regressions.compute()
return merged_regressions
datatype = (
"daskArray" if isinstance(X, dask.array.Array) else "daskDataframe"
)
return self.apply_reduction(reduce, partial_infs, datatype, delayed)
def predict_using_fil(self, X, delayed, **kwargs):
if self._get_internal_model() is None:
self._set_internal_model(self._concat_treelite_models())
return self._predict_using_fil(X=X, delayed=delayed, **kwargs)
"""
TODO : Update function names used for CPU predict.
Cuml issue #1854 has been created to track this.
"""
def predict_model_on_cpu(self, X, convert_dtype):
workers = self.workers
X_Scattered = self.client.scatter(X)
futures = list()
for n, w in enumerate(workers):
futures.append(
self.client.submit(
RandomForestRegressor._predict_model_on_cpu,
self.rfs[w],
X_Scattered,
convert_dtype,
workers=[w],
)
)
rslts = self.client.gather(futures, errors="raise")
pred = list()
for i in range(len(X)):
pred_per_worker = 0.0
for d in range(len(rslts)):
pred_per_worker = pred_per_worker + rslts[d][i]
pred.append(pred_per_worker / len(rslts))
return pred
def get_params(self, deep=True):
"""
Returns the value of all parameters
required to configure this estimator as a dictionary.
Parameters
----------
deep : boolean (default = True)
"""
return self._get_params(deep)
def set_params(self, **params):
"""
Sets the value of parameters required to
configure this estimator, it functions similar to
the sklearn set_params.
Parameters
----------
params : dict of new params.
"""
return self._set_params(**params)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/solvers/cd.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import SyncFitMixinLinearModel
from raft_dask.common.comms import get_raft_comm_state
from dask.distributed import get_worker
class CD(BaseEstimator, SyncFitMixinLinearModel, DelayedPredictionMixin):
    """
    Model-Parallel Multi-GPU Linear Regression Model.
    """

    def __init__(self, *, client=None, **kwargs):
        """
        Initializes the linear regression class.
        """
        super().__init__(client=client, **kwargs)
        # Tracks whether fit() has completed; set here for parity with the
        # single-GPU estimator's state.
        self._model_fit = False
        self._consec_call = 0

    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Features for regression
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
            Labels (outcome values)
        """
        worker_models = self._fit(model_func=CD._create_model, data=(X, y))
        # All workers hold equivalent fitted state; keep the first model
        # as the internal single-GPU representative.
        first_model = next(iter(worker_models.values()))
        self._set_internal_model(first_model)
        return self

    def predict(self, X, delayed=True):
        """
        Make predictions for X and returns a dask collection.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).
        delayed : bool (default = True)
            Whether to do a lazy prediction (and return Delayed objects) or an
            eagerly executed one.

        Returns
        -------
        y : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, 1)
        """
        return self._predict(X, delayed=delayed)

    @staticmethod
    @mnmg_import
    def _create_model(sessionId, datatype, **kwargs):
        # Instantiate the per-worker multi-GPU CD solver bound to this
        # worker's RAFT comms handle.
        from cuml.solvers.cd_mg import CDMG

        comm_state = get_raft_comm_state(sessionId, get_worker())
        return CDMG(
            handle=comm_state["handle"], output_type=datatype, **kwargs
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/solvers/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
import warnings

# Only expose the Dask-backed solver when Dask is importable; otherwise
# warn instead of failing the import so single-GPU cuml keeps working.
if has_dask():
    from cuml.dask.solvers.cd import CD  # NOQA
else:
    warnings.warn(
        "Dask not found. All Dask-based multi-GPU operation is disabled."
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/neighbors/kneighbors_classifier.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.input_utils import to_output
from cuml.dask.common import parts_to_ranks
from cuml.dask.common import flatten_grouped_results
from cuml.dask.common.utils import raise_mg_import_exception
from cuml.dask.common.utils import wait_and_raise_from_futures
from raft_dask.common.comms import get_raft_comm_state
from cuml.dask.neighbors import NearestNeighbors
from dask.dataframe import Series as DaskSeries
from dask.distributed import get_worker
import dask.array as da
from uuid import uuid1
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cudf = gpu_only_import("cudf")
class KNeighborsClassifier(NearestNeighbors):
    """
    Multi-node Multi-GPU K-Nearest Neighbors Classifier Model.

    K-Nearest Neighbors Classifier is an instance-based learning technique,
    that keeps training samples around for prediction, rather than trying
    to learn a generalizable set of model parameters.

    Parameters
    ----------
    n_neighbors : int (default=5)
        Default number of neighbors to query
    batch_size: int (optional, default 2000000)
        Maximum number of query rows processed at once. This parameter can
        greatly affect the throughput of the algorithm. The optimal setting
        of this value will vary for different layouts and index to query
        ratios, but it will require `batch_size * n_features * 4` bytes of
        additional memory on each worker hosting index partitions.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    """

    def __init__(
        self, *, client=None, streams_per_handle=0, verbose=False, **kwargs
    ):
        super().__init__(client=client, verbose=verbose, **kwargs)
        self.streams_per_handle = streams_per_handle

    def fit(self, X, y):
        """
        Fit a multi-node multi-GPU K-Nearest Neighbors Classifier index

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Index data.
            Acceptable formats: dask CuPy/NumPy/Numba Array
        y : array-like (device or host) shape = (n_samples, n_features)
            Index labels data.
            Acceptable formats: dask CuPy/NumPy/Numba Array

        Returns
        -------
        self : KNeighborsClassifier model
        """
        if not isinstance(X._meta, (np.ndarray, pd.DataFrame, cudf.DataFrame)):
            raise ValueError("This chunk type is not supported")
        self.data_handler = DistributedDataHandler.create(
            data=[X, y], client=self.client
        )
        # uniq_labels: set of possible labels for each labels column
        # n_unique: number of possible labels for each labels column
        uniq_labels = []
        if self.data_handler.datatype == "cupy":
            # CuPy-backed dask arrays: use dask.array's unique per column.
            if y.ndim == 1:
                uniq_labels.append(da.unique(y))
            else:
                n_targets = y.shape[1]
                for i in range(n_targets):
                    uniq_labels.append(da.unique(y[:, i]))
        else:
            # Dataframe/series inputs: use the pandas/cuDF unique per column.
            if isinstance(y, DaskSeries):
                uniq_labels.append(y.unique())
            else:
                n_targets = len(y.columns)
                for i in range(n_targets):
                    uniq_labels.append(y.iloc[:, i].unique())
        # Materialize the lazy unique() results on the client.
        uniq_labels = da.compute(uniq_labels)[0]
        if hasattr(uniq_labels[0], "values_host"):  # for cuDF Series
            uniq_labels = list(map(lambda x: x.values_host, uniq_labels))
        elif hasattr(uniq_labels[0], "values"):  # for pandas Series
            uniq_labels = list(map(lambda x: x.values, uniq_labels))
        self.uniq_labels = np.sort(np.array(uniq_labels))
        self.n_unique = list(map(lambda x: len(x), self.uniq_labels))
        return self

    @staticmethod
    def _func_create_model(sessionId, **kwargs):
        """Instantiate the per-worker MG classifier bound to this worker's
        RAFT comms handle (runs on a Dask worker)."""
        try:
            from cuml.neighbors.kneighbors_classifier_mg import (
                KNeighborsClassifierMG as cumlKNN,
            )
        except ImportError:
            raise_mg_import_exception()

        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        return cumlKNN(handle=handle, **kwargs)

    @staticmethod
    def _func_predict(
        model,
        index,
        index_parts_to_ranks,
        index_nrows,
        query,
        query_parts_to_ranks,
        query_nrows,
        uniq_labels,
        n_unique,
        ncols,
        rank,
        convert_dtype,
        probas_only,
    ):
        """Run the distributed predict (or predict_proba when
        ``probas_only`` is True) on a single worker's model."""
        if probas_only:
            return model.predict_proba(
                index,
                index_parts_to_ranks,
                index_nrows,
                query,
                query_parts_to_ranks,
                query_nrows,
                uniq_labels,
                n_unique,
                ncols,
                rank,
                convert_dtype,
            )
        else:
            return model.predict(
                index,
                index_parts_to_ranks,
                index_nrows,
                query,
                query_parts_to_ranks,
                query_nrows,
                uniq_labels,
                n_unique,
                ncols,
                rank,
                convert_dtype,
            )

    def predict(self, X, convert_dtype=True):
        """
        Predict labels for a query from previously stored index
        and index labels.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Query data.
            Acceptable formats: dask cuDF, dask CuPy/NumPy/Numba Array
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will automatically
            convert the data to the right formats.

        Returns
        -------
        predictions : Dask futures or Dask CuPy Arrays
        """
        query_handler = DistributedDataHandler.create(
            data=X, client=self.client
        )
        self.datatype = query_handler.datatype

        # Comms clique spans the union of index and query workers.
        comms = KNeighborsClassifier._build_comms(
            self.data_handler, query_handler, self.streams_per_handle
        )
        worker_info = comms.worker_info(comms.worker_addresses)

        """
        Build inputs and outputs
        """
        self.data_handler.calculate_parts_to_sizes(comms=comms)
        query_handler.calculate_parts_to_sizes(comms=comms)

        data_parts_to_ranks, data_nrows = parts_to_ranks(
            self.client, worker_info, self.data_handler.gpu_futures
        )
        query_parts_to_ranks, query_nrows = parts_to_ranks(
            self.client, worker_info, query_handler.gpu_futures
        )

        """
        Each Dask worker creates a single model
        """
        # Unique key namespace so repeated predict() calls don't collide.
        key = uuid1()
        models = dict(
            [
                (
                    worker,
                    self.client.submit(
                        self._func_create_model,
                        comms.sessionId,
                        **self.kwargs,
                        workers=[worker],
                        key="%s-%s" % (key, idx),
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        """
        Invoke knn_classify on Dask workers to perform distributed query
        """
        key = uuid1()
        knn_clf_res = dict(
            [
                (
                    worker_info[worker]["rank"],
                    self.client.submit(
                        self._func_predict,
                        models[worker],
                        # Workers with no local partitions still join the
                        # collective call with empty part lists.
                        self.data_handler.worker_to_parts[worker]
                        if worker in self.data_handler.workers
                        else [],
                        data_parts_to_ranks,
                        data_nrows,
                        query_handler.worker_to_parts[worker]
                        if worker in query_handler.workers
                        else [],
                        query_parts_to_ranks,
                        query_nrows,
                        self.uniq_labels,
                        self.n_unique,
                        X.shape[1],
                        worker_info[worker]["rank"],
                        convert_dtype,
                        False,
                        key="%s-%s" % (key, idx),
                        workers=[worker],
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        wait_and_raise_from_futures(list(knn_clf_res.values()))

        """
        Gather resulting partitions and return result
        """
        out_futures = flatten_grouped_results(
            self.client, query_parts_to_ranks, knn_clf_res
        )
        comms.destroy()
        return to_output(out_futures, self.datatype).squeeze()

    def score(self, X, y, convert_dtype=True):
        """
        Compute the mean accuracy of the predictions on X
        against the ground-truth labels y.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Query test data.
            Acceptable formats: dask CuPy/NumPy/Numba Array
        y : array-like (device or host) shape = (n_samples, n_features)
            Labels test data.
            Acceptable formats: dask CuPy/NumPy/Numba Array

        Returns
        -------
        score : float
            Fraction of correctly predicted labels.
        """
        y_pred = self.predict(X, convert_dtype=convert_dtype)
        # Normalize both sides to dask arrays so elementwise compare works.
        if not isinstance(y_pred, da.Array):
            y_pred = y_pred.to_dask_array(lengths=True)
        if not isinstance(y, da.Array):
            y = y.to_dask_array(lengths=True)
        y_true = y.squeeze()
        matched = y_pred == y_true
        mean_match = matched.mean()
        return float(mean_match.compute())

    def predict_proba(self, X, convert_dtype=True):
        """
        Predict class probabilities for a query from previously stored
        index and index labels.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Query data.
            Acceptable formats: dask cuDF, dask CuPy/NumPy/Numba Array
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will automatically
            convert the data to the right formats.

        Returns
        -------
        probabilities : Dask futures or Dask CuPy Arrays
        """
        query_handler = DistributedDataHandler.create(
            data=X, client=self.client
        )
        self.datatype = query_handler.datatype

        comms = KNeighborsClassifier._build_comms(
            self.data_handler, query_handler, self.streams_per_handle
        )
        worker_info = comms.worker_info(comms.worker_addresses)

        """
        Build inputs and outputs
        """
        self.data_handler.calculate_parts_to_sizes(comms=comms)
        query_handler.calculate_parts_to_sizes(comms=comms)

        data_parts_to_ranks, data_nrows = parts_to_ranks(
            self.client, worker_info, self.data_handler.gpu_futures
        )
        query_parts_to_ranks, query_nrows = parts_to_ranks(
            self.client, worker_info, query_handler.gpu_futures
        )

        """
        Each Dask worker creates a single model
        """
        key = uuid1()
        models = dict(
            [
                (
                    worker,
                    self.client.submit(
                        self._func_create_model,
                        comms.sessionId,
                        **self.kwargs,
                        workers=[worker],
                        key="%s-%s" % (key, idx),
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        """
        Invoke knn_classify on Dask workers to perform distributed query
        """
        key = uuid1()
        knn_prob_res = dict(
            [
                (
                    worker_info[worker]["rank"],
                    self.client.submit(
                        self._func_predict,
                        models[worker],
                        self.data_handler.worker_to_parts[worker]
                        if worker in self.data_handler.workers
                        else [],
                        data_parts_to_ranks,
                        data_nrows,
                        query_handler.worker_to_parts[worker]
                        if worker in query_handler.workers
                        else [],
                        query_parts_to_ranks,
                        query_nrows,
                        self.uniq_labels,
                        self.n_unique,
                        X.shape[1],
                        worker_info[worker]["rank"],
                        convert_dtype,
                        # probas_only=True: request probabilities.
                        True,
                        key="%s-%s" % (key, idx),
                        workers=[worker],
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        wait_and_raise_from_futures(list(knn_prob_res.values()))

        # One probability array per output (label) column.
        n_outputs = len(self.n_unique)

        def _custom_getter(o):
            # Build a getter selecting output column `o` of each worker's
            # (per-output) result tuple.
            def func_get(f, idx):
                return f[o][idx]

            return func_get

        """
        Gather resulting partitions and return result
        """
        outputs = []
        for o in range(n_outputs):
            futures = flatten_grouped_results(
                self.client,
                query_parts_to_ranks,
                knn_prob_res,
                getter_func=_custom_getter(o),
            )
            outputs.append(to_output(futures, self.datatype))

        comms.destroy()
        if n_outputs == 1:
            return da.concatenate(outputs, axis=0)
        return tuple(outputs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/neighbors/kneighbors_regressor.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.input_utils import to_output
from cuml.dask.common import parts_to_ranks
from cuml.dask.common import flatten_grouped_results
from cuml.dask.common.utils import raise_mg_import_exception
from cuml.dask.common.utils import wait_and_raise_from_futures
from raft_dask.common.comms import get_raft_comm_state
from cuml.dask.neighbors import NearestNeighbors
from dask.distributed import get_worker
import dask.array as da
from uuid import uuid1
class KNeighborsRegressor(NearestNeighbors):
    """
    Multi-node Multi-GPU K-Nearest Neighbors Regressor Model.

    K-Nearest Neighbors Regressor is an instance-based learning technique,
    that keeps training samples around for prediction, rather than trying
    to learn a generalizable set of model parameters.

    Parameters
    ----------
    n_neighbors : int (default=5)
        Default number of neighbors to query
    batch_size: int (optional, default 2000000)
        Maximum number of query rows processed at once. This parameter can
        greatly affect the throughput of the algorithm. The optimal setting
        of this value will vary for different layouts and index to query
        ratios, but it will require `batch_size * n_features * 4` bytes of
        additional memory on each worker hosting index partitions.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    """

    def __init__(
        self, *, client=None, streams_per_handle=0, verbose=False, **kwargs
    ):
        super().__init__(client=client, verbose=verbose, **kwargs)
        self.streams_per_handle = streams_per_handle

    def fit(self, X, y):
        """
        Fit a multi-node multi-GPU K-Nearest Neighbors Regressor index

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Index data.
            Acceptable formats: dask CuPy/NumPy/Numba Array
        y : array-like (device or host) shape = (n_samples, n_features)
            Index output data.
            Acceptable formats: dask CuPy/NumPy/Numba Array

        Returns
        -------
        self : KNeighborsRegressor model
        """
        self.data_handler = DistributedDataHandler.create(
            data=[X, y], client=self.client
        )
        # Number of target columns (1 for a 1-D y).
        self.n_outputs = y.shape[1] if y.ndim != 1 else 1
        return self

    @staticmethod
    def _func_create_model(sessionId, **kwargs):
        """Instantiate the per-worker MG regressor bound to this worker's
        RAFT comms handle (runs on a Dask worker)."""
        try:
            from cuml.neighbors.kneighbors_regressor_mg import (
                KNeighborsRegressorMG as cumlKNN,
            )
        except ImportError:
            raise_mg_import_exception()

        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        return cumlKNN(handle=handle, **kwargs)

    @staticmethod
    def _func_predict(
        model,
        index,
        index_parts_to_ranks,
        index_nrows,
        query,
        query_parts_to_ranks,
        query_nrows,
        ncols,
        rank,
        n_output,
        convert_dtype,
    ):
        """Run the distributed predict on a single worker's model,
        forwarding all arguments positionally."""
        return model.predict(
            index,
            index_parts_to_ranks,
            index_nrows,
            query,
            query_parts_to_ranks,
            query_nrows,
            ncols,
            rank,
            n_output,
            convert_dtype,
        )

    def predict(self, X, convert_dtype=True):
        """
        Predict outputs for a query from previously stored index
        and outputs.
        The process is done in a multi-node multi-GPU fashion.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Query data.
            Acceptable formats: dask cuDF, dask CuPy/NumPy/Numba Array
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will automatically
            convert the data to the right formats.

        Returns
        -------
        predictions : Dask futures or Dask CuPy Arrays
        """
        query_handler = DistributedDataHandler.create(
            data=X, client=self.client
        )
        self.datatype = query_handler.datatype

        # Comms clique spans the union of index and query workers.
        comms = KNeighborsRegressor._build_comms(
            self.data_handler, query_handler, self.streams_per_handle
        )
        worker_info = comms.worker_info(comms.worker_addresses)

        """
        Build inputs and outputs
        """
        self.data_handler.calculate_parts_to_sizes(comms=comms)
        query_handler.calculate_parts_to_sizes(comms=comms)

        data_parts_to_ranks, data_nrows = parts_to_ranks(
            self.client, worker_info, self.data_handler.gpu_futures
        )
        query_parts_to_ranks, query_nrows = parts_to_ranks(
            self.client, worker_info, query_handler.gpu_futures
        )

        """
        Each Dask worker creates a single model
        """
        key = uuid1()
        models = dict(
            [
                (
                    worker,
                    self.client.submit(
                        self._func_create_model,
                        comms.sessionId,
                        **self.kwargs,
                        workers=[worker],
                        key="%s-%s" % (key, idx),
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        """
        Invoke knn_classify on Dask workers to perform distributed query
        """
        key = uuid1()
        knn_reg_res = dict(
            [
                (
                    worker_info[worker]["rank"],
                    self.client.submit(
                        self._func_predict,
                        models[worker],
                        self.data_handler.worker_to_parts[worker]
                        if worker in self.data_handler.workers
                        else [],
                        data_parts_to_ranks,
                        data_nrows,
                        query_handler.worker_to_parts[worker]
                        if worker in query_handler.workers
                        else [],
                        query_parts_to_ranks,
                        query_nrows,
                        X.shape[1],
                        # NOTE(review): positionally, the next two arguments
                        # bind to _func_predict's `rank` and `n_output`
                        # parameters in that order — i.e. `self.n_outputs`
                        # lands in `rank` and the worker rank in `n_output`.
                        # Verify the order expected by
                        # KNeighborsRegressorMG.predict; either the call
                        # order or the parameter naming looks swapped.
                        self.n_outputs,
                        worker_info[worker]["rank"],
                        convert_dtype,
                        key="%s-%s" % (key, idx),
                        workers=[worker],
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        wait_and_raise_from_futures(list(knn_reg_res.values()))

        """
        Gather resulting partitions and return result
        """
        out_futures = flatten_grouped_results(
            self.client, query_parts_to_ranks, knn_reg_res
        )
        comms.destroy()
        return to_output(out_futures, self.datatype).squeeze()

    def score(self, X, y):
        """
        Provide score by comparing predictions and ground truth.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Query test data.
            Acceptable formats: dask CuPy/NumPy/Numba Array
        y : array-like (device or host) shape = (n_samples, n_features)
            Outputs test data.
            Acceptable formats: dask CuPy/NumPy/Numba Array

        Returns
        -------
        score : float
            Coefficient of determination (R^2), averaged over outputs.
        """
        y_pred = self.predict(X, convert_dtype=True)
        # Normalize both sides to dask arrays for elementwise arithmetic.
        if not isinstance(y_pred, da.Array):
            y_pred = y_pred.to_dask_array(lengths=True)
        if not isinstance(y, da.Array):
            y = y.to_dask_array(lengths=True)
        y_true = y.squeeze()
        # R^2 = 1 - SS_res / SS_tot, computed per output then averaged.
        y_mean = y_true.mean(axis=0)
        residual_sss = ((y_true - y_pred) ** 2).sum(axis=0, dtype="float64")
        total_sss = ((y_true - y_mean) ** 2).sum(axis=0, dtype="float64")
        r2_score = da.mean(1 - (residual_sss / total_sss))
        return r2_score.compute()
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/neighbors/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
import warnings

# Expose the multi-GPU neighbors estimators only when Dask is importable;
# otherwise warn instead of failing the package import.
if has_dask():
    from cuml.dask.neighbors.nearest_neighbors import NearestNeighbors
    from cuml.dask.neighbors.kneighbors_classifier import KNeighborsClassifier
    from cuml.dask.neighbors.kneighbors_regressor import KNeighborsRegressor
else:
    warnings.warn(
        "Dask not found. All Dask-based multi-GPU operation is disabled."
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/neighbors/nearest_neighbors.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common import parts_to_ranks
from cuml.dask.common.utils import wait_and_raise_from_futures
from cuml.dask.common import flatten_grouped_results
from cuml.dask.common import raise_mg_import_exception
from cuml.dask.common.base import BaseEstimator
from dask.distributed import get_worker
from raft_dask.common.comms import get_raft_comm_state
from raft_dask.common.comms import Comms
from cuml.dask.common.input_utils import to_output
from cuml.dask.common.input_utils import DistributedDataHandler
from uuid import uuid1
class NearestNeighbors(BaseEstimator):
    """
    Multi-node Multi-GPU NearestNeighbors Model.

    Parameters
    ----------
    n_neighbors : int (default=5)
        Default number of neighbors to query
    batch_size: int (optional, default 2000000)
        Maximum number of query rows processed at once. This parameter can
        greatly affect the throughput of the algorithm. The optimal setting
        of this value will vary for different layouts index to query ratios,
        but it will require `batch_size * n_features * 4` bytes of additional
        memory on each worker hosting index partitions.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    """

    def __init__(self, *, client=None, streams_per_handle=0, **kwargs):
        super().__init__(client=client, **kwargs)
        self.streams_per_handle = streams_per_handle

    def fit(self, X):
        """
        Fit a multi-node multi-GPU Nearest Neighbors index

        Parameters
        ----------
        X : dask_cudf.Dataframe

        Returns
        -------
        self: NearestNeighbors model
        """
        self.X_handler = DistributedDataHandler.create(
            data=X, client=self.client
        )
        self.datatype = self.X_handler.datatype
        self.n_cols = X.shape[1]

        # Brute force nearest neighbors does not set an internal model so
        # calls to get_combined_model() will just return None.
        # Approximate methods that build specialized indices, such as the
        # FAISS product quantized methods, will be combined into an internal
        # model.
        return self

    @staticmethod
    def _func_create_model(sessionId, **kwargs):
        """Instantiate the per-worker MG nearest-neighbors model bound to
        this worker's RAFT comms handle (runs on a Dask worker)."""
        try:
            from cuml.neighbors.nearest_neighbors_mg import (
                NearestNeighborsMG as cumlNN,
            )
        except ImportError:
            raise_mg_import_exception()

        handle = get_raft_comm_state(sessionId, get_worker())["handle"]
        return cumlNN(handle=handle, **kwargs)

    @staticmethod
    def _func_kneighbors(
        model,
        index,
        index_parts_to_ranks,
        index_nrows,
        query,
        query_parts_to_ranks,
        query_nrows,
        ncols,
        rank,
        n_neighbors,
        convert_dtype,
    ):
        """Run the distributed kneighbors query on one worker's model,
        forwarding all arguments positionally."""
        return model.kneighbors(
            index,
            index_parts_to_ranks,
            index_nrows,
            query,
            query_parts_to_ranks,
            query_nrows,
            ncols,
            rank,
            n_neighbors,
            convert_dtype,
        )

    @staticmethod
    def _build_comms(index_handler, query_handler, streams_per_handle):
        # Communicator clique needs to include the union of workers hosting
        # query and index partitions
        workers = set(index_handler.workers)
        workers.update(query_handler.workers)
        comms = Comms(comms_p2p=True, streams_per_handle=streams_per_handle)
        comms.init(workers=workers)
        return comms

    def get_neighbors(self, n_neighbors):
        """
        Returns the default n_neighbors, initialized from the constructor,
        if n_neighbors is None.

        Parameters
        ----------
        n_neighbors : int
            Number of neighbors

        Returns
        -------
        n_neighbors: int
            Default n_neighbors if parameter n_neighbors is none
        """
        if n_neighbors is None:
            if (
                "n_neighbors" in self.kwargs
                and self.kwargs["n_neighbors"] is not None
            ):
                n_neighbors = self.kwargs["n_neighbors"]
            else:
                # Fall back to the single-GPU MG model's built-in default.
                try:
                    from cuml.neighbors.nearest_neighbors_mg import (
                        NearestNeighborsMG as cumlNN,
                    )
                except ImportError:
                    raise_mg_import_exception()
                n_neighbors = cumlNN().n_neighbors

        return n_neighbors

    def _create_models(self, comms):
        """
        Each Dask worker creates a single model
        """
        # Unique key namespace so repeated queries don't collide in the
        # scheduler's task graph.
        key = uuid1()
        nn_models = dict(
            [
                (
                    worker,
                    self.client.submit(
                        NearestNeighbors._func_create_model,
                        comms.sessionId,
                        **self.kwargs,
                        workers=[worker],
                        key="%s-%s" % (key, idx),
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        return nn_models

    def _query_models(
        self, n_neighbors, comms, nn_models, index_handler, query_handler
    ):
        """Run the distributed kneighbors query across all workers.

        Returns the raw per-rank result futures plus flattened distance
        and index futures, ordered to match the query partitions.
        """
        worker_info = comms.worker_info(comms.worker_addresses)

        """
        Build inputs and outputs
        """
        index_handler.calculate_parts_to_sizes(comms=comms)
        query_handler.calculate_parts_to_sizes(comms=comms)

        idx_parts_to_ranks, _ = parts_to_ranks(
            self.client, worker_info, index_handler.gpu_futures
        )
        query_parts_to_ranks, _ = parts_to_ranks(
            self.client, worker_info, query_handler.gpu_futures
        )

        """
        Invoke kneighbors on Dask workers to perform distributed query
        """
        key = uuid1()
        nn_fit = dict(
            [
                (
                    worker_info[worker]["rank"],
                    self.client.submit(
                        NearestNeighbors._func_kneighbors,
                        nn_models[worker],
                        # Workers with no local partitions still join the
                        # collective call with empty part lists.
                        index_handler.worker_to_parts[worker]
                        if worker in index_handler.workers
                        else [],
                        idx_parts_to_ranks,
                        index_handler.total_rows,
                        query_handler.worker_to_parts[worker]
                        if worker in query_handler.workers
                        else [],
                        query_parts_to_ranks,
                        query_handler.total_rows,
                        self.n_cols,
                        worker_info[worker]["rank"],
                        n_neighbors,
                        False,
                        key="%s-%s" % (key, idx),
                        workers=[worker],
                    ),
                )
                for idx, worker in enumerate(comms.worker_addresses)
            ]
        )

        wait_and_raise_from_futures(list(nn_fit.values()))

        def _custom_getter(o):
            # Build a getter selecting element `o` (0=distances, 1=indices)
            # of each worker's result tuple.
            def func_get(f, idx):
                return f[o][idx]

            return func_get

        """
        Gather resulting partitions and return dask_cudfs
        """
        out_d_futures = flatten_grouped_results(
            self.client,
            query_parts_to_ranks,
            nn_fit,
            getter_func=_custom_getter(0),
        )
        out_i_futures = flatten_grouped_results(
            self.client,
            query_parts_to_ranks,
            nn_fit,
            getter_func=_custom_getter(1),
        )

        return nn_fit, out_d_futures, out_i_futures

    def kneighbors(
        self,
        X=None,
        n_neighbors=None,
        return_distance=True,
        _return_futures=False,
    ):
        """
        Query the distributed nearest neighbors index

        Parameters
        ----------
        X : dask_cudf.Dataframe
            Vectors to query. If not provided, neighbors of each indexed point
            are returned.
        n_neighbors : int
            Number of neighbors to query for each row in X. If not provided,
            the n_neighbors on the model are used.
        return_distance : boolean (default=True)
            If false, only indices are returned

        Returns
        -------
        ret : tuple (dask_cudf.DataFrame, dask_cudf.DataFrame)
            First dask-cuDF DataFrame contains distances, second contains the
            indices.
        """
        n_neighbors = self.get_neighbors(n_neighbors)

        query_handler = (
            self.X_handler
            if X is None
            else DistributedDataHandler.create(data=X, client=self.client)
        )
        if query_handler is None:
            raise ValueError(
                "Model needs to be trained using fit() "
                "before calling kneighbors()"
            )

        """
        Create communicator clique
        """
        comms = NearestNeighbors._build_comms(
            self.X_handler, query_handler, self.streams_per_handle
        )

        """
        Initialize models on workers
        """
        nn_models = self._create_models(comms)

        """
        Perform model query
        """
        nn_fit, out_d_futures, out_i_futures = self._query_models(
            n_neighbors, comms, nn_models, self.X_handler, query_handler
        )

        comms.destroy()

        if _return_futures:
            # Bug fix: the previous form
            #   ret = nn_fit, out_i_futures if not return_distance else (...)
            # bound the conditional to the second tuple element only (the
            # comma binds looser than the ternary), so requesting distances
            # returned the nested 2-tuple (nn_fit, (nn_fit, d, i)) instead
            # of the intended 3-tuple.
            if return_distance:
                ret = (nn_fit, out_d_futures, out_i_futures)
            else:
                ret = (nn_fit, out_i_futures)
        else:
            ret = (
                to_output(out_i_futures, self.datatype)
                if not return_distance
                else (
                    to_output(out_d_futures, self.datatype),
                    to_output(out_i_futures, self.datatype),
                )
            )

        return ret
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask/extended | rapidsai_public_repos/cuml/python/cuml/dask/extended/linear_model/__init__.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_daskglm
import warnings

# Multi-GPU LogisticRegression is an optional feature built on top of
# dask-glm. Only expose it when dask-glm is importable; otherwise warn and
# leave the rest of the package usable.
if has_daskglm():
    from cuml.dask.extended.linear_model.logistic_regression import (
        LogisticRegression,
    )
else:
    warnings.warn(
        "Dask-glm not found. Multi-GPU logistic regression is disabled."
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask/extended | rapidsai_public_repos/cuml/python/cuml/dask/extended/linear_model/logistic_regression.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.utils import is_dataframe_like, is_series_like, is_arraylike
from cuml.internals.safe_imports import cpu_only_import
from cuml.dask.common.base import BaseEstimator
from cuml.common import with_cupy_rmm
from cuml.internals.import_utils import has_daskglm
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
class LogisticRegression(BaseEstimator):
    """
    Distributed Logistic Regression for Binary classification.


    Parameters
    ----------
    fit_intercept: boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    solver : 'admm'
        Solver to use. Only admm is supported currently.
    penalty : {'l1', 'l2', 'elastic_net'} (default = 'l2')
        Regularization technique for the solver.
    C: float (default = 1.0)
        Inverse of regularization strength; must be a positive float.
    max_iter: int (default = 100)
        Maximum number of iterations taken for the solvers to converge.
    verbose : int or boolean (default=False)
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    coef_: device array (n_features, 1)
        The estimated coefficients for the logistic regression model.
    intercept_: device array (1,)
        The independent term. If `fit_intercept` is False, will be 0.
    solver: string
        Algorithm to use in the optimization process. Currently only `admm` is
        supported.

    Notes
    ------

       This estimator is a wrapper class around Dask-GLM's
       Logistic Regression estimator. Several methods in this wrapper class
       duplicate code from Dask-GLM to create a user-friendly namespace.
    """

    def __init__(
        self,
        *,
        client=None,
        fit_intercept=True,
        solver="admm",
        penalty="l2",
        C=1.0,
        max_iter=100,
        verbose=False,
        **kwargs,
    ):
        super(LogisticRegression, self).__init__(
            client=client, verbose=verbose, **kwargs
        )

        if not has_daskglm("0.2.1.dev"):
            raise ImportError(
                "dask-glm >= 0.2.1.dev was not found, please install it"
                " to use multi-GPU logistic regression."
            )
        # Imported lazily so the module can be imported without dask-glm.
        from dask_glm.estimators import (
            LogisticRegression as LogisticRegressionGLM,
        )

        self.fit_intercept = fit_intercept
        self.solver = solver
        self.penalty = penalty
        self.C = C
        self.max_iter = max_iter

        if self.penalty not in ("l2", "l1", "elastic_net"):
            raise TypeError(
                "Only l2, l1, and elastic_net penalties are"
                " currently supported."
            )

        # dask-glm expects `lamduh` (regularization strength), the inverse
        # of the scikit-learn style `C` exposed here.
        self.solver_model = LogisticRegressionGLM(
            solver=self.solver,
            fit_intercept=self.fit_intercept,
            regularizer=self.penalty,
            max_iter=self.max_iter,
            lamduh=1 / self.C,
        )

    @with_cupy_rmm
    def fit(self, X, y):
        """
        Fit the model with X and y.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Features for regression
        y : Dask cuDF Series or CuPy backed Dask Array (n_rows,)
            Label (outcome values)
        """
        X = self._input_to_dask_cupy_array(X)
        y = self._input_to_dask_cupy_array(y)
        self.solver_model.fit(X, y)
        self._finalize_coefs()
        return self

    @with_cupy_rmm
    def predict(self, X):
        """
        Predicts the ŷ for X.

        Parameters
        ----------
        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).

        Returns
        -------
        y : Dask cuDF Series or CuPy backed Dask Array (n_rows,)
        """
        # Boolean predictions: probability above 0.5 means the positive class.
        return self.predict_proba(X) > 0.5

    @with_cupy_rmm
    def predict_proba(self, X):
        # Probability of the positive class via the logistic sigmoid.
        from dask_glm.utils import sigmoid

        X = self._input_to_dask_cupy_array(X)
        return sigmoid(self.decision_function(X))

    @with_cupy_rmm
    def decision_function(self, X):
        # Raw linear scores X @ coef (with intercept column appended when
        # fit_intercept is True).
        X = self._input_to_dask_cupy_array(X)
        X_ = self._maybe_add_intercept(X)
        return np.dot(X_, self._coef)

    @with_cupy_rmm
    def score(self, X, y):
        # Mean accuracy of self.predict(X) against y.
        from dask_glm.utils import accuracy_score

        X = self._input_to_dask_cupy_array(X)
        y = self._input_to_dask_cupy_array(y)
        return accuracy_score(y, self.predict(X))

    @with_cupy_rmm
    def _finalize_coefs(self):
        # _coef contains coefficients and (potentially) intercept
        self._coef = cp.asarray(self.solver_model._coef)
        if self.fit_intercept:
            # Last entry of the solver's coefficient vector is the intercept.
            self.coef_ = self._coef[:-1]
            self.intercept_ = self.solver_model._coef[-1]
        else:
            self.coef_ = self._coef

    @with_cupy_rmm
    def _maybe_add_intercept(self, X):
        # Append the all-ones column dask-glm uses to model the intercept.
        from dask_glm.utils import add_intercept

        if self.fit_intercept:
            return add_intercept(X)
        else:
            return X

    @with_cupy_rmm
    def _input_to_dask_cupy_array(self, X):
        """
        Normalize input to a CuPy-backed Dask Array.

        Accepts a Dask-cuDF DataFrame/Series (converted via ``.values``) or a
        Dask Array whose chunks are already CuPy; rejects CPU-backed inputs.
        """
        if (is_dataframe_like(X) or is_series_like(X)) and hasattr(X, "dask"):
            if not isinstance(X._meta, (cudf.Series, cudf.DataFrame)):
                raise TypeError(
                    "Please convert your Dask DataFrame"
                    " to a Dask-cuDF DataFrame using dask_cudf."
                )
            X = X.values
            X._meta = cp.asarray(X._meta)
        elif is_arraylike(X) and hasattr(X, "dask"):
            if not isinstance(X._meta, cp.ndarray):
                raise TypeError(
                    "Please convert your CPU Dask Array"
                    " to a GPU Dask Array using"
                    " arr.map_blocks(cp.asarray)."
                )
        else:
            raise TypeError(
                "Please pass a GPU backed Dask DataFrame" " or Dask Array."
            )

        # dask-glm requires concrete chunk sizes for its solvers.
        X.compute_chunk_sizes()
        return X

    def get_param_names(self):
        # Only the extra kwargs are reported; the explicit constructor
        # parameters are stored as attributes directly.
        return list(self.kwargs.keys())
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/dask_arr_utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import rmm_cupy_ary, has_scipy
from cuml.dask.common.part_utils import _extract_partitions
from dask.distributed import default_client
from cuml.dask.common.dask_df_utils import to_dask_cudf as df_to_dask_cudf
from cuml.internals.memory_utils import with_cupy_rmm
import dask.dataframe as dd
import dask
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
cudf = gpu_only_import("cudf")
def validate_dask_array(darray, client=None):
    """
    Validate that a Dask array is at most 2-D and is only chunked row-wise.

    :param darray: dask.Array to validate (only ``.chunks`` is inspected)
    :param client: unused; kept for call-site compatibility
    :raises ValueError: if the array has more than two dimensions, or is
        chunked along axis 1
    """
    ndims = len(darray.chunks)
    if ndims > 2:
        raise ValueError("Input array cannot have more than two dimensions")
    if ndims == 2 and len(darray.chunks[1]) > 1:
        raise ValueError("Input array cannot be chunked along axis 1")
def _conv_df_to_sparse(x):
    """
    Convert a cuDF DataFrame partition into a CuPy sparse CSR matrix.

    :param x: cudf.DataFrame whose columns share a single dtype
    :return: cupyx.scipy.sparse.csr_matrix built from the dense values
    """
    # Dense CuPy copy first (allocated through RMM), then CSR conversion.
    cupy_ary = rmm_cupy_ary(cp.asarray, x.to_cupy(), dtype=x.dtypes[0])

    return cupyx.scipy.sparse.csr_matrix(cupy_ary)
def _conv_array_to_sparse(arr):
    """
    Converts an array (or cudf.DataFrame) to a sparse array
    :param arr: scipy or cupy sparse matrix, cudf DataFrame,
                dense numpy or cupy array
    :return: cupy sparse CSR matrix
    """
    # SciPy may be absent on CUDA-only installs; fall back to a predicate
    # that always answers False so the scipy branch is skipped.
    if has_scipy():
        from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
    else:
        from cuml.internals.import_utils import (
            dummy_function_always_false as scipy_sparse_isspmatrix,
        )
    if scipy_sparse_isspmatrix(arr):
        # Host sparse matrix: normalize to CSR, then copy to device.
        ret = cupyx.scipy.sparse.csr_matrix(arr.tocsr())
    elif cupyx.scipy.sparse.isspmatrix(arr):
        # Already a device sparse matrix; return unchanged (may not be CSR).
        ret = arr
    elif isinstance(arr, cudf.DataFrame):
        ret = _conv_df_to_sparse(arr)
    elif isinstance(arr, np.ndarray):
        # Dense host array: copy to device (via RMM), then sparsify.
        cupy_ary = rmm_cupy_ary(cp.asarray, arr, dtype=arr.dtype)
        ret = cupyx.scipy.sparse.csr_matrix(cupy_ary)

    elif isinstance(arr, cp.ndarray):
        ret = cupyx.scipy.sparse.csr_matrix(arr)
    else:
        raise ValueError("Unexpected input type %s" % type(arr))
    return ret
@with_cupy_rmm
def to_sparse_dask_array(cudf_or_array, client=None):
    """
    Converts an array or cuDF to a sparse Dask array backed by sparse CuPy.
    CSR matrices. Unfortunately, due to current limitations in Dask, there is
    no direct path to convert a cupyx.scipy.sparse.spmatrix into a CuPy backed
    dask.Array without copying to host.


    NOTE: Until https://github.com/cupy/cupy/issues/2655 and
    https://github.com/dask/dask/issues/5604 are implemented, compute()
    will not be able to be called on a Dask.array that is backed with
    sparse CuPy arrays because they lack the necessary functionality
    to be stacked into a single array. The array returned from this
    utility will, however, still be able to be passed into functions
    that can make use of sparse CuPy-backed Dask.Array (eg. Distributed
    Naive Bayes).

    Relevant cuML issue: https://github.com/rapidsai/cuml/issues/1387

    Parameters
    ----------
    cudf_or_array : cuDF Dataframe, array-like sparse / dense array, or
                    Dask DataFrame/Array
    client : dask.distributed.Client (optional) Dask client
    dtype : output dtype

    Returns
    -------
    dask_array : dask.Array backed by cupyx.scipy.sparse.csr_matrix
    """
    # NOTE(review): the docstring lists a ``dtype`` parameter but the
    # signature does not accept one — the input's dtype is used as-is.
    client = default_client() if client is None else client

    # Makes sure the MatDescriptor workaround for CuPy sparse arrays
    # is loaded (since Dask lazy-loaded serialization in cuML is only
    # executed when object from the cuML package needs serialization.
    # This can go away once the MatDescriptor pickling bug is fixed
    # in CuPy.
    # Ref: https://github.com/cupy/cupy/issues/3061
    from cuml.comm import serialize  # NOQA

    shape = cudf_or_array.shape

    # Zero-size sparse CSR used as Dask metadata for the output collection.
    meta = cupyx.scipy.sparse.csr_matrix(rmm_cupy_ary(cp.zeros, 1))

    ret = cudf_or_array

    # If we have a Dask array, convert it to a Dask DataFrame
    if isinstance(ret, dask.array.Array):
        # At the time of developing this, using map_blocks will not work
        # to convert a Dask.Array to CuPy sparse arrays underneath.

        def _conv_np_to_df(x):
            cupy_ary = rmm_cupy_ary(cp.asarray, x, dtype=x.dtype)
            return cudf.DataFrame(cupy_ary)

        parts = client.sync(_extract_partitions, ret)
        futures = [
            client.submit(_conv_np_to_df, part, workers=[w], pure=False)
            for w, part in parts
        ]

        ret = df_to_dask_cudf(futures)

    # If we have a Dask Dataframe, use `map_partitions` to convert it
    # to a Sparse Cupy-backed Dask Array. This will also convert the dense
    # Dask array above to a Sparse Cupy-backed Dask Array, since we cannot
    # use map_blocks on the array, but we can use `map_partitions` on the
    # Dataframe.
    if isinstance(ret, dask.dataframe.DataFrame):
        ret = ret.map_partitions(
            _conv_df_to_sparse, meta=dask.array.from_array(meta)
        )

        # This will also handle the input of dask.array.Array
        return ret

    else:
        # Local (non-Dask) input: sparsify on the client, scatter to a
        # worker, and wrap the single future as a one-chunk Dask array.
        ret = _conv_array_to_sparse(ret)

        # Push to worker
        final_result = client.scatter(ret)

        return dask.array.from_delayed(final_result, shape=shape, meta=meta)
def _get_meta(df):
ret = df.iloc[:0]
return ret
@dask.delayed
def _to_cudf(arr):
    """Wrap a delayed array chunk as a cuDF object: 1-D chunks become a
    Series, 2-D chunks become a DataFrame."""
    if arr.ndim == 1:
        return cudf.Series(arr)
    elif arr.ndim == 2:
        return cudf.DataFrame(arr)
def to_dask_cudf(dask_arr, client=None):
    """
    Convert a Dask array into a Dask-cuDF collection.

    :param dask_arr: dask.Array whose chunks can be wrapped by cuDF
    :param client: dask.distributed.Client Optional client to use
    :return: dask.dataframe backed by cuDF partitions
    """
    client = default_client() if client is None else client
    # One delayed cuDF object per chunk of the input array.
    elms = [_to_cudf(dp) for dp in dask_arr.to_delayed().flatten()]
    dfs = client.compute(elms)

    # Use the first partition to derive schema metadata for from_delayed.
    meta = client.submit(_get_meta, dfs[0])
    meta_local = meta.result()

    return dd.from_delayed(dfs, meta=meta_local)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/input_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.dataframe as dd
from functools import reduce
from toolz import first
from dask.distributed import default_client
from dask.distributed import wait
from cuml.dask.common.part_utils import _extract_partitions
from cuml.dask.common.dask_arr_utils import validate_dask_array
from cuml.dask.common.dask_df_utils import to_dask_cudf
from cuml.dask.common.utils import get_client
from dask_cudf.core import Series as dcSeries
from dask.dataframe import Series as daskSeries
from dask.dataframe import DataFrame as daskDataFrame
from cudf import Series
from cuml.internals.safe_imports import gpu_only_import_from
from collections import OrderedDict
from cuml.internals.memory_utils import with_cupy_rmm
from collections.abc import Sequence
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
import cuml.internals.logger as logger
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
DataFrame = gpu_only_import_from("cudf", "DataFrame")
dcDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
class DistributedDataHandler:
    """
    Class to centralize distributed data management. Functionalities include:
    - Data colocation
    - Worker information extraction
    - GPU futures extraction,

    Additional functionality can be added as needed. This class **does not**
    contain the actual data, just the metadata necessary to handle it,
    including common pieces of code that need to be performed to call
    Dask functions.

    The constructor is not meant to be used directly, but through the factory
    method DistributedDataHandler.create
    """

    def __init__(
        self,
        gpu_futures=None,
        workers=None,
        datatype=None,
        multiple=False,
        client=None,
    ):
        # gpu_futures: list of (worker, future) tuples
        # workers: tuple of distinct worker addresses holding data
        # datatype: 'cupy' or 'cudf'
        # multiple: True when handling colocated collections (e.g. X and y)
        self.client = get_client(client)
        self.gpu_futures = gpu_futures
        self.worker_to_parts = _workers_to_parts(gpu_futures)
        self.workers = workers
        self.datatype = datatype
        self.multiple = multiple
        # Attributes below are lazily populated by the calculate_* methods.
        self.worker_info = None
        self.total_rows = None
        self.ranks = None
        self.parts_to_sizes = None

    @classmethod
    def get_client(cls, client=None):
        # Fall back to the current default scheduler client.
        return default_client() if client is None else client

    """ Class methods for initialization """

    @classmethod
    def create(cls, data, client=None):
        """
        Creates a distributed data handler instance with the given
        distributed data set(s).

        Parameters
        ----------

        data : dask.array, dask.dataframe, or unbounded Sequence of
               dask.array or dask.dataframe.

        client : dask.distributedClient
        """
        client = cls.get_client(client)

        datatype, multiple = _get_datatype_from_inputs(data)

        # Persist the collection(s) and map each partition future to the
        # worker that holds it.
        gpu_futures = client.sync(_extract_partitions, data, client)

        workers = tuple(set(map(lambda x: x[0], gpu_futures)))

        return DistributedDataHandler(
            gpu_futures=gpu_futures,
            workers=workers,
            datatype=datatype,
            multiple=multiple,
            client=client,
        )

    """ Methods to calculate further attributes """

    def calculate_worker_and_rank_info(self, comms):
        # Map each data-holding worker to its communicator rank.
        self.worker_info = comms.worker_info(comms.worker_addresses)
        self.ranks = dict()

        for w, futures in self.worker_to_parts.items():
            self.ranks[w] = self.worker_info[w]["rank"]

    def calculate_parts_to_sizes(self, comms=None, ranks=None):
        # Compute per-rank partition sizes and the global row count.
        # `ranks` is accepted for API compatibility but not used here.
        if self.worker_info is None and comms is not None:
            self.calculate_worker_and_rank_info(comms)

        self.total_rows = 0

        self.parts_to_sizes = dict()

        # Ask each worker for the row counts of its local partitions.
        parts = [
            (
                wf[0],
                self.client.submit(
                    _get_rows,
                    wf[1],
                    self.multiple,
                    workers=[wf[0]],
                    pure=False,
                ),
            )
            for idx, wf in enumerate(self.worker_to_parts.items())
        ]

        sizes = self.client.compute(parts, sync=True)

        for w, sizes_parts in sizes:
            sizes, total = sizes_parts
            self.parts_to_sizes[self.worker_info[w]["rank"]] = sizes

            self.total_rows += total
def _get_datatype_from_inputs(data):
    """
    Gets the datatype from a distributed data input.

    Parameters
    ----------

    data : dask.DataFrame, dask.Series, dask.Array, or
           Iterable containing either.

    Returns
    -------

    datatype : str {'cupy', 'cudf}
    """

    multiple = isinstance(data, Sequence)

    # Inspect only the first element when given a sequence of collections;
    # all elements are assumed to share one datatype.
    if isinstance(
        first(data) if multiple else data,
        (daskSeries, daskDataFrame, dcDataFrame, dcSeries),
    ):
        datatype = "cudf"
    else:
        # Anything that is not a dataframe/series is treated as array input.
        datatype = "cupy"
        if multiple:
            for d in data:
                validate_dask_array(d)
        else:
            validate_dask_array(data)

    return datatype, multiple
@with_cupy_rmm
def concatenate(objs, axis=0):
    """
    Concatenate a list of same-typed objects (cuDF DataFrame/Series, CuPy
    arrays, or NumPy arrays) along ``axis``.

    NOTE(review): ``axis`` is ignored on the cuDF path, and an unrecognized
    element type falls through and returns None — confirm callers only pass
    the three supported types.
    """
    if isinstance(objs[0], DataFrame) or isinstance(objs[0], Series):
        if len(objs) == 1:
            # Single partition: avoid an unnecessary copy.
            return objs[0]
        else:
            return cudf.concat(objs)

    elif isinstance(objs[0], cp.ndarray):
        return cp.concatenate(objs, axis=axis)

    elif isinstance(objs[0], np.ndarray):
        return np.concatenate(objs, axis=axis)
# TODO: This should be delayed.
def to_output(futures, type, client=None):
    """Gather partition futures into a Dask collection of the requested
    datatype ('cupy' -> Dask array, anything else -> Dask-cuDF)."""
    if type == "cupy":
        return to_dask_cupy(futures, client=client)
    return to_dask_cudf(futures, client=client)
def _get_meta(df):
"""
Return the metadata from a single dataframe
:param df: cudf.dataframe
:return: Row data from the first row of the dataframe
"""
ret = df[0].iloc[:0]
return ret
def _to_dask_cudf(futures, client=None):
    """
    Convert a list of futures containing cudf Dataframes into a Dask.Dataframe
    :param futures: list[cudf.Dataframe] list of futures containing dataframes
    :param client: dask.distributed.Client Optional client to use
    :return: dask.Dataframe a dask.Dataframe
    """
    c = default_client() if client is None else client
    # Convert a list of futures containing dfs back into a dask_cudf
    dfs = [d for d in futures if d.type != type(None)]  # NOQA
    if logger.should_log_for(logger.level_debug):
        logger.debug("to_dask_cudf dfs=%s" % str(dfs))
    # Pull schema metadata from the first non-empty partition.
    meta_future = c.submit(_get_meta, dfs[0], pure=False)
    meta = meta_future.result()
    return dd.from_delayed(dfs, meta=meta)
""" Internal methods, API subject to change """
def _workers_to_parts(futures):
"""
Builds an ordered dict mapping each worker to their list
of parts
:param futures: list of (worker, part) tuples
:return:
"""
w_to_p_map = OrderedDict()
for w, p in futures:
if w not in w_to_p_map:
w_to_p_map[w] = []
w_to_p_map[w].append(p)
return w_to_p_map
def _get_ary_meta(ary):
    """
    Return (shape, dtype) metadata for a local partition.

    :param ary: numpy/cupy ndarray or cudf.DataFrame
    :return: tuple (shape, dtype)
    """
    if isinstance(ary, (np.ndarray, cp.ndarray)):
        return ary.shape, ary.dtype
    elif isinstance(ary, cudf.DataFrame):
        # Assumes all columns share one dtype; `first` picks it arbitrarily.
        return ary.shape, first(set(ary.dtypes))
    else:
        # NOTE(review): message mentions dask types but this function checks
        # local partition types — the wording predates a refactor, verify.
        raise ValueError(
            "Expected dask.Dataframe " "or dask.Array, received %s" % type(ary)
        )
def _get_rows(objs, multiple):
def get_obj(x):
return x[0] if multiple else x
total = list(map(lambda x: get_obj(x).shape[0], objs))
return total, reduce(lambda a, b: a + b, total)
def to_dask_cupy(futures, dtype=None, shapes=None, client=None):
    """
    Stitch a list of ndarray futures into a single Dask array.

    :param futures: list of futures containing cupy/numpy ndarrays
    :param dtype: unused; metadata is queried from the futures instead
    :param shapes: unused; metadata is queried from the futures instead
    :param client: dask.distributed.Client Optional client to use
    :return: dask.Array concatenated along axis 0
    """
    wait(futures)

    c = default_client() if client is None else client
    # One metadata query per partition (shape, dtype).
    meta = [c.submit(_get_ary_meta, future, pure=False) for future in futures]

    objs = []
    for i in range(len(futures)):
        # Skip empty (None-typed) partitions.
        if not isinstance(futures[i].type, type(None)):
            met_future = meta[i]
            met = met_future.result()
            obj = da.from_delayed(futures[i], shape=met[0], dtype=met[1])
            objs.append(obj)

    return da.concatenate(objs, axis=0)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/part_utils.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.utils import parse_host_port
from dask_cudf.core import Series as dcSeries
from cuml.internals.safe_imports import gpu_only_import_from
from dask.dataframe import Series as daskSeries
from dask.dataframe import DataFrame as daskDataFrame
from dask.array.core import Array as daskArray
from toolz import first
from dask.distributed import futures_of, default_client, wait
from collections.abc import Sequence
from tornado import gen
from functools import reduce
from collections import OrderedDict
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
dcDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
def hosts_to_parts(futures):
    """
    Build an ordered dict mapping each (host, port) pair to its list of
    parts, insertion-ordered by first appearance.

    :param futures: list of (worker, part) tuples
    :return: OrderedDict of {(host, port): [part, ...]}
    """
    grouped = OrderedDict()
    for worker, part in futures:
        host, port = parse_host_port(worker)
        grouped.setdefault((host, port), []).append(part)
    return grouped
def workers_to_parts(futures):
    """
    Build an ordered dict mapping each worker to its list of parts,
    insertion-ordered by the first appearance of each worker.

    :param futures: list of (worker, part) tuples
    :return: OrderedDict of {worker: [part, ...]}
    """
    grouped = OrderedDict()
    for worker, part in futures:
        grouped.setdefault(worker, []).append(part)
    return grouped
def _func_get_rows(df):
if isinstance(df, tuple):
return df[0].shape[0]
else:
return df.shape[0]
def parts_to_ranks(client, worker_info, part_futures):
    """
    Builds a list of (rank, size) tuples of partitions
    :param client: dask.distributed.Client used to submit/compute
    :param worker_info: dict of {worker, {"rank": rank }}. Note: \
        This usually comes from the underlying communicator
    :param part_futures: list of (worker, future) tuples
    :return: tuple of ([(rank, size)] in the same order of part_futures,
        total row count across all partitions)
    """
    # Submit a row-count task on the worker that already holds each part.
    futures = [
        (
            worker_info[wf[0]]["rank"],
            client.submit(_func_get_rows, wf[1], workers=[wf[0]], pure=False),
        )
        for idx, wf in enumerate(part_futures)
    ]

    sizes = client.compute(list(map(lambda x: x[1], futures)), sync=True)
    total = reduce(lambda a, b: a + b, sizes)

    # Re-pair each computed size with its rank, preserving input order.
    return [(futures[idx][0], size) for idx, size in enumerate(sizes)], total
def _default_part_getter(f, idx):
return f[idx]
def flatten_grouped_results(
    client, gpu_futures, worker_results_map, getter_func=_default_part_getter
):
    """
    Flatten per-rank grouped results back into an ordered list of
    partition futures.

    A communications implementation that allows only one rank per device
    forces partitions to be grouped per rank and processed with separate
    streams; this helper undoes that grouping, emitting one future per
    original partition in the order of ``gpu_futures``.

    :param client: Dask client
    :param gpu_futures: [(rank, part)] ordered list of partition ownership
    :param worker_results_map: { rank: future } where future is a list
           of data partitions on a Dask worker
    :param getter_func: callable (future, partition index) -> partition data
    :return: ordered list of futures holding each partition on the workers
    """
    flattened = []
    seen_per_rank = {}
    for rank, _ in gpu_futures:
        next_idx = seen_per_rank.get(rank, 0)
        flattened.append(
            client.submit(getter_func, worker_results_map[rank], next_idx)
        )
        seen_per_rank[rank] = next_idx + 1
    return flattened
@gen.coroutine
def _extract_partitions(dask_obj, client=None):
    """
    Persist a Dask collection (or sequence of colocated collections) and
    yield a list of (worker_address, partition_future) tuples, one per
    partition, pinned to the worker that holds the data.
    """
    client = default_client() if client is None else client

    # dask.dataframe or dask.array
    if isinstance(
        dask_obj, (daskArray, daskSeries, daskDataFrame, dcSeries, dcDataFrame)
    ):
        persisted = client.persist(dask_obj)
        parts = futures_of(persisted)

    # iterable of dask collections (need to colocate them)
    elif isinstance(dask_obj, Sequence):
        # NOTE: We colocate (X, y) here by zipping delayed
        # n partitions of them as (X1, y1), (X2, y2)...
        # and asking client to compute a single future for
        # each tuple in the list
        dela = [np.asarray(d.to_delayed()) for d in dask_obj]

        # TODO: ravel() is causing strange behavior w/ delayed Arrays which are
        # not yet backed by futures. Need to investigate this behavior.
        # ref: https://github.com/rapidsai/cuml/issues/2045
        raveled = [d.ravel() for d in dela]

        parts = client.compute([p for p in zip(*raveled)])

    yield wait(parts)

    # Ask the scheduler which worker holds each computed partition;
    # `first` picks one holder when a partition is replicated.
    key_to_part = [(part.key, part) for part in parts]
    who_has = yield client.who_has(parts)

    raise gen.Return(
        [(first(who_has[key]), part) for key, part in key_to_part]
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/dask_df_utils.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
import dask.dataframe as dd
from dask.distributed import default_client
def get_meta(df):
    """
    Return a zero-row slice of a dataframe, preserving its schema.

    :param df: cudf.dataframe
    :return: empty dataframe with the same columns/dtypes as ``df``
    """
    return df.iloc[:0]
def to_dask_cudf(futures, client=None):
    """
    Convert a list of futures containing cudf Dataframes into a Dask.Dataframe
    :param futures: list[cudf.Dataframe] list of futures containing dataframes
    :param client: dask.distributed.Client Optional client to use
    :return: dask.Dataframe a dask.Dataframe
    """
    c = default_client() if client is None else client
    # Convert a list of futures containing dfs back into a dask_cudf
    dfs = [d for d in futures if d.type != type(None)]  # NOQA
    if logger.should_log_for(logger.level_debug):
        logger.debug("to_dask_cudf dfs=%s" % str(dfs))
    # Derive schema metadata from the first non-empty partition.
    meta = c.submit(get_meta, dfs[0])
    meta_local = meta.result()
    return dd.from_delayed(dfs, meta=meta_local)
def to_dask_df(dask_cudf, client=None):
    """
    Convert a Dask-cuDF into a Pandas-backed Dask Dataframe.
    :param dask_cudf : dask_cudf.DataFrame
    :param client: dask.distributed.Client Optional client to use
    :return : dask.DataFrame
    """

    def to_pandas(df):
        # Per-partition device-to-host copy.
        return df.to_pandas()

    c = default_client() if client is None else client
    delayed_ddf = dask_cudf.to_delayed()
    gpu_futures = c.compute(delayed_ddf)

    dfs = [
        c.submit(to_pandas, f, pure=False) for idx, f in enumerate(gpu_futures)
    ]

    meta = c.submit(get_meta, dfs[0])

    # Using new variable for local result to stop race-condition in scheduler
    # Ref: https://github.com/dask/dask/issues/6027
    meta_local = meta.result()

    return dd.from_delayed(dfs, meta=meta_local)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/__init__.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_sparse_dask_array
from cuml.dask.common.dask_df_utils import get_meta
from cuml.dask.common.dask_df_utils import to_dask_cudf
from cuml.dask.common.dask_df_utils import to_dask_df
from cuml.dask.common.part_utils import flatten_grouped_results
from cuml.dask.common.part_utils import hosts_to_parts
from cuml.dask.common.part_utils import parts_to_ranks
from cuml.dask.common.part_utils import workers_to_parts
from cuml.dask.common.utils import raise_exception_from_futures
from cuml.dask.common.utils import raise_mg_import_exception
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/base.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distributed.client import Future
from functools import wraps
from dask_cudf.core import Series as dcSeries
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.base import Base
from cuml.internals import BaseMetaClass
from cuml.dask.common import parts_to_ranks
from cuml.dask.common.input_utils import DistributedDataHandler
from raft_dask.common.comms import Comms
from cuml.dask.common.utils import wait_and_raise_from_futures
from cuml.internals.array import CumlArray
from cuml.dask.common.utils import get_client
from collections.abc import Iterable
from toolz import first
from cuml.internals.safe_imports import cpu_only_import
import dask
import cudf.comm.serialize # noqa: F401
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
dask_cudf = gpu_only_import("dask_cudf")
dcDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
class BaseEstimator(object, metaclass=BaseMetaClass):
    """
    Base class for multi-node multi-GPU cuML estimators.

    Tracks a Dask client and an "internal model": the trained single-GPU
    model, held either locally on the client (a ``cuml.Base`` instance) or
    remotely on a worker (a ``distributed.Future`` wrapping one).
    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        """
        Constructor for distributed estimators.
        """
        self.client = get_client(client)

        # set client verbosity
        self.verbose = verbose

        # kwargs transmits the verbosity level to workers
        kwargs["verbose"] = verbose
        self.kwargs = kwargs

        self.internal_model = None

    def __getstate__(self):
        # Materialize the trained model on the client so the estimator can
        # be pickled. Robustness fix: the internal model may be a Future, a
        # local cuml.Base, or None (not yet fitted); only a Future needs
        # (and supports) .result(). The previous code called .result()
        # unconditionally and crashed for unfitted/local models.
        internal_model = self._get_internal_model()
        if isinstance(internal_model, Future):
            internal_model = internal_model.result()
        state = {
            "verbose": self.verbose,
            "kwargs": self.kwargs,
            "datatype": getattr(self, "datatype", None),
            "internal_model": internal_model,
        }
        return state

    def __setstate__(self, state):
        # Re-validate the (already materialized) model, then restore the
        # remaining attributes.
        self._set_internal_model(state.pop("internal_model"))
        self.__dict__.update(state)

    def get_combined_model(self):
        """
        Return single-GPU model for serialization

        Returns
        -------

        model : Trained single-GPU model or None if the model has not
           yet been trained.
        """
        # Validation also surfaces any error stored in a Future early.
        internal_model = self._check_internal_model(self._get_internal_model())

        if isinstance(self.internal_model, Iterable):
            # This function needs to return a single instance of cuml.Base,
            # even if the class is just a composite.
            raise ValueError(
                "Expected a single instance of cuml.Base "
                "but got %s instead." % type(self.internal_model)
            )

        elif isinstance(self.internal_model, Future):
            internal_model = self.internal_model.result()

        return internal_model

    def _set_internal_model(self, model):
        """
        Assigns model (a Future or list of futures containing a single-GPU
        model) to be an internal model.

        This function standardizes upon the way we set the internal model
        so that it could either be futures, a single future, or a class local
        to the client.

        In order for `get_combined model()` to provide a consistent output,
        self.internal_model is expected to be either a single future
        containing a cuml.Base instance or a local cuml.Base on the client.

        An iterable can be passed into this method when a trained model
        has been replicated across the workers. In this case, only the
        first element of the iterable will be set as the internal_model

        If multiple different parameters have been trained across the cluster,
        such as in RandomForests or some approx. nearest neighbors algorithms,
        they should be combined into a single model and the combined model
        should be passed to `set_internal_model()`

        Parameters
        ----------

        model : distributed.client.Future[cuml.Base], cuml.Base, or None
        """
        self.internal_model = self._check_internal_model(model)

    @staticmethod
    def _check_internal_model(model):
        """
        Performs a brief validation that a model meets the requirements
        to be set as an `internal_model`

        Parameters
        ----------

        model : distributed.client.Future[cuml.Base], cuml.Base, or None

        Returns
        -------

        model : distributed.client.Future[cuml.Base], cuml.Base, or None
        """
        if isinstance(model, Iterable):
            # If model is iterable, just grab the first
            model = first(model)

        if isinstance(model, Future):
            if model.type is None:
                # Wait so the Future's type is resolved (or its error is
                # raised) before validating it.
                wait_and_raise_from_futures([model])

            if not issubclass(model.type, Base):
                raise ValueError(
                    "Dask Future expected to contain cuml.Base "
                    "but found %s instead." % model.type
                )

        elif model is not None and not isinstance(model, Base):
            raise ValueError(
                "Expected model of type cuml.Base but found %s "
                "instead." % type(model)
            )
        return model

    def _get_internal_model(self):
        """
        Gets the internal model from the instance.

        This function is a convenience for future maintenance and
        should never perform any expensive operations like data
        transfers between the client and the Dask cluster.

        Returns
        -------

        internal_model : dask.client.Future[cuml.Base], cuml.Base or None
        """
        return self.internal_model

    @staticmethod
    @dask.delayed
    def _get_model_attr(model, name):
        # Runs remotely on the worker holding the model.
        if hasattr(model, name):
            return getattr(model, name)
        # skip raising an error for ipython/jupyter related attributes
        elif any(x in name for x in ("_ipython", "_repr")):
            pass
        else:
            raise AttributeError(
                "Attribute %s does not exist on model %s" % (name, type(model))
            )

    def __getattr__(self, attr):
        """
        Method gives access to the correct format of cuml Array attribute to
        the users and proxies attributes to the underlying trained model.

        If the attribute being requested is not directly on the local object,
        this function will see if the local object contains the attribute
        prefixed with an _. In the case the attribute does not exist on this
        local instance, the request will be proxied to self.internal_model and
        will be fetched either locally or remotely depending on whether
        self.internal_model is a local object instance or a future.
        """
        real_name = "_" + attr

        ret_attr = None

        # First check locally for _ prefixed attr
        if real_name in self.__dict__:
            ret_attr = self.__dict__[real_name]
        # Otherwise, if the actual attribute name exists on the
        # object, just return it.
        elif attr in self.__dict__:
            ret_attr = self.__dict__[attr]

        if ret_attr is None:
            # Bug fix: only raise / consult the distributed model when
            # nothing was found locally. Previously a locally-found
            # attribute still raised AttributeError whenever the
            # internal model condition did not hold.
            internal_model = self._get_internal_model()
            if internal_model is None:
                raise AttributeError(
                    "Attribute %s not found in %s" % (attr, type(self))
                )
            if isinstance(internal_model, Base):
                # If model is not distributed, just return the
                # requested attribute
                ret_attr = getattr(internal_model, attr)
            else:
                # Otherwise, fetch the attribute from the distributed
                # model and return it
                ret_attr = BaseEstimator._get_model_attr(
                    internal_model, attr
                ).compute()

        if isinstance(ret_attr, CumlArray):
            # Dask wrappers aren't meant to be pickled, so we can
            # store the raw type on the instance
            return ret_attr.to_output(self.output_type)
        else:
            return ret_attr
class DelayedParallelFunc(object):
    def _run_parallel_func(
        self,
        func,
        X,
        n_dims=1,
        delayed=True,
        output_futures=False,
        output_dtype=None,
        output_collection_type=None,
        **kwargs,
    ):
        """
        Runs a function embarrassingly parallel on a set of workers while
        reusing instances of models and constraining the number of
        tasks that can execute concurrently on each worker.

        Note that this mixin assumes the subclass has been trained and
        includes a `self._get_internal_model()` function containing a subclass
        of `cuml.Base`.

        This is intended to abstract functions like predict, transform, and
        score, which can execute embarrassingly parallel but need addition
        execution constraints which result from the more limited GPU
        resources.

        Parameters
        ----------
        func : dask.delayed function to propagate to the workers to execute
               embarrassingly parallel, shared between tasks on each worker

        X : Dask cuDF dataframe or CuPy backed Dask Array (n_rows, n_features)
            Distributed dense matrix (floats or doubles) of shape
            (n_samples, n_features).

        n_dims : int
            Number of dimensions of the output (1 or 2).

        delayed : bool return a lazy (delayed) object?

        output_futures : bool returns the futures pointing the to the resuls
                         of the parallel function executions on the workers,
                         rather than a dask collection object.

        output_dtype : dtype of the output; inferred from X when None.

        output_collection_type : None or a string in {'cupy', 'cudf'}
            Choose to return the resulting collection as a CuPy backed
            dask.array or a dask_cudf.DataFrame. If None, will use the same
            collection type as used in the input of fit.
            Unused if output_futures=True.

        Returns
        -------
        y : dask cuDF (n_rows, 1)
        """
        X_d = X.to_delayed()

        if output_collection_type is None:
            output_collection_type = self.datatype

        # Share a single delayed reference to the trained model across all
        # partition tasks instead of serializing it once per task.
        model_delayed = dask.delayed(
            self._get_internal_model(), pure=True, traverse=False
        )

        func = dask.delayed(func, pure=False, nout=1)
        if isinstance(X, dcDataFrame):
            preds = [func(model_delayed, part, **kwargs) for part in X_d]
            dtype = first(X.dtypes) if output_dtype is None else output_dtype

        elif isinstance(X, dcSeries):
            preds = [func(model_delayed, part, **kwargs) for part in X_d]
            dtype = X.dtype if output_dtype is None else output_dtype

        else:
            # Dask arrays deliver their delayed chunks nested one level
            # deeper, hence part[0]. Bug fix: forward **kwargs here too —
            # the dataframe/series branches above pass them along, but the
            # array branch silently dropped caller kwargs.
            preds = [func(model_delayed, part[0], **kwargs) for part in X_d]
            dtype = X.dtype if output_dtype is None else output_dtype

        # TODO: Put the following conditionals in a
        # `to_delayed_output()` function

        # TODO: Add eager path back in
        if output_collection_type == "cupy":
            # todo: add parameter for option of not checking directly
            shape = (np.nan,) * n_dims
            preds_arr = [
                dask.array.from_delayed(
                    pred,
                    meta=cp.zeros(1, dtype=dtype),
                    shape=shape,
                    dtype=dtype,
                )
                for pred in preds
            ]

            if output_futures:
                return self.client.compute(preds)
            else:
                output = dask.array.concatenate(
                    preds_arr, axis=0, allow_unknown_chunksizes=True
                )
                return output if delayed else output.persist()

        elif output_collection_type == "cudf":
            if output_futures:
                return self.client.compute(preds)
            else:
                output = dask_cudf.from_delayed(preds)
                return output if delayed else output.persist()
        else:
            raise ValueError(
                "Expected cupy or cudf but found %s" % (output_collection_type)
            )
class DelayedPredictionProbaMixin(DelayedParallelFunc):
    """Mixin providing a lazy, partition-parallel ``predict_proba``."""

    def _predict_proba(self, X, delayed=True, **kwargs):
        """Apply the trained model's ``predict_proba`` to each partition."""
        return self._run_parallel_func(
            func=_predict_proba_func, X=X, delayed=delayed, n_dims=2, **kwargs
        )
class DelayedPredictionMixin(DelayedParallelFunc):
    """Mixin providing a lazy, partition-parallel ``predict``."""

    def _predict(self, X, delayed=True, **kwargs):
        """Apply the trained model's ``predict`` to each partition of X."""
        return self._run_parallel_func(
            func=_predict_func, X=X, delayed=delayed, n_dims=1, **kwargs
        )
class DelayedTransformMixin(DelayedParallelFunc):
    """Mixin providing a lazy, partition-parallel ``transform``."""

    def _transform(self, X, n_dims=1, delayed=True, **kwargs):
        """Apply the trained model's ``transform`` to each partition of X."""
        return self._run_parallel_func(
            func=_transform_func, X=X, delayed=delayed, n_dims=n_dims, **kwargs
        )
class DelayedInverseTransformMixin(DelayedParallelFunc):
    """Mixin providing a lazy, partition-parallel ``inverse_transform``."""

    def _inverse_transform(self, X, n_dims=1, delayed=True, **kwargs):
        """Apply ``inverse_transform`` to each partition of X."""
        return self._run_parallel_func(
            func=_inverse_transform_func,
            X=X,
            delayed=delayed,
            n_dims=n_dims,
            **kwargs,
        )
class SyncFitMixinLinearModel(object):
    """
    Mixin implementing the synchronous multi-worker fit used by the
    distributed linear models: one single-GPU model is constructed per
    worker rank and fit cooperatively through a raft comms session.
    """

    def _fit(self, model_func, data):
        """
        Fit one model replica per worker.

        Parameters
        ----------
        model_func : callable(sessionId, datatype, **kwargs) -> model
            Factory invoked remotely on each worker to build the
            worker-local single-GPU model.
        data : sequence of distributed collections
            Input collections (e.g. [X, y]) already partitioned by Dask.

        Returns
        -------
        dict mapping worker rank -> future of the fitted worker-local model.
        """
        n_cols = data[0].shape[1]

        data = DistributedDataHandler.create(data=data, client=self.client)
        self.datatype = data.datatype

        comms = Comms(comms_p2p=False)
        comms.init(workers=data.workers)

        data.calculate_parts_to_sizes(comms)
        self.ranks = data.ranks

        worker_info = comms.worker_info(comms.worker_addresses)
        parts_to_sizes, _ = parts_to_ranks(
            self.client, worker_info, data.gpu_futures
        )

        # NOTE(review): the local `worker_info` (from comms) and
        # `data.worker_info` (populated by calculate_parts_to_sizes) are
        # both used below — presumably they agree on rank assignment;
        # confirm before refactoring.
        # One model instance per worker, keyed by that worker's rank.
        lin_models = dict(
            [
                (
                    data.worker_info[worker_data[0]]["rank"],
                    self.client.submit(
                        model_func,
                        comms.sessionId,
                        self.datatype,
                        **self.kwargs,
                        pure=False,
                        workers=[worker_data[0]],
                    ),
                )
                for worker, worker_data in enumerate(
                    data.worker_to_parts.items()
                )
            ]
        )

        # Kick off the cooperative fit on every worker; all ranks must
        # participate for the comms collective to complete.
        fit_func = self._func_fit
        lin_fit = dict(
            [
                (
                    worker_data[0],
                    self.client.submit(
                        fit_func,
                        lin_models[data.worker_info[worker_data[0]]["rank"]],
                        worker_data[1],
                        data.total_rows,
                        n_cols,
                        parts_to_sizes,
                        data.worker_info[worker_data[0]]["rank"],
                        pure=False,
                        workers=[worker_data[0]],
                    ),
                )
                for worker, worker_data in enumerate(
                    data.worker_to_parts.items()
                )
            ]
        )

        wait_and_raise_from_futures(list(lin_fit.values()))

        comms.destroy()
        return lin_models

    @staticmethod
    def _func_fit(f, data, n_rows, n_cols, partsToSizes, rank):
        # Runs remotely: fit the worker-local model on its partitions.
        return f.fit(data, n_rows, n_cols, partsToSizes, rank)
def mnmg_import(func):
    """
    Decorator translating an ``ImportError`` raised by ``func`` into a
    ``RuntimeError`` explaining that cuML was built without multi-GPU
    support.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ImportError:
            raise RuntimeError(
                "cuML has not been built with multiGPU support "
                "enabled. Build with the --multigpu flag to"
                " enable multiGPU support."
            )

    return wrapper
def _predict_func(model, data, **kwargs):
return model.predict(data, **kwargs)
def _predict_proba_func(model, data, **kwargs):
return model.predict_proba(data, **kwargs)
def _transform_func(model, data, **kwargs):
return model.transform(data, **kwargs)
def _inverse_transform_func(model, data, **kwargs):
return model.inverse_transform(data, **kwargs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/utils.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Lock
from asyncio import InvalidStateError
from cuml.internals.import_utils import check_min_dask_version
from cuml.common import device_of_gpu_matrix
from dask.distributed import default_client, wait
import time
import random
import dask
import logging
import os
from cuml.internals.safe_imports import gpu_only_import
numba_cuda = gpu_only_import("numba.cuda")
def get_visible_devices():
    """
    Return a list of the CUDA_VISIBLE_DEVICES

    :return: list[str] visible device identifiers
    """
    # TODO: Shouldn't have to split on every call
    visible = os.environ["CUDA_VISIBLE_DEVICES"]
    return visible.split(",")
def device_of_devicendarray(devicendarray):
    """
    Returns the device that backs memory allocated on the given
    deviceNDArray

    :param devicendarray: devicendarray array to check
    :return: device identifier (as listed in CUDA_VISIBLE_DEVICES)
    """
    local_id = device_of_gpu_matrix(devicendarray)
    return get_visible_devices()[local_id]
def get_device_id(canonical_name):
    """
    Given a local device id, find the actual "global" id

    :param canonical_name: the local device name in CUDA_VISIBLE_DEVICES
    :return: the global device id for the system, or -1 when the device
             is not listed in CUDA_VISIBLE_DEVICES
    """
    dev_order = get_visible_devices()
    try:
        # list.index replaces the manual counting loop; same semantics
        # (first match wins) with the sentinel preserved on a miss.
        return dev_order.index(canonical_name)
    except ValueError:
        return -1
def select_device(dev, close=True):
    """
    Use numbas numba to select the given device, optionally
    closing and opening up a new cuda context if it fails.

    :param dev: int device to select
    :param close: bool close the cuda context and create new one?
    """
    # Guard clause: nothing to do when the device is already current.
    if numba_cuda.get_current_device().id == dev:
        return

    logging.warning("Selecting device " + str(dev))
    if close:
        numba_cuda.close()
    numba_cuda.select_device(dev)
    if dev != numba_cuda.get_current_device().id:
        logging.warning(
            "Current device "
            + str(numba_cuda.get_current_device())
            + " does not match expected "
            + str(dev)
        )
def get_client(client=None):
    """Return *client*, or the current default Dask client when None."""
    if client is not None:
        return client
    return default_client()
def parse_host_port(address):
    """
    Given a string address with host/port, build a tuple(host, port)

    :param address: string address to parse
    :return: tuple(host, port)
    """
    # Drop any protocol prefix such as "tcp://"
    if "://" in address:
        address = address.rsplit("://", 1)[1]
    host, port_str = address.split(":")
    return host, int(port_str)
def build_host_dict(workers):
    """
    Builds a dict to map the set of ports running on each host to
    the hostname.

    :param workers: list(str) list of worker addresses
    :return: dict(host, set(port))
    """
    hosts_dict = {}
    # setdefault replaces the intermediate set + manual membership check;
    # the per-host port sets deduplicate repeated addresses just as the
    # original set(map(...)) pass did.
    for address in workers:
        host, port = parse_host_port(address)
        hosts_dict.setdefault(host, set()).add(port)
    return hosts_dict
def persist_across_workers(client, objects, workers=None):
    """
    Calls persist on the 'objects' ensuring they are spread
    across the workers on 'workers'.

    Parameters
    ----------
    client : dask.distributed.Client
    objects : list
        Dask distributed objects to be persisted
    workers : list or None
        List of workers across which to persist objects
        If None, then all workers attached to 'client' will be used

    Returns
    -------
    The persisted collections, as returned by ``client.persist``.
    """
    if workers is None:
        workers = client.has_what().keys()  # Default to all workers

    # dask >= 2020.12.0 pins placement through the annotation machinery;
    # older versions accept a per-object ``workers=`` mapping instead.
    if check_min_dask_version("2020.12.0"):
        with dask.annotate(workers=set(workers)):
            return client.persist(objects)
    else:
        return client.persist(objects, workers={o: workers for o in objects})
def raise_exception_from_futures(futures):
    """Raises a RuntimeError if any of the futures indicates an exception"""
    errors = [fut.exception() for fut in futures if fut.exception()]
    if errors:
        failed = ", ".join(str(err) for err in errors)
        raise RuntimeError(
            "%d of %d worker jobs failed: %s"
            % (len(errors), len(futures), failed)
        )
def wait_and_raise_from_futures(futures):
    """
    Returns the collected futures after all the futures
    have finished and do not indicate any exceptions.

    Raises
    ------
    RuntimeError
        When any future carries an exception (surfaced via
        ``raise_exception_from_futures``).
    """
    # Block until every future finishes (successfully or not)...
    wait(futures)
    # ...then convert any worker-side failures into one RuntimeError.
    raise_exception_from_futures(futures)
    return futures
def raise_mg_import_exception():
    """
    Raise an error indicating cuML was built without multi-GPU support.

    Consistency fix: raise RuntimeError (as the ``mnmg_import`` decorator in
    ``cuml.dask.common.base`` does for the identical message) instead of a
    bare Exception; RuntimeError subclasses Exception, so existing
    ``except Exception`` callers are unaffected.
    """
    raise RuntimeError(
        "cuML has not been built with multiGPU support "
        "enabled. Build with the --multigpu flag to"
        " enable multiGPU support."
    )
class MultiHolderLock:
    """
    A per-process synchronization lock allowing multiple concurrent holders
    at any one time. This is used in situations where resources might be
    limited and it's important that the number of concurrent users of
    the resources are constrained.

    This lock is serializable, but relies on a Python threading.Lock
    underneath to properly synchronize internal state across threads.
    Note that this lock is only intended to be used per-process and
    the underlying threading.Lock will not be serialized.
    """

    def __init__(self, n):
        """
        Initialize the lock
        :param n : integer the maximum number of concurrent holders
        """
        self.n = n
        self.current_tasks = 0
        self.lock = Lock()

    def _acquire(self, blocking=True, timeout=10):
        """
        Make a single (non-looping) attempt to claim a holder slot.

        :return : True if a slot was claimed, False otherwise.
        """
        lock_acquired = False

        inner_lock_acquired = self.lock.acquire(blocking, timeout)
        if inner_lock_acquired:
            # Bug fix: compare against self.n (not self.n - 1) so the lock
            # admits exactly n concurrent holders as documented; the old
            # comparison admitted only n - 1 and deadlocked for n == 1.
            if self.current_tasks < self.n:
                self.current_tasks += 1
                lock_acquired = True
            # Bug fix: only release the inner lock when it was actually
            # acquired — releasing an unheld threading.Lock raises.
            self.lock.release()

        return lock_acquired

    def acquire(self, blocking=True, timeout=10):
        """
        Acquire the lock.
        :param blocking : bool will block if True
        :param timeout : a timeout (in seconds) to wait for the lock
                         before failing.
        :return : True if lock was acquired successfully, False otherwise
        """
        t = time.time()
        lock_acquired = self._acquire(blocking, timeout)

        while blocking and not lock_acquired:
            if time.time() - t > timeout:
                raise TimeoutError()

            # Bug fix: retry the single-shot _acquire. The original
            # recursed into acquire(), which restarted the timeout clock
            # on every retry and could block far beyond `timeout`.
            lock_acquired = self._acquire(blocking, timeout)
            # Small random backoff to reduce contention between waiters.
            time.sleep(random.uniform(0, 0.01))

        return lock_acquired

    def __getstate__(self):
        d = self.__dict__.copy()
        # The underlying threading.Lock is process-local and unpicklable.
        if "lock" in d:
            del d["lock"]
        return d

    def __setstate__(self, d):
        d["lock"] = Lock()
        self.__dict__ = d

    def release(self, blocking=True, timeout=10):
        """
        Release a hold on the lock to allow another holder. Note that
        while Python's threading.Lock does not have options for blocking
        or timeout in release(), this lock uses a threading.Lock
        internally and so will need to acquire that lock in order
        to properly synchronize the underlying state.
        :param blocking : bool will bock if True
        :param timeout : a timeout (in seconds) to wait for the lock
                         before failing.
        :return : True if lock was released successfully, False otherwise.
        """
        if self.current_tasks == 0:
            raise InvalidStateError(
                "Cannot release lock when no concurrent tasks are executing"
            )

        lock_acquired = self.lock.acquire(blocking, timeout)
        if lock_acquired:
            self.current_tasks -= 1
            self.lock.release()
        return lock_acquired
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/common/func.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
import dask
from cuml.dask.common.utils import get_client
from cuml.dask.common.part_utils import hosts_to_parts
from cuml.dask.common.part_utils import workers_to_parts
from dask.delayed import Delayed
from dask.distributed import wait
from toolz import first
def reduce(futures, func, client=None):
    """
    Performs a cluster-wide reduction by first
    running function on worker->host->cluster. This
    function takes locality into account by first
    reducing partitions local to each worker before
    reducing partitions on each host and, finally,
    reducing the partitions across the cluster into
    a single reduced partition.

    Parameters
    ----------
    futures : array-like of dask.Future futures to reduce
    func : Python reduction function accepting list
           of objects to reduce and returning a single
           reduced object.
    client : dask.distributed.Client to use for scheduling

    Returns
    -------
    output : dask.Future a future containing the final reduce
        object.
    """
    client = get_client(client)

    # Make sure input futures have been assigned to worker(s)
    wait(futures)

    # Two locality-aware passes: group parts per worker first, then per
    # host, reducing each group locally before the final merge.
    for local_reduction_func in [workers_to_parts, hosts_to_parts]:
        who_has = client.who_has(futures)

        # Pair each future with (one of) the worker address(es) holding it.
        workers = [(first(who_has[m.key]), m) for m in futures]
        worker_parts = local_reduction_func(workers)

        # Short circuit when all parts already have preferred
        # locality
        if len(worker_parts) > 1:
            # Local tree reduction for scalability
            futures = client.compute(
                [tree_reduce(p, func) for w, p in worker_parts.items()]
            )
            wait(futures)

    # Merge across workers
    ret = client.compute(tree_reduce(futures, func))
    wait(ret)

    return ret
def tree_reduce(objs, func=sum):
    """
    Performs a binary tree reduce on an associative
    and commutative function in parallel across
    Dask workers. Since this supports dask.delayed
    objects, which have yet been scheduled on workers,
    it does not take locality into account. As a result,
    any local reductions should be performed before
    this function is called.

    Parameters
    ----------

    func : Python function or dask.delayed function
        Function to use for reduction. The reduction function
        acceps a list of objects to reduce as an argument and
        produces a single reduced object
    objs : array-like of dask.delayed or future
           objects to reduce.

    Returns
    -------

    reduced_result : dask.delayed or future
        if func is delayed, the result will be delayed
        if func is a future, the result will be a future
    """
    # Wrap a plain callable so each pairwise reduction becomes a task.
    func = dask.delayed(func) if not isinstance(func, Delayed) else func

    # Repeatedly reduce adjacent pairs until a single object remains.
    while len(objs) > 1:
        new_objs = []
        n_objs = len(objs)
        for i in range(0, n_objs, 2):
            # objs[i:i+2] is a 1- or 2-element list passed to func.
            inputs = dask.delayed(objs[i : i + 2], pure=False)
            obj = func(inputs)
            new_objs.append(obj)
        # NOTE(review): `wait` is a distributed-futures primitive, while
        # `new_objs` are Delayed objects when callers pass delayed inputs —
        # confirm this wait is effective on the delayed path.
        wait(new_objs)
        objs = new_objs
        logger.debug(str(objs))

    return first(objs)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/cluster/dbscan.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.memory_utils import with_cupy_rmm
from cuml.dask.common.utils import wait_and_raise_from_futures
from raft_dask.common.comms import get_raft_comm_state
from raft_dask.common.comms import Comms
from cuml.dask.common.base import mnmg_import
from dask.distributed import get_worker
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import BaseEstimator
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
class DBSCAN(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
    """
    Multi-Node Multi-GPU implementation of DBSCAN.

    The whole dataset is copied to all the workers but the work is then
    divided by giving "ownership" of a subset to each worker: each worker
    computes a clustering by considering the relationships between those
    points and the rest of the dataset, and partial results are merged at
    the end to obtain the final clustering.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client to use
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    min_samples : int (default = 5)
        The number of samples in a neighborhood such that this group can be
        considered as an important core point (including the point itself).
    max_mbytes_per_batch : (optional) int64
        Calculate batch size using no more than this number of megabytes for
        the pairwise distance computation. This enables the trade-off between
        runtime and memory usage for making the N^2 pairwise distance
        computations more tractable for large numbers of samples.
        If you are experiencing out of memory errors when running DBSCAN, you
        can set this value based on the memory size of your device.
        Note: this option does not set the maximum total memory used in the
        DBSCAN computation and so this value will not be able to be set to
        the total memory available on the device.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    calc_core_sample_indices : (optional) boolean (default = True)
        Indicates whether the indices of the core samples should be
        calculated. If False, the attribute `core_sample_indices_` will not
        be computed, which avoids unnecessary kernel launches.

    Notes
    -----
    For additional docs, see the documentation of the single-GPU DBSCAN model

    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        # Remaining kwargs (eps, min_samples, ...) are forwarded to the
        # single-GPU DBSCANMG built on each worker.
        super().__init__(client=client, verbose=verbose, **kwargs)

    @staticmethod
    @mnmg_import
    def _func_fit(out_dtype):
        # Returns the closure each worker runs: construct a single-GPU
        # DBSCANMG bound to the worker's raft comms handle and fit it.
        def _func(sessionId, data, **kwargs):
            from cuml.cluster.dbscan_mg import DBSCANMG as cumlDBSCAN

            handle = get_raft_comm_state(sessionId, get_worker())["handle"]
            return cumlDBSCAN(handle=handle, **kwargs).fit(
                data, out_dtype=out_dtype
            )

        return _func

    @with_cupy_rmm
    def fit(self, X, out_dtype="int32"):
        """
        Fit a multi-node multi-GPU DBSCAN model

        Parameters
        ----------
        X : array-like (device or host)
            Dense matrix containing floats or doubles.
            Acceptable formats: CUDA array interface compliant objects like
            CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
            DataFrame/Series.

        out_dtype: dtype Determines the precision of the output labels array.
            default: "int32". Valid values are { "int32", np.int32,
            "int64", np.int64}.

        Returns
        -------
        self
            The fitted estimator.
        """
        if out_dtype not in ["int32", np.int32, "int64", np.int64]:
            raise ValueError(
                "Invalid value for out_dtype. "
                "Valid values are {'int32', 'int64', "
                "np.int32, np.int64}"
            )

        # Each worker receives the full dataset (see class docstring).
        data = self.client.scatter(X, broadcast=True)

        comms = Comms(comms_p2p=True)
        comms.init()

        # One cooperative fit task per worker in the comms session.
        dbscan_fit = [
            self.client.submit(
                DBSCAN._func_fit(out_dtype),
                comms.sessionId,
                data,
                **self.kwargs,
                workers=[worker],
                pure=False,
            )
            for worker in comms.worker_addresses
        ]

        wait_and_raise_from_futures(dbscan_fit)

        comms.destroy()

        # After the merge every worker holds an equivalent model; keep the
        # first replica as the internal model.
        self._set_internal_model(dbscan_fit[0])

        return self

    def fit_predict(self, X, out_dtype="int32"):
        """
        Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : array-like (device or host)
            Dense matrix containing floats or doubles.
            Acceptable formats: CUDA array interface compliant objects like
            CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
            DataFrame/Series.

        out_dtype: dtype Determines the precision of the output labels array.
            default: "int32". Valid values are { "int32", np.int32,
            "int64", np.int64}.

        Returns
        -------
        labels: array-like (device or host)
            Integer array of labels
        """
        self.fit(X, out_dtype)
        return self.get_combined_model().labels_

    def get_param_names(self):
        # The constructor kwargs are exactly the parameters forwarded to
        # the worker-local single-GPU model.
        return list(self.kwargs.keys())
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/cluster/__init__.py | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.cluster.dbscan import DBSCAN
from cuml.dask.cluster.kmeans import KMeans
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/cluster/kmeans.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.memory_utils import with_cupy_rmm
from cuml.dask.common.utils import wait_and_raise_from_futures
from raft_dask.common.comms import get_raft_comm_state
from raft_dask.common.comms import Comms
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.common.input_utils import concatenate
from cuml.dask.common.base import mnmg_import
from dask.distributed import get_worker
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import BaseEstimator
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
class KMeans(BaseEstimator, DelayedPredictionMixin, DelayedTransformMixin):
"""
Multi-Node Multi-GPU implementation of KMeans.
This version minimizes data transfer by sharing only
the centroids between workers in each iteration.
Predictions are done embarrassingly parallel, using cuML's
single-GPU version.
For more information on this implementation, refer to the
documentation for single-GPU K-Means.
Parameters
----------
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
n_clusters : int (default = 8)
The number of centroids or clusters you want.
max_iter : int (default = 300)
The more iterations of EM, the more accurate, but slower.
tol : float (default = 1e-4)
Stopping criterion when centroid means do not change much.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
random_state : int (default = 1)
If you want results to be the same when you restart Python,
select a state.
init : {'scalable-kmeans++', 'k-means||' , 'random' or an ndarray} \
(default = 'scalable-k-means++')
'scalable-k-means++' or 'k-means||': Uses fast and stable scalable
kmeans++ initialization.
'random': Choose 'n_cluster' observations (rows) at random
from data for the initial centroids. If an ndarray is passed,
it should be of shape (n_clusters, n_features) and gives the
initial centers.
oversampling_factor : int (default = 2)
The amount of points to sample in scalable k-means++ initialization for
potential centroids. Increasing this value can lead to better initial
centroids at the cost of memory. The total number of centroids sampled
in scalable k-means++ is oversampling_factor * n_clusters * 8.
max_samples_per_batch : int (default = 32768)
The number of data samples to use for batches of the pairwise distance
computation. This computation is done throughout both fit predict.
The default should suit most cases. The total number of elements in the
batched pairwise distance computation is max_samples_per_batch
* n_clusters. It might become necessary to lower this number when
n_clusters becomes prohibitively large.
Attributes
----------
cluster_centers_ : cuDF DataFrame or CuPy ndarray
The coordinates of the final clusters. This represents of "mean" of
each data cluster.
"""
    def __init__(self, *, client=None, verbose=False, **kwargs):
        """Create a multi-node multi-GPU KMeans estimator.

        Parameters
        ----------
        client : dask.distributed.Client, optional
            Dask client to use. If None, the base class default is used.
        verbose : int or bool (default = False)
            Verbosity level.
        **kwargs : dict
            Remaining keyword arguments are stored and forwarded to the
            per-worker single-GPU KMeans instances at fit time.
        """
        super().__init__(client=client, verbose=verbose, **kwargs)
@staticmethod
@mnmg_import
def _func_fit(sessionId, objs, datatype, has_weights, **kwargs):
from cuml.cluster.kmeans_mg import KMeansMG as cumlKMeans
handle = get_raft_comm_state(sessionId, get_worker())["handle"]
if not has_weights:
inp_data = concatenate(objs)
inp_weights = None
else:
inp_data = concatenate([X for X, weights in objs])
inp_weights = concatenate([weights for X, weights in objs])
return cumlKMeans(handle=handle, output_type=datatype, **kwargs).fit(
inp_data, sample_weight=inp_weights
)
@staticmethod
def _score(model, data, sample_weight=None):
ret = model.score(data, sample_weight=sample_weight)
return ret
@staticmethod
def _check_normalize_sample_weight(sample_weight):
if sample_weight is not None:
n_samples = len(sample_weight)
scale = n_samples / sample_weight.sum()
sample_weight *= scale
return sample_weight
    @with_cupy_rmm
    def fit(self, X, sample_weight=None):
        """
        Fit a multi-node multi-GPU KMeans model

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Training data to cluster.
        sample_weight : Dask cuDF DataFrame or CuPy backed Dask Array \
                shape = (n_samples,), default=None # noqa
            The weights for each observation in X. If None, all observations
            are assigned equal weight.
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy

        Returns
        -------
        self : the fitted estimator
        """
        sample_weight = self._check_normalize_sample_weight(sample_weight)
        # Pair each partition with its weights so both land on the same worker
        inputs = X if sample_weight is None else (X, sample_weight)
        data = DistributedDataHandler.create(inputs, client=self.client)
        # Remember the input collection type so outputs can mirror it
        self.datatype = data.datatype
        # This needs to happen on the scheduler
        comms = Comms(comms_p2p=False, client=self.client)
        comms.init(workers=data.workers)
        # One fit task per worker: each trains on its local partitions while
        # synchronizing with its peers through the raft comms session.
        kmeans_fit = [
            self.client.submit(
                KMeans._func_fit,
                comms.sessionId,
                wf[1],
                self.datatype,
                data.multiple,
                **self.kwargs,
                workers=[wf[0]],
                pure=False,
            )
            for idx, wf in enumerate(data.worker_to_parts.items())
        ]
        wait_and_raise_from_futures(kmeans_fit)
        comms.destroy()
        # Workers converge to the same centroids; keep worker 0's model.
        self._set_internal_model(kmeans_fit[0])
        return self
def fit_predict(self, X, sample_weight=None, delayed=True):
"""
Compute cluster centers and predict cluster index for each sample.
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
return self.fit(X, sample_weight=sample_weight).predict(
X, sample_weight=sample_weight, delayed=delayed
)
def predict(self, X, sample_weight=None, delayed=True):
"""
Predict labels for the input
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to do a lazy prediction (and return Delayed objects) or an
eagerly executed one.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing predictions
"""
sample_weight = self._check_normalize_sample_weight(sample_weight)
return self._predict(
X,
delayed=delayed,
sample_weight=sample_weight,
normalize_weights=False,
)
def fit_transform(self, X, sample_weight=None, delayed=True):
"""
Calls fit followed by transform using a distributed KMeans model
Parameters
----------
X : Dask cuDF DataFrame or CuPy backed Dask Array
Data to predict
delayed : bool (default = True)
Whether to execute as a delayed task or eager.
Returns
-------
result: Dask cuDF DataFrame or CuPy backed Dask Array
Distributed object containing the transformed data
"""
return self.fit(X, sample_weight=sample_weight).transform(
X, delayed=delayed
)
    def transform(self, X, delayed=True):
        """
        Transforms the input into the learned centroid space

        Parameters
        ----------
        X : Dask cuDF DataFrame or CuPy backed Dask Array
            Data to predict
        delayed : bool (default = True)
            Whether to execute as a delayed task or eager.

        Returns
        -------
        result: Dask cuDF DataFrame or CuPy backed Dask Array
            Distributed object containing the transformed data
        """
        # n_dims=2: each input row maps to a row of per-centroid values
        return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm
def score(self, X, sample_weight=None):
"""
Computes the inertia score for the trained KMeans centroids.
Parameters
----------
X : dask_cudf.Dataframe
Dataframe to compute score
Returns
-------
Inertial score
"""
sample_weight = self._check_normalize_sample_weight(sample_weight)
scores = self._run_parallel_func(
KMeans._score,
X,
sample_weight=sample_weight,
n_dims=1,
delayed=False,
output_futures=True,
)
return -1 * cp.sum(
cp.asarray(self.client.compute(scores, sync=True)) * -1.0
)
def get_param_names(self):
return list(self.kwargs.keys())
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/manifold/umap.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator, DelayedTransformMixin
from cuml.dask.common.input_utils import DistributedDataHandler
class UMAP(BaseEstimator, DelayedTransformMixin):
    """
    Uniform Manifold Approximation and Projection

    Finds a low dimensional embedding of the data that approximates
    an underlying manifold.

    Adapted from https://github.com/lmcinnes/umap/blob/master/umap/umap_.py

    Examples
    --------
    .. code-block:: python

        >>> from dask_cuda import LocalCUDACluster
        >>> from dask.distributed import Client
        >>> import dask.array as da
        >>> from cuml.datasets import make_blobs
        >>> from cuml.manifold import UMAP
        >>> from cuml.dask.manifold import UMAP as MNMG_UMAP
        >>> import numpy as np
        >>> cluster = LocalCUDACluster(threads_per_worker=1)
        >>> client = Client(cluster)
        >>> X, y = make_blobs(1000, 10, centers=42, cluster_std=0.1,
        ...                   dtype=np.float32, random_state=10)
        >>> local_model = UMAP(random_state=10)
        >>> selection = np.random.RandomState(10).choice(1000, 100)
        >>> X_train = X[selection]
        >>> y_train = y[selection]
        >>> local_model.fit(X_train, y=y_train)
        UMAP()
        >>> distributed_model = MNMG_UMAP(model=local_model)
        >>> distributed_X = da.from_array(X, chunks=(500, -1))
        >>> embedding = distributed_model.transform(distributed_X)
        >>> result = embedding.compute()
        >>> print(result) # doctest: +SKIP
        [[  4.1684933    4.1890593 ]
        [  5.0110254   -5.2143383 ]
        [  1.7776365  -17.665699  ]
        ...
        [ -6.6378727   -0.15353012]
        [ -3.1891193   -0.83906937]
        [ -0.5042019    2.1454725 ]]
        >>> client.close()
        >>> cluster.close()

    Notes
    -----
    This module is heavily based on Leland McInnes' reference UMAP package
    [1]_.

    However, there are a number of differences and features that are
    not yet implemented in `cuml.umap`:

    * Using a non-Euclidean distance metric (support for a fixed set
      of non-Euclidean metrics is planned for an upcoming release).
    * Using a pre-computed pairwise distance matrix (under consideration
      for future releases)
    * Manual initialization of initial embedding positions

    In addition to these missing features, you should expect to see
    the final embeddings differing between `cuml.umap` and the reference
    UMAP. In particular, the reference UMAP uses an approximate kNN
    algorithm for large data sizes while cuml.umap always uses exact
    kNN.

    **Known issue:** If a UMAP model has not yet been fit, it cannot be pickled

    References
    ----------
    .. [1] `Leland McInnes, John Healy, James Melville
       UMAP: Uniform Manifold Approximation and Projection for Dimension
       Reduction. <https://arxiv.org/abs/1802.03426>`_
    """

    def __init__(self, *, model, client=None, **kwargs):
        """Wrap an already-fitted single-GPU UMAP ``model`` so it can be
        used to transform distributed data."""
        super().__init__(client=client, **kwargs)
        # Broadcast the fitted local model to the workers via the base class
        self._set_internal_model(model)

    def transform(self, X, convert_dtype=True):
        """
        Transform X into the existing embedded space and return that
        transformed output.

        Please refer to the reference UMAP implementation for information
        on the differences between fit_transform() and running fit()
        transform().

        Specifically, the transform() function is stochastic:
        https://github.com/lmcinnes/umap/issues/158

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            New data to be transformed.
            Acceptable formats: dask cuDF, dask CuPy/NumPy/Numba Array

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the new data in low-dimensional space.
        """
        data = DistributedDataHandler.create(data=X, client=self.client)
        # Record the input collection type so the output mirrors it
        self.datatype = data.datatype
        return self._transform(X, convert_dtype=convert_dtype)
| 0 |
rapidsai_public_repos/cuml/python/cuml/dask | rapidsai_public_repos/cuml/python/cuml/dask/manifold/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from cuml.internals.import_utils import has_dask
if has_dask():
    from cuml.dask.manifold.umap import UMAP
else:
    # NOTE(review): when dask is absent, ``UMAP`` is never bound in this
    # module even though it stays listed in ``__all__`` below, so
    # ``from cuml.dask.manifold import UMAP`` will still raise — confirm
    # this (warn-then-fail-on-use) is the intended behavior.
    warnings.warn(
        "Dask not found. All Dask-based multi-GPU operation is disabled."
    )
__all__ = ["UMAP"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/global_settings.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import threading
from cuml.internals.available_devices import is_cuda_available
from cuml.internals.device_type import DeviceType
from cuml.internals.mem_type import MemoryType
from cuml.internals.safe_imports import cpu_only_import, gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
class _GlobalSettingsData(threading.local):
    """Thread-local storage class with per-thread initialization of default
    values for global settings"""

    def __init__(self):
        super().__init__()
        # With RAPIDS_NO_INITIALIZE set, skip any CUDA probing at import
        # time and leave device/memory types to be resolved lazily.
        if "RAPIDS_NO_INITIALIZE" in os.environ:
            self.shared_state = {"_output_type": None, "root_cm": None}
        else:
            on_gpu = is_cuda_available()
            dev = DeviceType.device if on_gpu else DeviceType.host
            mem = MemoryType.device if on_gpu else MemoryType.host
            self.shared_state = {
                "_output_type": None,
                "_device_type": dev,
                "_memory_type": mem,
                "root_cm": None,
            }
# Module-level singleton backing every GlobalSettings instance in a thread.
_global_settings_data = _GlobalSettingsData()
class GlobalSettings:
    """A thread-local borg class for tracking cuML global settings

    Because cuML makes use of internal context managers which try to minimize
    the number of conversions among various array types during internal calls,
    it is necessary to track certain settings globally. For instance, users can
    set a global output type, and cuML will ensure that the output is converted
    to the requested type *only* when a given API call returns to an external
    caller. Tracking when this happens requires globally-managed state.

    This class serves as a thread-local data store for any required global
    state. It is a thread-local borg, so updating an attribute on any instance
    of this class will update that attribute on *all* instances in the same
    thread. This additional layer of indirection on top of an ordinary
    `threading.local` object is to facilitate debugging of global settings
    changes. New global setting attributes can be added as properties to this
    object, and breakpoints or debugging statements can be added to a
    property's method to track when and how those properties change.

    In general, cuML developers should simply access `cuml.global_settings`
    rather than re-instantiating separate instances of this class in order to
    avoid the overhead of re-instantiation, but using a separate instance
    should not cause any logical errors.
    """

    def __init__(self):
        # Borg pattern: every instance shares the same thread-local dict.
        self.__dict__ = _global_settings_data.shared_state

    @property
    def device_type(self):
        # Lazy fallback: under RAPIDS_NO_INITIALIZE the shared state has no
        # "_device_type" key, so the first access probes CUDA availability.
        try:
            return self._device_type
        except AttributeError:
            if is_cuda_available():
                self.device_type = DeviceType.device
            else:
                self.device_type = DeviceType.host
            return self._device_type

    @device_type.setter
    def device_type(self, value):
        self._device_type = value
        # Only change the memory type if current value is incompatible with new
        # device
        if not self._device_type.is_compatible(self.memory_type):
            self.memory_type = self._device_type.default_memory_type

    @property
    def memory_type(self):
        # Same lazy fallback as device_type (see above).
        try:
            return self._memory_type
        except AttributeError:
            if is_cuda_available():
                self.memory_type = MemoryType.device
            else:
                self.memory_type = MemoryType.host
            return self._memory_type

    @memory_type.setter
    def memory_type(self, value):
        self._memory_type = value

    @property
    def output_type(self):
        """The globally-defined default output type for cuML API calls"""
        return self._output_type  # pylint: disable=no-member

    @output_type.setter
    def output_type(self, value):
        self._output_type = value

    @property
    def xpy(self):
        # Array namespace (numpy or cupy) matching the current memory type.
        return self.memory_type.xpy
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/device_type.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum, auto
from cuml.internals.mem_type import MemoryType
class DeviceTypeError(Exception):
    """An exception thrown to indicate an invalid device-type selection"""
class DeviceType(Enum):
    """Enumeration of the compute targets cuML can execute on."""

    host = auto()
    device = auto()

    @classmethod
    def from_str(cls, device_type):
        """Coerce a string ("cpu"/"host"/"gpu"/"device") or an existing
        DeviceType member to the corresponding member.

        Raises
        ------
        ValueError
            If the value matches neither device type.
        """
        if isinstance(device_type, str):
            device_type = device_type.lower()
        if device_type in ("cpu", "host", DeviceType.host):
            return cls.host
        if device_type in ("gpu", "device", DeviceType.device):
            return cls.device
        raise ValueError(
            'Parameter device_type must be one of "cpu" or "gpu"'
        )

    def is_compatible(self, mem_type: MemoryType) -> bool:
        """Return True when memory of *mem_type* is usable on this device."""
        if self is DeviceType.device:
            return mem_type.is_device_accessible
        return mem_type.is_host_accessible

    @property
    def default_memory_type(self):
        """The memory type naturally paired with this device type."""
        if self is DeviceType.device:
            return MemoryType.device
        return MemoryType.host
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/safe_imports.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import traceback
from contextlib import contextmanager
from cuml.internals.device_support import CPU_ENABLED, GPU_ENABLED
from cuml.internals import logger
class UnavailableError(Exception):
    """Error thrown if a symbol is unavailable due to an issue importing it

    The failed import itself is tolerated; this error surfaces only when the
    resulting placeholder symbol is actually used.
    """
def return_false(*args, **kwargs):
    """Placeholder callable that ignores all arguments and returns False."""
    return False
@contextmanager
def null_decorator(*args, **kwargs):
    """No-op stand-in for an unavailable decorator (used e.g. as the ``alt``
    for ``nvtx.annotate``).

    Called directly on a single callable (bare ``@null_decorator``) it
    returns that callable unchanged; otherwise (``@null_decorator(...)``)
    it returns an identity decorator.

    NOTE(review): this function ``return``s instead of ``yield``s, so the
    ``@contextmanager`` wrapper means a *call* actually produces a
    ``_GeneratorContextManager``, not the value computed below — confirm
    that both the decorator and context-manager usage paths behave as
    intended.
    """
    if len(kwargs) == 0 and len(args) == 1 and callable(args[0]):
        return args[0]
    else:

        def inner(func):
            return func

        return inner
class UnavailableMeta(type):
    """A metaclass for generating placeholder objects for unavailable symbols

    This metaclass allows errors to be deferred from import time to the time
    that a symbol is actually used in order to streamline the usage of optional
    dependencies. This is particularly useful for attempted imports of GPU-only
    modules which will only be invoked if GPU-only functionality is
    specifically used.

    If an attempt to import a symbol fails, this metaclass is used to generate
    a class which stands in for that symbol. Any attempt to call the symbol
    (instantiate the class) or access its attributes will throw an
    UnavailableError exception. Furthermore, this class can be used in
    e.g. isinstance checks, since it will (correctly) fail to match any
    instance it is compared against.

    In addition to calls and attribute access, a number of dunder methods are
    implemented so that other common usages of imported symbols (e.g.
    arithmetic) throw an UnavailableError, but this is not guaranteed for
    all possible uses. In such cases, other exception types (typically
    TypeErrors) will be thrown instead.
    """

    def __new__(meta, name, bases, dct):
        # Default the error message and tag the generated class name so
        # placeholders are easy to recognize in reprs and debuggers.
        if dct.get("_msg", None) is None:
            dct["_msg"] = f"{name} could not be imported"
        name = f"MISSING{name}"
        return super(UnavailableMeta, meta).__new__(meta, name, bases, dct)

    def __call__(cls, *args, **kwargs):
        raise UnavailableError(cls._msg)

    def __getattr__(cls, name):
        raise UnavailableError(cls._msg)

    def __eq__(cls, other):
        raise UnavailableError(cls._msg)

    def __lt__(cls, other):
        raise UnavailableError(cls._msg)

    def __gt__(cls, other):
        raise UnavailableError(cls._msg)

    def __ne__(cls, other):
        raise UnavailableError(cls._msg)

    def __abs__(cls):
        # FIX: __abs__ is a unary operator; the previous (cls, other)
        # signature made abs(placeholder) raise TypeError instead of
        # UnavailableError.
        raise UnavailableError(cls._msg)

    def __add__(cls, other):
        raise UnavailableError(cls._msg)

    def __radd__(cls, other):
        raise UnavailableError(cls._msg)

    def __iadd__(cls, other):
        raise UnavailableError(cls._msg)

    def __floordiv__(cls, other):
        raise UnavailableError(cls._msg)

    def __rfloordiv__(cls, other):
        raise UnavailableError(cls._msg)

    def __ifloordiv__(cls, other):
        raise UnavailableError(cls._msg)

    def __lshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __rlshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __mul__(cls, other):
        raise UnavailableError(cls._msg)

    def __rmul__(cls, other):
        raise UnavailableError(cls._msg)

    def __imul__(cls, other):
        raise UnavailableError(cls._msg)

    def __ilshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __pow__(cls, other):
        raise UnavailableError(cls._msg)

    def __rpow__(cls, other):
        raise UnavailableError(cls._msg)

    def __ipow__(cls, other):
        raise UnavailableError(cls._msg)

    def __rshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __rrshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __irshift__(cls, other):
        raise UnavailableError(cls._msg)

    def __sub__(cls, other):
        raise UnavailableError(cls._msg)

    def __rsub__(cls, other):
        raise UnavailableError(cls._msg)

    def __isub__(cls, other):
        raise UnavailableError(cls._msg)

    def __truediv__(cls, other):
        raise UnavailableError(cls._msg)

    def __rtruediv__(cls, other):
        raise UnavailableError(cls._msg)

    def __itruediv__(cls, other):
        raise UnavailableError(cls._msg)

    def __divmod__(cls, other):
        raise UnavailableError(cls._msg)

    def __rdivmod__(cls, other):
        raise UnavailableError(cls._msg)

    def __neg__(cls):
        raise UnavailableError(cls._msg)

    def __invert__(cls):
        raise UnavailableError(cls._msg)

    def __hash__(cls):
        raise UnavailableError(cls._msg)

    def __index__(cls):
        raise UnavailableError(cls._msg)

    def __iter__(cls):
        raise UnavailableError(cls._msg)

    def __delitem__(cls, name):
        raise UnavailableError(cls._msg)

    def __setitem__(cls, name, value):
        raise UnavailableError(cls._msg)

    def __enter__(cls, *args, **kwargs):
        raise UnavailableError(cls._msg)

    def __get__(cls, *args, **kwargs):
        raise UnavailableError(cls._msg)

    def __delete__(cls, *args, **kwargs):
        raise UnavailableError(cls._msg)

    def __len__(cls):
        raise UnavailableError(cls._msg)
def is_unavailable(obj):
    """Helper to check if given symbol is actually a placeholder

    True only for classes generated directly by ``UnavailableMeta`` (an
    identity check on the metaclass); False for everything else.
    """
    return type(obj) is UnavailableMeta
class UnavailableNullContext:
    """A placeholder class for unavailable context managers

    Entering the context succeeds, but the value it yields is a placeholder
    that raises UnavailableError on any use.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        placeholder_msg = (
            "Attempted to make use of placeholder context return value."
        )
        return UnavailableMeta(
            "MissingContextValue", (), {"_msg": placeholder_msg}
        )

    def __exit__(self, *args, **kwargs):
        pass
def safe_import(module, *, msg=None, alt=None):
    """Import a module that may not be installed, without raising.

    On success the module is returned. On failure the exception is logged
    and either ``alt`` (if given) or a lazily-erroring placeholder class is
    returned instead.

    Parameters
    ----------
    module: str
        The name of the module to import.
    msg: str or None
        An optional error message to be displayed if this module is used
        after a failed import.
    alt: object
        An optional module to be used in place of the given module if it
        fails to import

    Returns
    -------
    object
        The imported module, the given alternate, or a class derived from
        UnavailableMeta.
    """
    try:
        return importlib.import_module(module)
    except ImportError:
        logger.debug(
            f"Import of {module} failed with: {traceback.format_exc()}"
        )
    except Exception:
        logger.info(
            f"Import of {module} failed with: {traceback.format_exc()}"
        )
    if msg is None:
        msg = f"{module} could not be imported"
    if alt is not None:
        return alt
    return UnavailableMeta(module.rsplit(".")[-1], (), {"_msg": msg})
def safe_import_from(module, symbol, *, msg=None, alt=None):
    """Import a symbol from a module that may not be installed, without
    raising.

    On success the symbol is returned. On failure (missing module or missing
    attribute) the exception is logged and either ``alt`` (if given) or a
    lazily-erroring placeholder class is returned instead.

    Parameters
    ----------
    module: str
        The name of the module in which the symbol is defined.
    symbol: str
        The name of the symbol to import.
    msg: str or None
        An optional error message to be displayed if this symbol is used
        after a failed import.
    alt: object
        An optional object to be used in place of the given symbol if it fails
        to import

    Returns
    -------
    object
        The imported symbol, the given alternate, or a class derived from
        UnavailableMeta.
    """
    try:
        owning_module = importlib.import_module(module)
        return getattr(owning_module, symbol)
    except ImportError:
        logger.debug(
            f"Import of {module} failed with: {traceback.format_exc()}"
        )
    except AttributeError:
        logger.debug(
            f"Import of {symbol} from {module} failed with: "
            f"{traceback.format_exc()}"
        )
    except Exception:
        logger.info(
            f"Import of {module}.{symbol} failed with: "
            f"{traceback.format_exc()}"
        )
    if msg is None:
        msg = f"{module}.{symbol} could not be imported"
    if alt is not None:
        return alt
    return UnavailableMeta(symbol, (), {"_msg": msg})
def gpu_only_import(module, *, alt=None):
    """Import a module that is required only in GPU-enabled installs.

    On a GPU-enabled build the import happens normally and any ImportError
    propagates, guaranteeing the correct error for a truly missing
    dependency. On a CPU-only build a failed import is tolerated: the given
    ``alt`` (if any) or a lazily-erroring placeholder is returned instead.

    Parameters
    ----------
    module: str
        The name of the module to import.
    alt: object
        An optional module to be used in place of the given module if it
        fails to import in a non-GPU-enabled install

    Returns
    -------
    object
        The imported module, the given alternate, or a class derived from
        UnavailableMeta.
    """
    if not GPU_ENABLED:
        return safe_import(
            module,
            msg=f"{module} is not installed in non GPU-enabled installations",
            alt=alt,
        )
    return importlib.import_module(module)
def gpu_only_import_from(module, symbol, *, alt=None):
    """Import a symbol that is required only in GPU-enabled installs.

    On a GPU-enabled build the import happens normally and any error
    propagates. On a CPU-only build a failed import is tolerated: the given
    ``alt`` (if any) or a lazily-erroring placeholder is returned instead.

    Parameters
    ----------
    module: str
        The name of the module to import.
    symbol: str
        The name of the symbol to import.
    alt: object
        An optional object to be used in place of the given symbol if it fails
        to import in a non-GPU-enabled install

    Returns
    -------
    object
        The imported symbol, the given alternate, or a class derived from
        UnavailableMeta.
    """
    if not GPU_ENABLED:
        return safe_import_from(
            module,
            symbol,
            msg=f"{module}.{symbol} is not available in CPU-only"
            " installations",
            alt=alt,
        )
    return getattr(importlib.import_module(module), symbol)
def cpu_only_import(module, *, alt=None):
    """Import a module that is required only in CPU-enabled installs.

    On a CPU-enabled build the import happens normally and any ImportError
    propagates. On a GPU-only build a failed import is tolerated: the given
    ``alt`` (if any) or a lazily-erroring placeholder is returned instead.

    Parameters
    ----------
    module: str
        The name of the module to import.
    alt: object
        An optional module to be used in place of the given module if it
        fails to import

    Returns
    -------
    object
        The imported module, the given alternate, or a class derived from
        UnavailableMeta.
    """
    if not CPU_ENABLED:
        return safe_import(
            module,
            msg=f"{module} is not installed in GPU-only installations",
            alt=alt,
        )
    return importlib.import_module(module)
def cpu_only_import_from(module, symbol, *, alt=None):
    """Import a symbol that is required only in CPU-enabled installs.

    On a CPU-enabled build the import happens normally and any error
    propagates. On a GPU-only build a failed import is tolerated: the given
    ``alt`` (if any) or a lazily-erroring placeholder is returned instead.

    Parameters
    ----------
    module: str
        The name of the module to import.
    symbol: str
        The name of the symbol to import.
    alt: object
        An optional object to be used in place of the given symbol if it fails
        to import in a non-CPU-enabled install

    Returns
    -------
    object
        The imported symbol, the given alternate, or a class derived from
        UnavailableMeta.
    """
    if not CPU_ENABLED:
        return safe_import_from(
            module,
            symbol,
            msg=f"{module}.{symbol} is not available in GPU-only"
            " installations",
            alt=alt,
        )
    return getattr(importlib.import_module(module), symbol)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/input_utils.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.mem_type import MemoryType
from cuml.internals.safe_imports import (
cpu_only_import,
cpu_only_import_from,
gpu_only_import,
gpu_only_import_from,
safe_import,
safe_import_from,
null_decorator,
return_false,
UnavailableError,
)
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
global_settings = GlobalSettings()
numba_cuda = gpu_only_import("numba.cuda")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
scipy_sparse = safe_import(
"scipy.sparse", msg="Optional dependency scipy is not installed"
)
cp_ndarray = gpu_only_import_from("cupy", "ndarray")
CudfSeries = gpu_only_import_from("cudf", "Series")
CudfDataFrame = gpu_only_import_from("cudf", "DataFrame")
DaskCudfSeries = gpu_only_import_from("dask_cudf.core", "Series")
DaskCudfDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
np_ndarray = cpu_only_import_from("numpy", "ndarray")
numba_devicearray = gpu_only_import_from("numba.cuda", "devicearray")
try:
NumbaDeviceNDArrayBase = numba_devicearray.DeviceNDArrayBase
except UnavailableError:
NumbaDeviceNDArrayBase = numba_devicearray
scipy_isspmatrix = safe_import_from(
"scipy.sparse", "isspmatrix", alt=return_false
)
cupyx_isspmatrix = gpu_only_import_from(
"cupyx.scipy.sparse", "isspmatrix", alt=return_false
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
PandasSeries = cpu_only_import_from("pandas", "Series")
PandasDataFrame = cpu_only_import_from("pandas", "DataFrame")
# Lightweight record describing a validated input array.
cuml_array = namedtuple("cuml_array", "array n_rows n_cols dtype")
# Maps a supported input class to the canonical output-type string used
# throughout cuML. GPU-only entries are added below inside try/except so a
# CPU-only install (where those names are UnavailableMeta placeholders,
# whose __hash__ raises UnavailableError) degrades gracefully.
_input_type_to_str = {
    CumlArray: "cuml",
    SparseCumlArray: "cuml",
    np_ndarray: "numpy",
    PandasSeries: "pandas",
    PandasDataFrame: "pandas",
}
try:
    _input_type_to_str[cp_ndarray] = "cupy"
    _input_type_to_str[CudfSeries] = "cudf"
    _input_type_to_str[CudfDataFrame] = "cudf"
    _input_type_to_str[NumbaDeviceNDArrayBase] = "numba"
except UnavailableError:
    pass
# Maps a supported input class to the memory space its data lives in.
_input_type_to_mem_type = {
    np_ndarray: MemoryType.host,
    PandasSeries: MemoryType.host,
    PandasDataFrame: MemoryType.host,
}
try:
    _input_type_to_mem_type[cp_ndarray] = MemoryType.device
    _input_type_to_mem_type[CudfSeries] = MemoryType.device
    _input_type_to_mem_type[CudfDataFrame] = MemoryType.device
    _input_type_to_mem_type[NumbaDeviceNDArrayBase] = MemoryType.device
except UnavailableError:
    pass
# Classes recognized as sparse inputs.
_SPARSE_TYPES = [SparseCumlArray]
try:
    _input_type_to_str[cupyx.scipy.sparse.spmatrix] = "cupy"
    _SPARSE_TYPES.append(cupyx.scipy.sparse.spmatrix)
    _input_type_to_mem_type[cupyx.scipy.sparse.spmatrix] = MemoryType.device
except UnavailableError:
    pass
try:
    _input_type_to_str[scipy_sparse.spmatrix] = "numpy"
    _SPARSE_TYPES.append(scipy_sparse.spmatrix)
    # NOTE(review): scipy sparse matrices are host-resident, so mapping them
    # to MemoryType.device looks suspicious (cf. the other host types above
    # using MemoryType.host) — confirm this is intended.
    _input_type_to_mem_type[scipy_sparse.spmatrix] = MemoryType.device
except UnavailableError:
    pass
def get_supported_input_type(X):
    """
    Determines if the input object is a supported input array-like object or
    not. If supported, the type is returned. Otherwise, `None` is returned.

    Parameters
    ----------
    X : object
        Input object to test

    Notes
    -----
    To closely match the functionality of
    :func:`~cuml.internals.input_utils.input_to_cuml_array`, this method will
    return `cupy.ndarray` for any object supporting
    `__cuda_array_interface__` and `numpy.ndarray` for any object supporting
    `__array_interface__`.

    The isinstance checks below are order-sensitive: concrete cuML/cudf/
    pandas containers are tested before the generic array-interface probes
    so they are reported as their own type.

    Returns
    -------
    array-like type or None
        If the array-like object is supported, the type is returned.
        Otherwise, `None` is returned.
    """
    # Check CumlArray first to shorten search time
    if isinstance(X, CumlArray):
        return CumlArray
    if isinstance(X, SparseCumlArray):
        return SparseCumlArray
    if isinstance(X, CudfSeries):
        # cudf Series containing nulls are treated as unsupported.
        if X.null_count != 0:
            return None
        else:
            return CudfSeries
    if isinstance(X, PandasDataFrame):
        return PandasDataFrame
    if isinstance(X, PandasSeries):
        return PandasSeries
    if isinstance(X, CudfDataFrame):
        return CudfDataFrame
    # Probe numba device arrays before the generic CUDA-array-interface test
    # below so they are reported as numba arrays rather than cupy.
    try:
        if numba_cuda.devicearray.is_cuda_ndarray(X):
            return numba_cuda.devicearray.DeviceNDArrayBase
    except UnavailableError:
        pass
    if hasattr(X, "__cuda_array_interface__"):
        return cp.ndarray
    if hasattr(X, "__array_interface__"):
        # For some reason, numpy scalar types also implement
        # `__array_interface__`. See numpy.generic.__doc__. Exclude those types
        # as well as np.dtypes
        if not isinstance(X, np.generic) and not isinstance(X, type):
            return np.ndarray
    # Sparse matrix probes: device (cupyx) first, then host (scipy).
    try:
        if cupyx.scipy.sparse.isspmatrix(X):
            return cupyx.scipy.sparse.spmatrix
    except UnavailableError:
        pass
    try:
        if scipy_sparse.isspmatrix(X):
            return scipy_sparse.spmatrix
    except UnavailableError:
        pass
    # Return None if this type is not supported
    return None
def determine_array_type(X):
    """Return the canonical type string ("cuml", "numpy", "cudf", ...)
    for ``X``, or None when ``X`` is None or unsupported."""
    if X is None:
        return None
    # Resolve the concrete container type, then translate it to its string.
    return _input_type_to_str.get(get_supported_input_type(X))
def determine_array_dtype(X):
    """Return the dtype of ``X`` (first column for dataframes), or None
    when ``X`` is None or exposes no ``dtype`` attribute."""
    if X is None:
        return None
    if isinstance(X, (CudfDataFrame, PandasDataFrame)):
        # Assume a single-label target: report the first column's dtype.
        return X[X.columns[0]].dtype
    return getattr(X, "dtype", None)
def determine_array_type_full(X):
    """
    Returns a tuple of the array type, and a boolean if it is sparse

    Parameters
    ----------
    X : array-like
        Input array to test

    Returns
    -------
    (string, bool) Returns a tuple of the array type string and a boolean if it
        is a sparse array.
    """
    if X is None:
        return None, None
    container_type = get_supported_input_type(X)
    if container_type is None:
        return None, None
    type_str = _input_type_to_str[container_type]
    return type_str, container_type in _SPARSE_TYPES
def is_array_like(X):
    """Return True if ``X`` is any array-like object cuML can ingest:
    CUDA/NumPy array-interface objects, known dataframe/series containers,
    sparse matrices, or numba device arrays."""
    if hasattr(X, "__cuda_array_interface__"):
        return True
    # numpy scalar types and dtype classes also expose __array_interface__;
    # they are not array-like for our purposes.
    if hasattr(X, "__array_interface__") and not (
        isinstance(X, global_settings.xpy.generic) or isinstance(X, type)
    ):
        return True
    if isinstance(
        X,
        (
            SparseCumlArray,
            CudfSeries,
            PandasSeries,
            CudfDataFrame,
            PandasDataFrame,
        ),
    ):
        return True
    # Sparse matrix probes (device then host); each probe may be a stub
    # raising UnavailableError on CPU-only installs.
    for probe in (cupyx_isspmatrix, scipy_isspmatrix):
        try:
            if probe(X):
                return True
        except UnavailableError:
            pass
    try:
        return bool(numba_cuda.devicearray.is_cuda_ndarray(X))
    except UnavailableError:
        return False
@nvtx_annotate(
    message="common.input_utils.input_to_cuml_array",
    category="utils",
    domain="cuml_python",
)
def input_to_cuml_array(
    X,
    order="F",
    deepcopy=False,
    check_dtype=False,
    convert_to_dtype=False,
    check_mem_type=False,
    convert_to_mem_type=None,
    safe_dtype_conversion=True,
    check_cols=False,
    check_rows=False,
    fail_on_order=False,
    force_contiguous=True,
):
    """
    Convert input X to CumlArray.

    Acceptable input formats:

    * cuDF Dataframe - returns a deep copy always.
    * cuDF Series - returns by reference or a deep copy depending on
      `deepcopy`.
    * Numpy array - returns a copy in device always
    * cuda array interface compliant array (like Cupy) - returns a
      reference unless `deepcopy`=True.
    * numba device array - returns a reference unless deepcopy=True

    Parameters
    ----------
    X : cuDF.DataFrame, cuDF.Series, NumPy array, Pandas DataFrame, Pandas
        Series or any cuda_array_interface (CAI) compliant array like CuPy,
        Numba or pytorch.
    order: 'F', 'C' or 'K' (default: 'F')
        Whether to return a F-major ('F'), C-major ('C') array or Keep ('K')
        the order of X. Used to check the order of the input. If
        fail_on_order=True, the method will raise ValueError,
        otherwise it will convert X to be of order `order` if needed.
    deepcopy: boolean (default: False)
        Set to True to always return a deep copy of X.
    check_dtype: np.dtype (default: False)
        Set to a np.dtype to throw an error if X is not of dtype `check_dtype`.
    convert_to_dtype: np.dtype (default: False)
        Set to a dtype if you want X to be converted to that dtype if it is
        not that dtype already.
    check_mem_type: MemoryType or str (default: False)
        Memory-type check forwarded to `CumlArray.from_input`.
    convert_to_mem_type: MemoryType or str (default: None)
        Target memory type forwarded to `CumlArray.from_input`.
    safe_dtype_conversion: bool (default: True)
        Set to True to check whether a typecasting performed when
        convert_to_dtype is True will cause information loss. This has a
        performance implication that might be significant for very fast
        methods like FIL and linear models inference.
    check_cols: int (default: False)
        Set to an int `i` to check that input X has `i` columns. Set to False
        (default) to not check at all.
    check_rows: int (default: False)
        Set to an int `i` to check that input X has `i` rows. Set to False
        (default) to not check at all.
    fail_on_order: boolean (default: False)
        Set to True if you want the method to raise a ValueError if X is not
        of order `order`.
    force_contiguous: boolean (default: True)
        Set to True to force CumlArray produced to be contiguous. If `X` is
        non contiguous then a contiguous copy will be done.
        If False, and `X` doesn't need to be converted and is not contiguous,
        the underlying memory underneath the CumlArray will be non contiguous.
        Only affects CAI inputs. Only affects CuPy and Numba device array
        views, all other input methods produce contiguous CumlArrays.

    Returns
    -------
    `cuml_array`: namedtuple('cuml_array', 'array n_rows n_cols dtype')
        A new CumlArray and associated data.
    """
    # All validation/conversion work happens in CumlArray.from_input; this
    # wrapper only derives the (n_rows, n_cols) metadata afterwards.
    arr = CumlArray.from_input(
        X,
        order=order,
        deepcopy=deepcopy,
        check_dtype=check_dtype,
        convert_to_dtype=convert_to_dtype,
        check_mem_type=check_mem_type,
        convert_to_mem_type=convert_to_mem_type,
        safe_dtype_conversion=safe_dtype_conversion,
        check_cols=check_cols,
        check_rows=check_rows,
        fail_on_order=fail_on_order,
        force_contiguous=force_contiguous,
    )
    # Prefer the CUDA array interface (device memory); fall back to the
    # NumPy array interface for host-resident results.
    try:
        shape = arr.__cuda_array_interface__["shape"]
    except AttributeError:
        shape = arr.__array_interface__["shape"]
    n_rows = shape[0]
    # 1-D inputs are reported as a single column.
    if len(shape) > 1:
        n_cols = shape[1]
    else:
        n_cols = 1
    return cuml_array(array=arr, n_rows=n_rows, n_cols=n_cols, dtype=arr.dtype)
@nvtx_annotate(
    message="common.input_utils.input_to_cupy_array",
    category="utils",
    domain="cuml_python",
)
def input_to_cupy_array(
    X,
    order="F",
    deepcopy=False,
    check_dtype=False,
    convert_to_dtype=False,
    check_cols=False,
    check_rows=False,
    fail_on_order=False,
    force_contiguous=True,
    fail_on_null=True,
) -> cuml_array:
    """
    Identical to input_to_cuml_array but it returns a cupy array instead of
    CumlArray
    """
    if not fail_on_null and isinstance(X, (CudfDataFrame, CudfSeries)):
        # cudf objects whose direct `.values` extraction fails (e.g. due to
        # nulls) are cast to float64 with nulls replaced by NaN first.
        try:
            X = X.values
        except ValueError:
            X = X.astype("float64", copy=False)
            X.fillna(cp.nan, inplace=True)
            X = X.values
    converted = input_to_cuml_array(
        X,
        order=order,
        deepcopy=deepcopy,
        check_dtype=check_dtype,
        convert_to_dtype=convert_to_dtype,
        check_cols=check_cols,
        check_rows=check_rows,
        fail_on_order=fail_on_order,
        force_contiguous=force_contiguous,
        convert_to_mem_type=MemoryType.device,
    )
    # Swap the CumlArray in the returned namedtuple for a cupy ndarray.
    return converted._replace(array=converted.array.to_output("cupy"))
@nvtx_annotate(
    message="common.input_utils.input_to_host_array",
    category="utils",
    domain="cuml_python",
)
def input_to_host_array(
    X,
    order="F",
    deepcopy=False,
    check_dtype=False,
    convert_to_dtype=False,
    check_cols=False,
    check_rows=False,
    fail_on_order=False,
    force_contiguous=True,
    fail_on_null=True,
) -> cuml_array:
    """
    Identical to input_to_cuml_array but it returns a host (NumPy) array
    instead of CumlArray
    """
    # cudf objects whose direct `.values` extraction fails (e.g. due to
    # nulls) are cast to float64 with nulls replaced by NaN first.
    if not fail_on_null and isinstance(X, (CudfDataFrame, CudfSeries)):
        try:
            X = X.values
        except ValueError:
            X = X.astype("float64", copy=False)
            X.fillna(cp.nan, inplace=True)
            X = X.values
    out_data = input_to_cuml_array(
        X,
        order=order,
        deepcopy=deepcopy,
        check_dtype=check_dtype,
        convert_to_dtype=convert_to_dtype,
        check_cols=check_cols,
        check_rows=check_rows,
        fail_on_order=fail_on_order,
        force_contiguous=force_contiguous,
        convert_to_mem_type=MemoryType.host,
    )
    # Swap the CumlArray in the returned namedtuple for a NumPy ndarray.
    return out_data._replace(array=out_data.array.to_output("numpy"))
def input_to_host_array_with_sparse_support(X):
    """Convert ``X`` to a host array, keeping sparse inputs sparse by
    converting them to scipy format instead of densifying."""
    array_type, sparse = determine_array_type_full(X)
    if not sparse:
        # Dense path: reuse the generic host conversion.
        return input_to_host_array(X).array
    if array_type == "cupy":
        return SparseCumlArray(X).to_output(output_type="scipy")
    if array_type == "cuml":
        return X.to_output(output_type="scipy")
    if array_type == "numpy":
        # Already a host (scipy) sparse matrix.
        return X
    raise ValueError(f"Unsupported sparse array type: {array_type}.")
def convert_dtype(X, to_dtype=np.float32, legacy=True, safe_dtype=True):
    """
    Convert X to be of dtype `dtype`, raising a TypeError
    if the conversion would lose information.

    Parameters
    ----------
    X : array-like
        Input to convert. dask-cudf collections are computed (materialized)
        first.
    to_dtype : dtype (default: np.float32)
        Target dtype.
    legacy : bool (default: True)
        For CUDA-array-interface inputs: return a numba device array when
        True, a CumlArray when False.
    safe_dtype : bool (default: True)
        When True and the cast is not a safe NumPy cast, check the actual
        values and raise TypeError if any fall outside the target dtype's
        representable range.
    """
    if isinstance(X, (DaskCudfSeries, DaskCudfDataFrame)):
        # TODO: Warn, but not when using dask_sql
        X = X.compute()
    if safe_dtype:
        cur_dtype = determine_array_dtype(X)
        if not global_settings.xpy.can_cast(cur_dtype, to_dtype):
            # Representable range of the target dtype: integer info first,
            # falling back to float info when to_dtype is not an integer.
            try:
                target_dtype_range = global_settings.xpy.iinfo(to_dtype)
            except ValueError:
                target_dtype_range = global_settings.xpy.finfo(to_dtype)
            out_of_range = (
                (X < target_dtype_range.min) | (X > target_dtype_range.max)
            ).any()
            # NOTE(review): the first .any() may not fully reduce for some
            # inputs (e.g. dataframe .any() can return a per-column object),
            # hence the second reduction attempt below.
            try:
                out_of_range = out_of_range.any()
            except AttributeError:
                pass
            if out_of_range:
                raise TypeError("Data type conversion would lose information.")
    try:
        if numba_cuda.is_cuda_array(X):
            arr = cp.asarray(X, dtype=to_dtype)
            if legacy:
                return numba_cuda.as_cuda_array(arr)
            else:
                return CumlArray(data=arr)
    except UnavailableError:
        pass
    try:
        # Generic path: numpy/cupy arrays and pandas/cudf containers all
        # support astype(..., copy=False).
        return X.astype(to_dtype, copy=False)
    except AttributeError:
        raise TypeError("Received unsupported input type: %s" % type(X))
def order_to_str(order):
    """Return a human-readable description of a memory-order flag
    ('F' or 'C'); other values yield None."""
    descriptions = {"F": "column ('F')", "C": "row ('C')"}
    return descriptions.get(order)
def sparse_scipy_to_cp(sp, dtype):
    """
    Convert object of scipy.sparse to
    cupyx.scipy.sparse.coo_matrix
    """
    # Normalize to COO so row/col/data triplets can be copied to device.
    coo = sp.tocoo()
    rows = cp.asarray(coo.row)
    cols = cp.asarray(coo.col)
    vals = cp.asarray(coo.data, dtype=dtype)
    return cupyx.scipy.sparse.coo_matrix((vals, (rows, cols)), sp.shape)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/base_helpers.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from inspect import Parameter, signature
import typing
from cuml.internals.api_decorators import (
api_base_return_generic,
api_base_return_array,
api_base_return_sparse_array,
api_base_return_any,
api_return_any,
_deprecate_pos_args,
)
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.base_return_types import _get_base_return_type
from cuml.internals.constants import CUML_WRAPPED_FLAG
def _process_generic(gen_type):
    """
    Inspect a (possibly generic) return annotation and report whether it
    is, or contains, a CumlArray.

    Returns "generic" when `gen_type` is a CumlArray subclass or a supported
    generic alias (Union/Tuple/Dict/List) containing one; otherwise None.

    Raises
    ------
    NotImplementedError
        If `gen_type` involves SparseCumlArray or is an unsupported
        generic alias.
    """
    # Check if the type is not a generic. If not, must return "generic" if
    # subtype is CumlArray otherwise None
    if not isinstance(gen_type, typing._GenericAlias):
        if issubclass(gen_type, CumlArray):
            return "generic"
        # We don't handle SparseCumlArray at this time
        if issubclass(gen_type, SparseCumlArray):
            raise NotImplementedError(
                "Generic return types with SparseCumlArray are not supported "
                "at this time"
            )
        # Otherwise None (keep processing)
        return None
    # It's a generic type by this point. Support Union, Tuple, Dict and List
    supported_gen_types = [
        tuple,
        dict,
        list,
        typing.Union,
    ]
    if gen_type.__origin__ in supported_gen_types:
        # Check recursively for a CumlArray type in the args
        for arg in gen_type.__args__:
            inner_type = _process_generic(arg)
            if inner_type is not None:
                return inner_type
    else:
        # Fix: message previously read "Unknow generic type"
        raise NotImplementedError("Unknown generic type: {}".format(gen_type))
    return None
def _wrap_attribute(class_name: str, attribute_name: str, attribute, **kwargs):
    """Wrap an estimator attribute in the API decorator matching its
    inferred base return type; attributes already flagged as wrapped are
    returned unchanged."""
    # Skip items marked with autowrap_ignore
    if attribute.__dict__.get(CUML_WRAPPED_FLAG, False):
        return attribute
    return_type = _get_base_return_type(class_name, attribute)
    # Dispatch table: inferred return type -> decorator factory.
    wrappers = {
        "generic": api_base_return_generic,
        "array": api_base_return_array,
        "sparsearray": api_base_return_sparse_array,
        "base": api_base_return_any,
    }
    wrapper = wrappers.get(return_type)
    if wrapper is not None:
        return wrapper(**kwargs)(attribute)
    if not attribute_name.startswith("_"):
        # Only replace public functions with return any
        return api_return_any()(attribute)
    return attribute
def _check_and_wrap_init(attribute, **kwargs):
    """Apply the positional-argument deprecation decorator to an __init__,
    first asserting that every parameter after `self` is keyword-only."""
    # Check if the decorator has already been added
    if attribute.__dict__.get(_deprecate_pos_args.FLAG_NAME):
        return attribute
    # Collect any parameter (other than self) that could still be passed
    # positionally.
    sig = signature(attribute)
    offending = []
    for name, param in sig.parameters.items():
        if name == "self":
            continue
        if param.kind in (
            Parameter.POSITIONAL_ONLY,
            Parameter.POSITIONAL_OR_KEYWORD,
        ):
            offending.append(name)
    assert not offending, (
        "Error in `{}`!. Positional arguments for estimators (that derive "
        "from `Base`) have been deprecated but parameters '{}' can still "
        "be used as positional arguments. Please specify all parameters "
        "after `self` as keyword only by using the `*` argument"
    ).format(attribute.__qualname__, ", ".join(offending))
    return _deprecate_pos_args(**kwargs)(attribute)
class BaseMetaClass(type):
    """
    Metaclass for all estimators in cuML. This metaclass will get called for
    estimators deriving from `cuml.common.Base` as well as
    `cuml.dask.common.BaseEstimator`. It serves 2 primary functions:

    1. Set the `@_deprecate_pos_args()` decorator on all `__init__` functions
    2. Wrap any functions and properties in the API decorators
       [`cuml.common.Base` only]
    """
    def __new__(cls, classname, bases, classDict):
        # Dask estimators only receive the __init__ wrapping; the API
        # return-type decorators are skipped for them.
        is_dask_module = classDict["__module__"].startswith("cuml.dask")
        for attributeName, attribute in classDict.items():
            # If attributeName is `__init__`, wrap in the decorator to
            # deprecate positional args
            if attributeName == "__init__":
                attribute = _check_and_wrap_init(attribute, version="21.06")
                classDict[attributeName] = attribute
            # For now, skip all additional processing if we are a dask
            # estimator
            if is_dask_module:
                continue
            # Must be a function
            if callable(attribute):
                classDict[attributeName] = _wrap_attribute(
                    classname, attributeName, attribute
                )
            elif isinstance(attribute, property):
                # Need to wrap the getter if it exists; input_arg=None since
                # a property getter has no data argument to infer from.
                if hasattr(attribute, "fget") and attribute.fget is not None:
                    classDict[attributeName] = attribute.getter(
                        _wrap_attribute(
                            classname,
                            attributeName,
                            attribute.fget,
                            input_arg=None,
                        )
                    )
        return type.__new__(cls, classname, bases, classDict)
class _tags_class_and_instance:
"""
Decorator for Base class to allow for dynamic and static _get_tags.
In general, most methods are either dynamic or static, so this decorator
is only meant to be used in the Base estimator _get_tags.
"""
def __init__(self, _class, _instance=None):
self._class = _class
self._instance = _instance
def instance_method(self, _instance):
"""
Factory to create a _tags_class_and_instance instance method with
the existing class associated.
"""
return _tags_class_and_instance(self._class, _instance)
def __get__(self, _instance, _class):
# if the caller had no instance (i.e. it was a class) or there is no
# instance associated we the method we return the class call
if _instance is None or self._instance is None:
return self._class.__get__(_class, None)
# otherwise return instance call
return self._instance.__get__(_instance, _class)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/output_type.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Output container formats a user may request for results and estimator
# attributes (via an estimator's `output_type` argument or the global
# output-type settings).
VALID_OUTPUT_TYPES = (
    "array",
    "numba",
    "dataframe",
    "series",
    "df_obj",
    "cupy",
    "numpy",
    "cudf",
    "pandas",
)
# Internal code additionally accepts "input", meaning "mirror the container
# type of the data that was passed in".
INTERNAL_VALID_OUTPUT_TYPES = ("input", *VALID_OUTPUT_TYPES)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources in this directory to be compiled into extension modules.
set(cython_sources
  base.pyx
  device_support.pyx
  internals.pyx
  logger.pyx
)
# Build one C++ extension module per source, linked against the
# single-GPU cuML libraries, with an `internals_` target prefix.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX internals_
  ASSOCIATED_TARGETS cuml
)
# We need to include for callbacks_implements.h in the internals folder
target_include_directories(internals_internals PRIVATE ${CMAKE_CURRENT_LIST_DIR})
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/device_support.pyx | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CPU execution support is available only when scikit-learn is installed.
try:
    import sklearn # noqa: F401 # no-cython-lint
    CPU_ENABLED = True
except ImportError:
    CPU_ENABLED = False
# GPU support is decided at compile time via the GPUBUILD Cython flag.
IF GPUBUILD == 1:
    GPU_ENABLED = True
ELSE:
    GPU_ENABLED = False
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/base.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import os
import inspect
import numbers
from importlib import import_module
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator
)
np = cpu_only_import('numpy')
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
try:
from sklearn.utils import estimator_html_repr
except ImportError:
estimator_html_repr = None
import cuml
import cuml.common
import cuml.internals.logger as logger
import cuml.internals
import cuml.internals.input_utils
from cuml.internals.available_devices import is_cuda_available
from cuml.internals.device_type import DeviceType
from cuml.internals.input_utils import (
determine_array_type,
input_to_cuml_array,
input_to_host_array,
is_array_like
)
from cuml.internals.memory_utils import determine_array_memtype
from cuml.internals.mem_type import MemoryType
from cuml.internals.memory_utils import using_memory_type
from cuml.internals.output_type import (
INTERNAL_VALID_OUTPUT_TYPES,
VALID_OUTPUT_TYPES
)
from cuml.internals.array import CumlArray
from cuml.internals.safe_imports import (
gpu_only_import, gpu_only_import_from
)
from cuml.internals.mixins import TagsMixin
cp_ndarray = gpu_only_import_from('cupy', 'ndarray')
cp = gpu_only_import('cupy')
IF GPUBUILD == 1:
import pylibraft.common.handle
import cuml.common.cuda
class Base(TagsMixin,
           metaclass=cuml.internals.BaseMetaClass):
    """
    Base class for all the ML algos. It handles some of the common operations
    across all algos. Every ML algo class exposed at cython level must inherit
    from this class.

    Typical estimator design using Base requires three main things:

    1. Call the base __init__ method explicitly from inheriting estimators in
       their __init__.

    2. Attributes that users will want to access, and are array-like should
       use cuml.internals.array, and have a preceding underscore `_` before
       the name the user expects. That way the __getattr__ of Base will
       convert it automatically to the appropriate output format for the
       user. For example, in DBSCAN the user expects to be able to access
       `model.labels_`, so the code actually has an attribute
       `model._labels_` that gets converted at the moment the user accesses
       `labels_` automatically. No need for extra code in inheriting classes
       as long as they follow that naming convention. It is recommended to
       create the attributes in the constructor assigned to None, and
       add a note for users that might look into the code to see what
       attributes the class might have. For example, in KMeans:

    .. code-block:: python

        def __init__(...)
            super(KMeans, self).__init__(handle, verbose, output_type)

            # initialize numeric variables

            # internal array attributes
            self._labels_ = None # accessed via estimator.labels_
            self._cluster_centers_ = None # accessed via estimator.cluster_centers_ # noqa

    3. To appropriately work for outputs mirroring the format of inputs of the
       user when appropriate, the code in the inheriting estimator must call
       the following methods, with input being the data sent by the user:

    - `self._set_output_type(input)` in `fit` methods that modify internal
      structures. This will allow users to receive the correct format when
      accessing internal attributes of the class (eg. labels_ in KMeans).:

    .. code-block:: python

        def fit(self, X):
            self._set_output_type(X)
            # rest of the fit code

    - `out_type = self._get_output_type(input)` in `predict`/`transform` style
      methods, that don't modify class attributes. out_type then can be used
      to return the correct format to the user. For example, in KMeans:

    .. code-block:: python

        def transform(self, X, convert_dtype=False):
            out_type = self._get_output_type(X)
            X_m, n_rows, n_cols, dtype = input_to_cuml_array(X ...)
            preds = CumlArray.zeros(...)

            # method code and call to C++ and whatever else is needed

            return preds.to_output(out_type)

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    output_mem_type : {'host', 'device'}, default=None
        Return results with memory of the indicated type and use the
        indicated memory type for estimator attributes. If None, the memory
        type set at the module level (`cuml.global_settings.memory_type`) will
        be used.

    Examples
    --------

    .. code-block:: python

        from cuml import Base

        # assuming this ML algo has separate 'fit' and 'predict' methods
        class MyAlgo(Base):
            def __init__(self, ...):
                super(MyAlgo, self).__init__(...)
                # other setup logic

            def fit(self, data, ...):
                # check output format
                self._check_output_type(data)
                # train logic goes here

            def predict(self, data, ...):
                # check output format
                self._check_output_type(data)
                # inference logic goes here

            def get_param_names(self):
                # return a list of hyperparam names supported by this algo

        # stream and handle example:

        stream = cuml.common.cuda.Stream()
        handle = pylibraft.common.Handle(stream=stream)

        algo = MyAlgo(handle=handle)
        algo.fit(...)
        result = algo.predict(...)

        # final sync of all gpu-work launched inside this object
        # this is same as `cuml.cuda.Stream.sync()` call, but safer in case
        # the default stream inside the `raft::handle_t` is being used
        base.handle.sync()
        del base # optional!
    """

    def __init__(self, *,
                 handle=None,
                 verbose=False,
                 output_type=None,
                 output_mem_type=None):
        """
        Constructor. All children must call init method of this base class.
        """
        IF GPUBUILD == 1:
            self.handle = pylibraft.common.handle.Handle() if handle is None \
                else handle
        ELSE:
            self.handle = None

        IF GPUBUILD == 1:
            # Internally, self.verbose follows the spdlog/c++ standard of
            # 0 is most logging, and logging decreases from there.
            # So if the user passes an int value for logging, we convert it.
            if verbose is True:
                self.verbose = logger.level_debug
            elif verbose is False:
                self.verbose = logger.level_info
            else:
                self.verbose = verbose
        ELSE:
            self.verbose = verbose

        # Validate the requested output type ("mirror" is rejected here;
        # None falls back to the global setting).
        self.output_type = _check_output_type_str(
            cuml.global_settings.output_type
            if output_type is None else output_type)
        if output_mem_type is None:
            self.output_mem_type = cuml.global_settings.memory_type
        else:
            self.output_mem_type = MemoryType.from_str(output_mem_type)
        self._input_type = None
        self._input_mem_type = None
        self.target_dtype = None
        self.n_features_in_ = None

        # Opt-in NVTX range annotation of fit/predict/etc. for profiling.
        nvtx_benchmark = os.getenv('NVTX_BENCHMARK')
        if nvtx_benchmark and nvtx_benchmark.lower() == 'true':
            self.set_nvtx_annotations()

    def __repr__(self):
        """
        Pretty prints the arguments of a class using Scikit-learn standard :)
        """
        cdef list signature = inspect.getfullargspec(self.__init__).args
        if len(signature) > 0 and signature[0] == 'self':
            del signature[0]
        cdef dict state = self.__dict__
        cdef str string = self.__class__.__name__ + '('
        cdef str key
        for key in signature:
            if key not in state:
                continue
            if type(state[key]) is str:
                string += "{}='{}', ".format(key, state[key])
            else:
                if hasattr(state[key], "__str__"):
                    string += "{}={}, ".format(key, state[key])
        string = string.rstrip(', ')
        output = string + ')'

        if hasattr(self, 'sk_model_'):
            output += ' <sk_model_ attribute used>'
        return output

    def get_param_names(self):
        """
        Returns a list of hyperparameter names owned by this class. It is
        expected that every child class overrides this method and appends its
        extra set of parameters that it in-turn owns. This is to simplify the
        implementation of `get_params` and `set_params` methods.
        """
        return ["handle", "verbose", "output_type"]

    def get_params(self, deep=True):
        """
        Returns a dict of all params owned by this class. If the child class
        has appropriately overridden the `get_param_names` method and does not
        need anything other than what is there in this method, then it doesn't
        have to override this method
        """
        params = dict()
        variables = self.get_param_names()
        for key in variables:
            var_value = getattr(self, key, None)
            params[key] = var_value
        return params

    def set_params(self, **params):
        """
        Accepts a dict of params and updates the corresponding ones owned by
        this class. If the child class has appropriately overridden the
        `get_param_names` method and does not need anything other than what is,
        there in this method, then it doesn't have to override this method
        """
        if not params:
            return self
        variables = self.get_param_names()
        for key, value in params.items():
            if key not in variables:
                raise ValueError("Bad param '%s' passed to set_params" % key)
            else:
                setattr(self, key, value)
        return self

    def __getstate__(self):
        # getstate and setstate are needed to tell pickle to treat this
        # as regular python classes instead of triggering __getattr__
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    def __getattr__(self, attr):
        """
        Redirects to `solver_model` if the attribute exists.

        NOTE: only invoked when normal attribute lookup has already failed.
        NOTE(review): if attr == "solver_model" and it is absent, the dict
        access below raises KeyError rather than AttributeError — confirm
        this is intended.
        """
        if attr == "solver_model":
            return self.__dict__['solver_model']
        if "solver_model" in self.__dict__.keys():
            return getattr(self.solver_model, attr)
        else:
            raise AttributeError(attr)

    def _set_base_attributes(self,
                             output_type=None,
                             target_dtype=None,
                             n_features=None):
        """
        Method to set the base class attributes - output type,
        target dtype and n_features. It combines the three different
        function calls. It's called in fit function from estimators.

        Parameters
        --------
        output_type : DataFrame (default = None)
            If output_type is passed, sets the output_type on the
            dataframe passed
        target_dtype : Target column (default = None)
            If target_dtype is passed, we call _set_target_dtype
            on it
        n_features: int or DataFrame (default=None)
            If an int is passed, we set it to the number passed
            If dataframe, we set it based on the passed df.

        Examples
        --------

        .. code-block:: python

            # To set output_type and n_features based on X
            self._set_base_attributes(output_type=X, n_features=X)

            # To set output_type on X and n_features to 10
            self._set_base_attributes(output_type=X, n_features=10)

            # To only set target_dtype
            self._set_base_attributes(output_type=X, target_dtype=y)
        """
        if output_type is not None:
            self._set_output_type(output_type)
            self._set_output_mem_type(output_type)
        if target_dtype is not None:
            self._set_target_dtype(target_dtype)
        if n_features is not None:
            self._set_n_features_in(n_features)

    def _set_output_type(self, inp):
        # Remember the container type of the training data so attributes can
        # later be mirrored back in that format.
        self._input_type = determine_array_type(inp)

    def _set_output_mem_type(self, inp):
        self._input_mem_type = determine_array_memtype(
            inp
        )

    def _get_output_type(self, inp):
        """
        Method to be called by predict/transform methods of inheriting classes.
        Returns the appropriate output type depending on the type of the input,
        class output type and global output type.
        """
        # Default to the global type
        output_type = cuml.global_settings.output_type

        # If its None, default to our type
        if (output_type is None or output_type == "mirror"):
            output_type = self.output_type

        # If we are input, get the type from the input
        if output_type == 'input':
            output_type = determine_array_type(inp)

        return output_type

    def _get_output_mem_type(self, inp):
        """
        Method to be called by predict/transform methods of inheriting classes.
        Returns the appropriate memory type depending on the type of the input,
        class output type and global output type.
        """
        # Default to the global type
        mem_type = cuml.global_settings.memory_type

        # If we are input, get the type from the input
        if cuml.global_settings.output_type == 'input':
            mem_type = determine_array_memtype(inp)

        return mem_type

    def _set_target_dtype(self, target):
        self.target_dtype = cuml.internals.input_utils.determine_array_dtype(
            target)

    def _get_target_dtype(self):
        """
        Method to be called by predict/transform methods of
        inheriting classifier classes. Returns the appropriate output
        dtype depending on the dtype of the target.
        """
        try:
            out_dtype = self.target_dtype
        except AttributeError:
            out_dtype = None
        return out_dtype

    def _set_n_features_in(self, X):
        # Accept either the feature count itself or data to read it from.
        if isinstance(X, int):
            self.n_features_in_ = X
        else:
            self.n_features_in_ = X.shape[1]

    def _more_tags(self):
        # 'preserves_dtype' tag's Scikit definition currently only applies to
        # transformers and whether the transform method conserves the dtype
        # (in that case returns an empty list, otherwise the dtype it
        # casts to).
        # By default, our transform methods convert to self.dtype, but
        # we need to check whether the tag has been defined already.
        if hasattr(self, 'transform') and hasattr(self, 'dtype'):
            return {'preserves_dtype': [self.dtype]}
        return {}

    def _repr_mimebundle_(self, **kwargs):
        """Prepare representations used by jupyter kernels to display estimator"""
        # Returns None (no bundle) when scikit-learn is unavailable.
        if estimator_html_repr is not None:
            output = {"text/plain": repr(self)}
            output["text/html"] = estimator_html_repr(self)
            return output

    def set_nvtx_annotations(self):
        # Wrap the main estimator entry points in NVTX ranges so they show
        # up in profiler timelines; the message embeds class and object id.
        for func_name in ['fit', 'transform', 'predict', 'fit_transform',
                          'fit_predict']:
            if hasattr(self, func_name):
                msg = '{class_name}.{func_name} [{addr}]'
                msg = msg.format(class_name=self.__class__.__module__,
                                 func_name=func_name,
                                 addr=hex(id(self)))
                msg = msg[5:]  # remove cuml.
                func = getattr(self, func_name)
                func = nvtx_annotate(message=msg, domain="cuml_python")(func)
                setattr(self, func_name, func)
# Internal, non class owned helper functions
def _check_output_type_str(output_str):
if (output_str is None):
return "input"
assert output_str != "mirror", \
("Cannot pass output_type='mirror' in Base.__init__(). Did you forget "
"to pass `output_type=self.output_type` to a child estimator? "
"Currently `cuml.global_settings.output_type==`{}`"
).format(cuml.global_settings.output_type)
if isinstance(output_str, str):
output_type = output_str.lower()
# Check for valid output types + "input"
if output_type in INTERNAL_VALID_OUTPUT_TYPES:
# Return the original version if nothing has changed, otherwise
# return the lowered. This is to try and keep references the same
# to support sklearn.base.clone() where possible
return output_str if output_type == output_str else output_type
valid_output_types_str = ', '.join(
[f"'{x}'" for x in VALID_OUTPUT_TYPES]
)
raise ValueError(
f'output_type must be one of {valid_output_types_str}'
f' Got: {output_str}'
)
def _determine_stateless_output_type(output_type, input_obj):
    """
    This function determines the output type using the same steps that are
    performed in `cuml.common.base.Base`. This can be used to mimic the
    functionality in `Base` for stateless functions or objects that do not
    derive from `Base`.
    """
    # Fall back to the global setting when unspecified; otherwise validate
    # the provided output_type string.
    if output_type is None:
        resolved = cuml.global_settings.output_type
    else:
        resolved = _check_output_type_str(output_type)

    # 'input' means: mirror whatever array type the input object has.
    if resolved == 'input':
        resolved = determine_array_type(input_obj)

    return resolved
class UniversalBase(Base):
    """
    Base class adding CPU/GPU interoperability to estimators.

    It can import and build a corresponding CPU (scikit-learn style)
    estimator, move fitted attributes between host and device, and
    dispatch method calls to either implementation based on the global
    device type (``cuml.global_settings.device_type``).
    """

    def import_cpu_model(self):
        """Import and cache the CPU estimator class and its hyperparameters.

        Uses ``self._cpu_estimator_import_path`` when present, otherwise
        derives the sklearn path from this class's module path.
        """
        # skip the CPU estimator has been imported already
        if hasattr(self, '_cpu_model_class'):
            return
        if hasattr(self, '_cpu_estimator_import_path'):
            # if import path differs from the one of sklearn
            # look for _cpu_estimator_import_path
            estimator_path = self._cpu_estimator_import_path.split('.')
            model_path = '.'.join(estimator_path[:-1])
            model_name = estimator_path[-1]
        else:
            # import from similar path to the current estimator
            # class ("cuml.<rest>" -> "sklearn.<rest>")
            model_path = 'sklearn' + self.__class__.__module__[4:]
            model_name = self.__class__.__name__
        self._cpu_model_class = getattr(import_module(model_path), model_name)

        # Save list of available CPU estimator hyperparameters
        self._cpu_hyperparams = list(
            inspect.signature(self._cpu_model_class.__init__).parameters.keys()
        )

    def build_cpu_model(self):
        """Instantiate the CPU estimator from the saved ``_full_kwargs``,
        keeping only hyperparameters the CPU ``__init__`` accepts."""
        if hasattr(self, '_cpu_model'):
            return
        filtered_kwargs = {}
        for keyword, arg in self._full_kwargs.items():
            if keyword in self._cpu_hyperparams:
                filtered_kwargs[keyword] = arg
            else:
                logger.info("Unused keyword parameter: {} "
                            "during CPU estimator "
                            "initialization".format(keyword))

        # initialize model
        self._cpu_model = self._cpu_model_class(**filtered_kwargs)

    def gpu_to_cpu(self):
        """Copy fitted attributes from this (GPU) estimator to ``_cpu_model``,
        converting device arrays to numpy along the way."""
        # transfer attributes from GPU to CPU estimator
        for attr in self.get_attr_names():
            # check presence of attribute (plain attribute or property)
            if hasattr(self, attr) or \
               isinstance(getattr(type(self), attr, None), property):
                # get the cuml attribute
                if hasattr(self, attr):
                    cu_attr = getattr(self, attr)
                else:
                    cu_attr = getattr(type(self), attr).fget(self)
                # if the cuml attribute is a CumlArrayDescriptorMeta
                if hasattr(cu_attr, 'get_input_value'):
                    # extract the actual value from the
                    # CumlArrayDescriptorMeta
                    cu_attr_value = cu_attr.get_input_value()
                    # check if descriptor is empty
                    if cu_attr_value is not None:
                        if cu_attr.input_type == 'cuml':
                            # transform cumlArray to numpy and set it
                            # as an attribute in the CPU estimator
                            setattr(self._cpu_model, attr,
                                    cu_attr_value.to_output('numpy'))
                        else:
                            # transfer all other types of attributes
                            # directly
                            setattr(self._cpu_model, attr,
                                    cu_attr_value)
                elif isinstance(cu_attr, CumlArray):
                    # transform cumlArray to numpy and set it
                    # as an attribute in the CPU estimator
                    setattr(self._cpu_model, attr,
                            cu_attr.to_output('numpy'))
                elif isinstance(cu_attr, cp_ndarray):
                    # transform cupy to numpy and set it
                    # as an attribute in the CPU estimator
                    setattr(self._cpu_model, attr,
                            cp.asnumpy(cu_attr))
                else:
                    # transfer all other types of attributes directly
                    setattr(self._cpu_model, attr, cu_attr)

    def cpu_to_gpu(self):
        """Copy fitted attributes from ``_cpu_model`` back to this estimator,
        converting numpy arrays to CumlArrays (on device when available)."""
        # transfer attributes from CPU to GPU estimator
        with using_memory_type(
            (MemoryType.host, MemoryType.device)[
                is_cuda_available()
            ]
        ):
            for attr in self.get_attr_names():
                # check presence of attribute (plain attribute or property)
                if hasattr(self._cpu_model, attr) or \
                   isinstance(getattr(type(self._cpu_model),
                                      attr, None), property):
                    # get the cpu attribute
                    if hasattr(self._cpu_model, attr):
                        cpu_attr = getattr(self._cpu_model, attr)
                    else:
                        cpu_attr = getattr(type(self._cpu_model),
                                           attr).fget(self._cpu_model)
                    # if the cpu attribute is an array
                    if isinstance(cpu_attr, np.ndarray):
                        # get data order wished for by
                        # CumlArrayDescriptor (e.g. "coef__order")
                        if hasattr(self, attr + '_order'):
                            order = getattr(self, attr + '_order')
                        else:
                            order = 'K'
                        # transfer array to gpu and set it as a cuml
                        # attribute
                        cuml_array = input_to_cuml_array(
                            cpu_attr,
                            order=order,
                            convert_to_mem_type=(
                                MemoryType.host,
                                MemoryType.device
                            )[is_cuda_available()]
                        )[0]
                        setattr(self, attr, cuml_array)
                    else:
                        # transfer all other types of attributes
                        # directly
                        setattr(self, attr, cpu_attr)

    def args_to_cpu(self, *args, **kwargs):
        """Return copies of ``args``/``kwargs`` with all array-likes on host.

        Raises
        ------
        ValueError
            For keyword values that are neither array-like, Real, nor str.
        """
        # put all the args on host
        new_args = tuple(input_to_host_array(arg)[0] for arg in args)

        # put all the kwargs on host
        new_kwargs = dict()
        for kw, arg in kwargs.items():
            # if array-like, ensure array-like is on the host
            if is_array_like(arg):
                new_kwargs[kw] = input_to_host_array(arg)[0]
            # if Real or string, pass as is
            elif isinstance(arg, (numbers.Real, str)):
                new_kwargs[kw] = arg
            else:
                raise ValueError(f"Unable to process argument {kw}")
        return new_args, new_kwargs

    def dispatch_func(self, func_name, gpu_func, *args, **kwargs):
        """
        This function will dispatch calls to training and inference according
        to the global configuration. It should work for all estimators
        sufficiently close the scikit-learn implementation as it uses
        it for training and inferences on host.

        Parameters
        ----------
        func_name : string
            name of the function to be dispatched
        gpu_func : function
            original cuML function
        args : arguments
            arguments to be passed to the function for the call
        kwargs : keyword arguments
            keyword arguments to be passed to the function for the call
        """
        # look for current device_type
        device_type = cuml.global_settings.device_type
        # GPU case
        if device_type == DeviceType.device:
            # call the function from the GPU estimator
            return gpu_func(self, *args, **kwargs)
        # CPU case
        elif device_type == DeviceType.host:
            # check if a CPU model already exists
            if not hasattr(self, '_cpu_model'):
                # import CPU estimator from library
                self.import_cpu_model()
                # create an instance of the estimator
                self.build_cpu_model()

                # new CPU model + CPU inference
                if func_name not in ['fit', 'fit_transform', 'fit_predict']:
                    # transfer trained attributes from GPU to CPU
                    self.gpu_to_cpu()

            # ensure args and kwargs are on the CPU
            args, kwargs = self.args_to_cpu(*args, **kwargs)

            # get the function from the CPU estimator
            cpu_func = getattr(self._cpu_model, func_name)
            # call the function from the CPU estimator
            res = cpu_func(*args, **kwargs)

            # CPU training
            if func_name in ['fit', 'fit_transform', 'fit_predict']:
                # mirror input type
                self._set_output_type(args[0])
                self._set_output_mem_type(args[0])

                # transfer trained attributes from CPU to GPU
                self.cpu_to_gpu()

                # return the cuml estimator when training
                if func_name == 'fit':
                    return self

            # return function result
            # NOTE(review): other device types fall through and return None
            return res
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/array_sparse.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.array import CumlArray
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.mem_type import MemoryType
from cuml.internals.memory_utils import class_with_cupy_rmm
from cuml.internals.logger import debug
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
null_decorator,
UnavailableError,
)
from collections import namedtuple
cpx_sparse = gpu_only_import("cupyx.scipy.sparse")
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
scipy_sparse = cpu_only_import("scipy.sparse")
sparse_matrix_classes = []
try:
sparse_matrix_classes.append(cpx_sparse.csr_matrix)
except UnavailableError:
pass
try:
sparse_matrix_classes.append(scipy_sparse.csr_matrix)
except UnavailableError:
pass
sparse_matrix_classes = tuple(sparse_matrix_classes)
SparseCumlArrayInput = namedtuple(
"SparseCumlArrayInput",
["indptr", "indices", "data", "nnz", "dtype", "shape"],
)
@class_with_cupy_rmm()
class SparseCumlArray:
    """
    SparseCumlArray abstracts sparse array allocations. This will
    accept either a Scipy or Cupy sparse array and construct CumlArrays
    out of the underlying index and data arrays. Currently, this class
    only supports the CSR array format and input in any other sparse
    format will be converted to CSR by default. Set `convert_format=False`
    to disable automatic conversion to CSR.

    Parameters
    ----------

    data : scipy.sparse.spmatrix or cupyx.scipy.sparse.spmatrix
        A Scipy or Cupy sparse matrix
    convert_to_dtype : data-type or False, optional
        Any object that can be interpreted as a numpy or cupy data type.
        Specifies whether to convert the data array to a different dtype.
    convert_index : data-type or False (default: np.int32), optional
        Any object that can be interpreted as a numpy or cupy data type.
        Specifies whether to convert the indices to a different dtype. By
        default, it is preferred to use 32-bit indexing.
    convert_format : bool, optional (default: True)
        Specifies whether to convert any non-CSR inputs to CSR. If False,
        an exception is thrown.

    Attributes
    ----------

    indptr : CumlArray
        Compressed row index array
    indices : CumlArray
        Column indices array
    data : CumlArray
        Data array
    dtype : dtype
        Data type of data array
    shape : tuple of ints
        Shape of the array
    nnz : int
        Number of nonzeros in underlying arrays
    """

    @nvtx_annotate(
        message="common.SparseCumlArray.__init__",
        category="utils",
        domain="cuml_python",
    )
    def __init__(
        self,
        data=None,
        convert_to_dtype=False,
        convert_to_mem_type=None,
        convert_index=None,
        convert_format=True,
    ):
        if not isinstance(data, SparseCumlArrayInput):
            # Probe for a CuPy (device) sparse matrix first, then a SciPy
            # (host) one; each probe is skipped when that library is missing.
            is_sparse = False
            try:
                is_sparse = cpx_sparse.isspmatrix(data)
                from_mem_type = MemoryType.device
            except UnavailableError:
                pass
            if not is_sparse:
                try:
                    is_sparse = scipy_sparse.isspmatrix(data)
                    from_mem_type = MemoryType.host
                except UnavailableError:
                    pass
            if not is_sparse:
                raise ValueError(
                    "A sparse matrix is expected as input. "
                    "Received %s" % type(data)
                )

            if not isinstance(data, sparse_matrix_classes):
                if convert_format:
                    debug(
                        "Received sparse matrix in {} format but CSR is "
                        "expected. Data will be converted to CSR, but this "
                        "will require additional memory copies. If this "
                        "conversion is not desired, set "
                        "set_convert_format=False to raise an exception "
                        "instead.".format(type(data))
                    )
                    data = data.tocsr()  # currently only CSR is supported
                else:
                    raise ValueError(
                        "Expected CSR matrix but received {}".format(
                            type(data)
                        )
                    )

        # Falsy convert_to_dtype (False/None) means: keep the input dtype.
        if not convert_to_dtype:
            convert_to_dtype = data.dtype

        if convert_to_mem_type:
            convert_to_mem_type = MemoryType.from_str(convert_to_mem_type)
        else:
            convert_to_mem_type = GlobalSettings().memory_type

        # NOTE(review): `from_mem_type` is only bound when `data` was an
        # spmatrix above; a SparseCumlArrayInput combined with a
        # 'mirror'/unset memory type would hit a NameError here -- confirm
        # callers never pass that combination.
        if convert_to_mem_type is MemoryType.mirror or not convert_to_mem_type:
            convert_to_mem_type = from_mem_type

        self._mem_type = convert_to_mem_type

        if convert_index is None:
            convert_index = GlobalSettings().xpy.int32
        if not convert_index:
            convert_index = data.indptr.dtype

        # Note: Only 32-bit indexing is supported currently.
        # In CUDA11, Cusparse provides 64-bit function calls
        # but these are not yet used in RAFT/Cuml
        self.indptr = CumlArray.from_input(
            data.indptr,
            convert_to_dtype=convert_index,
            convert_to_mem_type=convert_to_mem_type,
        )

        self.indices = CumlArray.from_input(
            data.indices,
            convert_to_dtype=convert_index,
            convert_to_mem_type=convert_to_mem_type,
        )

        self.data = CumlArray.from_input(
            data.data,
            convert_to_dtype=convert_to_dtype,
            convert_to_mem_type=convert_to_mem_type,
        )

        self.shape = data.shape
        self.dtype = self.data.dtype
        self.nnz = data.nnz
        self.index = None

    @nvtx_annotate(
        message="common.SparseCumlArray.to_output",
        category="utils",
        domain="cuml_python",
    )
    def to_output(
        self,
        output_type="cupy",
        output_format=None,
        output_dtype=None,
        output_mem_type=None,
    ):
        """
        Convert array to output format

        Parameters
        ----------
        output_type : string
            Format to convert the array to. Acceptable formats are:

            - 'cupy' - to cupy array
            - 'scipy' - to scipy (host) array
            - 'numpy' - to scipy (host) array
            - 'array' - to cupy or scipy array depending on
              output_mem_type

        output_format : string, optional { 'coo', 'csc' }
            Optionally convert the output to the specified format.
        output_dtype : string, optional
            Optionally cast the array to a specified dtype, creating
            a copy if necessary.
        output_mem_type : {'host, 'device'}, optional
            Optionally convert array to given memory type. If `output_type`
            already indicates a specific memory type, `output_type` takes
            precedence. If the memory type is not otherwise indicated, the data
            are kept on their current device.
        """
        if output_mem_type is None:
            output_mem_type = GlobalSettings().memory_type
        else:
            output_mem_type = MemoryType.from_str(output_mem_type)
        # Treat numpy and scipy as the same; output_type overrides the
        # requested memory type where the two conflict.
        if output_type in ("numpy", "scipy"):
            if GlobalSettings().memory_type.is_host_accessible:
                output_mem_type = GlobalSettings().memory_type
            else:
                output_mem_type = MemoryType.host
        elif output_type == "cupy":
            if GlobalSettings().memory_type.is_device_accessible:
                output_mem_type = GlobalSettings().memory_type
            else:
                output_mem_type = MemoryType.device
        elif output_mem_type is MemoryType.mirror:
            output_mem_type = self.mem_type

        # Materialize the three CSR component arrays in the target memory.
        data = self.data.to_output(
            "array", output_dtype=output_dtype, output_mem_type=output_mem_type
        )
        indices = self.indices.to_output(
            "array", output_mem_type=output_mem_type
        )
        indptr = self.indptr.to_output(
            "array", output_mem_type=output_mem_type
        )

        if output_type in ("scipy", "numpy"):
            constructor = scipy_sparse.csr_matrix
        elif output_mem_type.is_device_accessible:
            constructor = cpx_sparse.csr_matrix
        else:
            constructor = scipy_sparse.csr_matrix

        ret = constructor(
            (data, indices, indptr), dtype=output_dtype, shape=self.shape
        )

        # Optional post-construction format conversion from CSR.
        if output_format is not None:
            if output_format == "coo":
                ret = ret.tocoo()
            elif output_format == "csc":
                ret = ret.tocsc()
            else:
                raise ValueError(
                    "Output format %s not supported" % output_format
                )

        return ret
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/api_decorators.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import functools
import inspect
import typing
import warnings
# TODO: Try to resolve circular import that makes this necessary:
from cuml.internals import input_utils as iu
from cuml.internals.api_context_managers import BaseReturnAnyCM
from cuml.internals.api_context_managers import BaseReturnArrayCM
from cuml.internals.api_context_managers import BaseReturnGenericCM
from cuml.internals.api_context_managers import BaseReturnSparseArrayCM
from cuml.internals.api_context_managers import InternalAPIContextBase
from cuml.internals.api_context_managers import ReturnAnyCM
from cuml.internals.api_context_managers import ReturnArrayCM
from cuml.internals.api_context_managers import ReturnGenericCM
from cuml.internals.api_context_managers import ReturnSparseArrayCM
from cuml.internals.api_context_managers import set_api_output_dtype
from cuml.internals.api_context_managers import set_api_output_type
from cuml.internals.constants import CUML_WRAPPED_FLAG
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.memory_utils import using_output_type
from cuml.internals.type_utils import _DecoratorType, wraps_typed
from cuml.internals import logger
def _wrap_once(wrapped, *args, **kwargs):
    """Prevent wrapping functions multiple times."""
    # Tag the target so later decorators can detect it is already wrapped,
    # then defer to functools.wraps for the standard metadata copy.
    wraps_decorator = functools.wraps(wrapped, *args, **kwargs)
    setattr(wrapped, CUML_WRAPPED_FLAG, True)
    return wraps_decorator
def _has_self(sig):
return "self" in sig.parameters and list(sig.parameters)[0] == "self"
def _find_arg(sig, arg_name, default_position):
params = list(sig.parameters)
# Check for default name in input args
if arg_name in sig.parameters:
return arg_name, params.index(arg_name)
# Otherwise use argument in list by position
elif arg_name is ...:
index = int(_has_self(sig)) + default_position
return params[index], index
else:
raise ValueError(f"Unable to find parameter '{arg_name}'.")
def _get_value(args, kwargs, name, index):
"""Determine value for a given set of args, kwargs, name and index."""
try:
return kwargs[name]
except KeyError:
try:
return args[index]
except IndexError:
raise IndexError(
f"Specified arg idx: {index}, and argument name: {name}, "
"were not found in args or kwargs."
)
def _make_decorator_function(
    context_manager_cls: InternalAPIContextBase,
    process_return=True,
    needs_self: bool = False,
    **defaults,
) -> typing.Callable[..., _DecoratorType]:
    """Build a configurable decorator factory around *context_manager_cls*.

    The returned callable is itself a decorator factory: calling it with
    options (``input_arg``, ``get_output_type``, ...) yields a decorator
    that wraps an API function so it runs inside the given context manager
    and (optionally) records/resolves input/output type information.
    """
    # This function generates a function to be applied as decorator to a
    # wrapped function. For example:
    #
    # a_decorator = _make_decorator_function(...)
    #
    # ...
    #
    # @a_decorator(...)  # apply decorator where appropriate
    # def fit(X, y):
    #     ...
    #
    # Note: The decorator function can be partially closed by directly
    # providing keyword arguments to this function to be used as defaults.

    def decorator_function(
        input_arg: str = ...,
        target_arg: str = ...,
        get_output_type: bool = False,
        set_output_type: bool = False,
        get_output_dtype: bool = False,
        set_output_dtype: bool = False,
        set_n_features_in: bool = False,
    ) -> _DecoratorType:
        def decorator_closure(func):
            # This function constitutes the closed decorator that will return
            # the wrapped function. It performs function introspection at
            # function definition time. The code within the wrapper function
            # is executed at function execution time.

            # Prepare arguments
            sig = inspect.signature(func, follow_wrapped=True)
            has_self = _has_self(sig)

            if needs_self and not has_self:
                raise Exception("No self found on function!")

            # Resolve the (name, index) of the input argument only when some
            # option actually needs its value at call time.
            if input_arg is not None and (
                set_output_type
                or set_output_dtype
                or set_n_features_in
                or get_output_type
            ):
                input_arg_ = _find_arg(sig, input_arg or "X", 0)
            else:
                input_arg_ = None

            # Same for the target (y) argument.
            if set_output_dtype or (get_output_dtype and not has_self):
                target_arg_ = _find_arg(sig, target_arg or "y", 1)
            else:
                target_arg_ = None

            @_wrap_once(func)
            def wrapper(*args, **kwargs):
                # Wraps the decorated function, executed at runtime.
                with context_manager_cls(func, args) as cm:
                    self_val = args[0] if has_self else None

                    if input_arg_:
                        input_val = _get_value(args, kwargs, *input_arg_)
                    else:
                        input_val = None
                    if target_arg_:
                        target_val = _get_value(args, kwargs, *target_arg_)
                    else:
                        target_val = None

                    # Record input metadata on the estimator (fit-like
                    # methods).
                    if set_output_type:
                        assert self_val is not None
                        self_val._set_output_type(input_val)
                    if set_output_dtype:
                        assert self_val is not None
                        self_val._set_target_dtype(target_val)
                    if set_n_features_in and len(input_val.shape) >= 2:
                        assert self_val is not None
                        self_val._set_n_features_in(input_val)

                    # Resolve the output type for the API context
                    # (predict/transform-like methods).
                    if get_output_type:
                        if self_val is None:
                            assert input_val is not None
                            out_type = iu.determine_array_type(input_val)
                        elif input_val is None:
                            out_type = self_val.output_type
                            if out_type == "input":
                                out_type = self_val._input_type
                        else:
                            out_type = self_val._get_output_type(input_val)

                        set_api_output_type(out_type)

                    if get_output_dtype:
                        if self_val is None:
                            assert target_val is not None
                            output_dtype = iu.determine_array_dtype(target_val)
                        else:
                            output_dtype = self_val._get_target_dtype()

                        set_api_output_dtype(output_dtype)

                    if process_return:
                        ret = func(*args, **kwargs)
                    else:
                        # Skip output conversion entirely.
                        return func(*args, **kwargs)

                # Convert the return value after the context manager exits.
                return cm.process_return(ret)

            return wrapper

        return decorator_closure

    # Partially close the factory over the provided defaults.
    return functools.partial(decorator_function, **defaults)
# Pre-built decorator factories for the common API shapes.

# Free function returning an arbitrary (non-array) value.
api_return_any = _make_decorator_function(ReturnAnyCM, process_return=False)

# Estimator method (fit-like) returning an arbitrary value; records the
# input's type and feature count on the estimator.
api_base_return_any = _make_decorator_function(
    BaseReturnAnyCM,
    needs_self=True,
    set_output_type=True,
    set_n_features_in=True,
)

# Free function whose return array is converted to the API output type.
api_return_array = _make_decorator_function(ReturnArrayCM, process_return=True)

# Estimator method (predict/transform-like) returning an array.
api_base_return_array = _make_decorator_function(
    BaseReturnArrayCM,
    needs_self=True,
    process_return=True,
    get_output_type=True,
)

# Variants for generic containers that may hold arrays.
api_return_generic = _make_decorator_function(
    ReturnGenericCM, process_return=True
)

api_base_return_generic = _make_decorator_function(
    BaseReturnGenericCM,
    needs_self=True,
    process_return=True,
    get_output_type=True,
)

# fit_transform-style: records input metadata AND converts the return array.
# TODO: add tests for this decorator
api_base_fit_transform = _make_decorator_function(
    BaseReturnArrayCM,
    needs_self=True,
    process_return=True,
    get_output_type=True,
    set_output_type=True,
    set_n_features_in=True,
)

# Sparse-array variants of the array-returning decorators.
api_return_sparse_array = _make_decorator_function(
    ReturnSparseArrayCM, process_return=True
)

api_base_return_sparse_array = _make_decorator_function(
    BaseReturnSparseArrayCM,
    needs_self=True,
    process_return=True,
    get_output_type=True,
)

# Pre-configured variants that skip all automatic input/output handling.
api_base_return_any_skipall = api_base_return_any(
    set_output_type=False, set_n_features_in=False
)

api_base_return_array_skipall = api_base_return_array(get_output_type=False)

api_base_return_generic_skipall = api_base_return_generic(
    get_output_type=False
)
@contextlib.contextmanager
def exit_internal_api():
    """Temporarily leave the internal cuML API context.

    Clears the root context manager and restores the output type that was
    active before the API was entered; both are reinstated on exit.
    """
    assert GlobalSettings().root_cm is not None

    try:
        old_root_cm = GlobalSettings().root_cm

        GlobalSettings().root_cm = None

        # Set the global output type to the previous value to pretend we never
        # entered the API
        with using_output_type(old_root_cm.prev_output_type):

            yield

    finally:
        # Re-enter the API context even if the body raised.
        GlobalSettings().root_cm = old_root_cm
def mirror_args(
    wrapped: _DecoratorType,
    assigned=("__doc__", "__annotations__"),
    updated=functools.WRAPPER_UPDATES,
) -> typing.Callable[[_DecoratorType], _DecoratorType]:
    """Return a decorator copying *wrapped*'s docstring and annotations onto
    the decorated function, while marking it as cuML-wrapped."""
    return _wrap_once(wrapped=wrapped, assigned=assigned, updated=updated)
class _deprecate_pos_args:
    """
    Decorator that issues a warning when using positional args that should be
    keyword args. Mimics sklearn's `_deprecate_positional_args` with added
    functionality.

    For any class that derives from `cuml.Base`, this decorator will be
    automatically added to `__init__`. In this scenario, its assumed that all
    arguments are keyword arguments. To override the functionality this
    decorator can be manually added, allowing positional arguments if
    necessary.

    Parameters
    ----------
    version : str
        This version will be specified in the warning message as the
        version when positional arguments will be removed
    """

    # Attribute set on wrapped functions so the decorator is not applied twice.
    FLAG_NAME: typing.ClassVar[str] = "__cuml_deprecated_pos"

    def __init__(self, version: str):
        self._version = version

    def __call__(self, func: _DecoratorType) -> _DecoratorType:
        """Wrap *func* so extra positional arguments trigger a FutureWarning."""
        sig = inspect.signature(func)
        kwonly_args = []
        all_args = []

        # Store all the positional and keyword only args
        for name, param in sig.parameters.items():
            if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
                all_args.append(name)
            elif param.kind == inspect.Parameter.KEYWORD_ONLY:
                kwonly_args.append(name)

        @wraps_typed(func)
        def inner_f(*args, **kwargs):
            # Positional arguments beyond the declared positional-or-keyword
            # ones must have been intended as keyword-only.
            extra_args = len(args) - len(all_args)
            if extra_args > 0:
                # ignore first 'self' argument for instance methods
                args_msg = [
                    "{}={}".format(name, arg)
                    for name, arg in zip(
                        kwonly_args[:extra_args], args[-extra_args:]
                    )
                ]
                warnings.warn(
                    "Pass {} as keyword args. From version {}, "
                    "passing these as positional arguments will "
                    "result in an error".format(
                        ", ".join(args_msg), self._version
                    ),
                    FutureWarning,
                    stacklevel=2,
                )

            # Convert all positional args to keyword
            kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})

            return func(**kwargs)

        # Set this flag to prevent auto adding this decorator twice
        inner_f.__dict__[_deprecate_pos_args.FLAG_NAME] = True

        return inner_f
def device_interop_preparation(init_func):
    """
    Decorator for ``__init__`` of cuML estimators implementing CPU/GPU
    interoperability: it records the complete keyword-argument set on the
    instance (for a later CPU estimator build) and forwards only the
    hyperparameters the GPU ``__init__`` actually accepts.
    """

    @functools.wraps(init_func)
    def processor(self, *args, **kwargs):
        # A parent class already prepared this instance: pass through.
        if hasattr(self, "_full_kwargs"):
            return init_func(self, *args, **kwargs)

        # Keep the complete kwarg set for a potential CPU estimator build.
        self._full_kwargs = kwargs

        # Hyperparameters the GPU __init__ actually accepts.
        accepted = set(inspect.signature(init_func).parameters)

        forwarded = {}
        for name, value in kwargs.items():
            if name in accepted:
                forwarded[name] = value
            else:
                logger.info(
                    "Unused keyword parameter: {} "
                    "during cuML estimator "
                    "initialization".format(name)
                )

        return init_func(self, *args, **forwarded)

    return processor
def enable_device_interop(gpu_func):
    """
    Decorator routing an estimator method through ``dispatch_func`` when the
    estimator implements CPU/GPU interoperability, and calling the original
    GPU implementation directly otherwise.
    """

    @functools.wraps(gpu_func)
    def dispatch(self, *args, **kwargs):
        if hasattr(self, "dispatch_func"):
            # Interop-capable estimator: honor the configured device type.
            return self.dispatch_func(
                gpu_func.__name__, gpu_func, *args, **kwargs
            )
        return gpu_func(self, *args, **kwargs)

    return dispatch
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/memory_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import operator
import re
from functools import wraps
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.device_support import GPU_ENABLED
from cuml.internals.mem_type import MemoryType
from cuml.internals.output_type import (
INTERNAL_VALID_OUTPUT_TYPES,
VALID_OUTPUT_TYPES,
)
from cuml.internals.safe_imports import (
cpu_only_import_from,
gpu_only_import_from,
UnavailableNullContext,
)
CudfSeries = gpu_only_import_from("cudf", "Series")
CudfDataFrame = gpu_only_import_from("cudf", "DataFrame")
cupy_using_allocator = gpu_only_import_from(
"cupy.cuda", "using_allocator", alt=UnavailableNullContext
)
PandasSeries = cpu_only_import_from("pandas", "Series")
PandasDataFrame = cpu_only_import_from("pandas", "DataFrame")
rmm_cupy_allocator = gpu_only_import_from(
"rmm.allocators.cupy", "rmm_cupy_allocator"
)
def set_global_memory_type(memory_type):
    """Set cuML's global memory type from any value accepted by
    ``MemoryType.from_str`` (e.g. ``'host'`` or ``'device'``)."""
    GlobalSettings().memory_type = MemoryType.from_str(memory_type)
class using_memory_type:
    """Context manager that temporarily sets cuML's global memory type and
    restores the previous value on exit."""

    def __init__(self, mem_type):
        # mem_type: any value accepted by MemoryType.from_str
        self.mem_type = mem_type
        self.prev_mem_type = None

    def __enter__(self):
        # Remember the current setting so __exit__ can restore it.
        self.prev_mem_type = GlobalSettings().memory_type
        set_global_memory_type(self.mem_type)

    def __exit__(self, *_):
        set_global_memory_type(self.prev_mem_type)
def with_cupy_rmm(func):
    """
    Decorator to call CuPy functions with RMM memory management. Use it
    to decorate any function that will call CuPy functions. This will ensure
    that those calls use RMM for memory allocation instead of the default
    CuPy pool.

    Example:

    .. code-block:: python

        @with_cupy_rmm
        def fx(...):
            a = cp.arange(10) # uses RMM for allocation
    """
    # Idempotent: a function already wrapped by this decorator is returned
    # unchanged.
    if func.__dict__.get("__cuml_rmm_wrapped", False):
        return func

    @wraps(func)
    def cupy_rmm_wrapper(*args, **kwargs):
        # Only route allocations through RMM when a GPU build is active.
        if GPU_ENABLED:
            with cupy_using_allocator(rmm_cupy_allocator):
                return func(*args, **kwargs)
        return func(*args, **kwargs)

    # Mark the function as already wrapped
    cupy_rmm_wrapper.__dict__["__cuml_rmm_wrapped"] = True

    return cupy_rmm_wrapper
def class_with_cupy_rmm(
    skip_init=False,
    skip_private=True,
    skip_dunder=True,
    ignore_pattern=None,
):
    """
    Build a class decorator that wraps a class's callables with
    :func:`with_cupy_rmm` so CuPy allocations inside them go through RMM.

    Parameters
    ----------
    skip_init : bool, default=False
        If True, do not wrap ``__init__``.
    skip_private : bool, default=True
        If True, do not wrap private (single-underscore) attributes.
    skip_dunder : bool, default=True
        If True, do not wrap dunder methods (``__init__`` is still wrapped
        unless ``skip_init`` is also set).
    ignore_pattern : list of str, optional
        Additional regex patterns; attributes whose names match any pattern
        are left unwrapped. The list is copied, never mutated.

    Returns
    -------
    callable
        A decorator that modifies the class in place and returns it.
    """
    # Copy the caller's list. The previous implementation appended directly
    # to its (mutable default) argument, so patterns accumulated in the
    # shared default list across calls and caller-supplied lists were
    # mutated as a side effect.
    regex_list = list(ignore_pattern) if ignore_pattern is not None else []

    if skip_private:
        # Match private but not dunder
        regex_list.append(r"^_(?!(_))\w+$")
    if skip_dunder:
        if not skip_init:
            # Make sure to not match __init__
            regex_list.append(r"^__(?!(init))\w+__$")
        else:
            # Match all dunder
            regex_list.append(r"^__\w+__$")
    elif skip_init:
        regex_list.append(r"^__init__$")

    final_regex = "(?:%s)" % "|".join(regex_list)

    def inner(klass):
        for attributeName, attribute in klass.__dict__.items():
            # Skip names matching any ignore pattern
            if re.match(final_regex, attributeName):
                continue

            if callable(attribute):
                # Passed the ignore patterns. Wrap the function (will do
                # nothing if already wrapped)
                setattr(klass, attributeName, with_cupy_rmm(attribute))

            # Class/Static methods work differently since they are descriptors
            # (and not callable). Instead unwrap the function, and rewrap it
            elif isinstance(attribute, classmethod):
                unwrapped = attribute.__func__
                setattr(
                    klass, attributeName, classmethod(with_cupy_rmm(unwrapped))
                )
            elif isinstance(attribute, staticmethod):
                unwrapped = attribute.__func__
                setattr(
                    klass,
                    attributeName,
                    staticmethod(with_cupy_rmm(unwrapped)),
                )

        return klass

    return inner
def rmm_cupy_ary(cupy_fn, *args, **kwargs):
    """
    Call a CuPy array-creating function with RMM memory management.

    Parameters
    ----------
    cupy_fn : cupy function,
        CuPy function to execute, for example cp.array
    *args :
        Non keyword arguments to pass to the CuPy function
    **kwargs :
        Keyword named arguments to pass to the CuPy function

    .. note:: this function should be used if the result of cupy_fn creates
    a new array. Functions to create a new CuPy array by reference to
    existing device array (through __cuda_array_interface__) can be used
    directly.

    Examples
    --------

    >>> from cuml.common import rmm_cupy_ary
    >>> import cupy as cp
    >>>
    >>> # Get a new array filled with 0, column major
    >>> a = rmm_cupy_ary(cp.zeros, 5, order='F')
    >>> a
    array([0., 0., 0., 0., 0.])
    """
    # Without a GPU there is no allocator to substitute; call through.
    if not GPU_ENABLED:
        return cupy_fn(*args, **kwargs)
    # Route this one CuPy call's allocations through RMM.
    with cupy_using_allocator(rmm_cupy_allocator):
        return cupy_fn(*args, **kwargs)
def _get_size_from_shape(shape, dtype):
"""
Calculates size based on shape and dtype, returns (None, None) if either
shape or dtype are None
"""
if shape is None or dtype is None:
return (None, None)
itemsize = GlobalSettings().xpy.dtype(dtype).itemsize
if isinstance(shape, int):
size = itemsize * shape
shape = (shape,)
elif isinstance(shape, tuple):
size = functools.reduce(operator.mul, shape)
size = size * itemsize
else:
raise ValueError("Shape must be int or tuple of ints.")
return (size, shape)
def set_global_output_type(output_type):
    """
    Method to set cuML's single GPU estimators global output type.
    It will be used by all estimators unless overridden in their initialization
    with their own output_type parameter. Can also be overridden by the context
    manager method :func:`using_output_type`.

    Parameters
    ----------
    output_type : {'input', 'cudf', 'cupy', 'numpy'} (default = 'input')
        Desired output type of results and attributes of the estimators.

        * ``'input'`` will mean that the parameters and methods will mirror the
          format of the data sent to the estimators/methods as much as
          possible. Specifically:

          +---------------------------------------+--------------------------+
          | Input type                            | Output type              |
          +=======================================+==========================+
          | cuDF DataFrame or Series              | cuDF DataFrame or Series |
          +---------------------------------------+--------------------------+
          | NumPy arrays                          | NumPy arrays             |
          +---------------------------------------+--------------------------+
          | Pandas DataFrame or Series            | NumPy arrays             |
          +---------------------------------------+--------------------------+
          | Numba device arrays                   | Numba device arrays      |
          +---------------------------------------+--------------------------+
          | CuPy arrays                           | CuPy arrays              |
          +---------------------------------------+--------------------------+
          | Other `__cuda_array_interface__` objs | CuPy arrays              |
          +---------------------------------------+--------------------------+

        * ``'cudf'`` will return cuDF Series for single dimensional results and
          DataFrames for the rest.

        * ``'cupy'`` will return CuPy arrays.

        * ``'numpy'`` will return NumPy arrays.

    Examples
    --------

    >>> import cuml
    >>> import cupy as cp
    >>> ary = [[1.0, 4.0, 4.0], [2.0, 2.0, 2.0], [5.0, 1.0, 1.0]]
    >>> ary = cp.asarray(ary)
    >>> prev_output_type = cuml.global_settings.output_type
    >>> cuml.set_global_output_type('cudf')
    >>> dbscan_float = cuml.DBSCAN(eps=1.0, min_samples=1)
    >>> dbscan_float.fit(ary)
    DBSCAN()
    >>>
    >>> # cuML output type
    >>> dbscan_float.labels_
    0    0
    1    1
    2    2
    dtype: int32
    >>> type(dbscan_float.labels_)
    <class 'cudf.core.series.Series'>
    >>> cuml.set_global_output_type(prev_output_type)

    Notes
    -----
    ``'cupy'`` and ``'numba'`` options (as well as ``'input'`` when using Numba
    and CuPy ndarrays for input) have the least overhead. cuDF add memory
    consumption and processing time needed to build the Series and DataFrames.
    ``'numpy'`` has the biggest overhead due to the need to transfer data to
    CPU memory.
    """
    # Normalize string values to lower case before validating.
    if isinstance(output_type, str):
        output_type = output_type.lower()

    # None and 'cuml' are accepted in addition to the public values;
    # 'cuml' supports internal estimators.
    accepted = (
        output_type is None
        or output_type == "cuml"
        or output_type in INTERNAL_VALID_OUTPUT_TYPES
    )
    if not accepted:
        valid_output_types_str = ", ".join(
            f"'{x}'" for x in VALID_OUTPUT_TYPES
        )
        raise ValueError(
            f"output_type must be one of {valid_output_types_str}"
            f" or None. Got: {output_type}"
        )
    GlobalSettings().output_type = output_type
class using_output_type:
    """
    Context manager method to set cuML's global output type inside a `with`
    statement. It gets reset to the prior value it had once the `with` code
    block is executed.

    Parameters
    ----------
    output_type : {'input', 'cudf', 'cupy', 'numpy'} (default = 'input')
        Desired output type of results and attributes of the estimators.

        * ``'input'`` will mean that the parameters and methods will mirror the
          format of the data sent to the estimators/methods as much as
          possible. Specifically:

          +---------------------------------------+--------------------------+
          | Input type                            | Output type              |
          +=======================================+==========================+
          | cuDF DataFrame or Series              | cuDF DataFrame or Series |
          +---------------------------------------+--------------------------+
          | NumPy arrays                          | NumPy arrays             |
          +---------------------------------------+--------------------------+
          | Pandas DataFrame or Series            | NumPy arrays             |
          +---------------------------------------+--------------------------+
          | Numba device arrays                   | Numba device arrays      |
          +---------------------------------------+--------------------------+
          | CuPy arrays                           | CuPy arrays              |
          +---------------------------------------+--------------------------+
          | Other `__cuda_array_interface__` objs | CuPy arrays              |
          +---------------------------------------+--------------------------+

        * ``'cudf'`` will return cuDF Series for single dimensional results and
          DataFrames for the rest.

        * ``'cupy'`` will return CuPy arrays.

        * ``'numpy'`` will return NumPy arrays.

    Examples
    --------

    >>> import cuml
    >>> import cupy as cp
    >>> ary = [[1.0, 4.0, 4.0], [2.0, 2.0, 2.0], [5.0, 1.0, 1.0]]
    >>> ary = cp.asarray(ary)
    >>> with cuml.using_output_type('cudf'):
    ...     dbscan_float = cuml.DBSCAN(eps=1.0, min_samples=1)
    ...     dbscan_float.fit(ary)
    ...
    ...     print("cuML output inside 'with' context")
    ...     print(dbscan_float.labels_)
    ...     print(type(dbscan_float.labels_))
    ...
    DBSCAN()
    cuML output inside 'with' context
    0    0
    1    1
    2    2
    dtype: int32
    <class 'cudf.core.series.Series'>
    >>> # use cuml again outside the context manager
    >>> dbscan_float2 = cuml.DBSCAN(eps=1.0, min_samples=1)
    >>> dbscan_float2.fit(ary)
    DBSCAN()
    >>> # cuML default output
    >>> dbscan_float2.labels_
    array([0, 1, 2], dtype=int32)
    >>> isinstance(dbscan_float2.labels_, cp.ndarray)
    True
    """

    def __init__(self, output_type):
        # Target output type to activate while the context is entered.
        self.output_type = output_type

    def __enter__(self):
        # Remember the current global value so __exit__ can restore it,
        # then switch (with validation) to the requested type.
        self.prev_output_type = GlobalSettings().output_type
        set_global_output_type(self.output_type)
        return self.prev_output_type

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore directly on the settings object: the saved value was
        # already valid, so re-running validation is unnecessary.
        GlobalSettings().output_type = self.prev_output_type
def determine_array_memtype(X):
    """
    Infer the memory type (host or device) of an array-like object.

    Returns ``X.mem_type`` when the object exposes one; otherwise
    classifies by array-interface protocol and by cuDF/pandas container
    type, in that order. Returns None when no classification applies.
    """
    # Fast path: cuML's own containers carry an explicit mem_type.
    _missing = object()
    mem_type = getattr(X, "mem_type", _missing)
    if mem_type is not _missing:
        return mem_type
    # Device buffers advertise the CUDA array interface.
    if hasattr(X, "__cuda_array_interface__"):
        return MemoryType.device
    # Host buffers (e.g. NumPy) advertise the NumPy array interface.
    if hasattr(X, "__array_interface__"):
        return MemoryType.host
    # Fall back to container types that expose neither protocol.
    if isinstance(X, (CudfDataFrame, CudfSeries)):
        return MemoryType.device
    if isinstance(X, (PandasDataFrame, PandasSeries)):
        return MemoryType.host
    return None
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.