repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_train_test_split.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.datasets import make_classification
from cuml.model_selection import train_test_split
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
# Device-array container types exercised by the array-input tests below.
test_array_input_types = ["numba", "cupy"]
# Flavors of random_state seed accepted by train_test_split.
test_seeds = ["int", "cupy", "numpy"]
@pytest.mark.parametrize("train_size", [0.2, 0.6, 0.8])
@pytest.mark.parametrize("shuffle", [True, False])
def test_split_dataframe(train_size, shuffle):
    """Splitting a cudf DataFrame/Series keeps sizes, index alignment
    and content."""
    X = cudf.DataFrame({"x": range(100)})
    y = cudf.Series(([0] * (100 // 2)) + ([1] * (100 // 2)))

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=train_size, shuffle=shuffle
    )
    # Partition sizes must match the requested proportions.
    assert len(X_train) == len(y_train) == pytest.approx(train_size * len(X))
    assert (
        len(X_test) == len(y_test) == pytest.approx((1 - train_size) * len(X))
    )
    # X and y splits must stay aligned on the same index.
    assert all(X_train.index.to_pandas() == y_train.index.to_pandas())
    assert all(X_test.index.to_pandas() == y_test.index.to_pandas())

    # Recombining both partitions must reproduce the original data.
    X_reconstructed = cudf.concat([X_train, X_test]).sort_values(by=["x"])
    # Series.append was removed in recent cudf/pandas; use cudf.concat.
    y_reconstructed = cudf.concat([y_train, y_test]).sort_values()
    assert all(X_reconstructed.reset_index(drop=True) == X)
    out = y_reconstructed.reset_index(drop=True).values_host == y.values_host
    assert all(out)
@pytest.mark.parametrize("y_type", ["cudf", "cupy"])
def test_split_dataframe_array(y_type):
    """Output container types must follow the input types of X and y."""
    X = cudf.DataFrame({"x": range(100)})
    y = cudf.Series(([0] * (100 // 2)) + ([1] * (100 // 2)))

    if y_type == "cupy":
        labels = y.values
        expected_y_type = cp.ndarray
    else:
        labels = y
        expected_y_type = cudf.Series

    X_train, X_test, y_train, y_test = train_test_split(X, labels)

    # X splits always come back as DataFrames; y splits mirror the input.
    for frame in (X_train, X_test):
        assert isinstance(frame, cudf.DataFrame)
    for part in (y_train, y_test):
        assert isinstance(part, expected_y_type)
def test_split_column():
    """Passing a column name as ``y`` extracts that column as labels."""
    data = cudf.DataFrame(
        {
            "x": range(100),
            "y": ([0] * (100 // 2)) + ([1] * (100 // 2)),
        }
    )
    train_size = 0.8

    X_train, X_test, y_train, y_test = train_test_split(
        data, "y", train_size=train_size
    )
    assert (
        len(X_train) == len(y_train) == pytest.approx(train_size * len(data))
    )
    assert (
        len(X_test)
        == len(y_test)
        == pytest.approx((1 - train_size) * len(data))
    )

    # Recombining the splits must reproduce the original frame.
    X_reconstructed = cudf.concat([X_train, X_test]).sort_values(by=["x"])
    # Series.append was removed in recent cudf/pandas; use cudf.concat.
    y_reconstructed = cudf.concat([y_train, y_test]).sort_values()
    assert all(
        data
        == X_reconstructed.assign(y=y_reconstructed).reset_index(drop=True)
    )
def test_split_size_mismatch():
    """X and y with different row counts cannot be split together."""
    features = cudf.DataFrame({"x": range(3)})
    labels = cudf.Series([0, 1])
    with pytest.raises(ValueError):
        train_test_split(features, labels)
@pytest.mark.parametrize("train_size", [1.2, 100])
def test_split_invalid_proportion(train_size):
    """A train_size larger than the data (fraction > 1 or count > rows)
    must be rejected."""
    features = cudf.DataFrame({"x": range(10)})
    labels = cudf.Series([0] * 10)
    with pytest.raises(ValueError):
        train_test_split(features, labels, train_size=train_size)
@pytest.mark.parametrize("seed_type", test_seeds)
def test_random_state(seed_type):
    """Equal random_state seeds must produce identical splits.

    Each of 10 trials draws a fresh base seed, builds the seed object in
    the requested flavor (plain int, cupy RandomState or numpy
    RandomState) and checks that two consecutive splits agree exactly.
    """
    for i in range(10):
        seed_n = np.random.randint(0, int(1e9))
        if seed_type == "int":
            seed = seed_n
        if seed_type == "cupy":
            seed = cp.random.RandomState(seed=seed_n)
        if seed_type == "numpy":
            seed = np.random.RandomState(seed=seed_n)
        X = cudf.DataFrame({"x": range(100)})
        y = cudf.Series(([0] * (100 // 2)) + ([1] * (100 // 2)))

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, random_state=seed
        )

        # RandomState objects are stateful, so rebuild them from the same
        # base seed before the second split.
        if seed_type == "cupy":
            seed = cp.random.RandomState(seed=seed_n)
        if seed_type == "numpy":
            seed = np.random.RandomState(seed=seed_n)

        X_train2, X_test2, y_train2, y_test2 = train_test_split(
            X, y, random_state=seed
        )

        assert X_train.equals(X_train2)
        assert X_test.equals(X_test2)
        assert y_train.equals(y_train2)
        assert y_test.equals(y_test2)
@pytest.mark.parametrize("type", test_array_input_types)
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
@pytest.mark.parametrize("shuffle", [True, False])
def test_array_split(type, test_size, train_size, shuffle):
    """Splitting device arrays preserves types, sizes and content."""
    X = np.zeros((100, 10)) + np.arange(100).reshape(100, 1)
    y = np.arange(100).reshape(100, 1)

    if type == "cupy":
        X = cp.asarray(X)
        y = cp.asarray(y)

    if type == "numba":
        X = cuda.to_device(X)
        y = cuda.to_device(y)

    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        train_size=train_size,
        test_size=test_size,
        shuffle=shuffle,
        random_state=0,
    )

    if type == "cupy":
        assert isinstance(X_train, cp.ndarray)
        assert isinstance(X_test, cp.ndarray)
        assert isinstance(y_train, cp.ndarray)
        assert isinstance(y_test, cp.ndarray)

    if type in ["numba", "rmm"]:
        assert cuda.devicearray.is_cuda_ndarray(X_train)
        assert cuda.devicearray.is_cuda_ndarray(X_test)
        assert cuda.devicearray.is_cuda_ndarray(y_train)
        assert cuda.devicearray.is_cuda_ndarray(y_test)

    if train_size is not None:
        assert X_train.shape[0] == X.shape[0] * train_size
        assert y_train.shape[0] == y.shape[0] * train_size

    if test_size is not None:
        assert X_test.shape[0] == X.shape[0] * test_size
        assert y_test.shape[0] == y.shape[0] * test_size

    # Normalize everything to cupy for content checks (covers numba
    # device arrays via the CUDA array interface).
    X_all = cp.asarray(X)
    y_all = cp.asarray(y)
    X_train = cp.asarray(X_train)
    X_test = cp.asarray(X_test)
    y_train = cp.asarray(y_train)
    y_test = cp.asarray(y_test)

    if not shuffle:
        # BUG FIX: this branch used to be dead code behind
        # ``if shuffle is None`` (shuffle is always True/False) and its
        # asserts used float slice bounds and whole-array truth values.
        n_train = X_train.shape[0]
        n_test = X_test.shape[0]
        assert (X_train == X_all[:n_train]).all()
        assert (y_train == y_all[:n_train]).all()
        assert (X_test == X_all[-n_test:]).all()
        assert (y_test == y_all[-n_test:]).all()

    # Reassembled splits must contain exactly the original rows.
    # ``cp.concatenate`` takes a sequence of arrays; passing two
    # positional arrays used to feed the second one as ``axis``.
    X_rec = cp.sort(cp.concatenate((X_train, X_test)), axis=0)
    y_rec = cp.sort(cp.concatenate((y_train, y_test)), axis=0)
    assert (X_rec == X_all).all()
    assert (y_rec == y_all).all()
def test_default_values():
    """With no sizes given, the split defaults to 75% train / 25% test."""
    base = np.zeros((100, 10)) + np.arange(100).reshape(100, 1)
    X = cp.asarray(base)
    y = cp.asarray(np.arange(100).reshape(100, 1))

    X_train, X_test, y_train, y_test = train_test_split(X, y)

    # cupy in -> cupy out for every partition.
    for part in (X_train, X_test, y_train, y_test):
        assert isinstance(part, cp.ndarray)

    assert X_train.shape[0] == X.shape[0] * 0.75
    assert y_train.shape[0] == y.shape[0] * 0.75
    assert X_test.shape[0] == X.shape[0] * 0.25
    assert y_test.shape[0] == y.shape[0] * 0.25
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
@pytest.mark.parametrize("shuffle", [True, False])
def test_split_df_single_argument(test_size, train_size, shuffle):
    """train_test_split also accepts a lone DataFrame (no labels)."""
    df = cudf.DataFrame({"x": range(50)})
    train_df, test_df = train_test_split(
        df,
        train_size=train_size,
        test_size=test_size,
        shuffle=shuffle,
        random_state=0,
    )
    n_rows = df.shape[0]
    if train_size is not None:
        assert train_df.shape[0] == int(n_rows * train_size)
    if test_size is not None:
        assert test_df.shape[0] == int(n_rows * test_size)
@pytest.mark.parametrize("type", test_array_input_types)
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
@pytest.mark.parametrize("shuffle", [True, False])
def test_split_array_single_argument(type, test_size, train_size, shuffle):
    """Single-input array splitting preserves type, sizes and content."""
    X = np.zeros((100, 10)) + np.arange(100).reshape(100, 1)
    if type == "cupy":
        X = cp.asarray(X)
    if type == "numba":
        X = cuda.to_device(X)

    X_train, X_test = train_test_split(
        X,
        train_size=train_size,
        test_size=test_size,
        shuffle=shuffle,
        random_state=0,
    )

    if type == "cupy":
        assert isinstance(X_train, cp.ndarray)
        assert isinstance(X_test, cp.ndarray)
    if type in ["numba", "rmm"]:
        assert cuda.devicearray.is_cuda_ndarray(X_train)
        assert cuda.devicearray.is_cuda_ndarray(X_test)

    if train_size is not None:
        assert X_train.shape[0] == int(X.shape[0] * train_size)
    if test_size is not None:
        assert X_test.shape[0] == int(X.shape[0] * test_size)

    # Normalize to cupy for content checks (covers numba device arrays).
    X_all = cp.asarray(X)
    X_train = cp.asarray(X_train)
    X_test = cp.asarray(X_test)

    if not shuffle:
        # BUG FIX: formerly dead code behind ``if shuffle is None`` with
        # float slice bounds and whole-array truth values.
        assert (X_train == X_all[: X_train.shape[0]]).all()
        assert (X_test == X_all[-X_test.shape[0] :]).all()

    # ``cp.concatenate`` takes a sequence; two positional arrays would
    # pass the second as the ``axis`` argument.
    X_rec = cp.sort(cp.concatenate((X_train, X_test)), axis=0)
    assert (X_rec == X_all).all()
@pytest.mark.parametrize("type", test_array_input_types)
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
def test_stratified_split(type, test_size, train_size):
    """Stratified splits should roughly preserve class proportions."""
    # For more tolerance and reliable estimates
    X, y = make_classification(n_samples=10000)

    if type == "cupy":
        X = cp.asarray(X)
        y = cp.asarray(y)

    if type == "numba":
        X = cuda.to_device(X)
        y = cuda.to_device(y)

    def counts(y):
        # Fraction of samples per class, rounded to two decimals.
        _, y_indices = cp.unique(y, return_inverse=True)
        class_counts = cp.bincount(y_indices)
        total = cp.sum(class_counts)
        percent_counts = []
        for count in class_counts:
            percent_counts.append(
                cp.around(float(count) / total.item(), decimals=2).item()
            )
        return percent_counts

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=train_size, test_size=test_size, stratify=y
    )

    original_counts = counts(y)
    split_counts = counts(y_train)
    # Class ratios in the training split must stay within 10% (relative)
    # of the ratios in the full dataset.
    assert cp.isclose(
        original_counts, split_counts, equal_nan=False, rtol=0.1
    ).all()
    if type == "cupy":
        assert isinstance(X_train, cp.ndarray)
        assert isinstance(X_test, cp.ndarray)

    if type in ["numba"]:
        assert cuda.devicearray.is_cuda_ndarray(X_train)
        assert cuda.devicearray.is_cuda_ndarray(X_test)
@pytest.mark.parametrize("seed_type", test_seeds)
def test_stratified_random_seed(seed_type):
    """Stratified splits must be reproducible for equal seeds, and must
    actually shuffle the rows."""
    for i in range(10):
        seed_n = np.random.randint(0, int(1e9))
        if seed_type == "int":
            seed = seed_n
        if seed_type == "cupy":
            seed = cp.random.RandomState(seed=seed_n)
        if seed_type == "numpy":
            seed = np.random.RandomState(seed=seed_n)
        X = cudf.DataFrame({"x": range(100)})
        y = cudf.Series(([0] * (100 // 2)) + ([1] * (100 // 2)))

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, random_state=seed, stratify=y
        )

        # RandomState objects are stateful; rebuild them from the same
        # base seed before the second split.
        if seed_type == "cupy":
            seed = cp.random.RandomState(seed=seed_n)
        if seed_type == "numpy":
            seed = np.random.RandomState(seed=seed_n)

        X_train2, X_test2, y_train2, y_test2 = train_test_split(
            X, y, random_state=seed, stratify=y
        )
        assert X_train.equals(X_train2)
        assert X_test.equals(X_test2)
        assert y_train.equals(y_train2)
        assert y_test.equals(y_test2)

        # Ensure that data is shuffled
        assert not (X.head().index.values == X_train.head().index.values).all()

        def monotonic_inc(x):
            # True when consecutive values all increase by exactly 1,
            # i.e. the rows are still in their original order.
            dx = cp.diff(x.values, axis=0)
            return cp.all(dx == 1)

        assert not monotonic_inc(X_train)
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
def test_stratify_retain_index(test_size, train_size):
    """Stratified splitting must retain each row's original index."""
    X = cudf.DataFrame({"x": range(10)})
    y = cudf.Series(([0] * (10 // 2)) + ([1] * (10 // 2)))

    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        train_size=train_size,
        test_size=test_size,
        shuffle=True,
        stratify=y,
        random_state=15,
    )
    # Each row's value equals its original position, so value == index
    # proves the original index survived the shuffle.
    assert (X_train["x"].to_numpy() == X_train.index.to_numpy()).all()
    assert (X_test["x"].to_numpy() == X_test.index.to_numpy()).all()

    if train_size is not None:
        assert X_train.shape[0] == (int)(X.shape[0] * train_size)
    elif test_size is not None:
        assert X_test.shape[0] == (int)(X.shape[0] * test_size)
def test_stratified_binary_classification():
    """Stratification requires at least two samples of every label, and
    must preserve the per-class sample counts across the splits."""
    X = cp.array(
        [
            [0.37487513, -2.3031888, 1.662633, 0.7671007],
            [-0.49796826, -1.0621182, -0.32518214, -0.20583323],
            [-1.0104885, -2.4997945, 2.8952584, 1.4712684],
            [2.008748, -2.4520662, 0.5557737, 0.07749569],
            [0.97350526, -0.3403474, -0.58081895, -0.23199573],
        ]
    )

    # Needs to fail when we have just 1 occurrence of a label
    y = cp.array([0, 0, 0, 0, 1])
    with pytest.raises(ValueError):
        train_test_split(X, y, train_size=0.75, stratify=y, shuffle=True)

    y = cp.array([0, 0, 0, 1, 1])
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.75, stratify=y, random_state=15
    )

    _, y_counts = cp.unique(y, return_counts=True)
    _, train_counts = cp.unique(y_train, return_counts=True)
    _, test_counts = cp.unique(y_test, return_counts=True)

    # Ensure we have preserve the number of labels
    cp.testing.assert_array_equal(train_counts + test_counts, y_counts)
@pytest.mark.parametrize("test_size", [0.2, 0.4, None])
@pytest.mark.parametrize("train_size", [0.6, 0.8, None])
def test_stratify_any_input(test_size, train_size):
    """Any column/Series (not just y) can be the stratification key."""
    X = cudf.DataFrame({"x": range(10)})
    # Extra column used purely as the stratification key.
    X["test_col"] = cudf.Series([10, 0, 0, 10, 10, 10, 0, 0, 10, 10])
    y = cudf.Series(([0] * (10 // 2)) + ([1] * (10 // 2)))

    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        train_size=train_size,
        test_size=test_size,
        shuffle=True,
        stratify=X["test_col"],
        random_state=15,
    )
    # Row value == index proves the original index is retained.
    assert (X_train["x"].to_numpy() == X_train.index.to_numpy()).all()
    assert (X_test["x"].to_numpy() == X_test.index.to_numpy()).all()

    if train_size is not None:
        assert X_train.shape[0] == (int)(X.shape[0] * train_size)
    elif test_size is not None:
        assert X_test.shape[0] == (int)(X.shape[0] * test_size)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_linear_svm.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals.logger as logger
import cuml
import cuml.svm as cu
import sklearn.svm as sk
from cuml.testing.utils import unit_param, quality_param, stress_param
from queue import Empty
import cuml.model_selection as dsel
import cuml.datasets as data
import pytest
from cuml.internals.safe_imports import cpu_only_import
import gc
import multiprocessing as mp
import time
import math
from cuml.internals.safe_imports import gpu_only_import
from cuml.common import input_to_cuml_array
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Base seed for all dataset generation in this module.
SEED = 42
# Allowed slack when comparing cuml accuracy against sklearn's.
ERROR_TOLERANCE_REL = 0.1
ERROR_TOLERANCE_ABS = 0.01
# sklearn gets this multiple of cuml's runtime before being killed.
SKLEARN_TIMEOUT_FACTOR = 10
def good_enough(myscore: float, refscore: float, training_size: int):
    """Assert that the cuml score is close enough to the reference score.

    The allowed error grows as the training set shrinks: for <10 samples
    it approaches ~50%, which is total randomness — acceptable, since we
    don't expect a model trained on that little data to be meaningful.
    """
    my_error = 1.0 - myscore
    ref_error = 1.0 - refscore
    # Empirical discount factor: large for tiny training sets, small for
    # big ones.
    discount = (10000 + training_size) / (100 + 5 * training_size)
    thresh_rel = ref_error * (1 + ERROR_TOLERANCE_REL * discount)
    thresh_abs = ref_error + ERROR_TOLERANCE_ABS * discount
    # Passing either the relative or the absolute threshold is enough.
    assert my_error <= thresh_rel or my_error <= thresh_abs, (
        f"The model is surely not good enough "
        f"(cuml error = {my_error} > "
        f"min(abs threshold = {thresh_abs}; rel threshold = {thresh_rel}))"
    )
def with_timeout(timeout, target, args=(), kwargs=None):
    """Run ``target(*args, **kwargs)``, aborting after ``timeout`` seconds.

    The target runs in a forked subprocess so it can be killed if it
    takes too long (raising ``TimeoutError``). If the "fork" start method
    is unavailable, the target runs in-process and cannot be interrupted.
    An exception inside the subprocess surfaces as ``RuntimeError``.
    """
    # Avoid the mutable-default-argument pitfall; build a fresh dict per
    # call instead of sharing one ``{}`` across all calls.
    if kwargs is None:
        kwargs = {}
    try:
        ctx = mp.get_context("fork")
    except ValueError:
        logger.warn(
            '"fork" multiprocessing start method is not available. '
            "The sklearn model will run in the same process and "
            "cannot be killed if it runs too long."
        )
        return target(*args, **kwargs)

    q = ctx.Queue()

    def target_res():
        # Report (success, result) through the queue; never let an
        # exception escape the child silently.
        try:
            q.put((True, target(*args, **kwargs)))
        except BaseException as e:  # noqa E722
            print("Test subprocess failed with an exception: ", e)
            q.put((False, None))

    p = ctx.Process(target=target_res)
    p.start()
    try:
        success, val = q.get(True, timeout)
        if success:
            return val
        else:
            raise RuntimeError("Got an exception in the subprocess.")
    except Empty:
        p.terminate()
        raise TimeoutError()
def make_regression_dataset(datatype, nrows, ncols):
    """Generate a regression problem and split off ``nrows`` train rows.

    Returns (X_train, X_test, y_train, y_test); the test partition holds
    the 1000 extra generated samples.
    """
    # Roughly 20% informative features, clamped to at least min(ncols, 5).
    n_informative = max(min(ncols, 5), int(math.ceil(ncols / 5)))
    X, y = data.make_regression(
        dtype=datatype,
        n_samples=nrows + 1000,
        n_features=ncols,
        random_state=SEED,
        n_informative=n_informative,
    )
    return dsel.train_test_split(X, y, random_state=SEED, train_size=nrows)
def make_classification_dataset(datatype, nrows, ncols, nclasses):
    """Generate a classification problem and split off ``nrows`` train rows.

    Derives a feasible number of informative/redundant features and
    clusters per class from the requested shape. Skips the calling test
    when the combination is infeasible or the training split does not
    contain every class.
    """
    n_real_features = min(ncols, int(max(nclasses * 2, math.ceil(ncols / 10))))
    n_clusters_per_class = min(2, max(1, int(2**n_real_features / nclasses)))
    n_redundant = min(ncols - n_real_features, max(2, math.ceil(ncols / 20)))
    try:
        X, y = data.make_classification(
            dtype=datatype,
            n_samples=nrows + 1000,
            n_features=ncols,
            random_state=SEED,
            class_sep=1.0,
            n_informative=n_real_features,
            n_clusters_per_class=n_clusters_per_class,
            n_redundant=n_redundant,
            n_classes=nclasses,
        )
        r = dsel.train_test_split(X, y, random_state=SEED, train_size=nrows)
        # r[2] is y_train; training data must contain every class.
        if len(cp.unique(r[2])) < nclasses:
            raise ValueError("Training data does not have all classes.")
        return r
    except ValueError:
        pytest.skip(
            "Skipping the test for invalid combination of ncols/nclasses"
        )
def run_regression(datatype, loss, eps, dims):
    """Train cuml and sklearn LinearSVR on the same data and compare
    their scores with ``good_enough``.

    sklearn runs in a killable subprocess with a time budget derived
    from the cuml runtime; on timeout the test is skipped, not failed.
    """
    nrows, ncols = dims
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols
    )
    # solving in primal is not supported by sklearn for this loss type.
    skdual = loss == "epsilon_insensitive"

    # limit the max iterations for sklearn to reduce the max test time.
    # Wrap in int() for consistency with run_classification — sklearn
    # validates max_iter as an integer.
    cuit = 10000
    skit = int(max(10, min(cuit, cuit * 1000 / nrows)))

    t = time.perf_counter()
    cum = cu.LinearSVR(loss=loss, epsilon=eps, max_iter=cuit)
    cum.fit(X_train, y_train)
    cus = cum.score(X_test, y_test)
    # Give sklearn a generous multiple of cuml's runtime (min 5 seconds).
    t = max(5, (time.perf_counter() - t) * SKLEARN_TIMEOUT_FACTOR)

    # cleanup cuml objects so that we can more easily fork the process
    # and test sklearn
    del cum
    X_train = X_train.get()
    X_test = X_test.get()
    y_train = y_train.get()
    y_test = y_test.get()
    gc.collect()

    try:

        def run_sklearn():
            skm = sk.LinearSVR(
                loss=loss, epsilon=eps, max_iter=skit, dual=skdual
            )
            skm.fit(X_train, y_train)
            return skm.score(X_test, y_test)

        sks = with_timeout(timeout=t, target=run_sklearn)
        good_enough(cus, sks, nrows)
    except TimeoutError:
        pytest.skip(f"sklearn did not finish within {t} seconds.")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "loss", ["epsilon_insensitive", "squared_epsilon_insensitive"]
)
@pytest.mark.parametrize(
    "dims",
    [
        unit_param((3, 1)),
        unit_param((100, 1)),
        unit_param((1000, 10)),
        unit_param((100, 100)),
        unit_param((100, 300)),
        quality_param((10000, 10)),
        quality_param((10000, 50)),
        stress_param((100000, 1000)),
    ],
)
def test_regression_basic(datatype, loss, dims):
    """LinearSVR vs sklearn with epsilon=0 over a grid of data shapes."""
    run_regression(datatype, loss, 0, dims)
@pytest.mark.parametrize(
    "loss", ["epsilon_insensitive", "squared_epsilon_insensitive"]
)
@pytest.mark.parametrize("epsilon", [0, 0.001, 0.1])
@pytest.mark.parametrize(
    "dims",
    [
        quality_param((10000, 10)),
        quality_param((10000, 50)),
        quality_param((10000, 500)),
    ],
)
def test_regression_eps(loss, epsilon, dims):
    """LinearSVR vs sklearn across epsilon values (quality runs only)."""
    run_regression(np.float32, loss, epsilon, dims)
def run_classification(datatype, penalty, loss, dims, nclasses, class_weight):
    """Train cuml and sklearn LinearSVC on identical data and compare.

    Checks both the accuracy (via ``good_enough``) and the shape of the
    decision function. sklearn runs in a killable subprocess with a time
    budget derived from the cuml runtime; on timeout the test is
    skipped, not failed.
    """
    t = time.perf_counter()
    nrows, ncols = dims
    X_train, X_test, y_train, y_test = make_classification_dataset(
        datatype, nrows, ncols, nclasses
    )
    logger.debug(f"Data generation time: {time.perf_counter() - t} s.")
    # solving in primal is not supported by sklearn for this loss type.
    skdual = loss == "hinge" and penalty == "l2"
    if loss == "hinge" and penalty == "l1":
        pytest.skip(
            "sklearn does not support this combination of loss and penalty"
        )

    # limit the max iterations for sklearn to reduce the max test time
    cuit = 10000
    skit = int(max(10, min(cuit, cuit * 1000 / nrows)))

    t = time.perf_counter()
    handle = cuml.Handle(n_streams=0)
    cum = cu.LinearSVC(
        handle=handle,
        loss=loss,
        penalty=penalty,
        max_iter=cuit,
        class_weight=class_weight,
    )
    cum.fit(X_train, y_train)
    cus = cum.score(X_test, y_test)
    cud = cum.decision_function(X_test)
    handle.sync()
    t = time.perf_counter() - t
    logger.debug(f"Cuml time: {t} s.")
    # Give sklearn a generous multiple of cuml's runtime (min 5 seconds).
    t = max(5, t * SKLEARN_TIMEOUT_FACTOR)

    # cleanup cuml objects so that we can more easily fork the process
    # and test sklearn
    del cum
    X_train = X_train.get()
    X_test = X_test.get()
    y_train = y_train.get()
    y_test = y_test.get()
    cud = cud.get()
    gc.collect()

    try:

        def run_sklearn():
            skm = sk.LinearSVC(
                loss=loss,
                penalty=penalty,
                max_iter=skit,
                dual=skdual,
                class_weight=class_weight,
            )
            skm.fit(X_train, y_train)
            return skm.score(X_test, y_test), skm.decision_function(X_test)

        sks, skd = with_timeout(timeout=t, target=run_sklearn)
        good_enough(cus, sks, nrows)
        # always confirm correct shape of decision function
        assert cud.shape == skd.shape, (
            f"The decision_function returned different shape "
            f"cud.shape = {cud.shape}; skd.shape = {skd.shape}))"
        )
    except TimeoutError:
        pytest.skip(f"sklearn did not finish within {t} seconds.")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "dims",
    [
        unit_param((3, 1)),
        unit_param((1000, 10)),
    ],
)
@pytest.mark.parametrize("nclasses", [2, 7])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_decision_function(datatype, dims, nclasses, fit_intercept):
    """cuml's decision_function must reproduce sklearn's values when both
    models share the same coefficients."""
    # The decision function is not stable to compare given random
    # input data and models that are similar but not equal.
    # This test will only check the cuml decision function
    # implementation based on an imported model from sklearn.
    nrows, ncols = dims
    X_train, X_test, y_train, y_test = make_classification_dataset(
        datatype, nrows, ncols, nclasses
    )

    skm = sk.LinearSVC(
        max_iter=10,
        dual=False,
        fit_intercept=fit_intercept,
    )
    skm.fit(X_train.get(), y_train.get())
    skd = skm.decision_function(X_test.get())

    handle = cuml.Handle(n_streams=0)
    cum = cu.LinearSVC(
        handle=handle,
        max_iter=10,
        fit_intercept=fit_intercept,
    )
    cum.fit(X_train, y_train)
    handle.sync()

    # override model attributes with the sklearn-trained coefficients so
    # both models evaluate the exact same linear function
    sk_coef_m, _, _, _ = input_to_cuml_array(
        skm.coef_, convert_to_dtype=datatype, order="F"
    )
    cum.model_.coef_ = sk_coef_m
    if fit_intercept:
        sk_intercept_m, _, _, _ = input_to_cuml_array(
            skm.intercept_, convert_to_dtype=datatype, order="F"
        )
        cum.model_.intercept_ = sk_intercept_m

    cud = cum.decision_function(X_test)
    assert np.allclose(
        cud.get(), skd, atol=1e-4
    ), "The decision_function returned different values"

    # cleanup cuml objects so that we can more easily fork the process
    # and test sklearn
    del cum
    X_train = X_train.get()
    X_test = X_test.get()
    y_train = y_train.get()
    y_test = y_test.get()
    cud = cud.get()
    gc.collect()
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
@pytest.mark.parametrize("loss", ["hinge", "squared_hinge"])
@pytest.mark.parametrize(
    "dims",
    [
        unit_param((3, 1)),
        unit_param((100, 1)),
        unit_param((1000, 10)),
        unit_param((100, 100)),
        unit_param((100, 300)),
        quality_param((10000, 10)),
        quality_param((10000, 50)),
        stress_param((100000, 1000)),
    ],
)
def test_classification_1(datatype, penalty, loss, dims):
    """Binary LinearSVC vs sklearn across penalties and losses."""
    run_classification(datatype, penalty, loss, dims, 2, None)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "dims",
    [
        unit_param((3, 1)),
        unit_param((100, 1)),
        unit_param((1000, 10)),
        unit_param((100, 100)),
        unit_param((100, 300)),
        quality_param((10000, 10)),
        quality_param((10000, 50)),
        stress_param((100000, 1000)),
    ],
)
@pytest.mark.parametrize("nclasses", [2, 3, 5, 8])
def test_classification_2(datatype, dims, nclasses):
    """Multiclass LinearSVC vs sklearn with balanced class weights."""
    run_classification(datatype, "l2", "hinge", dims, nclasses, "balanced")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "dims",
    [
        unit_param((3, 1)),
        unit_param((100, 1)),
        unit_param((1000, 10)),
        unit_param((100, 100)),
        unit_param((100, 300)),
        quality_param((10000, 10)),
        quality_param((10000, 50)),
        stress_param((100000, 1000)),
    ],
)
@pytest.mark.parametrize("class_weight", [{0: 0.5, 1: 1.5}])
def test_classification_3(datatype, dims, class_weight):
    """Binary LinearSVC vs sklearn with an explicit class-weight dict."""
    run_classification(datatype, "l2", "hinge", dims, 2, class_weight)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_text_feature_extraction.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import_from
from sklearn.feature_extraction.text import TfidfVectorizer as SkTfidfVect
from sklearn.feature_extraction.text import HashingVectorizer as SkHashVect
from sklearn.feature_extraction.text import CountVectorizer as SkCountVect
import pytest
from cuml.feature_extraction.text import CountVectorizer
from cuml.feature_extraction.text import TfidfVectorizer
from cuml.feature_extraction.text import HashingVectorizer
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
Series = gpu_only_import_from("cudf", "Series")
assert_array_equal = cpu_only_import_from(
"numpy.testing", "assert_array_equal"
)
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
def test_count_vectorizer():
    """cuml CountVectorizer must match sklearn on a tiny corpus."""
    corpus = [
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]
    got = CountVectorizer().fit_transform(Series(corpus))
    expected = SkCountVect().fit_transform(corpus)
    cp.testing.assert_array_equal(got.todense(), expected.toarray())
# Toy corpora used throughout the vectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
# Empty documents are interleaved to exercise the empty-doc edge case.
EMPTY_DOCS = ("",)
DOCS = JUNK_FOOD_DOCS + EMPTY_DOCS + NOTJUNK_FOOD_DOCS + EMPTY_DOCS
# GPU-resident copy of the combined corpus.
DOCS_GPU = Series(DOCS)
# n-gram ranges exercised by the parametrized tests, with readable ids.
NGRAM_RANGES = [(1, 1), (1, 2), (2, 3)]
NGRAM_IDS = [f"ngram_range={str(r)}" for r in NGRAM_RANGES]
@pytest.mark.skip(
    reason="scikit-learn replaced get_feature_names with "
    "get_feature_names_out"
    "https://github.com/rapidsai/cuml/issues/5159"
)
@pytest.mark.parametrize("ngram_range", NGRAM_RANGES, ids=NGRAM_IDS)
def test_word_analyzer(ngram_range):
    """Feature names for word n-grams must match sklearn's."""
    v = CountVectorizer(ngram_range=ngram_range).fit(DOCS_GPU)
    ref = SkCountVect(ngram_range=ngram_range).fit(DOCS)
    assert (
        ref.get_feature_names() == v.get_feature_names().to_arrow().to_pylist()
    )
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary must restrict features like sklearn's."""
    vocab = {"pizza": 0, "beer": 1}
    vocab_gpu = Series(vocab.keys())

    expected = SkCountVect(vocabulary=vocab).fit_transform(DOCS)
    got = CountVectorizer(vocabulary=vocab_gpu).fit_transform(DOCS_GPU)
    cp.testing.assert_array_equal(got.todense(), expected.toarray())
def test_countvectorizer_stop_words():
    """The built-in English stop-word filtering must match sklearn's."""
    expected = SkCountVect(stop_words="english").fit_transform(DOCS)
    got = CountVectorizer(stop_words="english").fit_transform(DOCS_GPU)
    cp.testing.assert_array_equal(got.todense(), expected.toarray())
def test_countvectorizer_empty_vocabulary():
    """Fitting only on stop words leaves an empty vocabulary, which must
    raise a ValueError."""
    vec = CountVectorizer(max_df=1.0, stop_words="english")
    stopword_docs = Series(["to be or not to be", "and me too", "and so do you"])
    with pytest.raises(ValueError):
        vec.fit(stopword_docs)
def test_countvectorizer_stop_words_ngrams():
    """Stop words are removed before n-grams are formed, so only the
    'andy andy' bigram survives."""
    docs = Series(["and me too andy andy too"])
    vectorizer = CountVectorizer(ngram_range=(2, 2), stop_words="english")
    vectorizer.fit(docs)
    vocab = vectorizer.get_feature_names().to_arrow().to_pylist()
    assert vocab == ["andy andy"]
def test_countvectorizer_max_features():
    """max_features keeps only the most frequent terms; everything else
    (and terms above max_df) lands in stop_words_."""
    expected_vocabulary = {"burger", "beer", "salad", "pizza"}
    expected_stop_words = {
        "celeri",
        "tomato",
        "copyright",
        "coke",
        "sparkling",
        "water",
        "the",
    }

    # test bounded number of extracted features
    vec = CountVectorizer(max_df=0.6, max_features=4)
    vec.fit(DOCS_GPU)
    assert (
        set(vec.get_feature_names().to_arrow().to_pylist())
        == expected_vocabulary
    )
    assert set(vec.stop_words_.to_arrow().to_pylist()) == expected_stop_words
def test_countvectorizer_max_features_counts():
    """The top feature and its count are stable across max_features
    settings (1, 3 and unlimited)."""
    JUNK_FOOD_DOCS_GPU = Series(JUNK_FOOD_DOCS)

    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)

    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS_GPU).sum(axis=0)

    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()

    # The most common feature is "the", with frequency 7.
    assert 7 == counts_1.max()
    assert 7 == counts_3.max()
    assert 7 == counts_None.max()

    # The most common feature should be the same
    def as_index(x):
        # cp.argmax yields a 0-d device array; convert to a host int
        # before indexing the feature-name column.
        return x.astype(cp.int32).item()

    assert "the" == features_1[as_index(cp.argmax(counts_1))]
    assert "the" == features_3[as_index(cp.argmax(counts_3))]
    assert "the" == features_None[as_index(cp.argmax(counts_None))]
def test_countvectorizer_max_df():
    """Terms with document frequency above max_df move from the
    vocabulary into stop_words_; both float (fraction) and int (absolute
    count) forms of max_df are honored on refit."""
    test_data = Series(["abc", "dea", "eat"])
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.to_arrow().to_pylist()
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 6
    assert len(vect.stop_words_) == 0

    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.to_arrow().to_pylist()  # {ae} ignored
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_.to_arrow().to_pylist()
    assert len(vect.stop_words_) == 2

    # An integer max_df is an absolute document count.
    vect.max_df = 1
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.to_arrow().to_pylist()  # {ae} ignored
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_.to_arrow().to_pylist()
    assert len(vect.stop_words_) == 2
def test_vectorizer_min_df():
    """Terms with document frequency below min_df move from the
    vocabulary into stop_words_; both int (absolute count) and float
    (fraction) forms of min_df are honored on refit."""
    test_data = Series(["abc", "dea", "eat"])
    vect = CountVectorizer(analyzer="char", min_df=1)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.to_arrow().to_pylist()
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 6
    assert len(vect.stop_words_) == 0

    vect.min_df = 2
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_.to_arrow().to_pylist()  # {bcdt} ignored
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 2  # {ae} remain
    assert "c" in vect.stop_words_.to_arrow().to_pylist()
    assert len(vect.stop_words_) == 4

    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    # {bcdet} ignored
    assert "c" not in vect.vocabulary_.to_arrow().to_pylist()
    assert len(vect.vocabulary_.to_arrow().to_pylist()) == 1  # {a} remains
    assert "c" in vect.stop_words_.to_arrow().to_pylist()
    assert len(vect.stop_words_) == 5
def test_count_binary_occurrences():
    """binary=True records presence (0/1) instead of counts, and the
    output dtype is configurable."""
    # by default multiple occurrences are counted as longs
    test_data = Series(["aaabc", "abbde"])
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    X = cp.asnumpy(vect.fit_transform(test_data).todense())
    assert_array_equal(
        ["a", "b", "c", "d", "e"],
        vect.get_feature_names().to_arrow().to_pylist(),
    )
    assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)

    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
    X = cp.asnumpy(vect.fit_transform(test_data).todense())
    assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X)

    # check the ability to change the dtype
    vect = CountVectorizer(
        analyzer="char", max_df=1.0, binary=True, dtype=cp.float32
    )
    X = vect.fit_transform(test_data)
    assert X.dtype == cp.float32
def test_vectorizer_inverse_transform():
    """inverse_transform recovers the same per-document terms as sklearn."""
    vectorizer = CountVectorizer()
    transformed_data = vectorizer.fit_transform(DOCS_GPU)
    inversed_data = vectorizer.inverse_transform(transformed_data)

    sk_vectorizer = SkCountVect()
    sk_transformed_data = sk_vectorizer.fit_transform(DOCS)
    sk_inversed_data = sk_vectorizer.inverse_transform(sk_transformed_data)

    for doc, sk_doc in zip(inversed_data, sk_inversed_data):
        # sort both sides: term ordering within a document is not compared
        doc = np.sort(doc.to_arrow().to_pylist())
        sk_doc = np.sort(sk_doc)
        if len(doc) + len(sk_doc) == 0:
            # skip documents that are empty on both sides
            continue
        assert_array_equal(doc, sk_doc)
@pytest.mark.skip(
    reason="scikit-learn replaced get_feature_names with "
    "get_feature_names_out"
    "https://github.com/rapidsai/cuml/issues/5159"
)
@pytest.mark.parametrize("ngram_range", NGRAM_RANGES, ids=NGRAM_IDS)
def test_space_ngrams(ngram_range):
    """Word n-grams over a space/punctuation-separated doc match sklearn."""
    data = ["abc def. 123 456 789"]
    data_gpu = Series(data)
    vec = CountVectorizer(ngram_range=ngram_range).fit(data_gpu)
    ref = SkCountVect(ngram_range=ngram_range).fit(data)
    assert (
        ref.get_feature_names()
    ) == vec.get_feature_names().to_arrow().to_pylist()
def test_empty_doc_after_limit_features():
    """Docs left with no terms after min_df pruning still match sklearn."""
    data = ["abc abc def", "def abc", "ghi"]
    data_gpu = Series(data)
    count = CountVectorizer(min_df=2).fit_transform(data_gpu)
    ref = SkCountVect(min_df=2).fit_transform(data)
    cp.testing.assert_array_equal(count.todense(), ref.toarray())
def test_countvectorizer_separate_fit_transform():
    """Separate fit() then transform() matches sklearn's two-step result."""
    res = CountVectorizer().fit(DOCS_GPU).transform(DOCS_GPU)
    ref = SkCountVect().fit(DOCS).transform(DOCS)
    cp.testing.assert_array_equal(res.todense(), ref.toarray())
def test_non_ascii():
    """Non-ASCII (Greek) tokens are lowercased and counted like sklearn."""
    non_ascii = ("This is ascii,", "but not this Αγγλικά.")
    non_ascii_gpu = Series(non_ascii)
    cv = CountVectorizer()
    res = cv.fit_transform(non_ascii_gpu)
    ref = SkCountVect().fit_transform(non_ascii)
    # lowercased Greek token must appear in the learned vocabulary
    assert "αγγλικά" in set(cv.get_feature_names().to_arrow().to_pylist())
    cp.testing.assert_array_equal(res.todense(), ref.toarray())
def test_sngle_len():
    """Documents made of single-character tokens match sklearn's output."""
    # NOTE(review): name has a typo ("sngle" -> "single"); kept as-is so the
    # collected test id does not change.
    single_token_ser = ["S I N G L E T 0 K E N Example", "1 2 3 4 5 eg"]
    single_token_gpu = Series(single_token_ser)
    cv = CountVectorizer()
    res = cv.fit_transform(single_token_gpu)
    ref = SkCountVect().fit_transform(single_token_ser)
    cp.testing.assert_array_equal(res.todense(), ref.toarray())
def test_only_delimiters():
    """A document containing only delimiters is vectorized like sklearn."""
    data = ["abc def. 123", " ", "456 789"]
    data_gpu = Series(data)
    res = CountVectorizer().fit_transform(data_gpu)
    ref = SkCountVect().fit_transform(data)
    cp.testing.assert_array_equal(res.todense(), ref.toarray())
@pytest.mark.skip(
    reason="scikit-learn replaced get_feature_names with "
    "get_feature_names_out"
    "https://github.com/rapidsai/cuml/issues/5159"
)
@pytest.mark.parametrize("analyzer", ["char", "char_wb"])
@pytest.mark.parametrize("ngram_range", NGRAM_RANGES, ids=NGRAM_IDS)
def test_character_ngrams(analyzer, ngram_range):
    """Character n-gram features match sklearn for char/char_wb analyzers."""
    # NOTE(review): '"" "edf gh"' is implicit string concatenation yielding
    # "edf gh" -- possibly a missing comma (an intended empty document).
    # Confirm intent before changing, as it would alter the fixture.
    data = ["ab c", "" "edf gh"]
    res = CountVectorizer(analyzer=analyzer, ngram_range=ngram_range)
    res.fit(Series(data))
    ref = SkCountVect(analyzer=analyzer, ngram_range=ngram_range).fit(data)
    assert (
        ref.get_feature_names()
    ) == res.get_feature_names().to_arrow().to_pylist()
@pytest.mark.parametrize(
    "query",
    [
        Series(["science aa", "", "a aa aaa"]),
        Series(["science aa", ""]),
        Series(["science"]),
    ],
)
def test_transform_unsigned_categories(query):
    """transform() handles queries against a large (1000-term) vocabulary.

    The fitted corpus has far more than 127 distinct tokens; per the test
    name this presumably exercises unsigned category codes -- confirm
    against the implementation.
    """
    token = "a"
    thousand_tokens = list()
    # tokens are "a", "aa", "aaa", ... of increasing length
    for i in range(1000):
        thousand_tokens.append(token)
        token += "a"
    thousand_tokens[128] = "science"
    vec = CountVectorizer().fit(Series(thousand_tokens))
    res = vec.transform(query)
    # one output row per query document
    assert res.shape[0] == len(query)
# ----------------------------------------------------------------
# TfidfVectorizer tests are already covered by CountVectorizer and
# TfidfTransformer so we only do the bare minimum tests here
# ----------------------------------------------------------------
def test_tfidf_vectorizer_setters():
    """Setting public TF-IDF params propagates to the inner _tfidf object."""
    tv = TfidfVectorizer(
        norm="l2", use_idf=False, smooth_idf=False, sublinear_tf=False
    )
    # flip each parameter and verify it is forwarded immediately
    for attr, new_value in (
        ("norm", "l1"),
        ("use_idf", True),
        ("smooth_idf", True),
        ("sublinear_tf", True),
    ):
        setattr(tv, attr, new_value)
        assert getattr(tv._tfidf, attr) == new_value
def test_tfidf_vectorizer_idf_setter():
    """A vectorizer rebuilt from another's vocabulary_ and idf_ transforms
    identically to the original."""
    orig = TfidfVectorizer(use_idf=True)
    orig.fit(DOCS_GPU)
    copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)
    copy.idf_ = orig.idf_[0]
    cp.testing.assert_array_almost_equal(
        copy.transform(DOCS_GPU).todense(), orig.transform(DOCS_GPU).todense()
    )
@pytest.mark.parametrize("norm", ["l1", "l2", None])
@pytest.mark.parametrize("use_idf", [True, False])
@pytest.mark.parametrize("smooth_idf", [True, False])
@pytest.mark.parametrize("sublinear_tf", [True, False])
def test_tfidf_vectorizer(norm, use_idf, smooth_idf, sublinear_tf):
    """TF-IDF output matches sklearn across all parameter combinations."""
    tfidf_mat = TfidfVectorizer(
        norm=norm,
        use_idf=use_idf,
        smooth_idf=smooth_idf,
        sublinear_tf=sublinear_tf,
    ).fit_transform(DOCS_GPU)
    ref = SkTfidfVect(
        norm=norm,
        use_idf=use_idf,
        smooth_idf=smooth_idf,
        sublinear_tf=sublinear_tf,
    ).fit_transform(DOCS)
    cp.testing.assert_array_almost_equal(tfidf_mat.todense(), ref.toarray())
def test_tfidf_vectorizer_get_feature_names():
    """get_feature_names returns the vocabulary in sorted order."""
    corpus = [
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]
    vectorizer = TfidfVectorizer()
    vectorizer.fit_transform(Series(corpus))
    # expected lowercased, alphabetically sorted vocabulary
    output = [
        "and",
        "document",
        "first",
        "is",
        "one",
        "second",
        "the",
        "third",
        "this",
    ]
    assert vectorizer.get_feature_names().to_arrow().to_pylist() == output
# ----------------------------------------------------------------
# HashingVectorizer tests
# ----------------------------------------------------------------
def assert_almost_equal_hash_matrices(mat_1, mat_2, ignore_sign=True):
    """
    Assert that two dense hashed-feature matrices are (almost) equal.

    Feature (column) order after hashing is not directly comparable, so two
    matrices are considered equal when, for every row, the sorted non-zero
    values match.

    Parameters
    ----------
    mat_1, mat_2 : 2D numpy arrays
        Dense matrices of identical shape.
    ignore_sign : bool, default True
        Compare absolute values instead of signed values.

    Raises
    ------
    AssertionError
        If the shapes differ or any row's non-zero values differ.

    TODO: Find a better way to test if hash matrices are equal
    """
    assert mat_1.shape == mat_2.shape
    for row_m1, row_m2 in zip(mat_1, mat_2):
        nz_row_m1 = row_m1[row_m1 != 0]
        nz_row_m2 = row_m2[row_m2 != 0]
        if ignore_sign:
            nz_row_m1 = np.abs(nz_row_m1)
            nz_row_m2 = np.abs(nz_row_m2)
        # sort once, after the optional abs(), so values line up positionally
        # (the original sorted signed values and then re-sorted after abs)
        np.testing.assert_almost_equal(np.sort(nz_row_m1), np.sort(nz_row_m2))
def test_hashingvectorizer():
    """Default HashingVectorizer output matches sklearn's (up to sign and
    column order, per assert_almost_equal_hash_matrices)."""
    corpus = [
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]
    res = HashingVectorizer().fit_transform(Series(corpus))
    ref = SkHashVect().fit_transform(corpus)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
@pytest.mark.xfail
def test_vectorizer_empty_token_case():
    """
    We ignore empty tokens right now, whereas sklearn treats them as a
    character; we might want to look into this more, but this should not
    be a concern for most pipelines.
    """
    corpus = [
        "a b ",
    ]
    # we have an extra null token here;
    # we slightly diverge from sklearn by not treating it as a token
    res = CountVectorizer(preprocessor=lambda s: s).fit_transform(
        Series(corpus)
    )
    ref = SkCountVect(
        preprocessor=lambda s: s, tokenizer=lambda s: s.split(" ")
    ).fit_transform(corpus)
    cp.testing.assert_array_equal(res.todense(), ref.toarray())

    # same divergence check for the hashing vectorizer
    res = HashingVectorizer(preprocessor=lambda s: s).fit_transform(
        Series(corpus)
    )
    ref = SkHashVect(
        preprocessor=lambda s: s, tokenizer=lambda s: s.split(" ")
    ).fit_transform(corpus)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
@pytest.mark.parametrize("lowercase", [False, True])
def test_hashingvectorizer_lowercase(lowercase):
    """The lowercase flag produces the same hashed features as sklearn."""
    corpus = [
        "This Is DoC",
        "this DoC is the second DoC.",
        "And this document is the third one.",
        "and Is this the first document?",
    ]
    res = HashingVectorizer(lowercase=lowercase).fit_transform(Series(corpus))
    ref = SkHashVect(lowercase=lowercase).fit_transform(corpus)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
def test_hashingvectorizer_stop_word():
    """English stop-word filtering matches sklearn's hashed output."""
    ref = SkHashVect(stop_words="english").fit_transform(DOCS)
    res = HashingVectorizer(stop_words="english").fit_transform(DOCS_GPU)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
def test_hashingvectorizer_n_features():
    """n_features controls the output width, matching sklearn's shape."""
    n_features = 10
    res = (
        HashingVectorizer(n_features=n_features)
        .fit_transform(DOCS_GPU)
        .todense()
        .get()
    )
    ref = SkHashVect(n_features=n_features).fit_transform(DOCS).toarray()
    # only shapes are compared here, not values
    assert res.shape == ref.shape
@pytest.mark.parametrize("norm", ["l1", "l2", None, "max"])
def test_hashingvectorizer_norm(norm):
    """Supported norms match sklearn; unsupported ones raise ValueError."""
    supported = norm in ("l1", "l2", None)
    if supported:
        res = HashingVectorizer(norm=norm).fit_transform(DOCS_GPU)
        ref = SkHashVect(norm=norm).fit_transform(DOCS)
        assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
    else:
        with pytest.raises(ValueError):
            HashingVectorizer(norm=norm).fit_transform(DOCS_GPU)
@pytest.mark.xfail(reason="https://github.com/rapidsai/cuml/issues/4721")
def test_hashingvectorizer_alternate_sign():
    """alternate_sign toggles whether negative feature values can appear."""
    # if alternate_sign = True
    # we should have some negative and positive values
    res = HashingVectorizer(alternate_sign=True).fit_transform(DOCS_GPU)
    res_f_array = res.todense().get().flatten()
    assert np.sum(res_f_array > 0, axis=0) > 0
    assert np.sum(res_f_array < 0, axis=0) > 0

    # if alternate_sign = False
    # we should have no negative values and some positive values
    res = HashingVectorizer(alternate_sign=False).fit_transform(DOCS_GPU)
    res_f_array = res.todense().get().flatten()
    assert np.sum(res_f_array > 0, axis=0) > 0
    assert np.sum(res_f_array < 0, axis=0) == 0
@pytest.mark.parametrize("dtype", [np.float32, np.float64, cp.float64])
def test_hashingvectorizer_dtype(dtype):
    """The requested dtype is honored in the transformed output."""
    res = HashingVectorizer(dtype=dtype).fit_transform(DOCS_GPU)
    assert res.dtype == dtype
def test_hashingvectorizer_delimiter():
    """A custom delimiter matches sklearn configured with the equivalent
    split-based tokenizer."""
    corpus = ["a0b0c", "a 0 b0e", "c0d0f"]
    res = HashingVectorizer(
        delimiter="0", norm=None, preprocessor=lambda s: s
    ).fit_transform(Series(corpus))
    # equivalent logic for sklearn: tokenize by splitting on "0"
    ref = SkHashVect(
        tokenizer=lambda s: s.split("0"),
        norm=None,
        token_pattern=None,
        preprocessor=lambda s: s,
    ).fit_transform(corpus)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
@pytest.mark.parametrize("vectorizer", ["tfidf", "hash_vec", "count_vec"])
def test_vectorizer_with_pandas_series(vectorizer):
    """All three vectorizers accept a pandas Series as input and match the
    corresponding sklearn vectorizer on the same Series."""
    corpus = [
        "This Is DoC",
        "this DoC is the second DoC.",
        "And this document is the third one.",
        "and Is this the first document?",
    ]
    # map the parametrized name to the (cuML, sklearn) class pair
    cuml_vec, sklearn_vec = {
        "tfidf": (TfidfVectorizer, SkTfidfVect),
        "hash_vec": (HashingVectorizer, SkHashVect),
        "count_vec": (CountVectorizer, SkCountVect),
    }[vectorizer]
    raw_documents = pd.Series(corpus)
    res = cuml_vec().fit_transform(raw_documents)
    ref = sklearn_vec().fit_transform(raw_documents)
    assert_almost_equal_hash_matrices(res.todense().get(), ref.toarray())
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_qn.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.metrics import accuracy_score
from cuml.datasets.classification import make_classification
from cuml.model_selection import train_test_split
from cuml.solvers import QN as cuQN
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
# todo: add util functions to better compare against precomputed solutions
@pytest.mark.parametrize("loss", ["sigmoid", "softmax"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("penalty", ["none", "l1", "l2", "elasticnet"])
@pytest.mark.parametrize("l1_strength", [0.00, 0.10])
@pytest.mark.parametrize("l2_strength", [0.00, 0.10])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_qn(loss, dtype, penalty, l1_strength, l2_strength, fit_intercept):
    """Check the QN solver for both supported losses.

    softmax: accuracy on a synthetic 4-class problem must beat the
    majority-class baseline.
    sigmoid: the converged objective and coefficients must match the
    precomputed reference solutions for the given regularization setting.
    """
    if penalty == "none" and (l1_strength > 0 or l2_strength > 0):
        pytest.skip("`none` penalty does not take l1/l2_strength")
    tol = 1e-6

    qn = cuQN(
        loss=loss,
        fit_intercept=fit_intercept,
        l1_strength=l1_strength,
        l2_strength=l2_strength,
        tol=1e-8,
        output_type="cupy",
    )

    if loss == "softmax":
        X, y = make_classification(
            n_samples=5000,
            n_informative=10,
            n_features=20,
            n_classes=4,
            dtype=dtype,
        )

        stratify = y.astype(dtype)
        X_train, X_test, y_train, y_test = train_test_split(
            X.astype(dtype), y.astype(dtype), stratify=stratify
        )
        # baseline: always predict the most frequent class
        most_class = cp.unique(y)[cp.argmax(cp.bincount(y))]
        baseline_preds = cp.array([most_class] * y_test.shape[0], dtype=dtype)
        baseline_score = accuracy_score(y_test, baseline_preds)

        y_pred = qn.fit(X_train, y_train).predict(X_test)
        cuml_score = accuracy_score(y_test, y_pred)

        assert cuml_score > baseline_score
        assert cuml_score >= 0.50

    elif loss == "sigmoid":
        X = np.array(precomputed_X, dtype=dtype)
        y = np.array(precomputed_y_log, dtype=dtype)
        qn.fit(X, y)

        # Reference solutions, keyed by
        # (l1_strength > 0, l2_strength > 0, fit_intercept).
        # The solver takes the strengths directly, so the solution depends
        # only on them and on the intercept, not on the `penalty` label.
        expected = {
            (False, False, True): (0.40263831615448, [-2.1088872, 2.4812558]),
            (False, False, False): (
                0.4317452311515808,
                [-2.120777, 3.056865],
            ),
            (True, False, True): (
                0.44295936822891235,
                [-1.6899368, 1.9021575],
            ),
            (True, False, False): (
                0.4769895672798157,
                [-1.6214856, 2.3650239],
            ),
            (False, True, True): (
                0.43780848383903503,
                [-1.5337948, 1.678699],
            ),
            (False, True, False): (
                0.4750209450721741,
                [-1.3931049, 2.0140104],
            ),
            (True, True, True): (0.467987984418869, [-1.3727235, 1.4639963]),
            (True, True, False): (0.5067970156669617, [-1.2102532, 1.752459]),
        }

        # Same coverage as the original branch pyramid: the l1 penalty is
        # only checked without an l2 strength and vice versa; elasticnet
        # and none are checked for every strength combination they reach.
        checked = (
            penalty in ("none", "elasticnet")
            or (penalty == "l1" and l2_strength == 0.0)
            or (penalty == "l2" and l1_strength == 0.0)
        )
        if checked:
            ref_objective, ref_coef = expected[
                (l1_strength > 0.0, l2_strength > 0.0, fit_intercept)
            ]
            # BUGFIX: compare with abs(); previously `(objective - ref) < tol`
            # passed for ANY objective smaller than the reference value.
            assert abs(qn.objective - ref_objective) < tol
            cp.testing.assert_array_almost_equal(
                qn.coef_, np.array([ref_coef]), decimal=3
            )
# todo add tests for softmax dtype=np.float64
# elasticnet for this points converged to different solution
# if loss == 'softmax':
# if penalty == 'none' and l1_strength == 0.0 and l2_strength == 0.0:
# if fit_intercept:
# assert (qn.objective - 0.007433414924889803) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[15.236361,
# -41.595913,
# -33.55021],
# [-36.607555,
# -13.91267,
# -42.66093],
# [-25.04939,
# -26.793947,
# -31.50192]]),
# decimal=3)
# else:
# assert (qn.objective - 0.18794211745262146) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[14.2959795,
# -104.63812,
# -96.41866],
# [-105.31236,
# -170.4887,
# -96.486]]),
# decimal=3)
# elif penalty == 'l1' and l2_strength == 0.0:
# if fit_intercept:
# if l1_strength == 0.0:
# assert (qn.objective - 0.007433414924889803) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[15.236361,
# -41.595913,
# -33.55021],
# [-36.607555,
# -13.91267,
# -42.66093],
# [-25.04939,
# -26.793947,
# -31.50192]]),
# decimal=3)
# else:
# assert (qn.objective - 0.2925984263420105) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.2279763,
# -2.011927,
# -1.8038181],
# [-3.3828118,
# -0.64903206,
# -3.0688426],
# [-1.6962943,
# -0.8585775,
# -1.1564851]]),
# decimal=3)
# else:
# if l1_strength == 0.0:
# assert (qn.objective - 0.18794211745262146) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[14.2959795,
# -104.63812,
# -96.41866],
# [-105.31236,
# -170.4887,
# -96.486]]),
# decimal=3)
# else:
# assert (qn.objective - 0.3777262568473816) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.4765631,
# -1.569497,
# -0.6421711],
# [-2.0787644,
# -1.593922,
# -0.73674846]]),
# decimal=3)
# elif penalty == 'l2' and l1_strength == 0.0:
# if fit_intercept:
# if l2_strength == 0.0:
# assert (qn.objective - 0.007433414924889803) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[15.236361,
# -41.595913,
# -33.55021],
# [-36.607555,
# -13.91267,
# -42.66093],
# [-25.04939,
# -26.793947,
# -31.50192]]),
# decimal=3)
# else:
# assert (qn.objective - 0.28578639030456543) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.6702422,
# -1.5495867,
# -1.193351],
# [-2.207053,
# -0.6854614,
# -2.0305414],
# [-1.1746005,
# -0.7992407,
# -1.0034739]]),
# decimal=3)
# else:
# if l2_strength == 0.0:
# assert (qn.objective - 0.18794211745262146) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[14.2959795,
# -104.63812,
# -96.41866],
# [-105.31236,
# -170.4887,
# -96.486]]),
# decimal=3)
# else:
# assert (qn.objective - 0.3537392020225525) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.3769588,
# -1.0002015,
# -0.5205092],
# [-1.5185534,
# -1.029575,
# -0.47429192]]),
# decimal=3)
# if penalty == 'elasticnet':
# if fit_intercept:
# if l1_strength == 0.0 and l2_strength == 0.0:
# assert (qn.objective - 0.007433414924889803) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[15.236361,
# -41.595913,
# -33.55021],
# [-36.607555,
# -13.91267,
# -42.66093],
# [-25.04939,
# -26.793947,
# -31.50192]]),
# decimal=3)
# elif l1_strength == 0.0:
# assert (qn.objective - 0.28578639030456543) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.6702422,
# -1.5495867,
# -1.193351],
# [-2.207053,
# -0.6854614,
# -2.0305414],
# [-1.1746005,
# -0.7992407,
# -1.0034739]]),
# decimal=3)
# elif l2_strength == 0.0:
# assert (qn.objective - 0.2925984263420105) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.2279763,
# -2.011927,
# -1.8038181],
# [-3.3828118,
# -0.64903206,
# -3.0688426],
# [-1.6962943,
# -0.8585775,
# -1.1564851]]),
# decimal=3)
# else:
# assert (qn.objective - 0.34934690594673157) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.1901233,
# -1.2236115,
# -1.0416932],
# [-2.3100038,
# -0.46381754,
# -2.1544967],
# [-1.0984052,
# -0.44855425,
# -0.7347126]]),
# decimal=3)
# else:
# if l1_strength == 0.0 and l2_strength == 0.0:
# assert (qn.objective - 0.18794211745262146) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[14.2959795,
# -104.63812,
# -96.41866],
# [-105.31236,
# -170.4887,
# -96.486]]),
# decimal=3)
# elif l1_strength == 0.0:
# assert (qn.objective - 0.3537392020225525) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.3769588,
# -1.0002015,
# -0.5205092],
# [-1.5185534,
# -1.029575,
# -0.47429192]]),
# decimal=3)
# elif l2_strength == 0.0:
# assert (qn.objective - 0.3777262568473816) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.4765631,
# -1.569497,
# -0.6421711],
# [-2.0787644,
# -1.593922,
# -0.73674846]]),
# decimal=3)
# else:
# assert (qn.objective - 0.40656331181526184) < tol
# np.testing.assert_almost_equal(qn.coef_
# np.array([[1.2176441,
# -0.8387626,
# -0.3155345],
# [-1.3095317,
# -0.60578823,
# -0.26777366]]),
# decimal=3)
# Fixed 10-sample, 2-feature dataset for the precomputed-solution checks in
# test_qn (the sigmoid branch fits on precomputed_X / precomputed_y_log).
precomputed_X = [
    [-0.2047076594847130, 0.4789433380575482],
    [-0.5194387150567381, -0.5557303043474900],
    [1.9657805725027142, 1.3934058329729904],
    [0.0929078767437177, 0.2817461528302025],
    [0.7690225676118387, 1.2464347363862822],
    [1.0071893575830049, -1.2962211091122635],
    [0.2749916334321240, 0.2289128789353159],
    [1.3529168351654497, 0.8864293405915888],
    [-2.0016373096603974, -0.3718425371402544],
    [1.6690253095248706, -0.4385697358355719],
]
# Binary labels (logistic/sigmoid loss).
precomputed_y_log = [1, 1, 1, 0, 1, 0, 1, 0, 1, 0]
# 4-class labels; not referenced in this chunk (presumably for softmax).
precomputed_y_multi = [2, 2, 0, 3, 3, 0, 0, 0, 1, 0]
# Continuous targets; not referenced in this chunk (presumably regression).
precomputed_y_reg = [
    0.2675836026202781,
    -0.0678277759663704,
    -0.6334027174275105,
    -0.1018336189077367,
    0.0933815935886932,
    -1.1058853496996381,
    -0.1658298189619160,
    -0.2954290675648911,
    0.7966520536712608,
    -1.0767450516284769,
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_linear_model.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import nullcontext
from distutils.version import LooseVersion
from functools import lru_cache
import pytest
import sklearn
from cuml.internals.array import elements_in_representable_range
from cuml.internals.safe_imports import (
cpu_only_import,
cpu_only_import_from,
gpu_only_import,
)
from cuml.testing.strategies import (
regression_datasets,
split_datasets,
standard_classification_datasets,
standard_regression_datasets,
)
from cuml.testing.utils import (
array_difference,
array_equal,
quality_param,
small_classification_dataset,
small_regression_dataset,
stress_param,
unit_param,
)
from hypothesis import assume, example, given, note
from hypothesis import strategies as st
from hypothesis import target
from hypothesis.extra.numpy import floating_dtypes
from sklearn.datasets import (
load_breast_cancer,
load_digits,
make_classification,
make_regression,
)
from sklearn.linear_model import LinearRegression as skLinearRegression
from sklearn.linear_model import LogisticRegression as skLog
from sklearn.linear_model import Ridge as skRidge
from sklearn.model_selection import train_test_split
from cuml import ElasticNet as cuElasticNet
from cuml import LinearRegression as cuLinearRegression
from cuml import LogisticRegression as cuLog
from cuml import Ridge as cuRidge
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
rmm = gpu_only_import("rmm")
csr_matrix = cpu_only_import_from("scipy.sparse", "csr_matrix")
def _make_regression_dataset_uncached(nrows, ncols, n_info, **kwargs):
    """Generate a fresh regression problem and return its 80/20 split.

    Extra ``kwargs`` are forwarded to ``make_regression``. Both generation
    and splitting use fixed random states for reproducibility.
    """
    features, targets = make_regression(
        **kwargs,
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        random_state=0,
    )
    return train_test_split(
        features, targets, train_size=0.8, random_state=10
    )
@lru_cache(4)
def _make_regression_dataset_from_cache(nrows, ncols, n_info, **kwargs):
    """LRU-cached (4 entries) variant of the uncached dataset generator."""
    return _make_regression_dataset_uncached(nrows, ncols, n_info, **kwargs)
def make_regression_dataset(datatype, nrows, ncols, n_info, **kwargs):
    """Return a regression train/test split cast to ``datatype``.

    Small datasets are served from an LRU cache; large ones are regenerated
    on every call to bound cache memory. Returns a map over
    (X_train, X_test, y_train, y_test).
    """
    if nrows * ncols < 1e8:  # Keep cache under 4 GB
        dataset = _make_regression_dataset_from_cache(
            nrows, ncols, n_info, **kwargs
        )
    else:
        dataset = _make_regression_dataset_uncached(
            nrows, ncols, n_info, **kwargs
        )

    return map(lambda arr: arr.astype(datatype), dataset)
def make_classification_dataset(datatype, nrows, ncols, n_info, num_classes):
    """Build a synthetic classification problem and return its 80/20 split.

    Features are cast to ``datatype`` and labels to int32; generation and
    splitting use fixed random states for reproducibility.
    """
    features, labels = make_classification(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_classes=num_classes,
        random_state=0,
    )
    features = features.astype(datatype)
    labels = labels.astype(np.int32)

    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, train_size=0.8, random_state=10
    )
    return X_train, X_test, y_train, y_test
def sklearn_compatible_dataset(X_train, X_test, y_train, _=None):
    """Return True when the split is usable by the scikit-learn estimators
    in these tests: at least one feature, some positive training values and
    targets, and no NaN/inf anywhere.

    The trailing ``_`` parameter lets callers unpack a 4-tuple split.
    """
    if X_train.shape[1] < 1:
        return False
    if not (X_train > 0).any():
        return False
    if not (y_train > 0).any():
        return False
    arrays = [a for a in (X_train, X_test, y_train) if a is not None]
    return all(np.isfinite(a).all() for a in arrays)
def cuml_compatible_dataset(X_train, X_test, y_train, _=None):
    """Return True when the split satisfies cuML's input requirements:
    at least 2 training rows and 1 feature, finite training features, and
    every provided array representable in float32.

    The trailing ``_`` parameter lets callers unpack a 4-tuple split.
    """
    return (
        X_train.shape[0] >= 2
        and X_train.shape[1] >= 1
        and np.isfinite(X_train).all()
        and all(
            elements_in_representable_range(x, np.float32)
            for x in (X_train, X_test, y_train)
            if x is not None
        )
    )
# Solver names accepted by the linear models under test; `algorithms` is the
# hypothesis strategy that samples from them.
_ALGORITHMS = ["svd", "eig", "qr", "svd-qr", "svd-jacobi"]
algorithms = st.sampled_from(_ALGORITHMS)
@pytest.mark.parametrize("ntargets", [1, 2])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("algorithm", ["eig", "svd"])
@pytest.mark.parametrize(
    "nrows", [unit_param(1000), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
def test_linear_regression_model(
    datatype, algorithm, nrows, column_info, ntargets
):
    """cuML LinearRegression predictions match sklearn across solvers,
    dtypes, sizes, and single/multi-target problems."""
    if algorithm == "svd" and nrows > 46340:
        pytest.skip(
            "svd solver is not supported for the data that has more"
            "than 46340 rows or columns if you are using CUDA version"
            "10.x"
        )
    if 1 < ntargets and algorithm != "svd":
        pytest.skip("The multi-target fit only supports using the svd solver.")

    ncols, n_info = column_info
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info, n_targets=ntargets
    )

    # Initialization of cuML's linear regression model
    cuols = cuLinearRegression(fit_intercept=True, algorithm=algorithm)

    # fit and predict cuml linear regression model
    cuols.fit(X_train, y_train)
    cuols_predict = cuols.predict(X_test)

    # the sklearn comparison is skipped for the stress-size dataset
    if nrows < 500000:
        # sklearn linear regression model initialization, fit and predict
        skols = skLinearRegression(fit_intercept=True)
        skols.fit(X_train, y_train)

        skols_predict = skols.predict(X_test)

        assert array_equal(skols_predict, cuols_predict, 1e-1, with_sign=True)
@pytest.mark.parametrize("ntargets", [1, 2])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("algorithm", ["eig", "svd", "qr", "svd-qr"])
@pytest.mark.parametrize(
    "fit_intercept, distribution",
    [
        (True, "lognormal"),
        (True, "exponential"),
        (True, "uniform"),
        # NOTE(review): (True, "exponential") appears twice; possibly one of
        # these was meant to be (False, "exponential") -- confirm intent.
        (True, "exponential"),
        (False, "lognormal"),
        (False, "uniform"),
    ],
)
def test_weighted_linear_regression(
    ntargets, datatype, algorithm, fit_intercept, distribution
):
    """Sample-weighted cuML LinearRegression matches sklearn for several
    weight distributions."""
    nrows, ncols, n_info = 1000, 20, 10
    max_weight = 10
    noise = 20
    if 1 < ntargets and algorithm != "svd":
        pytest.skip("The multi-target fit only supports using the svd solver.")

    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info, noise=noise, n_targets=ntargets
    )

    # set weight per sample to be from 1 to max_weight
    # NOTE(review): weights are drawn without a fixed seed, so individual
    # runs are not reproducible.
    if distribution == "uniform":
        wt = np.random.randint(1, high=max_weight, size=len(X_train))
    elif distribution == "exponential":
        wt = np.random.exponential(size=len(X_train))
    else:
        wt = np.random.lognormal(size=len(X_train))

    # Initialization of cuML's linear regression model
    cuols = cuLinearRegression(
        fit_intercept=fit_intercept, algorithm=algorithm
    )

    # fit and predict cuml linear regression model
    cuols.fit(X_train, y_train, sample_weight=wt)
    cuols_predict = cuols.predict(X_test)

    # sklearn linear regression model initialization, fit and predict
    skols = skLinearRegression(fit_intercept=fit_intercept)
    skols.fit(X_train, y_train, sample_weight=wt)

    skols_predict = skols.predict(X_test)

    assert array_equal(skols_predict, cuols_predict, 1e-1, with_sign=True)
@pytest.mark.skipif(
    rmm._cuda.gpu.runtimeGetVersion() < 11000,
    reason="svd solver does not support more than 46340 rows or columns for"
    " CUDA<11 and other solvers do not support single-column input",
)
def test_linear_regression_single_column():
    """Test that linear regression can be run on single column with more than
    46340 rows (a limitation on CUDA <11)"""
    model = cuLinearRegression()
    # fitting a 1-D X is expected to emit a UserWarning
    with pytest.warns(UserWarning):
        model.fit(cp.random.rand(46341), cp.random.rand(46341))
# The assumptions required to have this test pass are relatively strong.
# It should be possible to relax assumptions once #4963 is resolved.
# See also: test_linear_regression_model_default_generalized
@given(
    split_datasets(
        standard_regression_datasets(
            dtypes=floating_dtypes(sizes=(32, 64)),
            n_samples=st.just(1000),
            n_targets=st.integers(1, 10),
        ),
        test_sizes=st.just(0.2),
    )
)
@example(small_regression_dataset(np.float32))
@example(small_regression_dataset(np.float64))
def test_linear_regression_model_default(dataset):
    """Default cuML LinearRegression matches sklearn on hypothesis-generated
    regression splits."""
    X_train, X_test, y_train, _ = dataset

    # Filter datasets based on required assumptions
    assume(sklearn_compatible_dataset(X_train, X_test, y_train))
    assume(cuml_compatible_dataset(X_train, X_test, y_train))

    # Initialization of cuML's linear regression model
    cuols = cuLinearRegression()

    # fit and predict cuml linear regression model
    cuols.fit(X_train, y_train)
    cuols_predict = cuols.predict(X_test)

    # sklearn linear regression model initialization and fit
    skols = skLinearRegression()
    skols.fit(X_train, y_train)

    skols_predict = skols.predict(X_test)

    # guide hypothesis shrinking toward larger prediction differences
    target(float(array_difference(skols_predict, cuols_predict)))
    assert array_equal(skols_predict, cuols_predict, 1e-1, with_sign=True)
# TODO: Replace test_linear_regression_model_default with this test once #4963
# is resolved.
@pytest.mark.skip(reason="https://github.com/rapidsai/cuml/issues/4963")
@given(
    split_datasets(regression_datasets(dtypes=floating_dtypes(sizes=(32, 64))))
)
def test_linear_regression_model_default_generalized(dataset):
    """Broader-strategy variant of test_linear_regression_model_default;
    skipped until issue #4963 is resolved."""
    X_train, X_test, y_train, _ = dataset

    # Filter datasets based on required assumptions
    assume(sklearn_compatible_dataset(X_train, X_test, y_train))
    assume(cuml_compatible_dataset(X_train, X_test, y_train))

    # Initialization of cuML's linear regression model
    cuols = cuLinearRegression()

    # fit and predict cuml linear regression model
    cuols.fit(X_train, y_train)
    cuols_predict = cuols.predict(X_test)

    # sklearn linear regression model initialization and fit
    skols = skLinearRegression()
    skols.fit(X_train, y_train)

    skols_predict = skols.predict(X_test)

    # guide hypothesis shrinking toward larger prediction differences
    target(float(array_difference(skols_predict, cuols_predict)))
    assert array_equal(skols_predict, cuols_predict, 1e-1, with_sign=True)
@given(
    split_datasets(
        standard_regression_datasets(
            dtypes=floating_dtypes(sizes=(32, 64)),
        ),
    ),
)
@example(small_regression_dataset(np.float32))
@example(small_regression_dataset(np.float64))
def test_ridge_regression_model_default(dataset):
    """Default cuML Ridge matches sklearn Ridge on hypothesis-generated
    regression splits."""
    assume(sklearn_compatible_dataset(*dataset))
    assume(cuml_compatible_dataset(*dataset))
    X_train, X_test, y_train, _ = dataset

    curidge = cuRidge()

    # fit and predict cuml ridge regression model
    curidge.fit(X_train, y_train)
    curidge_predict = curidge.predict(X_test)

    # sklearn ridge regression model initialization, fit and predict
    skridge = skRidge()
    skridge.fit(X_train, y_train)
    skridge_predict = skridge.predict(X_test)

    equal = array_equal(skridge_predict, curidge_predict, 1e-1, with_sign=True)
    note(equal)
    # guide hypothesis shrinking toward larger prediction differences
    target(float(np.abs(equal.compute_difference())))
    assert equal
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("algorithm", ["eig", "svd"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
def test_ridge_regression_model(datatype, algorithm, nrows, column_info):
    """Compare cuML Ridge (eig/svd solvers) against sklearn on synthetic data."""
    if algorithm == "svd" and nrows > 46340:
        # FIX: the implicit string concatenation was missing spaces and
        # rendered as "...morethan 46340 ... version10.x".
        pytest.skip(
            "svd solver is not supported for the data that has more "
            "than 46340 rows or columns if you are using CUDA version "
            "10.x"
        )
    ncols, n_info = column_info
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info
    )
    # Initialization of cuML's ridge regression model
    curidge = cuRidge(fit_intercept=False, solver=algorithm)
    # fit and predict cuml ridge regression model
    curidge.fit(X_train, y_train)
    curidge_predict = curidge.predict(X_test)
    # Only compare against sklearn for sizes it can fit in reasonable time
    if nrows < 500000:
        # sklearn ridge regression model initialization, fit and predict
        skridge = skRidge(fit_intercept=False)
        skridge.fit(X_train, y_train)
        skridge_predict = skridge.predict(X_test)
        assert array_equal(
            skridge_predict, curidge_predict, 1e-1, with_sign=True
        )
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("algorithm", ["eig", "svd"])
@pytest.mark.parametrize(
    "fit_intercept, distribution",
    [
        # FIX: (True, "exponential") was listed twice while
        # (False, "exponential") was missing; the grid now covers every
        # fit_intercept/distribution combination exactly once.
        (True, "lognormal"),
        (True, "exponential"),
        (True, "uniform"),
        (False, "exponential"),
        (False, "lognormal"),
        (False, "uniform"),
    ],
)
def test_weighted_ridge(datatype, algorithm, fit_intercept, distribution):
    """Sample-weighted cuML Ridge should match sklearn Ridge predictions."""
    nrows, ncols, n_info = 1000, 20, 10
    max_weight = 10
    noise = 20
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info, noise=noise
    )
    # set weight per sample to be from 1 to max_weight
    if distribution == "uniform":
        wt = np.random.randint(1, high=max_weight, size=len(X_train))
    elif distribution == "exponential":
        wt = np.random.exponential(size=len(X_train))
    else:
        wt = np.random.lognormal(size=len(X_train))
    # Initialization of cuML's linear regression model
    curidge = cuRidge(fit_intercept=fit_intercept, solver=algorithm)
    # fit and predict cuml linear regression model
    curidge.fit(X_train, y_train, sample_weight=wt)
    curidge_predict = curidge.predict(X_test)
    # sklearn linear regression model initialization, fit and predict
    skridge = skRidge(fit_intercept=fit_intercept)
    skridge.fit(X_train, y_train, sample_weight=wt)
    skridge_predict = skridge.predict(X_test)
    assert array_equal(skridge_predict, curidge_predict, 1e-1, with_sign=True)
@pytest.mark.parametrize(
    "num_classes, dtype, penalty, l1_ratio, fit_intercept, C, tol",
    [
        # L-BFGS Solver
        (2, np.float32, "none", 1.0, True, 1.0, 1e-3),
        (2, np.float64, "l2", 1.0, True, 1.0, 1e-8),
        (10, np.float32, "elasticnet", 0.0, True, 1.0, 1e-3),
        (10, np.float32, "none", 1.0, False, 1.0, 1e-8),
        (10, np.float32, "none", 1.0, False, 2.0, 1e-3),
        # OWL-QN Solver
        (2, np.float32, "l1", 1.0, True, 1.0, 1e-3),
        (2, np.float64, "elasticnet", 1.0, True, 1.0, 1e-8),
        (10, np.float32, "l1", 1.0, True, 1.0, 1e-3),
        (10, np.float32, "l1", 1.0, False, 1.0, 1e-8),
        (10, np.float32, "elasticnet", 1.0, False, 0.5, 1e-3),
    ],
)
@pytest.mark.parametrize("nrows", [unit_param(1000)])
@pytest.mark.parametrize("column_info", [unit_param([20, 10])])
# ignoring UserWarnings in sklearn about setting unused parameters
# like l1 for none penalty
@pytest.mark.filterwarnings("ignore::UserWarning:sklearn[.*]")
def test_logistic_regression(
    num_classes,
    dtype,
    penalty,
    l1_ratio,
    fit_intercept,
    nrows,
    column_info,
    C,
    tol,
):
    """Compare cuML logistic regression accuracy against sklearn per solver."""
    ncols, n_info = column_info
    # Checking sklearn >= 0.21 for testing elasticnet
    sk_check = LooseVersion(str(sklearn.__version__)) >= LooseVersion("0.21.0")
    if not sk_check and penalty == "elasticnet":
        # FIX: implicit string concatenation was missing a space and
        # rendered as "...logistic withelastic net."
        pytest.skip(
            "Need sklearn > 0.21 for testing logistic with elastic net."
        )
    X_train, X_test, y_train, y_test = make_classification_dataset(
        datatype=dtype,
        nrows=nrows,
        ncols=ncols,
        n_info=n_info,
        num_classes=num_classes,
    )
    y_train = y_train.astype(dtype)
    y_test = y_test.astype(dtype)
    culog = cuLog(
        penalty=penalty,
        l1_ratio=l1_ratio,
        C=C,
        fit_intercept=fit_intercept,
        tol=tol,
    )
    culog.fit(X_train, y_train)
    # Only solver=saga supports elasticnet in scikit
    if penalty in ["elasticnet", "l1"]:
        if sk_check:
            sklog = skLog(
                penalty=penalty,
                l1_ratio=l1_ratio,
                solver="saga",
                C=C,
                fit_intercept=fit_intercept,
                multi_class="auto",
            )
        else:
            sklog = skLog(
                penalty=penalty,
                solver="saga",
                C=C,
                fit_intercept=fit_intercept,
                multi_class="auto",
            )
    else:
        sklog = skLog(
            penalty=penalty,
            solver="lbfgs",
            C=C,
            fit_intercept=fit_intercept,
            multi_class="auto",
        )
    sklog.fit(X_train, y_train)
    # Setting tolerance to lowest possible per loss to detect regressions
    # as much as possible
    cu_preds = culog.predict(X_test)
    tol_test = 0.012
    tol_train = 0.006
    # Sparse penalties on many classes converge less tightly; relax tolerance
    if num_classes == 10 and penalty in ["elasticnet", "l1"]:
        tol_test *= 10
        tol_train *= 10
    assert (
        culog.score(X_train, y_train)
        >= sklog.score(X_train, y_train) - tol_train
    )
    assert (
        culog.score(X_test, y_test) >= sklog.score(X_test, y_test) - tol_test
    )
    assert len(np.unique(cu_preds)) == len(np.unique(y_test))
    if fit_intercept is False:
        assert np.array_equal(culog.intercept_, sklog.intercept_)
@given(
    dtype=floating_dtypes(sizes=(32, 64)),
    penalty=st.sampled_from(("none", "l1", "l2", "elasticnet")),
    l1_ratio=st.one_of(st.none(), st.floats(min_value=0.0, max_value=1.0)),
)
def test_logistic_regression_unscaled(dtype, penalty, l1_ratio):
    """Logistic regression should stay accurate on unscaled input features."""
    if penalty == "elasticnet":
        assume(l1_ratio is not None)
    # Test logistic regression on the breast cancer dataset. We do not scale
    # the dataset which could lead to numerical problems (fixed in PR #2543).
    X, y = load_breast_cancer(return_X_y=True)
    X = X.astype(dtype)
    y = y.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    params = {
        "penalty": penalty,
        "C": 1,
        "tol": 1e-4,
        "fit_intercept": True,
        "max_iter": 5000,
        "l1_ratio": l1_ratio,
    }
    culog = cuLog(**params)
    culog.fit(X_train, y_train)
    score_train = culog.score(X_train, y_train)
    score_test = culog.score(X_test, y_test)
    # hypothesis maximizes targets, so inverse scores hunt for low accuracy
    target(1 / score_train, label="inverse train score")
    target(1 / score_test, label="inverse test score")
    # TODO: Use a more rigorous approach to determine expected minimal scores
    # here. The values here are selected empirically and passed during test
    # development.
    assert score_train >= 0.94
    assert score_test >= 0.94
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_logistic_regression_model_default(dtype):
    """Default-parameter cuML logistic regression should score close to sklearn."""
    X_train, X_test, y_train, y_test = small_classification_dataset(dtype)
    y_train, y_test = y_train.astype(dtype), y_test.astype(dtype)
    cu_model = cuLog()
    cu_model.fit(X_train, y_train)
    sk_model = skLog(multi_class="auto")
    sk_model.fit(X_train, y_train)
    cu_score = cu_model.score(X_test, y_test)
    sk_score = sk_model.score(X_test, y_test)
    # allow a small tolerance below the sklearn reference score
    assert cu_score >= sk_score - 0.022
@given(
    dtype=floating_dtypes(sizes=(32, 64)),
    order=st.sampled_from(("C", "F")),
    sparse_input=st.booleans(),
    fit_intercept=st.booleans(),
    penalty=st.sampled_from(("none", "l1", "l2")),
)
def test_logistic_regression_model_digits(
    dtype, order, sparse_input, fit_intercept, penalty
):
    """cuML logistic regression should reach a minimum accuracy on digits."""
    # smallest sklearn score with max_iter = 10000
    # put it as a constant here, because sklearn 0.23.1 needs a lot of iters
    # to converge and has a bug returning an unrelated error if not converged.
    acceptable_score = 0.95
    digits = load_digits()
    X_dense = digits.data.astype(dtype)
    # FIX: the previous `X_dense.reshape(X_dense.shape, order=order)`
    # discarded its result, so the `order` parameter was never exercised.
    # Copy into the requested memory layout instead.
    X_dense = X_dense.copy(order=order)
    X = csr_matrix(X_dense) if sparse_input else X_dense
    y = digits.target.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    culog = cuLog(fit_intercept=fit_intercept, penalty=penalty)
    culog.fit(X_train, y_train)
    score = culog.score(X_test, y_test)
    assert score >= acceptable_score
@given(dtype=floating_dtypes(sizes=(32, 64)))
def test_logistic_regression_sparse_only(dtype, nlp_20news):
    """Fit/score on a sparse-only dataset (20 newsgroups features)."""
    # sklearn score with max_iter = 10000
    sklearn_score = 0.878
    acceptable_score = sklearn_score - 0.01
    X, y = nlp_20news
    X = csr_matrix(X.astype(dtype))
    # .get() copies y to host (presumably a CuPy array) before casting
    y = y.get().astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    culog = cuLog()
    culog.fit(X_train, y_train)
    score = culog.score(X_test, y_test)
    assert score >= acceptable_score
@given(
    dataset=split_datasets(
        standard_classification_datasets(
            dtypes=floating_dtypes(sizes=(32, 64)),
            n_classes=st.sampled_from((2, 10)),
            n_features=st.just(20),
            n_informative=st.just(10),
        )
    ),
    fit_intercept=st.booleans(),
    sparse_input=st.booleans(),
)
def test_logistic_regression_decision_function(
    dataset, fit_intercept, sparse_input
):
    """decision_function should match sklearn when given identical weights."""
    X_train, X_test, y_train, y_test = dataset
    # Assumption needed to avoid qn.h: logistic loss invalid C error.
    assume(set(np.unique(y_train)) == set(np.unique(y_test)))
    num_classes = len(np.unique(np.concatenate((y_train, y_test))))
    if sparse_input:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    culog = cuLog(fit_intercept=fit_intercept, output_type="numpy")
    culog.fit(X_train, y_train)
    # Transplant the cuML weights into an sklearn model so both compute the
    # decision function from identical parameters
    sklog = skLog(fit_intercept=fit_intercept)
    sklog.coef_ = culog.coef_
    sklog.intercept_ = culog.intercept_ if fit_intercept else 0
    sklog.classes_ = np.arange(num_classes)
    cu_dec_func = culog.decision_function(X_test)
    # Transpose the multiclass output to sklearn's (n_samples, n_classes)
    # layout before comparing — TODO confirm cuML's layout assumption
    if cu_dec_func.shape[0] > 2:  # num_classes
        cu_dec_func = cu_dec_func.T
    sk_dec_func = sklog.decision_function(X_test)
    assert array_equal(cu_dec_func, sk_dec_func)
@given(
    dataset=split_datasets(
        standard_classification_datasets(
            dtypes=floating_dtypes(sizes=(32, 64)),
            n_classes=st.sampled_from((2, 10)),
            n_features=st.just(20),
            n_informative=st.just(10),
        )
    ),
    fit_intercept=st.booleans(),
    sparse_input=st.booleans(),
)
def test_logistic_regression_predict_proba(
    dataset, fit_intercept, sparse_input
):
    """predict_proba/predict_log_proba should match sklearn given equal weights."""
    X_train, X_test, y_train, y_test = dataset
    # Assumption needed to avoid qn.h: logistic loss invalid C error.
    assume(set(np.unique(y_train)) == set(np.unique(y_test)))
    num_classes = len(np.unique(y_train))
    if sparse_input:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    culog = cuLog(fit_intercept=fit_intercept, output_type="numpy")
    culog.fit(X_train, y_train)
    # Build an sklearn model carrying cuML's weights so probabilities are
    # computed from identical parameters (multinomial solver for >2 classes)
    sklog = skLog(
        fit_intercept=fit_intercept,
        **(
            {"solver": "lbfgs", "multi_class": "multinomial"}
            if num_classes > 2
            else {}
        ),
    )
    sklog.coef_ = culog.coef_
    sklog.intercept_ = culog.intercept_ if fit_intercept else 0
    sklog.classes_ = np.arange(num_classes)
    cu_proba = culog.predict_proba(X_test)
    sk_proba = sklog.predict_proba(X_test)
    cu_log_proba = culog.predict_log_proba(X_test)
    sk_log_proba = sklog.predict_log_proba(X_test)
    assert array_equal(cu_proba, sk_proba)
    assert array_equal(cu_log_proba, sk_log_proba)
@pytest.mark.parametrize("constructor", [np.array, cp.array, cudf.DataFrame])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_logistic_regression_input_type_consistency(constructor, dtype):
    """predict/predict_proba output type should mirror the input type."""
    from cudf.core.frame import Frame

    X = constructor([[5, 10], [3, 1], [7, 8]]).astype(dtype)
    y = constructor([0, 1, 1]).astype(dtype)
    clf = cuLog().fit(X, y, convert_dtype=True)
    original_type = type(X)
    # cuDF outputs may be any Frame subclass, so compare against the base
    if constructor == cudf.DataFrame:
        original_type = Frame
    assert isinstance(clf.predict_proba(X), original_type)
    assert isinstance(clf.predict(X), original_type)
@pytest.mark.parametrize("train_dtype", [np.float32, np.float64])
@pytest.mark.parametrize("test_dtype", [np.float64, np.float32])
def test_linreg_predict_convert_dtype(train_dtype, test_dtype):
    """predict() must accept test data whose dtype differs from training."""
    features, labels = make_regression(
        n_samples=50, n_features=10, n_informative=5, random_state=0
    )
    features = features.astype(train_dtype)
    labels = labels.astype(train_dtype)
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, train_size=0.8, random_state=0
    )
    model = cuLinearRegression()
    model.fit(X_train, y_train)
    # Must not raise despite the train/test dtype mismatch
    model.predict(X_test.astype(test_dtype))
@given(
    dataset=split_datasets(
        standard_regression_datasets(dtypes=floating_dtypes(sizes=(32, 64)))
    ),
    test_dtype=floating_dtypes(sizes=(32, 64)),
)
def test_ridge_predict_convert_dtype(dataset, test_dtype):
    """Ridge.predict should accept test data of a different float dtype."""
    assume(cuml_compatible_dataset(*dataset))
    X_train, X_test, y_train, _ = dataset
    clf = cuRidge()
    clf.fit(X_train, y_train)
    # Must not raise despite the train/test dtype mismatch
    clf.predict(X_test.astype(test_dtype))
@given(
    dataset=split_datasets(
        standard_classification_datasets(
            dtypes=floating_dtypes(sizes=(32, 64))
        )
    ),
    test_dtype=floating_dtypes(sizes=(32, 64)),
)
def test_logistic_predict_convert_dtype(dataset, test_dtype):
    """LogisticRegression.predict should accept a different float dtype."""
    X_train, X_test, y_train, y_test = dataset
    # Assumption needed to avoid qn.h: logistic loss invalid C error.
    assume(set(np.unique(y_train)) == set(np.unique(y_test)))
    clf = cuLog()
    clf.fit(X_train, y_train)
    # Must not raise despite the train/test dtype mismatch
    clf.predict(X_test.astype(test_dtype))
@pytest.fixture(
    scope="session", params=["binary", "multiclass-3", "multiclass-7"]
)
def regression_dataset(request):
    """Session-scoped synthetic classification data for the weighting tests.

    Returns a dict keyed by "regular"/"stress_test"; each value is a
    (regression_type, data, coef, output) tuple where ``output`` holds the
    int32 labels produced by a random linear model.
    """
    regression_type = request.param
    out = {}
    for test_status in ["regular", "stress_test"]:
        if test_status == "regular":
            n_samples, n_features = 100000, 5
        elif test_status == "stress_test":
            n_samples, n_features = 1000000, 20
        # Features uniform in [-1, 1)
        data = (np.random.rand(n_samples, n_features) * 2) - 1
        if regression_type == "binary":
            # Unit-norm coefficient vector; label = side of the hyperplane
            coef = (np.random.rand(n_features) * 2) - 1
            coef /= np.linalg.norm(coef)
            output = (data @ coef) > 0
        elif regression_type.startswith("multiclass"):
            n_classes = 3 if regression_type == "multiclass-3" else 7
            coef = (np.random.rand(n_features, n_classes) * 2) - 1
            coef /= np.linalg.norm(coef, axis=0)
            # Label = class with the highest linear response
            output = (data @ coef).argmax(axis=1)
        output = output.astype(np.int32)
        out[test_status] = (regression_type, data, coef, output)
    return out
@pytest.mark.parametrize(
    "option", ["sample_weight", "class_weight", "balanced", "no_weight"]
)
@pytest.mark.parametrize(
    "test_status", ["regular", stress_param("stress_test")]
)
def test_logistic_regression_weighting(
    regression_dataset, option, test_status
):
    """Compare cuML vs sklearn logistic regression under sample/class weights."""
    regression_type, data, coef, output = regression_dataset[test_status]
    class_weight = None
    sample_weight = None
    if option == "sample_weight":
        n_samples = data.shape[0]
        sample_weight = np.abs(np.random.rand(n_samples))
    elif option == "class_weight":
        # Random weights for classes 0 and 1 only; sklearn treats classes
        # absent from the dict as weight 1
        class_weight = np.random.rand(2)
        class_weight = {0: class_weight[0], 1: class_weight[1]}
    elif option == "balanced":
        class_weight = "balanced"
    culog = cuLog(fit_intercept=False, class_weight=class_weight)
    culog.fit(data, output, sample_weight=sample_weight)
    sklog = skLog(fit_intercept=False, class_weight=class_weight)
    sklog.fit(data, output, sample_weight=sample_weight)
    skcoef = np.squeeze(sklog.coef_)
    cucoef = np.squeeze(culog.coef_)
    # Compare normalized coefficient directions rather than raw magnitudes
    if regression_type == "binary":
        skcoef /= np.linalg.norm(skcoef)
        cucoef /= np.linalg.norm(cucoef)
        unit_tol = 0.04
        total_tol = 0.08
    elif regression_type.startswith("multiclass"):
        skcoef /= np.linalg.norm(skcoef, axis=1)[:, None]
        cucoef /= np.linalg.norm(cucoef, axis=1)[:, None]
        unit_tol = 0.2
        total_tol = 0.3
    equality = array_equal(
        skcoef, cucoef, unit_tol=unit_tol, total_tol=total_tol
    )
    if not equality:
        # Dump the generating coefficients to aid debugging on failure
        print("\ncoef.shape: ", coef.shape)
        print("coef:\n", coef)
        print("cucoef.shape: ", cucoef.shape)
        print("cucoef:\n", cucoef)
    assert equality
    cuOut = culog.predict(data)
    skOut = sklog.predict(data)
    assert array_equal(skOut, cuOut, unit_tol=unit_tol, total_tol=total_tol)
@pytest.mark.parametrize("algo", [cuLog, cuRidge])
# ignoring warning about change of solver
@pytest.mark.filterwarnings("ignore::UserWarning:cuml[.*]")
def test_linear_models_set_params(algo):
    """set_params(**kwargs) must be equivalent to passing kwargs at init."""
    x = np.linspace(0, 1, 50)
    y = 2 * x
    model = algo()
    model.fit(x, y)
    coef_before = model.coef_
    # Non-default hyperparameters for each estimator type
    if algo == cuLog:
        params = {"penalty": "none", "C": 1, "max_iter": 30}
        model = algo(penalty="none", C=1, max_iter=30)
    else:
        model = algo(solver="svd", alpha=0.1)
        params = {"solver": "svd", "alpha": 0.1}
    model.fit(x, y)
    coef_after = model.coef_
    model = algo()
    model.set_params(**params)
    model.fit(x, y)
    coef_test = model.coef_
    # The new params must change the fit, and set_params must reproduce it
    assert not array_equal(coef_before, coef_after)
    assert array_equal(coef_after, coef_test)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("alpha", [0.1, 1.0, 10.0])
@pytest.mark.parametrize("l1_ratio", [0.1, 0.5, 0.9])
@pytest.mark.parametrize(
    "nrows", [unit_param(1000), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
def test_elasticnet_solvers_eq(datatype, alpha, l1_ratio, nrows, column_info):
    """The cd and qn ElasticNet solvers should produce near-equal models."""
    ncols, n_info = column_info
    X_train, X_test, y_train, y_test = make_regression_dataset(
        datatype, nrows, ncols, n_info
    )
    kwargs = {"alpha": alpha, "l1_ratio": l1_ratio}
    cd = cuElasticNet(solver="cd", **kwargs)
    cd.fit(X_train, y_train)
    cd_res = cd.predict(X_test)
    qn = cuElasticNet(solver="qn", **kwargs)
    qn.fit(X_train, y_train)
    # the results of the two models should be close (even if both are bad)
    # (qn is scored against cd's predictions, not the true labels)
    assert qn.score(X_test, cd_res) > 0.95
    # coefficients of the two models should be close
    assert np.corrcoef(cd.coef_, qn.coef_)[0, 1] > 0.98
@given(
    dataset=standard_regression_datasets(
        n_features=st.integers(min_value=1, max_value=10),
        dtypes=floating_dtypes(sizes=(32, 64)),
    ),
    algorithm=algorithms,
    xp=st.sampled_from([np, cp]),
    copy=st.sampled_from((True, False, None, ...)),
)
@example(make_regression(n_features=1), "svd", cp, True)
@example(make_regression(n_features=1), "svd", cp, False)
@example(make_regression(n_features=1), "svd", cp, None)
@example(make_regression(n_features=1), "svd", cp, ...)
@example(make_regression(n_features=1), "svd", np, False)
@example(make_regression(n_features=2), "svd", cp, False)
@example(make_regression(n_features=2), "eig", np, False)
def test_linear_regression_input_copy(dataset, algorithm, xp, copy):
    """Check when LinearRegression may mutate X depending on copy_X.

    ``copy is ...`` encodes "copy_X not passed at all"; both None and the
    omitted argument are expected to emit a UserWarning.
    """
    X, y = dataset
    X, y = xp.asarray(X), xp.asarray(y)
    X_copy = X.copy()
    with (pytest.warns(UserWarning) if copy in (None, ...) else nullcontext()):
        if copy is ...:  # no argument
            cuLR = cuLinearRegression(algorithm=algorithm)
        else:
            cuLR = cuLinearRegression(algorithm=algorithm, copy_X=copy)
        cuLR.fit(X, y)
    # Only the single-feature cupy path with copy_X=False mutates X in place
    if (X.shape[1] == 1 and xp is cp) and copy is False:
        assert not array_equal(X, X_copy)
    else:
        assert array_equal(X, X_copy)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_make_arima.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml
import pytest
# Note: this test is not really strict, it only checks that the function
# supports the given parameters and returns an output in the correct form.
# The test doesn't guarantee the quality of the generated series
# Testing parameters
# Each output_type entry pairs a container type with the batch size to use
output_type = [
    (None, 100),  # Default is cupy if None is used
    ("numpy", 100),
    ("cupy", 100000),
    ("numba", 100000),
    ("cudf", 100),
]
dtype = ["single", "double"]
n_obs = [50, 200]
random_state = [None, 1234]
# ARIMA orders as (p, d, q, P, D, Q, s, k) tuples (k = intercept flag)
order = [
    (3, 0, 0, 0, 0, 0, 0, 1),
    (0, 1, 2, 0, 0, 0, 0, 1),
    (1, 1, 1, 2, 1, 0, 12, 0),
]
@pytest.mark.parametrize("dtype", dtype)
@pytest.mark.parametrize("output_type,batch_size", output_type)
@pytest.mark.parametrize("n_obs", n_obs)
@pytest.mark.parametrize("random_state", random_state)
@pytest.mark.parametrize("order", order)
def test_make_arima(
    dtype, output_type, batch_size, n_obs, random_state, order
):
    """make_arima should accept every parameter combination and return an
    output of shape (n_obs, batch_size)."""
    p, d, q, P, D, Q, s, k = order
    with cuml.using_output_type(output_type):
        out = cuml.make_arima(
            batch_size,
            n_obs,
            (p, d, q),
            (P, D, Q, s),
            k,
            random_state=random_state,
            dtype=dtype,
        )
    assert out.shape == (n_obs, batch_size), "out shape mismatch"
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_benchmark.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.benchmark.bench_helper_funcs import fit, fit_predict
import time
from sklearn import metrics
from cuml.internals.safe_imports import gpu_only_import_from
import pytest
from cuml.internals.safe_imports import gpu_only_import
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.bench_helper_funcs import _training_data_to_numpy
from cuml.benchmark.runners import (
AccuracyComparisonRunner,
SpeedupComparisonRunner,
run_variations,
)
from cuml.internals.import_utils import has_umap
from cuml.internals.import_utils import has_xgboost
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
cuda = gpu_only_import_from("numba", "cuda")
pd = cpu_only_import("pandas")
# Module-level mark: every test in this file is currently skipped
pytestmark = pytest.mark.skip
@pytest.mark.parametrize("dataset", ["blobs", "regression", "classification"])
def test_data_generators(dataset):
    """Each generator should yield a numpy feature matrix with n_samples rows."""
    generated = datagen.gen_data(dataset, "numpy", n_samples=100, n_features=10)
    X = generated[0]
    assert isinstance(X, np.ndarray)
    assert X.shape[0] == 100
@pytest.mark.parametrize(
    "input_type", ["numpy", "cudf", "pandas", "gpuarray", "gpuarray-c"]
)
def test_data_generator_types(input_type):
    """gen_data should honor the requested output container type."""
    X, *_ = datagen.gen_data("blobs", input_type, n_samples=100, n_features=10)
    if input_type == "numpy":
        assert isinstance(X, np.ndarray)
    elif input_type == "cudf":
        assert isinstance(X, cudf.DataFrame)
    elif input_type == "pandas":
        assert isinstance(X, pd.DataFrame)
    elif input_type in ("gpuarray", "gpuarray-c"):
        # both gpuarray flavors must expose the CUDA array interface
        assert cuda.is_cuda_array(X)
    else:
        assert False
def test_data_generator_split():
    """test_fraction should yield a held-out split of the expected size."""
    X_train, y_train, X_test, y_test = datagen.gen_data(
        "blobs", "numpy", n_samples=100, n_features=10, test_fraction=0.20
    )
    assert X_train.shape == (100, 10)
    # 25 = 100 * 0.20 / 0.80 — test_fraction appears to be relative to the
    # combined total rather than n_samples; TODO confirm against datagen
    assert X_test.shape == (25, 10)
def test_run_variations():
    """run_variations should produce one result row per (rows, dims) combo."""
    algo = algorithms.algorithm_by_name("LogisticRegression")
    res = run_variations(
        [algo],
        dataset_name="classification",
        bench_rows=[100, 200],
        bench_dims=[10, 20],
    )
    # 2 row sizes x 2 dim sizes = 4 result rows
    assert res.shape[0] == 4
    assert (res.n_samples == 100).sum() == 2
    assert (res.n_features == 20).sum() == 2
def test_speedup_runner():
    """SpeedupComparisonRunner should report slow_time / fast_time speedup."""

    class MockAlgo:
        # Fake estimator whose fit() just sleeps for t seconds
        def __init__(self, t):
            self.t = t

        def fit(self, X, y):
            time.sleep(self.t)
            return

        def predict(self, X):
            # Predict class 1 for the first 20% of rows, 0 elsewhere
            nr = X.shape[0]
            res = np.zeros(nr)
            res[0 : int(nr / 5.0)] = 1.0
            return res

    class FastMockAlgo(MockAlgo):
        def __init__(self):
            MockAlgo.__init__(self, 0.1)

    class SlowMockAlgo(MockAlgo):
        def __init__(self):
            MockAlgo.__init__(self, 2)

    pair = algorithms.AlgorithmPair(
        SlowMockAlgo,
        FastMockAlgo,
        shared_args={},
        name="Mock",
        bench_func=fit_predict,
        accuracy_function=metrics.accuracy_score,
    )
    runner = SpeedupComparisonRunner([20], [5], dataset_name="zeros")
    results = runner.run(pair)[0]
    expected_speedup = SlowMockAlgo().t / FastMockAlgo().t
    # Loose tolerance since sleep-based timing is noisy
    assert results["speedup"] == pytest.approx(expected_speedup, 0.4)
def test_multi_reps():
    """n_reps should control how many times fit() is invoked per side."""

    class CountingAlgo:
        # Class-level counter shared by both the cpu and cuml instances
        tot_reps = 0

        def fit(self, X, y):
            CountingAlgo.tot_reps += 1

    pair = algorithms.AlgorithmPair(
        CountingAlgo,
        CountingAlgo,
        shared_args={},
        bench_func=fit,
        name="Counting",
    )
    runner = AccuracyComparisonRunner(
        [20], [5], dataset_name="zeros", test_fraction=0.20, n_reps=4
    )
    runner.run(pair)
    # Double the n_reps since it is used in cpu and cuml versions
    assert CountingAlgo.tot_reps == 8
def test_accuracy_runner():
    """AccuracyComparisonRunner should report the accuracy_function result."""
    # Set up data that should deliver accuracy of 0.20 if all goes right
    class MockAlgo:
        def fit(self, X, y):
            return

        def predict(self, X):
            # Predict class 1 for the first 20% of rows, 0 elsewhere
            nr = X.shape[0]
            res = np.zeros(nr)
            res[0 : int(nr / 5.0)] = 1.0
            return res

    pair = algorithms.AlgorithmPair(
        MockAlgo,
        MockAlgo,
        shared_args={},
        name="Mock",
        bench_func=fit_predict,
        accuracy_function=metrics.accuracy_score,
    )
    runner = AccuracyComparisonRunner(
        [20], [5], dataset_name="zeros", test_fraction=0.20
    )
    results = runner.run(pair)[0]
    # On the all-zero labels, the 80% zero predictions are the correct ones
    assert results["cuml_acc"] == pytest.approx(0.80)
# Only test a few algorithms (which collectively span several types)
# to reduce runtime burden
# skipping UMAP-Supervised due to issue
# https://github.com/rapidsai/cuml/issues/4243
@pytest.mark.parametrize(
    "algo_name", ["DBSCAN", "LogisticRegression", "ElasticNet", "FIL"]
)
def test_real_algos_runner(algo_name):
    """Smoke-test the accuracy runner against real cuML algorithms."""
    pair = algorithms.algorithm_by_name(algo_name)
    # xfail when the optional dependency backing the algorithm is missing
    if (algo_name == "UMAP-Supervised" and not has_umap()) or (
        algo_name == "FIL" and not has_xgboost()
    ):
        pytest.xfail()
    runner = AccuracyComparisonRunner(
        [50], [5], dataset_name="classification", test_fraction=0.20
    )
    results = runner.run(pair)[0]
    print(results)
    assert results["cuml_acc"] is not None
# Test FIL with several input types
@pytest.mark.parametrize(
    "input_type", ["numpy", "cudf", "gpuarray", "gpuarray-c"]
)
def test_fil_input_types(input_type):
    """The FIL benchmark should accept each supported input container."""
    pair = algorithms.algorithm_by_name("FIL")
    # FIL requires xgboost to build the model it runs inference on
    if not has_xgboost():
        pytest.xfail()
    runner = AccuracyComparisonRunner(
        [20],
        [5],
        dataset_name="classification",
        test_fraction=0.5,
        input_type=input_type,
    )
    results = runner.run(pair, run_cpu=False)[0]
    assert results["cuml_acc"] is not None
@pytest.mark.parametrize("input_type", ["numpy", "cudf", "pandas", "gpuarray"])
def test_training_data_to_numpy(input_type):
    """_training_data_to_numpy should coerce any supported container to numpy."""
    X, y, *_ = datagen.gen_data(
        "blobs", input_type, n_samples=100, n_features=10
    )
    X_np, y_np = _training_data_to_numpy(X, y)
    for converted in (X_np, y_np):
        assert isinstance(converted, np.ndarray)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_allocator.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.input_utils import sparse_scipy_to_cp
from cuml.testing.utils import small_classification_dataset
from cuml.naive_bayes import MultinomialNB
from cuml import LogisticRegression
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
# Older CuPy releases exposed using_allocator under cupy.cuda.memory;
# fall back there when the top-level import is unavailable.
try:
    from cupy.cuda import using_allocator as cupy_using_allocator
except ImportError:
    from cupy.cuda.memory import using_allocator as cupy_using_allocator
def dummy_allocator(nbytes):
    """Allocator stub that fails the test if cuML routes an allocation here."""
    message = "Dummy allocator should not be called"
    raise AssertionError(message)
def test_dummy_allocator():
    """Sanity check: cupy allocations inside the context hit dummy_allocator."""
    with pytest.raises(AssertionError):
        with cupy_using_allocator(dummy_allocator):
            a = cp.arange(10)
            del a
def test_logistic_regression():
    """LogisticRegression fit/predict must not allocate via cupy's allocator."""
    with cupy_using_allocator(dummy_allocator):
        X_train, X_test, y_train, y_test = small_classification_dataset(
            np.float32
        )
        y_train = y_train.astype(np.float32)
        y_test = y_test.astype(np.float32)
        culog = LogisticRegression()
        culog.fit(X_train, y_train)
        culog.predict(X_train)
def test_naive_bayes(nlp_20news):
    """MultinomialNB end-to-end must not allocate via cupy's allocator."""
    X, y = nlp_20news
    X = sparse_scipy_to_cp(X, cp.float32).astype(cp.float32)
    y = y.astype(cp.int32)
    with cupy_using_allocator(dummy_allocator):
        model = MultinomialNB()
        model.fit(X, y)
        # NOTE(review): predict is called twice — presumably to exercise any
        # lazily-allocated state on the second call; verify if intentional
        y_hat = model.predict(X)
        y_hat = model.predict(X)
        y_hat = model.predict_proba(X)
        y_hat = model.predict_log_proba(X)
        y_hat = model.score(X, y)
        del y_hat
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_auto_arima.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.input_utils import input_to_cuml_array
from cuml.tsa import auto_arima
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
###############################################################################
# Helpers and reference functions #
###############################################################################
def _build_division_map_ref(id_tracker, batch_size, n_sub):
    """Pure-NumPy reference for auto_arima._build_division_map.

    Given one array of member series ids per sub-batch, return two int32
    arrays mapping each series id to its sub-batch index and to its
    position inside that sub-batch.
    """
    id_to_model = np.zeros(batch_size, dtype=np.int32)
    id_to_pos = np.zeros(batch_size, dtype=np.int32)
    for model_idx in range(n_sub):
        members = id_tracker[model_idx]
        id_to_model[members] = model_idx
        id_to_pos[members] = np.arange(len(members))
    return id_to_model, id_to_pos
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize("batch_size", [10, 100])
@pytest.mark.parametrize("n_obs", [31, 65])
@pytest.mark.parametrize("prop_true", [0, 0.5, 1])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_divide_by_mask(batch_size, n_obs, prop_true, dtype):
    """Test the helper that splits a dataset in 2 based on a boolean mask"""
    # Generate random data, mask and batch indices
    # Series are stored column-wise: shape (n_obs, batch_size) after transpose
    data_np = (
        (np.random.uniform(-1.0, 1.0, (batch_size, n_obs)))
        .astype(dtype)
        .transpose()
    )
    nb_true = int(prop_true * batch_size)
    mask_np = np.random.permutation(
        [False] * (batch_size - nb_true) + [True] * nb_true
    )
    b_id_np = np.array(range(batch_size), dtype=np.int32)
    data, *_ = input_to_cuml_array(data_np)
    mask, *_ = input_to_cuml_array(mask_np)
    b_id, *_ = input_to_cuml_array(b_id_np)
    # Call the tested function
    sub_data, sub_id = [None, None], [None, None]
    (
        sub_data[0],
        sub_id[0],
        sub_data[1],
        sub_id[1],
    ) = auto_arima._divide_by_mask(data, mask, b_id)
    # Compute the expected results in pure Python
    sub_data_ref = [data_np[:, np.logical_not(mask_np)], data_np[:, mask_np]]
    sub_id_ref = [b_id_np[np.logical_not(mask_np)], b_id_np[mask_np]]
    # Compare the results
    for i in range(2):
        # First check the cases of empty sub-batches
        if sub_data[i] is None:
            # The reference must be empty
            assert sub_data_ref[i].shape[1] == 0
            # And the id array must be None too
            assert sub_id[i] is None
        # When the sub-batch is not empty, compare to the reference
        else:
            np.testing.assert_allclose(
                sub_data[i].to_output("numpy"), sub_data_ref[i]
            )
            np.testing.assert_array_equal(
                sub_id[i].to_output("numpy"), sub_id_ref[i]
            )
@pytest.mark.parametrize("batch_size", [10, 100])
@pytest.mark.parametrize("n_obs", [31, 65])
@pytest.mark.parametrize("n_sub", [1, 2, 10])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_divide_by_min(batch_size, n_obs, n_sub, dtype):
    """Test the helper that splits a dataset by selecting the minimum
    of a given criterion
    """
    # Generate random data, metrics and batch indices
    data_np = (
        (np.random.uniform(-1.0, 1.0, (batch_size, n_obs)))
        .astype(dtype)
        .transpose()
    )
    # One criterion value per (series, sub-batch) pair
    crit_np = (
        (np.random.uniform(-1.0, 1.0, (n_sub, batch_size)))
        .astype(dtype)
        .transpose()
    )
    b_id_np = np.array(range(batch_size), dtype=np.int32)
    data, *_ = input_to_cuml_array(data_np)
    crit, *_ = input_to_cuml_array(crit_np)
    b_id, *_ = input_to_cuml_array(b_id_np)
    # Call the tested function
    sub_batches, sub_id = auto_arima._divide_by_min(data, crit, b_id)
    # Compute the expected results in pure Python
    # Each series goes to the sub-batch whose criterion value is smallest
    which_sub = crit_np.argmin(axis=1)
    sub_batches_ref = []
    sub_id_ref = []
    for i in range(n_sub):
        sub_batches_ref.append(data_np[:, which_sub == i])
        sub_id_ref.append(b_id_np[which_sub == i])
    # Compare the results
    for i in range(n_sub):
        # First check the cases of empty sub-batches
        if sub_batches[i] is None:
            # The reference must be empty
            assert sub_batches_ref[i].shape[1] == 0
            # And the id array must be None too
            assert sub_id[i] is None
        # When the sub-batch is not empty, compare to the reference
        else:
            np.testing.assert_allclose(
                sub_batches[i].to_output("numpy"), sub_batches_ref[i]
            )
            np.testing.assert_array_equal(
                sub_id[i].to_output("numpy"), sub_id_ref[i]
            )
@pytest.mark.parametrize("batch_size", [25, 103, 1001])
@pytest.mark.parametrize("n_sub", [1, 2, 10])
def test_build_division_map(batch_size, n_sub):
"""Test the helper that builds a map of the new sub-batch and position
in this batch of each series in a divided batch
"""
# Generate the id tracker
# Note: in the real use case the individual id arrays are sorted but the
# helper function doesn't require that
tracker_np = np.array_split(np.random.permutation(batch_size), n_sub)
tracker = [
input_to_cuml_array(tr, convert_to_dtype=np.int32)[0]
for tr in tracker_np
]
# Call the tested function
id_to_model, id_to_pos = auto_arima._build_division_map(
tracker, batch_size
)
# Compute the expected results in pure Python
id_to_model_ref, id_to_pos_ref = _build_division_map_ref(
tracker_np, batch_size, n_sub
)
# Compare the results
np.testing.assert_array_equal(
id_to_model.to_output("numpy"), id_to_model_ref
)
np.testing.assert_array_equal(id_to_pos.to_output("numpy"), id_to_pos_ref)
@pytest.mark.parametrize("batch_size", [10, 100])
@pytest.mark.parametrize("n_obs", [31, 65])
@pytest.mark.parametrize("n_sub", [1, 2, 10])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_merge_series(batch_size, n_obs, n_sub, dtype):
"""Test the helper that merges a divided batch based on division maps
that track the sub-batch and position of each member
"""
# Generate an id tracker and compute id_to_sub and id_to_pos
tracker_np = np.array_split(np.random.permutation(batch_size), n_sub)
id_to_sub_np, id_to_pos_np = _build_division_map_ref(
tracker_np, batch_size, n_sub
)
id_to_sub, *_ = input_to_cuml_array(
id_to_sub_np, convert_to_dtype=np.int32
)
id_to_pos, *_ = input_to_cuml_array(
id_to_pos_np, convert_to_dtype=np.int32
)
# Generate the final dataset (expected result)
data_np = (
(np.random.uniform(-1.0, 1.0, (batch_size, n_obs)))
.astype(dtype)
.transpose()
)
# Divide the dataset according to the id tracker
data_div = []
for i in range(n_sub):
data_piece = np.zeros(
(n_obs, len(tracker_np[i])), dtype=dtype, order="F"
)
for j in range(len(tracker_np[i])):
data_piece[:, j] = data_np[:, tracker_np[i][j]]
data_div.append(input_to_cuml_array(data_piece)[0])
# Call the tested function
data = auto_arima._merge_series(data_div, id_to_sub, id_to_pos, batch_size)
# Compare the results
np.testing.assert_allclose(data.to_output("numpy"), data_np)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_cuml_descr_decor.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.input_utils import determine_array_type
from cuml.internals.input_utils import determine_array_dtype
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array import CumlArray
import pytest
from cuml.internals.safe_imports import cpu_only_import
import pickle
import cuml
import cuml.internals
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
test_input_types = ["numpy", "numba", "cupy", "cudf"]
test_output_types_str = ["numpy", "numba", "cupy", "cudf"]
test_dtypes_short = [
np.uint8,
np.float16,
np.int32,
np.float64,
]
unsupported_cudf_dtypes = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float16,
]
test_shapes = [10, (10, 1), (10, 5), (1, 10)]
class DummyTestEstimator(cuml.Base):
    """Minimal estimator used to exercise CumlArrayDescriptor and the
    cuml.internals API decorators in the tests below."""

    # Descriptor under test: converts the stored array between output types
    input_any_ = CumlArrayDescriptor()

    def _set_input(self, X):
        self.input_any_ = X

    @cuml.internals.api_base_return_any()
    def store_input(self, X):
        self.input_any_ = X

    @cuml.internals.api_return_any()
    def get_input(self):
        return self.input_any_

    # === Standard Functions ===
    def fit(self, X, convert_dtype=True) -> "DummyTestEstimator":
        # No-op fit; the decorator side effects are what the tests check
        return self

    def predict(self, X, convert_dtype=True) -> CumlArray:
        # Identity predict: the wrapper converts X to the active output type
        return X

    def transform(self, X, convert_dtype=False) -> CumlArray:
        # NOTE(review): intentionally returns None despite the annotation;
        # the tests here never rely on transform's return value.
        pass

    def fit_transform(self, X, y=None) -> CumlArray:
        return self.fit(X).transform(X)
def assert_array_identical(a, b):
    """Assert that ``a`` and ``b`` match in shape, dtype, memory order and
    element values. Two empty inputs are considered identical."""
    arr_a = input_to_cuml_array(a, order="K").array
    arr_b = input_to_cuml_array(b, order="K").array

    # Nothing to compare when both inputs are empty
    if not (len(a) or len(b)):
        return True

    for attr in ("shape", "dtype", "order"):
        assert getattr(arr_a, attr) == getattr(arr_b, attr)
    assert cp.all(cp.asarray(arr_a) == cp.asarray(arr_b)).item()
def create_input(input_type, input_dtype, input_shape, input_order):
    """Build an all-ones array with the requested dtype/shape/order and
    convert it to the requested container type."""
    ones = CumlArray(cp.ones(input_shape, dtype=input_dtype, order=input_order))
    return ones.to_output(input_type)
def create_output(X_in, output_type):
    """Round-trip ``X_in`` through CumlArray and emit it as ``output_type``."""
    return input_to_cuml_array(X_in, order="K").array.to_output(output_type)
@pytest.mark.parametrize("input_type", test_input_types)
def test_pickle(input_type):
if input_type == "numba":
pytest.skip("numba arrays cant be picked at this time")
est = DummyTestEstimator()
X_in = create_input(input_type, np.float32, (10, 5), "C")
est.store_input(X_in)
# Loop and verify we have filled the cache
for out_type in test_output_types_str:
with cuml.using_output_type(out_type):
assert_array_identical(
est.input_any_, create_output(X_in, out_type)
)
est_pickled_bytes = pickle.dumps(est)
est_unpickled: DummyTestEstimator = pickle.loads(est_pickled_bytes)
# Assert that we only resture the input
assert est_unpickled.__dict__["input_any_"].input_type == input_type
assert len(est_unpickled.__dict__["input_any_"].values) == 1
assert_array_identical(est.get_input(), est_unpickled.get_input())
assert_array_identical(est.input_any_, est_unpickled.input_any_)
# Loop one more time with the picked one to make sure it works right
for out_type in test_output_types_str:
with cuml.using_output_type(out_type):
assert_array_identical(
est.input_any_, create_output(X_in, out_type)
)
est_unpickled.output_type = out_type
assert_array_identical(
est_unpickled.input_any_, create_output(X_in, out_type)
)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("input_dtype", [np.float32, np.int16])
@pytest.mark.parametrize("input_shape", [10, (10, 5)])
@pytest.mark.parametrize("output_type", test_output_types_str)
def test_dec_input_output(input_type, input_dtype, input_shape, output_type):
if input_type == "cudf" or output_type == "cudf":
if input_dtype in unsupported_cudf_dtypes:
pytest.skip("Unsupported cudf combination")
X_in = create_input(input_type, input_dtype, input_shape, "C")
X_out = create_output(X_in, output_type)
# Test with output_type="input"
est = DummyTestEstimator(output_type="input")
est.store_input(X_in)
# Test is was stored internally correctly
assert X_in is est.get_input()
assert est.__dict__["input_any_"].input_type == input_type
# Check the current type matches input type
assert determine_array_type(est.input_any_) == input_type
assert_array_identical(est.input_any_, X_in)
# Switch output type and check type and equality
with cuml.using_output_type(output_type):
assert determine_array_type(est.input_any_) == output_type
assert_array_identical(est.input_any_, X_out)
# Now Test with output_type=output_type
est = DummyTestEstimator(output_type=output_type)
est.store_input(X_in)
# Check the current type matches output type
assert determine_array_type(est.input_any_) == output_type
assert_array_identical(est.input_any_, X_out)
with cuml.using_output_type("input"):
assert determine_array_type(est.input_any_) == input_type
assert_array_identical(est.input_any_, X_in)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("input_dtype", [np.float32, np.int16])
@pytest.mark.parametrize("input_shape", test_shapes)
def test_auto_fit(input_type, input_dtype, input_shape):
"""
Test autowrapping on fit that will set output_type, and n_features
"""
X_in = create_input(input_type, input_dtype, input_shape, "C")
# Test with output_type="input"
est = DummyTestEstimator()
est.fit(X_in)
def calc_n_features(shape):
if isinstance(shape, tuple) and len(shape) >= 1:
# When cudf and shape[1] is used, a series is created which will
# remove the last shape
if input_type == "cudf" and shape[1] == 1:
return None
return shape[1]
return None
assert est._input_type == input_type
assert est.target_dtype is None
assert est.n_features_in_ == calc_n_features(input_shape)
@pytest.mark.parametrize("input_type", test_input_types)
@pytest.mark.parametrize("base_output_type", test_input_types)
@pytest.mark.parametrize(
"global_output_type", test_output_types_str + ["input", None]
)
def test_auto_predict(input_type, base_output_type, global_output_type):
"""
Test autowrapping on predict that will set target_type
"""
X_in = create_input(input_type, np.float32, (10, 10), "F")
# Test with output_type="input"
est = DummyTestEstimator()
# With cuml.global_settings.output_type == None, this should return the
# input type
X_out = est.predict(X_in)
assert determine_array_type(X_out) == input_type
assert_array_identical(X_in, X_out)
# Test with output_type=base_output_type
est = DummyTestEstimator(output_type=base_output_type)
# With cuml.global_settings.output_type == None, this should return the
# base_output_type
X_out = est.predict(X_in)
assert determine_array_type(X_out) == base_output_type
assert_array_identical(X_in, X_out)
# Test with global_output_type, should return global_output_type
with cuml.using_output_type(global_output_type):
X_out = est.predict(X_in)
target_output_type = global_output_type
if target_output_type is None or target_output_type == "input":
target_output_type = base_output_type
if target_output_type == "input":
target_output_type = input_type
assert determine_array_type(X_out) == target_output_type
assert_array_identical(X_in, X_out)
@pytest.mark.parametrize("input_arg", ["X", "y", "bad", ...])
@pytest.mark.parametrize("target_arg", ["X", "y", "bad", ...])
@pytest.mark.parametrize("get_output_type", [True, False])
@pytest.mark.parametrize("get_output_dtype", [True, False])
def test_return_array(
input_arg: str,
target_arg: str,
get_output_type: bool,
get_output_dtype: bool,
):
"""
Test autowrapping on predict that will set target_type
"""
input_type_X = "numpy"
input_dtype_X = np.float64
input_type_Y = "cupy"
input_dtype_Y = np.int32
inner_type = "numba"
inner_dtype = np.float16
X_in = create_input(input_type_X, input_dtype_X, (10, 10), "F")
Y_in = create_input(input_type_Y, input_dtype_Y, (10, 10), "F")
def test_func(X, y):
if not get_output_type:
cuml.internals.set_api_output_type(inner_type)
if not get_output_dtype:
cuml.internals.set_api_output_dtype(inner_dtype)
return X
expected_to_fail = (input_arg == "bad" and get_output_type) or (
target_arg == "bad" and get_output_dtype
)
try:
test_func = cuml.internals.api_return_array(
input_arg=input_arg,
target_arg=target_arg,
get_output_type=get_output_type,
get_output_dtype=get_output_dtype,
)(test_func)
except ValueError:
assert expected_to_fail
return
else:
assert not expected_to_fail
X_out = test_func(X=X_in, y=Y_in)
target_type = None
target_dtype = None
if not get_output_type:
target_type = inner_type
else:
if input_arg == "y":
target_type = input_type_Y
else:
target_type = input_type_X
if not get_output_dtype:
target_dtype = inner_dtype
else:
if target_arg == "X":
target_dtype = input_dtype_X
else:
target_dtype = input_dtype_Y
assert determine_array_type(X_out) == target_type
assert determine_array_dtype(X_out) == target_dtype
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/conftest.py | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import create_synthetic_dataset
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import datasets
from sklearn.datasets import make_regression as skl_make_reg
from sklearn.datasets import make_classification as skl_make_clas
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import fetch_20newsgroups
from sklearn.utils import Bunch
from datetime import timedelta
from math import ceil
import hypothesis
from cuml.internals.safe_imports import gpu_only_import
import pytest
import os
import subprocess
import pandas as pd
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
# Add the import here for any plugins that should be loaded EVERY TIME
pytest_plugins = "cuml.testing.plugins.quick_run_plugin"
# Environment toggles for CI and for enabling full hypothesis generation
CI = os.environ.get("CI") in ("true", "1")
HYPOTHESIS_ENABLED = os.environ.get("HYPOTHESIS_ENABLED") in (
    "true",
    "1",
)
# Configure hypothesis profiles
# In CI every health check is suppressed; locally only the slow/large ones
HEALTH_CHECKS_SUPPRESSED_BY_DEFAULT = (
    list(hypothesis.HealthCheck)
    if CI
    else [
        hypothesis.HealthCheck.data_too_large,
        hypothesis.HealthCheck.too_slow,
    ]
)
# Without HYPOTHESIS_ENABLED only explicitly-provided examples are run
HYPOTHESIS_DEFAULT_PHASES = (
    (
        hypothesis.Phase.explicit,
        hypothesis.Phase.reuse,
        hypothesis.Phase.generate,
        hypothesis.Phase.target,
        hypothesis.Phase.shrink,
    )
    if HYPOTHESIS_ENABLED
    else (hypothesis.Phase.explicit,)
)
# Three profiles of increasing thoroughness; selected in pytest_configure
hypothesis.settings.register_profile(
    name="unit",
    deadline=None if CI else timedelta(milliseconds=2000),
    parent=hypothesis.settings.get_profile("default"),
    phases=HYPOTHESIS_DEFAULT_PHASES,
    max_examples=20,
    suppress_health_check=HEALTH_CHECKS_SUPPRESSED_BY_DEFAULT,
)
hypothesis.settings.register_profile(
    name="quality",
    parent=hypothesis.settings.get_profile("unit"),
    max_examples=100,
)
hypothesis.settings.register_profile(
    name="stress",
    parent=hypothesis.settings.get_profile("quality"),
    max_examples=200,
)
def pytest_addoption(parser):
    # Any custom option, that should be available at any time (not just a
    # plugin), goes here.
    group = parser.getgroup("cuML Custom Options")

    # (flag, help text) for the three test tiers
    tier_options = [
        (
            "--run_stress",
            "Runs tests marked with 'stress'. These are the most intense "
            "tests that take the longest to run and are designed to stress "
            "the hardware's compute resources.",
        ),
        (
            "--run_quality",
            "Runs tests marked with 'quality'. These tests are more "
            "computationally intense than 'unit', but less than 'stress'",
        ),
        (
            "--run_unit",
            "Runs tests marked with 'unit'. These are the quickest tests "
            "that are focused on accuracy and correctness.",
        ),
    ]
    for flag, help_text in tier_options:
        group.addoption(
            flag, action="store_true", default=False, help=help_text
        )
def pytest_collection_modifyitems(config, items):
    should_run_quality = config.getoption("--run_quality")
    should_run_stress = config.getoption("--run_stress")
    # Run unit is implied if no --run_XXX is set
    should_run_unit = config.getoption("--run_unit") or not (
        should_run_quality or should_run_stress
    )
    # For each tier that was not requested, mark its tests as skipped
    tiers = [
        ("unit", should_run_unit, "Unit tests run with --run_unit flag."),
        (
            "quality",
            should_run_quality,
            "Quality tests run with --run_quality flag.",
        ),
        (
            "stress",
            should_run_stress,
            "Stress tests run with --run_stress flag.",
        ),
    ]
    for keyword, enabled, reason in tiers:
        if enabled:
            continue
        skip_marker = pytest.mark.skip(reason=reason)
        for item in items:
            if keyword in item.keywords:
                item.add_marker(skip_marker)
def pytest_configure(config):
    """Session setup: GPU memory info and hypothesis profile selection."""
    # NOTE(review): passing None appears intended to reset CuPy to its
    # default allocation behavior — confirm against the CuPy version in use
    cp.cuda.set_allocator(None)
    # max_gpu_memory: Capacity of the GPU memory in GB
    pytest.max_gpu_memory = get_gpu_memory()
    pytest.adapt_stress_test = "CUML_ADAPT_STRESS_TESTS" in os.environ
    # Load special hypothesis profiles for either quality or stress tests.
    # Note that the profile can be manually overwritten with the
    # --hypothesis-profile command line option in which case the settings
    # specified here will be ignored.
    if config.getoption("--run_stress"):
        hypothesis.settings.load_profile("stress")
    elif config.getoption("--run_quality"):
        hypothesis.settings.load_profile("quality")
    else:
        hypothesis.settings.load_profile("unit")
@pytest.fixture(scope="module")
def nlp_20news():
try:
twenty_train = fetch_20newsgroups(
subset="train", shuffle=True, random_state=42
)
except: # noqa E722
pytest.xfail(reason="Error fetching 20 newsgroup dataset")
count_vect = CountVectorizer()
X = count_vect.fit_transform(twenty_train.data)
Y = cp.array(twenty_train.target)
return X, Y
@pytest.fixture(scope="module")
def housing_dataset():
try:
data = fetch_california_housing()
# failing to download has appeared as multiple varied errors in CI
except: # noqa E722
pytest.xfail(reason="Error fetching housing dataset")
X = cp.array(data["data"])
y = cp.array(data["target"])
feature_names = data["feature_names"]
return X, y, feature_names
@pytest.fixture(scope="module")
def deprecated_boston_dataset():
# dataset was removed in Scikit-learn 1.2, we should change it for a
# better dataset for tests, see
# https://github.com/rapidsai/cuml/issues/5158
df = pd.read_csv(
"https://raw.githubusercontent.com/scikit-learn/scikit-learn/baf828ca126bcb2c0ad813226963621cafe38adb/sklearn/datasets/data/boston_house_prices.csv",
header=None,
) # noqa: E501
n_samples = int(df[0][0])
data = df[list(np.arange(13))].values[2:n_samples].astype(np.float64)
targets = df[13].values[2:n_samples].astype(np.float64)
return Bunch(
data=data,
target=targets,
)
@pytest.fixture(
    scope="module",
    params=["digits", "deprecated_boston_dataset", "diabetes", "cancer"],
)
def test_datasets(request, deprecated_boston_dataset):
    """Parametrized fixture yielding one of several small sklearn-style
    datasets.

    Loaders are kept lazy so only the dataset selected by the current
    parameter is actually built, instead of loading all four on every
    fixture instantiation.
    """
    loaders = {
        "digits": datasets.load_digits,
        # Already materialized by its own fixture; just return it
        "deprecated_boston_dataset": lambda: deprecated_boston_dataset,
        "diabetes": datasets.load_diabetes,
        "cancer": datasets.load_breast_cancer,
    }
    return loaders[request.param]()
@pytest.fixture(scope="session")
def random_seed(request):
current_random_seed = os.getenv("PYTEST_RANDOM_SEED")
if current_random_seed is not None and current_random_seed.isdigit():
random_seed = int(current_random_seed)
else:
random_seed = np.random.randint(0, 1e6)
os.environ["PYTEST_RANDOM_SEED"] = str(random_seed)
print("\nRandom seed value:", random_seed)
return random_seed
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach each phase's report to the item (rep_setup / rep_call /
    rep_teardown) so fixtures can inspect the test outcome afterwards."""
    outcome = yield
    rep = outcome.get_result()
    # rep.when is one of "setup", "call", "teardown"
    setattr(item, "rep_" + rep.when, rep)
@pytest.fixture(scope="function")
def failure_logger(request):
"""
To be used when willing to log the random seed used in some failing test.
"""
yield
if request.node.rep_call.failed:
error_msg = " {} failed with seed: {}"
error_msg = error_msg.format(
request.node.nodeid, os.getenv("PYTEST_RANDOM_SEED")
)
print(error_msg)
@pytest.fixture(scope="module")
def exact_shap_regression_dataset():
return create_synthetic_dataset(
generator=skl_make_reg,
n_samples=101,
n_features=11,
test_size=3,
random_state_generator=42,
random_state_train_test_split=42,
noise=0.1,
)
@pytest.fixture(scope="module")
def exact_shap_classification_dataset():
return create_synthetic_dataset(
generator=skl_make_clas,
n_samples=101,
n_features=11,
test_size=3,
random_state_generator=42,
random_state_train_test_split=42,
)
def get_gpu_memory():
    """Return the capacity (in whole GB, rounded up) of the largest GPU
    reported by nvidia-smi.

    Raises subprocess.CalledProcessError if nvidia-smi exits non-zero and
    FileNotFoundError if it is not installed.
    """
    # Pass the command as an argument list: no shell parsing is needed,
    # which avoids an unnecessary shell dependency (and shell=True).
    output = subprocess.check_output(
        ["nvidia-smi", "--query-gpu=memory.total", "--format=csv"]
    ).decode("utf-8")
    lines = output.split("\n")
    # Drop the CSV header line
    lines.pop(0)
    gpus_memory = []
    for line in lines:
        # Data lines look like "16384 MiB"; keep the numeric part
        tokens = line.split(" ")
        if len(tokens) > 1:
            gpus_memory.append(int(tokens[0]))
    gpus_memory.sort()
    # Largest GPU, converted from MiB to GB and rounded up
    max_gpu_memory = ceil(gpus_memory[-1] / 1024)
    return max_gpu_memory
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_stats.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import array_equal
from cuml.prims.stats import cov
from cuml.prims.stats.covariance import _cov_sparse
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [500, 1500])
@pytest.mark.parametrize("sparse", [True, False])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_cov(nrows, ncols, sparse, dtype):
if sparse:
x = cupyx.scipy.sparse.random(
nrows, ncols, density=0.07, format="csr", dtype=dtype
)
else:
x = cp.random.random((nrows, ncols), dtype=dtype)
cov_result = cov(x, x)
assert cov_result.shape == (ncols, ncols)
if sparse:
x = x.todense()
local_cov = cp.cov(x, rowvar=False, ddof=0)
assert array_equal(cov_result, local_cov, 1e-6, with_sign=True)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [500, 1500])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("mtype", ["csr", "coo"])
def test_cov_sparse(nrows, ncols, dtype, mtype):
x = cupyx.scipy.sparse.random(
nrows, ncols, density=0.07, format=mtype, dtype=dtype
)
cov_result = _cov_sparse(x, return_mean=True)
# check cov
assert cov_result[0].shape == (ncols, ncols)
x = x.todense()
local_cov = cp.cov(x, rowvar=False, ddof=0)
assert array_equal(cov_result[0], local_cov, 1e-6, with_sign=True)
# check mean
local_mean = x.mean(axis=0)
assert array_equal(cov_result[1], local_mean, 1e-6, with_sign=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_tsvd.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.utils import check_random_state
from sklearn.decomposition import TruncatedSVD as skTSVD
from sklearn.datasets import make_blobs
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml.testing.utils import get_handle
from cuml import TruncatedSVD as cuTSVD
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
"name", [unit_param(None), quality_param("random"), stress_param("blobs")]
)
def test_tsvd_fit(datatype, name, use_handle):
if name == "blobs":
X, y = make_blobs(n_samples=500000, n_features=1000, random_state=0)
elif name == "random":
pytest.skip(
"fails when using random dataset " "used by sklearn for testing"
)
shape = 5000, 100
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
else:
n, p = 500, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * 0.1 + np.array([3, 4, 2, 3, 5])
if name != "blobs":
sktsvd = skTSVD(n_components=1)
sktsvd.fit(X)
handle, stream = get_handle(use_handle)
cutsvd = cuTSVD(n_components=1, handle=handle)
cutsvd.fit(X)
cutsvd.handle.sync()
if name != "blobs":
for attr in [
"singular_values_",
"components_",
"explained_variance_ratio_",
]:
with_sign = False if attr in ["components_"] else True
assert array_equal(
getattr(cutsvd, attr),
getattr(sktsvd, attr),
0.4,
with_sign=with_sign,
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
"name", [unit_param(None), quality_param("random"), stress_param("blobs")]
)
def test_tsvd_fit_transform(datatype, name, use_handle):
if name == "blobs":
X, y = make_blobs(n_samples=500000, n_features=1000, random_state=0)
elif name == "random":
pytest.skip(
"fails when using random dataset " "used by sklearn for testing"
)
shape = 5000, 100
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
else:
n, p = 500, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * 0.1 + np.array([3, 4, 2, 3, 5])
if name != "blobs":
skpca = skTSVD(n_components=1)
Xsktsvd = skpca.fit_transform(X)
handle, stream = get_handle(use_handle)
cutsvd = cuTSVD(n_components=1, handle=handle)
Xcutsvd = cutsvd.fit_transform(X)
cutsvd.handle.sync()
if name != "blobs":
assert array_equal(Xcutsvd, Xsktsvd, 1e-3, with_sign=True)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
"name", [unit_param(None), quality_param("random"), stress_param("blobs")]
)
def test_tsvd_inverse_transform(datatype, name, use_handle):
if name == "blobs":
pytest.skip("fails when using blobs dataset")
X, y = make_blobs(n_samples=500000, n_features=1000, random_state=0)
elif name == "random":
pytest.skip(
"fails when using random dataset " "used by sklearn for testing"
)
shape = 5000, 100
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
else:
n, p = 500, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * 0.1 + np.array([3, 4, 2, 3, 5])
cutsvd = cuTSVD(n_components=1)
Xcutsvd = cutsvd.fit_transform(X)
input_gdf = cutsvd.inverse_transform(Xcutsvd)
cutsvd.handle.sync()
assert array_equal(input_gdf, X, 0.4, with_sign=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_one_hot_encoder.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder
from cuml.testing.utils import (
stress_param,
from_df_to_numpy,
assert_inverse_equal,
generate_inputs_from_categories,
)
from cuml.preprocessing import OneHotEncoder
from cuml.internals.safe_imports import gpu_only_import_from
import pytest
from cuml.internals.safe_imports import cpu_only_import
import math
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
DataFrame = gpu_only_import_from("cudf", "DataFrame")
def _from_df_to_cupy(df):
    """Replace non-numeric (char) columns with their integer codepoints,
    then build a CuPy array from the now fully numeric dataframe.

    Note: mutates ``df`` in place.
    """
    is_pandas = isinstance(df, pd.DataFrame)
    for col in df.columns:
        if np.issubdtype(df[col].dtype, np.number):
            continue
        # cudf columns expose host values through .values_host
        values = df[col] if is_pandas else df[col].values_host
        df[col] = [ord(c) for c in values]
    return cp.array(from_df_to_numpy(df))
def _convert_drop(drop):
if drop is None or drop == "first":
return drop
return [ord(x) if isinstance(x, str) else x for x in drop.values()]
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_vs_skonehot(as_array):
X = DataFrame({"gender": ["M", "F", "F"], "int": [1, 3, 2]})
skX = from_df_to_numpy(X)
if as_array:
X = _from_df_to_cupy(X)
skX = cp.asnumpy(X)
enc = OneHotEncoder(sparse=True)
skohe = SkOneHotEncoder(sparse=True)
ohe = enc.fit_transform(X)
ref = skohe.fit_transform(skX)
cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
@pytest.mark.parametrize("drop", [None, "first", {"g": "F", "i": 3}])
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_inverse_transform(drop, as_array):
X = DataFrame({"g": ["M", "F", "F"], "i": [1, 3, 2]})
if as_array:
X = _from_df_to_cupy(X)
drop = _convert_drop(drop)
enc = OneHotEncoder(drop=drop)
ohe = enc.fit_transform(X)
inv = enc.inverse_transform(ohe)
assert_inverse_equal(inv, X)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_categories(as_array):
X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
categories = DataFrame({"chars": ["a", "b", "c"], "int": [0, 1, 2]})
if as_array:
X = _from_df_to_cupy(X)
categories = _from_df_to_cupy(categories).transpose()
enc = OneHotEncoder(categories=categories, sparse=False)
ref = cp.array(
[[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]
)
res = enc.fit_transform(X)
cp.testing.assert_array_equal(res, ref)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
@pytest.mark.filterwarnings(
"ignore:((.|\n)*)unknown((.|\n)*):UserWarning:" "cuml[.*]"
)
def test_onehot_fit_handle_unknown(as_array):
X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
Y = DataFrame({"chars": ["c", "b"], "int": [0, 2]})
if as_array:
X = _from_df_to_cupy(X)
Y = _from_df_to_cupy(Y)
enc = OneHotEncoder(handle_unknown="error", categories=Y)
with pytest.raises(KeyError):
enc.fit(X)
enc = OneHotEncoder(handle_unknown="ignore", categories=Y)
enc.fit(X)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_transform_handle_unknown(as_array):
X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
Y = DataFrame({"chars": ["c", "b"], "int": [0, 2]})
if as_array:
X = _from_df_to_cupy(X)
Y = _from_df_to_cupy(Y)
enc = OneHotEncoder(handle_unknown="error", sparse=False)
enc = enc.fit(X)
with pytest.raises(KeyError):
enc.transform(Y)
enc = OneHotEncoder(handle_unknown="ignore", sparse=False)
enc = enc.fit(X)
ohe = enc.transform(Y)
ref = cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
cp.testing.assert_array_equal(ohe, ref)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
@pytest.mark.filterwarnings(
"ignore:((.|\n)*)unknown((.|\n)*):UserWarning:" "cuml[.*]"
)
def test_onehot_inverse_transform_handle_unknown(as_array):
X = DataFrame({"chars": ["a", "b"], "int": [0, 2]})
Y_ohe = cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
ref = DataFrame({"chars": [None, "b"], "int": [0, 2]})
if as_array:
X = _from_df_to_cupy(X)
ref = DataFrame({0: [None, ord("b")], 1: [0, 2]})
enc = OneHotEncoder(handle_unknown="ignore")
enc = enc.fit(X)
df = enc.inverse_transform(Y_ohe)
assert_inverse_equal(df, ref)
@pytest.mark.parametrize("drop", [None, "first"])
@pytest.mark.parametrize("sparse", [True, False], ids=["sparse", "dense"])
@pytest.mark.parametrize("n_samples", [10, 1000, 20000, stress_param(250000)])
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_random_inputs(drop, sparse, n_samples, as_array):
X, ary = generate_inputs_from_categories(
n_samples=n_samples, as_array=as_array
)
enc = OneHotEncoder(sparse=sparse, drop=drop, categories="auto")
sk_enc = SkOneHotEncoder(sparse=sparse, drop=drop, categories="auto")
ohe = enc.fit_transform(X)
ref = sk_enc.fit_transform(ary)
if sparse:
cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
else:
cp.testing.assert_array_equal(ohe, ref)
inv_ohe = enc.inverse_transform(ohe)
assert_inverse_equal(inv_ohe, X)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_drop_idx_first(as_array):
X_ary = [["c", 2, "a"], ["b", 2, "b"]]
X = DataFrame({"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]})
if as_array:
X = _from_df_to_cupy(X)
X_ary = cp.asnumpy(X)
enc = OneHotEncoder(sparse=False, drop="first", categories="auto")
sk_enc = SkOneHotEncoder(sparse=False, drop="first", categories="auto")
ohe = enc.fit_transform(X)
ref = sk_enc.fit_transform(X_ary)
cp.testing.assert_array_equal(ohe, ref)
inv = enc.inverse_transform(ohe)
assert_inverse_equal(inv, X)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_drop_one_of_each(as_array):
X = DataFrame({"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]})
drop = dict({"chars": "b", "int": 2, "letters": "b"})
X_ary = from_df_to_numpy(X)
drop_ary = ["b", 2, "b"]
if as_array:
X = _from_df_to_cupy(X)
X_ary = cp.asnumpy(X)
drop = drop_ary = _convert_drop(drop)
enc = OneHotEncoder(sparse=False, drop=drop, categories="auto")
ohe = enc.fit_transform(X)
print(ohe.dtype)
ref = SkOneHotEncoder(
sparse=False, drop=drop_ary, categories="auto"
).fit_transform(X_ary)
cp.testing.assert_array_equal(ohe, ref)
inv = enc.inverse_transform(ohe)
assert_inverse_equal(inv, X)
@pytest.mark.parametrize(
"drop, pattern",
[
[dict({"chars": "b"}), "`drop` should have as many columns"],
[
dict({"chars": "b", "int": [2, 0]}),
"Trying to drop multiple values",
],
[
dict({"chars": "b", "int": 3}),
"Some categories [0-9a-zA-Z, ]* were not found",
],
[
DataFrame({"chars": "b", "int": 3}),
"Wrong input for parameter `drop`.",
],
],
)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_drop_exceptions(drop, pattern, as_array):
X = DataFrame({"chars": ["c", "b", "d"], "int": [2, 1, 0]})
if as_array:
X = _from_df_to_cupy(X)
drop = _convert_drop(drop) if not isinstance(drop, DataFrame) else drop
with pytest.raises(ValueError, match=pattern):
OneHotEncoder(sparse=False, drop=drop).fit(X)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_get_categories(as_array):
X = DataFrame({"chars": ["c", "b", "d"], "ints": [2, 1, 0]})
ref = [np.array(["b", "c", "d"]), np.array([0, 1, 2])]
if as_array:
X = _from_df_to_cupy(X)
ref[0] = np.array([ord(x) for x in ref[0]])
enc = OneHotEncoder().fit(X)
cats = enc.categories_
for i in range(len(ref)):
np.testing.assert_array_equal(ref[i], cats[i].to_numpy())
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_sparse_drop(as_array):
X = DataFrame({"g": ["M", "F", "F"], "i": [1, 3, 2], "l": [5, 5, 6]})
drop = {"g": "F", "i": 3, "l": 6}
ary = from_df_to_numpy(X)
drop_ary = ["F", 3, 6]
if as_array:
X = _from_df_to_cupy(X)
ary = cp.asnumpy(X)
drop = drop_ary = _convert_drop(drop)
enc = OneHotEncoder(sparse=True, drop=drop, categories="auto")
sk_enc = SkOneHotEncoder(sparse=True, drop=drop_ary, categories="auto")
ohe = enc.fit_transform(X)
ref = sk_enc.fit_transform(ary)
cp.testing.assert_array_equal(ohe.toarray(), ref.toarray())
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_categories_shape_mismatch(as_array):
X = DataFrame({"chars": ["a"], "int": [0]})
categories = DataFrame({"chars": ["a", "b", "c"]})
if as_array:
X = _from_df_to_cupy(X)
categories = _from_df_to_cupy(categories).transpose()
with pytest.raises(ValueError):
OneHotEncoder(categories=categories, sparse=False).fit(X)
def test_onehot_category_specific_cases():
    """Column-ordering regression cases from rapidsai/cuml#2690.

    All of these cases use sparse=False, where
    test_onehot_category_class_count uses sparse=True.
    """
    # Low-cardinality column before the high-cardinality one.
    frame = DataFrame()
    frame["low_cardinality_column"] = ["A"] * 200 + ["B"] * 56
    frame["high_cardinality_column"] = cp.linspace(0, 255, 256)
    OneHotEncoder(handle_unknown="ignore", sparse=False).fit_transform(frame)
    # High-cardinality column first: this ordering used to fail.
    frame = DataFrame()
    frame["high_cardinality_column"] = cp.linspace(0, 255, 256)
    frame["low_cardinality_column"] = ["A"] * 200 + ["B"] * 56
    OneHotEncoder(handle_unknown="ignore", sparse=False).fit_transform(frame)
@pytest.mark.parametrize(
"total_classes",
[np.iinfo(np.uint8).max, np.iinfo(np.uint16).max],
ids=["uint8", "uint16"],
)
def test_onehot_category_class_count(total_classes: int):
# See this for reasoning: https://github.com/rapidsai/cuml/issues/2690
# All tests use sparse=True to avoid memory errors
encoder = OneHotEncoder(handle_unknown="ignore", sparse=True)
# ==== 2 Rows ====
example_df = DataFrame()
example_df["high_cardinality_column"] = cp.linspace(
0, total_classes - 1, total_classes
)
example_df["low_cardinality_column"] = ["A"] * 200 + ["B"] * (
total_classes - 200
)
assert encoder.fit_transform(example_df).shape[1] == total_classes + 2
# ==== 3 Rows ====
example_df = DataFrame()
example_df["high_cardinality_column"] = cp.linspace(
0, total_classes - 1, total_classes
)
example_df["low_cardinality_column"] = ["A"] * total_classes
example_df["med_cardinality_column"] = ["B"] * total_classes
assert encoder.fit_transform(example_df).shape[1] == total_classes + 2
# ==== N Rows (Even Split) ====
num_rows = [3, 10, 100]
for row_count in num_rows:
class_per_row = int(math.ceil(total_classes / float(row_count))) + 1
example_df = DataFrame()
for row_idx in range(row_count):
example_df[str(row_idx)] = cp.linspace(
row_idx * class_per_row,
((row_idx + 1) * class_per_row) - 1,
class_per_row,
)
assert (
encoder.fit_transform(example_df).shape[1]
== class_per_row * row_count
)
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_onehot_get_feature_names(as_array):
fruits = ["apple", "banana", "strawberry"]
if as_array:
fruits = [ord(fruit[0]) for fruit in fruits]
sizes = [0, 1, 2]
X = DataFrame({"fruits": fruits, "sizes": sizes})
if as_array:
X = _from_df_to_cupy(X)
enc = OneHotEncoder().fit(X)
feature_names_ref = ["x0_" + str(fruit) for fruit in fruits] + [
"x1_" + str(size) for size in sizes
]
feature_names = enc.get_feature_names()
assert np.array_equal(feature_names, feature_names_ref)
feature_names_ref = ["fruit_" + str(fruit) for fruit in fruits] + [
"size_" + str(size) for size in sizes
]
feature_names = enc.get_feature_names(["fruit", "size"])
assert np.array_equal(feature_names, feature_names_ref)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_make_classification.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import array_equal
from cuml.datasets.classification import make_classification
from cuml.internals.safe_imports import gpu_only_import
import pytest
from functools import partial
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize("n_samples", [500, 1000])
@pytest.mark.parametrize("n_features", [50, 100])
@pytest.mark.parametrize("hypercube", [True, False])
@pytest.mark.parametrize("n_classes", [2, 4])
@pytest.mark.parametrize("n_clusters_per_class", [2, 4])
@pytest.mark.parametrize("n_informative", [7, 20])
@pytest.mark.parametrize("random_state", [None, 1234])
@pytest.mark.parametrize("order", ["C", "F"])
def test_make_classification(
n_samples,
n_features,
hypercube,
n_classes,
n_clusters_per_class,
n_informative,
random_state,
order,
):
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_classes=n_classes,
hypercube=hypercube,
n_clusters_per_class=n_clusters_per_class,
n_informative=n_informative,
random_state=random_state,
order=order,
)
assert X.shape == (n_samples, n_features)
import cupy as cp
assert len(cp.unique(y)) == n_classes
assert y.shape == (n_samples,)
if order == "F":
assert X.flags["F_CONTIGUOUS"]
elif order == "C":
assert X.flags["C_CONTIGUOUS"]
def test_make_classification_informative_features():
    """Test the construction of informative features in make_classification

    Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
    fully-specified `weights`.
    """
    # Create very separate clusters; check that vertices are unique and
    # correspond to classes
    class_sep = 1e6
    make = partial(
        make_classification,
        class_sep=class_sep,
        n_redundant=0,
        n_repeated=0,
        flip_y=0,
        shift=0,
        scale=1,
        shuffle=False,
    )
    # Each tuple: (n_informative, per-class weights, clusters per class).
    for n_informative, weights, n_clusters_per_class in [
        (2, [1], 1),
        (2, [1 / 3] * 3, 1),
        (2, [1 / 4] * 4, 1),
        (2, [1 / 2] * 2, 2),
        (2, [3 / 4, 1 / 4], 2),
        (10, [1 / 3] * 3, 10),
        (int(64), [1], 1),
    ]:
        n_classes = len(weights)
        n_clusters = n_classes * n_clusters_per_class
        n_samples = n_clusters * 50
        for hypercube in (False, True):
            X, y = make(
                n_samples=n_samples,
                n_classes=n_classes,
                weights=weights,
                n_features=n_informative,
                n_informative=n_informative,
                n_clusters_per_class=n_clusters_per_class,
                hypercube=hypercube,
                random_state=0,
            )
            assert X.shape == (n_samples, n_informative)
            assert y.shape == (n_samples,)
            # Cluster by sign, viewed as strings to allow uniquing
            # (each row's sign pattern is reinterpreted as one raw byte
            # string, so np.unique can bucket rows by orthant).
            signs = np.sign(cp.asnumpy(X))
            signs = signs.view(dtype="|S{0}".format(signs.strides[0]))
            unique_signs, cluster_index = np.unique(signs, return_inverse=True)
            assert (
                len(unique_signs) == n_clusters
            ), "Wrong number of clusters, or not in distinct quadrants"
            # Ensure on vertices of hypercube
            for cluster in range(len(unique_signs)):
                centroid = X[cluster_index == cluster].mean(axis=0)
                if hypercube:
                    # Hypercube vertices have all coordinates at +-class_sep.
                    assert array_equal(
                        cp.abs(centroid) / class_sep,
                        cp.ones(n_informative),
                        1e-5,
                    )
                else:
                    # Without hypercube placement, centroids should NOT sit
                    # exactly on the vertices.
                    with pytest.raises(AssertionError):
                        assert array_equal(
                            cp.abs(centroid) / class_sep,
                            cp.ones(n_informative),
                            1e-5,
                        )
    # Too many classes for the number of informative-feature vertices.
    with pytest.raises(ValueError):
        make(
            n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1
        )
    with pytest.raises(ValueError):
        make(
            n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_api.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.datasets import make_classification
from cuml.testing.utils import ClassEnumerator
from cuml.internals.base import Base
from cuml.internals.safe_imports import cpu_only_import
import inspect
import pytest
import cuml
import cuml.internals.mixins as cumix
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
###############################################################################
# Helper functions and classes #
###############################################################################
def func_positional_arg(func):
    """Count the positional (non-defaulted) parameters of *func*.

    Follows ``__wrapped__`` chains created by decorators. Callables
    without ``__code__`` (e.g. builtins or Cython functions) fall back
    to a default of 2.
    """
    if hasattr(func, "__wrapped__"):
        return func_positional_arg(func.__wrapped__)
    if hasattr(func, "__code__"):
        defaults = func.__defaults__
        n_defaults = len(defaults) if defaults is not None else 0
        return func.__code__.co_argcount - n_defaults
    return 2
@pytest.fixture(scope="session")
def dataset():
X, y = make_classification(100, 5, random_state=42)
X = X.astype(np.float64)
y = y.astype(np.float64)
return X, y
# Enumerate every public estimator class in cuml. UniversalBase is excluded
# because it is a base class, not a user-facing estimator.
models_config = ClassEnumerator(
    module=cuml, exclude_classes=(cuml.UniversalBase,)
)
models = models_config.get_models()
# tag system based on experimental tag system from Scikit-learn >=0.21
# https://scikit-learn.org/stable/developers/develop.html#estimator-tags
# Maps tag name -> expected Python type of the tag value; the value None
# for "preferred_input_order" marks it as special-cased (None or str).
tags = {
    # cuML specific tags
    "preferred_input_order": None,
    "X_types_gpu": list,
    # Scikit-learn API standard tags
    "allow_nan": bool,
    "binary_only": bool,
    "multilabel": bool,
    "multioutput": bool,
    "multioutput_only": bool,
    "no_validation": bool,
    "non_deterministic": bool,
    "pairwise": bool,
    "poor_score": bool,
    "preserves_dtype": list,
    "requires_fit": bool,
    "requires_y": bool,
    "requires_positive_X": bool,
    "requires_positive_y": bool,
    "stateless": bool,
    "X_types": list,
    "_skip_test": bool,
    "_xfail_checks": bool,
}
# Maps each tag mixin class -> the static tags it is expected to declare;
# consumed by test_tag_mixins.
tags_mixins = {
    cumix.FMajorInputTagMixin: {"preferred_input_order": "F"},
    cumix.CMajorInputTagMixin: {"preferred_input_order": "C"},
    cumix.SparseInputTagMixin: {
        "X_types_gpu": ["2darray", "sparse"],
        "X_types": ["2darray", "sparse"],
    },
    cumix.StringInputTagMixin: {
        "X_types_gpu": ["2darray", "string"],
        "X_types": ["2darray", "string"],
    },
    cumix.AllowNaNTagMixin: {"allow_nan": True},
    cumix.StatelessTagMixin: {"stateless": True},
}
class dummy_regressor_estimator(Base, cumix.RegressorMixin):
    # Minimal estimator used to exercise the RegressorMixin tag machinery.
    def __init__(self, *, handle=None, verbose=False, output_type=None):
        # verbose/output_type are accepted for signature compatibility but
        # not forwarded to Base — presumably irrelevant here; TODO confirm.
        super().__init__(handle=handle)
class dummy_classifier_estimator(Base, cumix.ClassifierMixin):
    # Minimal estimator used to exercise the ClassifierMixin tag machinery.
    def __init__(self, *, handle=None, verbose=False, output_type=None):
        super().__init__(handle=handle)
class dummy_cluster_estimator(Base, cumix.ClusterMixin):
    # Minimal estimator used to exercise the ClusterMixin tag machinery.
    def __init__(self, *, handle=None, verbose=False, output_type=None):
        super().__init__(handle=handle)
class dummy_class_with_tags(
    cumix.TagsMixin, cumix.FMajorInputTagMixin, cumix.CMajorInputTagMixin
):
    # Deliberately inherits both F- and C-major mixins: the MRO (F listed
    # first) decides which static tag wins. The static and dynamic hooks
    # disagree on X_types so tag precedence can be tested.
    @staticmethod
    def _more_static_tags():
        # Static tag hook, readable without instantiating the class.
        return {"X_types": ["categorical"]}
    def _more_tags(self):
        # Dynamic tag hook; takes precedence on instances.
        return {"X_types": ["string"]}
###############################################################################
# Tags Tests #
###############################################################################
@pytest.mark.parametrize("model", list(models.values()))
def test_get_tags(model):
# This test ensures that our estimators return the tags defined by
# Scikit-learn and our cuML specific tags
assert hasattr(model, "_get_tags")
model_tags = model._get_tags()
if hasattr(model, "_more_static_tags"):
import inspect
assert isinstance(
inspect.getattr_static(model, "_more_static_tags"), staticmethod
)
for tag, tag_type in tags.items():
# preferred input order can be None or a string
if tag == "preferred_input_order":
if model_tags[tag] is not None:
assert isinstance(model_tags[tag], str)
else:
assert isinstance(model_tags[tag], tag_type)
return True
def test_dynamic_tags_and_composition():
    """Static and dynamic tags compose via the MRO; dynamic tags win."""
    static_tags = dummy_class_with_tags._get_tags()
    dynamic_tags = dummy_class_with_tags()._get_tags()
    # In Python's MRO the class listed first in the bases sits closest to
    # the final class, so in dummy_class_with_tags the F-major input mixin
    # should override the C-major mixin.
    # (Removed a leftover debug print of the class __mro__.)
    assert static_tags["preferred_input_order"] == "F"
    assert dynamic_tags["preferred_input_order"] == "F"
    # Dynamic tags take precedence over static ones on the instantiated
    # object.
    assert static_tags["X_types"] == ["categorical"]
    assert dynamic_tags["X_types"] == ["string"]
@pytest.mark.parametrize("mixin", tags_mixins.keys())
def test_tag_mixins(mixin):
for tag, value in tags_mixins[mixin].items():
assert mixin._more_static_tags()[tag] == value
@pytest.mark.parametrize(
"model",
[
dummy_cluster_estimator,
dummy_regressor_estimator,
dummy_classifier_estimator,
],
)
def test_estimator_type_mixins(model):
assert hasattr(model, "_estimator_type")
if model._estimator_type in ["regressor", "classifier"]:
assert model._get_tags()["requires_y"]
else:
assert not model._get_tags()["requires_y"]
@pytest.mark.parametrize("model", list(models.values()))
def test_mro(model):
found_base = False
for cl in reversed(inspect.getmro(model.__class__)):
if cl == Base:
if found_base:
pytest.fail("Found Base class twice in the MRO")
else:
found_base = True
###############################################################################
# Fit Function Tests #
###############################################################################
@pytest.mark.parametrize("model_name", list(models.keys()))
# ignore random forest float64 warnings
@pytest.mark.filterwarnings("ignore:To use pickling or GPU-based")
def test_fit_function(dataset, model_name):
# This test ensures that our estimators return self after a call to fit
if model_name in [
"SparseRandomProjection",
"TSNE",
"TruncatedSVD",
"AutoARIMA",
"MultinomialNB",
"LabelEncoder",
]:
pytest.xfail("These models are not tested yet")
n_pos_args_constr = func_positional_arg(models[model_name].__init__)
if model_name in ["SparseRandomProjection", "GaussianRandomProjection"]:
model = models[model_name](n_components=2)
elif model_name in ["ARIMA", "AutoARIMA", "ExponentialSmoothing"]:
model = models[model_name](np.random.normal(0.0, 1.0, (10,)))
elif model_name in ["RandomForestClassifier", "RandomForestRegressor"]:
model = models[model_name](n_bins=32)
else:
if n_pos_args_constr == 1:
model = models[model_name]()
elif n_pos_args_constr == 2:
model = models[model_name](5)
else:
model = models[model_name](5, 5)
if hasattr(model, "fit"):
# Unfortunately co_argcount doesn't work with decorated functions,
# and the inspect module doesn't work with Cython. Therefore we need
# to register the number of arguments manually if `fit` is decorated
pos_args_spec = {
"ARIMA": 1,
"ElasticNet": 3,
"Lasso": 3,
"LinearRegression": 3,
"LogisticRegression": 3,
"NearestNeighbors": 2,
"PCA": 2,
"Ridge": 3,
"UMAP": 2,
}
n_pos_args_fit = (
pos_args_spec[model_name]
if model_name in pos_args_spec
else func_positional_arg(models[model_name].fit)
)
X, y = dataset
if model_name == "RandomForestClassifier":
y = y.astype(np.int32)
assert model.fit(X, y) is model
else:
if n_pos_args_fit == 1:
assert model.fit() is model
elif n_pos_args_fit == 2:
assert model.fit(X) is model
else:
assert model.fit(X, y) is model
# test classifiers correctly set self.classes_ during fit
if hasattr(model, "_estimator_type"):
if model._estimator_type == "classifier":
cp.testing.assert_array_almost_equal(
model.classes_, np.unique(y)
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_pickle.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from sklearn.manifold import trustworthiness
from sklearn.datasets import load_iris, make_classification, make_regression
from sklearn.base import clone
from cuml.testing.utils import (
array_equal,
unit_param,
stress_param,
ClassEnumerator,
get_classes_from_package,
compare_svm,
compare_probabilistic_svm,
)
from cuml.tsa.arima import ARIMA
import pytest
import pickle
import cuml
from cuml.internals.safe_imports import cpu_only_import, cpu_only_import_from
np = cpu_only_import("numpy")
scipy_sparse = cpu_only_import_from("scipy", "sparse")
# Enumerate the estimator classes to pickle-test, grouped by sub-package.
regression_config = ClassEnumerator(module=cuml.linear_model)
regression_models = regression_config.get_models()
solver_config = ClassEnumerator(
    module=cuml.solvers,
    # QN uses softmax here because some of the tests uses multiclass
    # logistic regression which requires a softmax loss
    custom_constructors={"QN": lambda: cuml.QN(loss="softmax")},
)
solver_models = solver_config.get_models()
# Clustering models with a predict(); DBSCAN/Agglomerative/HDBSCAN are
# excluded here and covered by their own dedicated model dicts below.
cluster_config = ClassEnumerator(
    module=cuml.cluster,
    exclude_classes=[cuml.DBSCAN, cuml.AgglomerativeClustering, cuml.HDBSCAN],
)
cluster_models = cluster_config.get_models()
decomposition_config = ClassEnumerator(module=cuml.decomposition)
decomposition_models = decomposition_config.get_models()
# Random projections are known to fail pickling; kept in a separate dict so
# the tests can xfail them.
decomposition_config_xfail = ClassEnumerator(module=cuml.random_projection)
decomposition_models_xfail = decomposition_config_xfail.get_models()
neighbor_config = ClassEnumerator(
    module=cuml.neighbors, exclude_classes=[cuml.neighbors.KernelDensity]
)
neighbor_models = neighbor_config.get_models()
dbscan_model = {"DBSCAN": cuml.DBSCAN}
agglomerative_model = {"AgglomerativeClustering": cuml.AgglomerativeClustering}
hdbscan_model = {"HDBSCAN": cuml.HDBSCAN}
umap_model = {"UMAP": cuml.UMAP}
rf_module = ClassEnumerator(module=cuml.ensemble)
rf_models = rf_module.get_models()
# Supervised k-neighbors estimators only (classifier/regressor).
k_neighbors_config = ClassEnumerator(
    module=cuml.neighbors,
    exclude_classes=[
        cuml.neighbors.NearestNeighbors,
        cuml.neighbors.KernelDensity,
    ],
)
k_neighbors_models = k_neighbors_config.get_models()
# Models that cannot be pickled before fit() has been called.
unfit_pickle_xfail = [
    "ARIMA",
    "AutoARIMA",
    "KalmanFilter",
    "BaseRandomForestModel",
    "ForestInference",
    "MulticlassClassifier",
    "OneVsOneClassifier",
    "OneVsRestClassifier",
]
# Models that cannot be cloned (sklearn.base.clone) before fit().
unfit_clone_xfail = [
    "AutoARIMA",
    "ARIMA",
    "BaseRandomForestModel",
    "GaussianRandomProjection",
    "MulticlassClassifier",
    "OneVsOneClassifier",
    "OneVsRestClassifier",
    "SparseRandomProjection",
    "UMAP",
]
# Union of every discovered model; time-series models need data in their
# constructors, so they get explicit factories.
all_models = get_classes_from_package(cuml, import_sub_packages=True)
all_models.update(
    {
        **regression_models,
        **solver_models,
        **cluster_models,
        **decomposition_models,
        **decomposition_models_xfail,
        **neighbor_models,
        **dbscan_model,
        **hdbscan_model,
        **agglomerative_model,
        **umap_model,
        **rf_models,
        **k_neighbors_models,
        "ARIMA": lambda: ARIMA(np.random.normal(0.0, 1.0, (10,))),
        "ExponentialSmoothing": lambda: cuml.ExponentialSmoothing(
            np.array([-217.72, -206.77])
        ),
    }
)
def pickle_save_load(tmpdir, func_create_model, func_assert):
    """Round-trip a model through pickle and run assertions on the result.

    Parameters
    ----------
    tmpdir : directory fixture exposing ``join(name)`` (pytest's tmpdir).
    func_create_model : callable returning ``(model, X_test)``.
    func_assert : callable invoked as ``func_assert(unpickled_model, X_test)``.
    """
    model, X_test = func_create_model()
    pickle_file = tmpdir.join("cu_model.pickle")
    try:
        with open(pickle_file, "wb") as pf:
            pickle.dump(model, pf)
    except (TypeError, ValueError) as e:
        # No manual pf.close() here: the `with` block already closed the
        # file, and `pf` would be unbound (NameError) if open() itself
        # raised. pytest.fail expects a message string, not the exception.
        pytest.fail(str(e))
    del model
    with open(pickle_file, "rb") as pf:
        cu_after_pickle_model = pickle.load(pf)
    func_assert(cu_after_pickle_model, X_test)
def make_classification_dataset(datatype, nrows, ncols, n_info, n_classes):
    """Build a train/test classification split; labels are int32."""
    features, labels = make_classification(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_classes=n_classes,
        random_state=0,
    )
    features = features.astype(datatype)
    labels = labels.astype(np.int32)
    X_train, X_test, y_train, _ = train_test_split(
        features, labels, train_size=0.8
    )
    return X_train, y_train, X_test
def make_dataset(datatype, nrows, ncols, n_info):
    """Build a train/test regression split in the requested dtype."""
    features, targets = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    features = features.astype(datatype)
    targets = targets.astype(datatype)
    X_train, X_test, y_train, _ = train_test_split(
        features, targets, train_size=0.8
    )
    return X_train, y_train, X_test
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("key", rf_models.keys())
@pytest.mark.parametrize("nrows", [unit_param(500)])
@pytest.mark.parametrize("ncols", [unit_param(16)])
@pytest.mark.parametrize("n_info", [unit_param(7)])
@pytest.mark.parametrize("n_classes", [unit_param(2), unit_param(5)])
def test_rf_regression_pickle(
tmpdir, datatype, nrows, ncols, n_info, n_classes, key
):
result = {}
if datatype == np.float64:
pytest.xfail(
"Pickling is not supported for dataset with" " dtype float64"
)
def create_mod():
if key == "RandomForestRegressor":
X_train, y_train, X_test = make_dataset(
datatype, nrows, ncols, n_info
)
else:
X_train, y_train, X_test = make_classification_dataset(
datatype, nrows, ncols, n_info, n_classes
)
model = rf_models[key]()
model.fit(X_train, y_train)
if datatype == np.float32:
predict_model = "GPU"
else:
predict_model = "CPU"
result["rf_res"] = model.predict(X_test, predict_model=predict_model)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["rf_res"], pickled_model.predict(X_test))
# Confirm no crash from score
pickled_model.score(
X_test, np.zeros(X_test.shape[0]), predict_model="GPU"
)
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", regression_models.keys())
@pytest.mark.parametrize(
"data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_regressor_pickle(tmpdir, datatype, keys, data_size, fit_intercept):
if (
data_size[0] == 500000
and datatype == np.float64
and ("LogisticRegression" in keys or "Ridge" in keys)
and pytest.max_gpu_memory < 32
):
if pytest.adapt_stress_test:
data_size[0] = data_size[0] * pytest.max_gpu_memory // 640
data_size[1] = data_size[1] * pytest.max_gpu_memory // 640
data_size[2] = data_size[2] * pytest.max_gpu_memory // 640
else:
pytest.skip(
"Insufficient GPU memory for this test."
"Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
)
result = {}
def create_mod():
nrows, ncols, n_info = data_size
if "LogisticRegression" in keys and nrows == 500000:
nrows, ncols, n_info = (nrows // 20, ncols // 20, n_info // 20)
X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
if "MBSGD" in keys:
model = regression_models[keys](
fit_intercept=fit_intercept, batch_size=nrows / 100
)
else:
model = regression_models[keys](fit_intercept=fit_intercept)
model.fit(X_train, y_train)
result["regressor"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["regressor"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", solver_models.keys())
@pytest.mark.parametrize(
"data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
def test_solver_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
if "QN" in keys and nrows == 500000:
nrows, ncols, n_info = (nrows // 20, ncols // 20, n_info // 20)
X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
model = solver_models[keys]()
model.fit(X_train, y_train)
result["solver"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["solver"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", cluster_models.keys())
@pytest.mark.parametrize(
"data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
def test_cluster_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
model = cluster_models[keys]()
model.fit(X_train)
result["cluster"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["cluster"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", decomposition_models_xfail.values())
@pytest.mark.parametrize(
"data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
@pytest.mark.xfail
def test_decomposition_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
model = decomposition_models_xfail[keys]()
result["decomposition"] = model.fit_transform(X_train)
return model, X_train
def assert_model(pickled_model, X_test):
assert array_equal(
result["decomposition"], pickled_model.transform(X_test)
)
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", umap_model.keys())
def test_umap_pickle(tmpdir, datatype, keys):
result = {}
def create_mod():
X_train = load_iris().data
model = umap_model[keys](output_type="numpy")
cu_before_pickle_transform = model.fit_transform(X_train)
result["umap_embedding"] = model.embedding_
n_neighbors = model.n_neighbors
result["umap"] = trustworthiness(
X_train, cu_before_pickle_transform, n_neighbors=n_neighbors
)
return model, X_train
def assert_model(pickled_model, X_train):
cu_after_embed = pickled_model.embedding_
n_neighbors = pickled_model.n_neighbors
assert array_equal(result["umap_embedding"], cu_after_embed)
cu_trust_after = trustworthiness(
X_train, pickled_model.transform(X_train), n_neighbors=n_neighbors
)
assert cu_trust_after >= result["umap"] - 0.2
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", decomposition_models.keys())
@pytest.mark.parametrize(
"data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
@pytest.mark.xfail
def test_decomposition_pickle_xfail(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
model = decomposition_models[keys]()
result["decomposition"] = model.fit_transform(X_train)
return model, X_train
def assert_model(pickled_model, X_test):
assert array_equal(
result["decomposition"], pickled_model.transform(X_test)
)
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("model_name", all_models.keys())
@pytest.mark.filterwarnings(
    "ignore:Transformers((.|\n)*):UserWarning:" "cuml[.*]"
)
def test_unfit_pickle(model_name):
    """Verify every estimator can be pickled before fit() is ever called."""
    # Models xfailed here cannot be used for hyperparameter sweeps with dask
    # or sklearn, which pickle unfitted estimators.
    known_failures = set(decomposition_models_xfail.keys()) | set(
        unfit_pickle_xfail
    )
    if model_name in known_failures:
        pytest.xfail()
    # An unfitted estimator must survive a dumps/loads round trip.
    estimator = all_models[model_name]()
    restored = pickle.loads(pickle.dumps(estimator))
    assert restored is not None
@pytest.mark.parametrize("model_name", all_models.keys())
@pytest.mark.filterwarnings(
    "ignore:Transformers((.|\n)*):UserWarning:" "cuml[.*]"
)
def test_unfit_clone(model_name):
    """Verify every estimator can be cloned before fit() is ever called."""
    if model_name in unfit_clone_xfail:
        pytest.xfail()
    # Cloning exercises get_params/set_params and hits many of the same
    # problems as pickling an unfitted estimator.
    estimator = all_models[model_name]()
    clone(estimator)
    # TODO: check parameters exactly?
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", neighbor_models.keys())
@pytest.mark.parametrize(
    "data_info",
    [unit_param([500, 20, 10, 5]), stress_param([500000, 1000, 500, 50])],
)
def test_neighbors_pickle(tmpdir, datatype, keys, data_info):
    """Round-trip neighbor models through pickle and verify kneighbors()
    returns identical distances and indices afterwards."""
    # Stress-sized KNN problems need >= 32 GB of GPU memory: either shrink
    # the row count proportionally or skip the test.
    if (
        data_info[0] == 500000
        and pytest.max_gpu_memory < 32
        and ("KNeighborsClassifier" in keys or "KNeighborsRegressor" in keys)
    ):
        if pytest.adapt_stress_test:
            data_info[0] = data_info[0] * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    result = {}  # closure dict carrying fit-time results into assert_model
    def create_mod():
        nrows, ncols, n_info, k = data_info
        X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
        model = neighbor_models[keys]()
        # Supervised KNN variants need labels; plain NearestNeighbors does not.
        if keys in k_neighbors_models.keys():
            model.fit(X_train, y_train)
        else:
            model.fit(X_train)
        result["neighbors_D"], result["neighbors_I"] = model.kneighbors(
            X_test, n_neighbors=k
        )
        return model, X_test
    def assert_model(pickled_model, X_test):
        D_after, I_after = pickled_model.kneighbors(
            X_test, n_neighbors=data_info[3]
        )
        assert array_equal(result["neighbors_D"], D_after)
        assert array_equal(result["neighbors_I"], I_after)
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "data_info",
    [
        unit_param([500, 20, 10, 3, 5]),
        stress_param([500000, 1000, 500, 10, 50]),
    ],
)
@pytest.mark.parametrize("keys", k_neighbors_models.keys())
def test_k_neighbors_classifier_pickle(tmpdir, datatype, data_info, keys):
    """Round-trip KNN classifiers/regressors through pickle: predictions and
    the internal index state must be preserved."""
    # Stress-sized problems need >= 32 GB of GPU memory: shrink or skip.
    if (
        data_info[0] == 500000
        and "NearestNeighbors" in keys
        and pytest.max_gpu_memory < 32
    ):
        if pytest.adapt_stress_test:
            data_info[0] = data_info[0] * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    result = {}  # closure dict carrying fit-time results into assert_model
    def create_mod():
        nrows, ncols, n_info, n_classes, k = data_info
        X_train, y_train, X_test = make_classification_dataset(
            datatype, nrows, ncols, n_info, n_classes
        )
        model = k_neighbors_models[keys](n_neighbors=k)
        model.fit(X_train, y_train)
        result["neighbors"] = model.predict(X_test)
        return model, X_test
    def assert_model(pickled_model, X_test):
        D_after = pickled_model.predict(X_test)
        assert array_equal(result["neighbors"], D_after)
        # Internal state must indicate one fitted index with training data.
        state = pickled_model.__dict__
        assert state["n_indices"] == 1
        assert "_fit_X" in state
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "data_info",
    [unit_param([500, 20, 10, 5]), stress_param([500000, 1000, 500, 50])],
)
def test_neighbors_pickle_nofit(tmpdir, datatype, data_info):
    # Pickle an *unfitted* NearestNeighbors model, then fit the reloaded copy
    # and check internal index state flips from empty to populated.
    result = {}
    """
    .. note:: This test digs down a bit far into the
        internals of the implementation, but it's
        important that regressions do not occur
        from changes to the class.
    """
    def create_mod():
        nrows, ncols, n_info, k = data_info
        X_train, _, X_test = make_dataset(datatype, nrows, ncols, n_info)
        model = cuml.neighbors.NearestNeighbors()
        result["model"] = model
        return model, [X_train, X_test]
    def assert_model(loaded_model, X):
        # Before fit: no index, no stored training data.
        state = loaded_model.__dict__
        assert state["n_indices"] == 0
        assert "_fit_X" not in state
        loaded_model.fit(X[0])
        # After fit: exactly one index plus the training matrix.
        state = loaded_model.__dict__
        assert state["n_indices"] == 1
        assert "_fit_X" in state
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", dbscan_model.keys())
@pytest.mark.parametrize(
    "data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
def test_dbscan_pickle(tmpdir, datatype, keys, data_size):
    """Round-trip DBSCAN through pickle and verify fit_predict on the same
    data yields the same labels."""
    # Stress-sized problems need >= 32 GB of GPU memory: shrink or skip.
    if data_size[0] == 500000 and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            data_size[0] = data_size[0] * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    result = {}  # closure dict carrying fit-time results into assert_model
    def create_mod():
        nrows, ncols, n_info = data_size
        X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
        model = dbscan_model[keys]()
        result["dbscan"] = model.fit_predict(X_train)
        return model, X_train
    def assert_model(pickled_model, X_train):
        # Re-clustering the same data must reproduce the original labels.
        pickle_after_predict = pickled_model.fit_predict(X_train)
        assert array_equal(result["dbscan"], pickle_after_predict)
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", agglomerative_model.keys())
@pytest.mark.parametrize(
    "data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
def test_agglomerative_pickle(tmpdir, datatype, keys, data_size):
    """Round-trip AgglomerativeClustering through pickle and verify that
    fit_predict on the same data yields the same labels."""
    result = {}  # closure dict carrying fit-time results into assert_model
    def create_mod():
        nrows, ncols, n_info = data_size
        X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
        model = agglomerative_model[keys]()
        result["agglomerative"] = model.fit_predict(X_train)
        return model, X_train
    def assert_model(pickled_model, X_train):
        pickle_after_predict = pickled_model.fit_predict(X_train)
        assert array_equal(result["agglomerative"], pickle_after_predict)
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", hdbscan_model.keys())
@pytest.mark.parametrize(
    "data_size", [unit_param([500, 20, 10]), stress_param([500000, 1000, 500])]
)
@pytest.mark.parametrize("prediction_data", [True, False])
def test_hdbscan_pickle(tmpdir, datatype, keys, data_size, prediction_data):
    """Round-trip HDBSCAN through pickle: labels, the linkage/condensed trees
    and (optionally) the prediction-data outputs must all be preserved."""
    result = {}  # closure dict carrying fit-time results into assert_model
    from cuml.cluster.hdbscan.prediction import all_points_membership_vectors
    from cuml.cluster.hdbscan.prediction import approximate_predict
    def create_mod():
        nrows, ncols, n_info = data_size
        X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
        model = hdbscan_model[keys](prediction_data=prediction_data)
        result["hdbscan"] = model.fit_predict(X_train)
        # Snapshot tree structures as numpy for exact comparison after reload.
        result[
            "hdbscan_single_linkage_tree"
        ] = model.single_linkage_tree_.to_numpy()
        result["condensed_tree"] = model.condensed_tree_.to_numpy()
        if prediction_data:
            result["hdbscan_all_points"] = all_points_membership_vectors(model)
            result["hdbscan_approx"] = approximate_predict(model, X_train)
        return model, X_train
    def assert_model(pickled_model, X_train):
        labels = pickled_model.fit_predict(X_train)
        assert array_equal(result["hdbscan"], labels)
        assert np.all(
            result["hdbscan_single_linkage_tree"]
            == pickled_model.single_linkage_tree_.to_numpy()
        )
        assert np.all(
            result["condensed_tree"]
            == pickled_model.condensed_tree_.to_numpy()
        )
        if prediction_data:
            # Prediction-data auxiliary structures must also survive pickling.
            all_points = all_points_membership_vectors(pickled_model)
            approx = approximate_predict(pickled_model, X_train)
            assert array_equal(result["hdbscan_all_points"], all_points)
            assert array_equal(result["hdbscan_approx"], approx)
    pickle_save_load(tmpdir, create_mod, assert_model)
def test_tsne_pickle(tmpdir):
    """Pickle an unfitted TSNE model, fit the reloaded copy, then pickle the
    fitted model and verify the embedding trustworthiness is unchanged."""
    result = {}  # closure dict shared across the two round trips below
    def create_mod():
        iris = load_iris()
        # Deterministic ~75% subsample of iris.
        iris_selection = np.random.RandomState(42).choice(
            [True, False], 150, replace=True, p=[0.75, 0.25]
        )
        X = iris.data[iris_selection]
        model = cuml.manifold.TSNE(n_components=2, random_state=199)
        result["model"] = model
        return model, X
    def assert_model(pickled_model, X):
        model_params = pickled_model.__dict__
        # Confirm params in model are identical
        # NOTE(review): this loop zips model_params' keys with its own values,
        # so each assert compares the dict against itself and can never fail.
        # Comparing against result["model"].__dict__ would be a real check.
        new_keys = set(model_params.keys())
        for key, value in zip(model_params.keys(), model_params.values()):
            assert model_params[key] == value
            new_keys -= set([key])
        # Check all keys have been checked
        assert len(new_keys) == 0
        # Transform data
        result["fit_model"] = pickled_model.fit(X)
        result["data"] = X
        result["trust"] = trustworthiness(
            X, pickled_model.embedding_, n_neighbors=10
        )
    def create_mod_2():
        # Second round trip: pickle the now-fitted model.
        model = result["fit_model"]
        return model, result["data"]
    def assert_second_model(pickled_model, X):
        trust_after = trustworthiness(
            X, pickled_model.embedding_, n_neighbors=10
        )
        # Embedding (and hence trustworthiness) must be exactly preserved.
        assert result["trust"] == trust_after
    pickle_save_load(tmpdir, create_mod, assert_model)
    pickle_save_load(tmpdir, create_mod_2, assert_second_model)
# Probabilistic SVM is tested separately because it is a meta estimator that
# owns a set of base SV classifiers.
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "params", [{"probability": True}, {"probability": False}]
)
@pytest.mark.parametrize("multiclass", [True, False])
@pytest.mark.parametrize("sparse", [False, True])
def test_svc_pickle(tmpdir, datatype, params, multiclass, sparse):
    """Round-trip SVC (probabilistic and plain, dense and sparse, binary and
    multiclass) through pickle and compare against the original model."""
    result = {}  # closure dict carrying the fitted model into assert_model
    if sparse and params["probability"]:
        pytest.skip("Probabilistic SVC does not support sparse input")
    def create_mod():
        model = cuml.svm.SVC(**params)
        iris = load_iris()
        # Deterministic ~75% subsample of iris.
        iris_selection = np.random.RandomState(42).choice(
            [True, False], 150, replace=True, p=[0.75, 0.25]
        )
        X_train = iris.data[iris_selection]
        if sparse:
            X_train = scipy_sparse.csr_matrix(X_train)
        y_train = iris.target[iris_selection]
        if not multiclass:
            # Collapse to a binary problem: class 0 vs the rest.
            y_train = (y_train > 0).astype(datatype)
        data = [X_train, y_train]
        result["model"] = model.fit(X_train, y_train)
        return model, data
    def assert_model(pickled_model, data):
        # Probabilistic SVC needs its own comparison helper.
        if result["model"].probability:
            print("Comparing probabilistic svc")
            compare_probabilistic_svm(
                result["model"], pickled_model, data[0], data[1], 0, 0
            )
        else:
            print("comparing base svc")
            compare_svm(result["model"], pickled_model, data[0], data[1])
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "params", [{"probability": True}, {"probability": False}]
)
@pytest.mark.parametrize("multiclass", [True, False])
def test_linear_svc_pickle(tmpdir, datatype, params, multiclass):
    """Round-trip LinearSVC through pickle and compare predictions (or the
    probabilistic outputs) against the original model."""
    result = {}  # closure dict carrying the fitted model into assert_model
    def create_mod():
        model = cuml.svm.LinearSVC(**params)
        iris = load_iris()
        # Deterministic ~75% subsample of iris.
        iris_selection = np.random.RandomState(42).choice(
            [True, False], 150, replace=True, p=[0.75, 0.25]
        )
        X_train = iris.data[iris_selection]
        y_train = iris.target[iris_selection]
        if not multiclass:
            # Collapse to a binary problem: class 0 vs the rest.
            y_train = (y_train > 0).astype(datatype)
        data = [X_train, y_train]
        result["model"] = model.fit(X_train, y_train)
        return model, data
    def assert_model(pickled_model, data):
        if result["model"].probability:
            print("Comparing probabilistic LinearSVC")
            compare_probabilistic_svm(
                result["model"], pickled_model, data[0], data[1], 0, 0
            )
        else:
            # Non-probabilistic case: predictions must match exactly.
            print("comparing base LinearSVC")
            pred_before = result["model"].predict(data[0])
            pred_after = pickled_model.predict(data[0])
            assert array_equal(pred_before, pred_after)
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("nrows", [unit_param(500)])
@pytest.mark.parametrize("ncols", [unit_param(16)])
@pytest.mark.parametrize("n_info", [unit_param(7)])
@pytest.mark.parametrize("sparse", [False, True])
def test_svr_pickle(tmpdir, datatype, nrows, ncols, n_info, sparse):
    """Round-trip SVR (dense and sparse input) through pickle and verify
    predictions are unchanged."""
    result = {}  # closure dict carrying fit-time predictions
    def create_mod():
        X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
        if sparse:
            X_train = scipy_sparse.csr_matrix(X_train)
            X_test = scipy_sparse.csr_matrix(X_test)
        model = cuml.svm.SVR()
        model.fit(X_train, y_train)
        result["svr"] = model.predict(X_test)
        return model, X_test
    def assert_model(pickled_model, X_test):
        assert array_equal(result["svr"], pickled_model.predict(X_test))
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("nrows", [unit_param(500)])
@pytest.mark.parametrize("ncols", [unit_param(16)])
@pytest.mark.parametrize("n_info", [unit_param(7)])
def test_svr_pickle_nofit(tmpdir, datatype, nrows, ncols, n_info):
    """Pickle an unfitted SVR, then fit the reloaded copy and check the
    internal fit-status flag transitions from -1 (unfit) to 0 (fitted)."""
    def create_mod():
        X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
        model = cuml.svm.SVR()
        return model, [X_train, y_train, X_test]
    def assert_model(pickled_model, X):
        state = pickled_model.__dict__
        assert state["_fit_status_"] == -1
        pickled_model.fit(X[0], X[1])
        state = pickled_model.__dict__
        assert state["_fit_status_"] == 0
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float64])
@pytest.mark.parametrize("nrows", [unit_param(1024)])
@pytest.mark.parametrize("ncols", [unit_param(300000)])
@pytest.mark.parametrize("n_info", [unit_param(2)])
def test_sparse_svr_pickle(tmpdir, datatype, nrows, ncols, n_info):
    """
    A separate test to cover the case when the SVM model
    parameters are sparse. Sparse input alone does not
    guarantee that the model parameters (SvmModel.support_matrix)
    are sparse (a dense representation can be chosen for
    performance reasons). The large number of features used
    here will result in a sparse model representation.
    """
    result = {}  # closure dict carrying fit-time predictions
    def create_mod():
        # Very wide, very sparse random input forces a sparse support matrix.
        X_train = scipy_sparse.random(
            nrows,
            ncols,
            density=0.001,
            format="csr",
            dtype=datatype,
            random_state=42,
        )
        y_train = np.random.RandomState(42).rand(nrows)
        X_test = X_train
        model = cuml.svm.SVR(max_iter=1)
        model.fit(X_train, y_train)
        result["svr"] = model.predict(X_test)
        return model, X_test
    def assert_model(pickled_model, X_test):
        assert array_equal(result["svr"], pickled_model.predict(X_test))
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("nrows", [unit_param(500)])
@pytest.mark.parametrize("ncols", [unit_param(16)])
@pytest.mark.parametrize("n_info", [unit_param(7)])
@pytest.mark.parametrize(
    "params", [{"probability": True}, {"probability": False}]
)
def test_svc_pickle_nofit(tmpdir, datatype, nrows, ncols, n_info, params):
    """Pickle an unfitted SVC, then fit the reloaded copy and check the
    internal fit-status flag transitions from -1 (unfit) to 0 (fitted)."""
    def create_mod():
        X_train, y_train, X_test = make_classification_dataset(
            datatype, nrows, ncols, n_info, n_classes=2
        )
        model = cuml.svm.SVC(**params)
        return model, [X_train, y_train, X_test]
    def assert_model(pickled_model, X):
        state = pickled_model.__dict__
        assert state["_fit_status_"] == -1
        pickled_model.fit(X[0], X[1])
        state = pickled_model.__dict__
        assert state["_fit_status_"] == 0
    pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize("datatype", [np.float32])
@pytest.mark.parametrize("key", ["RandomForestClassifier"])
@pytest.mark.parametrize("nrows", [unit_param(100)])
@pytest.mark.parametrize("ncols", [unit_param(20)])
@pytest.mark.parametrize("n_info", [unit_param(10)])
@pytest.mark.filterwarnings(
    "ignore:((.|\n)*)n_streams((.|\n)*):UserWarning:" "cuml[.*]"
)
def test_small_rf(tmpdir, key, datatype, nrows, ncols, n_info):
    """Round-trip a minimal (single-stump) random forest through pickle and
    verify predictions are unchanged."""
    result = {}  # closure dict carrying fit-time predictions
    def create_mod():
        X_train, y_train, X_test = make_classification_dataset(
            datatype, nrows, ncols, n_info, n_classes=2
        )
        # Smallest possible forest: one tree of depth one.
        model = rf_models[key](
            n_estimators=1,
            max_depth=1,
            max_features=1.0,
            random_state=10,
            n_bins=32,
        )
        model.fit(X_train, y_train)
        result["rf_res"] = model.predict(X_test)
        return model, X_test
    def assert_model(pickled_model, X_test):
        assert array_equal(result["rf_res"], pickled_model.predict(X_test))
    pickle_save_load(tmpdir, create_mod, assert_model)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_stratified_kfold.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.model_selection import StratifiedKFold
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
def get_x_y(n_samples, n_classes):
    """Build a single-column cudf DataFrame and a shuffled cudf label Series
    with ``n_samples`` rows spread evenly over ``n_classes`` classes."""
    labels = cp.arange(n_samples) % n_classes
    cp.random.shuffle(labels)
    frame = cudf.DataFrame({"x": range(n_samples)})
    return frame, cudf.Series(labels)
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("n_splits", [5, 10])
@pytest.mark.parametrize("n_samples", [10000])
@pytest.mark.parametrize("n_classes", [2, 10])
def test_split_dataframe(n_samples, n_classes, n_splits, shuffle):
    """StratifiedKFold must produce folds of the right size with identical
    per-class ratios in the train and test partitions."""
    X, y = get_x_y(n_samples, n_classes)
    kf = StratifiedKFold(n_splits=n_splits, shuffle=shuffle)
    for train_index, test_index in kf.split(X, y):
        # Every sample lands in exactly one of the two partitions.
        assert len(train_index) + len(test_index) == n_samples
        assert len(train_index) == len(test_index) * (n_splits - 1)
        # Stratification: each class keeps the same proportion in both parts.
        for i in range(n_classes):
            ratio_tr = (y[train_index] == i).sum() / len(train_index)
            ratio_te = (y[test_index] == i).sum() / len(test_index)
            assert ratio_tr == ratio_te
def test_num_classes_check():
    """StratifiedKFold must reject a target with fewer than two classes."""
    X, y = get_x_y(n_samples=1000, n_classes=1)
    splitter = StratifiedKFold(n_splits=5)
    expected = "number of unique classes cannot be less than 2"
    with pytest.raises(ValueError, match=expected):
        for _split in splitter.split(X, y):
            pass
@pytest.mark.parametrize("n_splits", [0, 1])
def test_invalid_folds(n_splits):
    """StratifiedKFold must reject n_splits values below 2."""
    X, y = get_x_y(n_samples=1000, n_classes=2)
    expected = f"n_splits {n_splits} is not a integer at least 2"
    with pytest.raises(ValueError, match=expected):
        splitter = StratifiedKFold(n_splits=n_splits)
        for _split in splitter.split(X, y):
            break
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_base.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import cuml
import pytest
import numpydoc.docscrape
from pylibraft.common.cuda import Stream
from cuml.testing.utils import (
get_classes_from_package,
small_classification_dataset,
)
from cuml._thirdparty.sklearn.utils.skl_dependencies import (
BaseEstimator as sklBaseEstimator,
)
all_base_children = get_classes_from_package(cuml, import_sub_packages=True)
def test_base_class_usage():
    """cuml.Base must expose the three core parameters every estimator
    inherits: handle, verbose and output_type."""
    base = cuml.Base()
    base.handle.sync()
    params = base.get_param_names()
    for expected in ("handle", "verbose", "output_type"):
        assert expected in params
    del base
def test_base_class_usage_with_handle():
    """cuml.Base must accept an externally constructed Handle and be able to
    synchronize on it."""
    custom_handle = cuml.Handle(stream=Stream())
    base = cuml.Base(handle=custom_handle)
    base.handle.sync()
    del base
def test_base_hasattr():
    """hasattr must stay accurate despite Base overriding __getattr__: it
    returns True only for attributes that really exist."""
    base = cuml.Base()
    assert hasattr(base, "handle")
    assert not hasattr(base, "somefakeattr")
@pytest.mark.parametrize("datatype", ["float32", "float64"])
@pytest.mark.parametrize("use_integer_n_features", [True, False])
def test_base_n_features_in(datatype, use_integer_n_features):
    """_set_n_features_in must accept either an integer or a data matrix and
    set n_features_in_ accordingly."""
    X_train, _, _, _ = small_classification_dataset(datatype)
    integer_n_features = 8
    clf = cuml.Base()
    if use_integer_n_features:
        # Integer input is stored verbatim.
        clf._set_n_features_in(integer_n_features)
        assert clf.n_features_in_ == integer_n_features
    else:
        # Array input: the column count is used.
        clf._set_n_features_in(X_train)
        assert clf.n_features_in_ == X_train.shape[1]
@pytest.mark.parametrize("child_class", list(all_base_children.keys()))
def test_base_subclass_init_matches_docs(child_class: str):
    """
    This test is comparing the docstrings for arguments in __init__ for any
    class that derives from `Base`, We ensure that 1) the base arguments exist
    in the derived class, 2) The types and default values are the same and 3)
    That the docstring matches the base class
    This is to prevent multiple different docstrings for identical arguments
    throughout the documentation
    Parameters
    ----------
    child_class : str
        Classname to test in the dict all_base_children
    """
    klass = all_base_children[child_class]
    if issubclass(klass, sklBaseEstimator):
        pytest.skip(
            "Preprocessing models do not have "
            "the base arguments in constructors."
        )
    # To quickly find and replace all instances in the documentation, the below
    # regex's may be useful
    # output_type: r"^[ ]{4}output_type :.*\n(^(?![ ]{0,4}(?![ ]{4,})).*(\n))+"
    # verbose: r"^[ ]{4}verbose :.*\n(^(?![ ]{0,4}(?![ ]{4,})).*(\n))+"
    # handle: r"^[ ]{4}handle :.*\n(^(?![ ]{0,4}(?![ ]{4,})).*(\n))+"
    def get_param_doc(param_doc_obj, name: str):
        # Find a parameter entry by name in a numpydoc parameter list.
        found_doc = next((x for x in param_doc_obj if x.name == name), None)
        assert found_doc is not None, "Could not find {} in docstring".format(
            name
        )
        return found_doc
    # Load the base class signature, parse the docstring and pull out params
    base_sig = inspect.signature(cuml.Base, follow_wrapped=True)
    base_doc = numpydoc.docscrape.NumpyDocString(cuml.Base.__doc__)
    base_doc_params = base_doc["Parameters"]
    # Load the current class signature, parse the docstring and pull out params
    klass_sig = inspect.signature(klass, follow_wrapped=True)
    klass_doc = numpydoc.docscrape.NumpyDocString(klass.__doc__ or "")
    klass_doc_params = klass_doc["Parameters"]
    for name, param in base_sig.parameters.items():
        if param.name == "output_mem_type":
            continue  # TODO(wphicks): Add this to all algos
        # Ensure the base param exists in the derived
        assert param.name in klass_sig.parameters
        klass_param = klass_sig.parameters[param.name]
        # Ensure the default values are the same
        assert param.default == klass_param.default
        # Make sure we aren't accidentally a *args or **kwargs
        assert (
            klass_param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
            or klass_param.kind == inspect.Parameter.KEYWORD_ONLY
        )
        if klass.__doc__ is not None:
            found_doc = get_param_doc(klass_doc_params, name)
            base_item_doc = get_param_doc(base_doc_params, name)
            # Ensure the docstring is identical
            assert (
                found_doc.type == base_item_doc.type
            ), "Docstring mismatch for {}".format(name)
            assert " ".join(found_doc.desc) == " ".join(base_item_doc.desc)
@pytest.mark.parametrize("child_class", list(all_base_children.keys()))
# ignore ColumnTransformer init warning
@pytest.mark.filterwarnings("ignore:Transformers are required")
def test_base_children_get_param_names(child_class: str):
    """
    This test ensures that the arguments in `Base.__init__` are available in
    all derived classes `get_param_names`
    """
    klass = all_base_children[child_class]
    sig = inspect.signature(klass, follow_wrapped=True)
    try:
        # Only test classes that can be constructed with default arguments.
        bound = sig.bind()
        bound.apply_defaults()
    except TypeError:
        pytest.skip(
            "{}.__init__ requires non-default arguments to create. Skipping.".format(
                klass.__name__
            )
        )
    else:
        # Create an instance
        obj = klass(*bound.args, **bound.kwargs)
        param_names = obj.get_param_names()
        # Now ensure the base parameters are included in get_param_names
        for name, param in sig.parameters.items():
            if param.name == "output_mem_type":
                continue  # TODO(wphicks): Add this to all algos
            # *args/**kwargs catch-alls are not real parameters; skip them.
            if (
                param.kind == inspect.Parameter.VAR_KEYWORD
                or param.kind == inspect.Parameter.VAR_POSITIONAL
            ):
                continue
            assert name in param_names
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_coordinate_descent.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, ElasticNet
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.metrics import r2_score
from cuml.linear_model import ElasticNet as cuElasticNet
from cuml import Lasso as cuLasso
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("X_type", ["ndarray"])
@pytest.mark.parametrize("alpha", [0.1, 0.001])
@pytest.mark.parametrize("algorithm", ["cyclic", "random"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.filterwarnings("ignore:Objective did not converge::sklearn[.*]")
def test_lasso(datatype, X_type, alpha, algorithm, nrows, column_info):
    """cuML Lasso must reach an r2 score comparable to sklearn's Lasso on
    synthetic regression data across alphas and selection algorithms."""
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    cu_lasso = cuLasso(
        alpha=np.array([alpha]),
        fit_intercept=True,
        max_iter=1000,
        selection=algorithm,
        tol=1e-10,
    )
    cu_lasso.fit(X_train, y_train)
    assert cu_lasso.coef_ is not None
    cu_predict = cu_lasso.predict(X_test)
    cu_r2 = r2_score(y_test, cu_predict)
    # Skip the slow sklearn reference fit for the stress-sized problem.
    if nrows < 500000:
        sk_lasso = Lasso(
            alpha=alpha,
            fit_intercept=True,
            max_iter=1000,
            selection=algorithm,
            tol=1e-10,
        )
        sk_lasso.fit(X_train, y_train)
        sk_predict = sk_lasso.predict(X_test)
        sk_r2 = r2_score(y_test, sk_predict)
        # cuML may trail sklearn by at most 0.07 in r2.
        assert cu_r2 >= sk_r2 - 0.07
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
def test_lasso_default(datatype, nrows, column_info):
    """cuML Lasso with default hyperparameters must be competitive with
    sklearn's default Lasso on synthetic regression data."""
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    cu_lasso = cuLasso()
    cu_lasso.fit(X_train, y_train)
    assert cu_lasso.coef_ is not None
    cu_predict = cu_lasso.predict(X_test)
    cu_r2 = r2_score(y_test, cu_predict)
    sk_lasso = Lasso()
    sk_lasso.fit(X_train, y_train)
    sk_predict = sk_lasso.predict(X_test)
    sk_r2 = r2_score(y_test, sk_predict)
    # cuML may trail sklearn by at most 0.07 in r2.
    assert cu_r2 >= sk_r2 - 0.07
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("model", ["lasso", "elastic-net"])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize(
    "distribution", ["lognormal", "exponential", "uniform"]
)
@pytest.mark.filterwarnings("ignore:Objective did not converge::sklearn[.*]")
def test_weighted_cd(datatype, model, fit_intercept, distribution):
    """Coordinate-descent models (Lasso / ElasticNet) with per-sample weights
    must be competitive with their sklearn counterparts."""
    nrows, ncols, n_info = 1000, 20, 10
    max_weight = 10
    noise = 20
    X, y = make_regression(nrows, ncols, n_informative=n_info, noise=noise)
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    # set weight per sample to be from 1 to max_weight
    if distribution == "uniform":
        wt = np.random.randint(1, high=max_weight, size=len(X_train))
    elif distribution == "exponential":
        wt = np.random.exponential(scale=max_weight, size=len(X_train))
    else:
        wt = np.random.lognormal(size=len(X_train))
    # Pick the matching cuML/sklearn pair for the requested model family.
    cuModel = cuLasso if model == "lasso" else cuElasticNet
    skModel = Lasso if model == "lasso" else ElasticNet
    # Initialization of cuML's linear regression model
    cumodel = cuModel(fit_intercept=fit_intercept, tol=1e-10, max_iter=1000)
    # fit and predict cuml linear regression model
    cumodel.fit(X_train, y_train, sample_weight=wt)
    cumodel_predict = cumodel.predict(X_test)
    # sklearn linear regression model initialization, fit and predict
    skmodel = skModel(fit_intercept=fit_intercept, tol=1e-10, max_iter=1000)
    skmodel.fit(X_train, y_train, sample_weight=wt)
    skmodel_predict = skmodel.predict(X_test)
    cu_r2 = r2_score(y_test, cumodel_predict)
    sk_r2 = r2_score(y_test, skmodel_predict)
    # cuML may trail sklearn by at most 0.07 in r2.
    assert cu_r2 >= sk_r2 - 0.07
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("X_type", ["ndarray"])
@pytest.mark.parametrize("alpha", [0.2, 0.7])
@pytest.mark.parametrize("algorithm", ["cyclic", "random"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.filterwarnings("ignore:Objective did not converge::sklearn[.*]")
def test_elastic_net(datatype, X_type, alpha, algorithm, nrows, column_info):
    """cuML ElasticNet must reach an r2 score comparable to sklearn's
    ElasticNet across alphas and selection algorithms."""
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    elastic_cu = cuElasticNet(
        alpha=np.array([alpha]),
        fit_intercept=True,
        max_iter=1000,
        selection=algorithm,
        tol=1e-10,
    )
    elastic_cu.fit(X_train, y_train)
    cu_predict = elastic_cu.predict(X_test)
    cu_r2 = r2_score(y_test, cu_predict)
    # Skip the slow sklearn reference fit for the stress-sized problem.
    if nrows < 500000:
        elastic_sk = ElasticNet(
            alpha=alpha,
            fit_intercept=True,
            max_iter=1000,
            selection=algorithm,
            tol=1e-10,
        )
        elastic_sk.fit(X_train, y_train)
        sk_predict = elastic_sk.predict(X_test)
        sk_r2 = r2_score(y_test, sk_predict)
        # cuML may trail sklearn by at most 0.07 in r2.
        assert cu_r2 >= sk_r2 - 0.07
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
def test_elastic_net_default(datatype, nrows, column_info):
    """cuML ElasticNet with default hyperparameters must be competitive with
    sklearn's default ElasticNet on synthetic regression data."""
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    X = X.astype(datatype)
    y = y.astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=0
    )
    elastic_cu = cuElasticNet()
    elastic_cu.fit(X_train, y_train)
    cu_predict = elastic_cu.predict(X_test)
    cu_r2 = r2_score(y_test, cu_predict)
    elastic_sk = ElasticNet()
    elastic_sk.fit(X_train, y_train)
    sk_predict = elastic_sk.predict(X_test)
    sk_r2 = r2_score(y_test, sk_predict)
    # cuML may trail sklearn by at most 0.07 in r2.
    assert cu_r2 >= sk_r2 - 0.07
@pytest.mark.parametrize("train_dtype", [np.float32, np.float64])
@pytest.mark.parametrize("test_dtype", [np.float64, np.float32])
def test_elastic_net_predict_convert_dtype(train_dtype, test_dtype):
    """predict() must accept input whose dtype differs from the training
    dtype without raising."""
    features, target = make_regression(
        n_samples=50, n_features=10, n_informative=5, random_state=0
    )
    X_train, X_test, y_train, y_test = train_test_split(
        features.astype(train_dtype),
        target.astype(train_dtype),
        train_size=0.8,
        random_state=0,
    )
    model = cuElasticNet()
    model.fit(X_train, y_train)
    # The call itself is the assertion: no dtype-mismatch error expected.
    model.predict(X_test.astype(test_dtype))
@pytest.mark.parametrize("train_dtype", [np.float32, np.float64])
@pytest.mark.parametrize("test_dtype", [np.float64, np.float32])
def test_lasso_predict_convert_dtype(train_dtype, test_dtype):
    """predict() must accept input whose dtype differs from the training
    dtype without raising."""
    features, target = make_regression(
        n_samples=50, n_features=10, n_informative=5, random_state=0
    )
    X_train, X_test, y_train, y_test = train_test_split(
        features.astype(train_dtype),
        target.astype(train_dtype),
        train_size=0.8,
        random_state=0,
    )
    model = cuLasso()
    model.fit(X_train, y_train)
    # The call itself is the assertion: no dtype-mismatch error expected.
    model.predict(X_test.astype(test_dtype))
@pytest.mark.parametrize("algo", [cuElasticNet, cuLasso])
def test_set_params(algo):
    """set_params must have the same effect as constructor arguments."""
    x = np.linspace(0, 1, 50)
    y = 2 * x

    def fitted_coef(model):
        # Fit on the tiny linear problem and return the learned coefficient.
        model.fit(x, y)
        return model.coef_

    coef_before = fitted_coef(algo(alpha=0.01))
    coef_after = fitted_coef(algo(selection="random", alpha=0.1))

    model = algo(alpha=0.01)
    model.set_params(**{"selection": "random", "alpha": 0.1})
    coef_test = fitted_coef(model)

    # Changing hyperparameters must change the fit, and set_params must
    # reproduce the constructor-configured result exactly.
    assert coef_before != coef_after
    assert coef_after == coef_test
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_agglomerative.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.cluster import AgglomerativeClustering
from cuml.datasets import make_blobs
from cuml.metrics import adjusted_rand_score
from sklearn import cluster
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
@pytest.mark.parametrize("connectivity", ["knn", "pairwise"])
def test_duplicate_distances(connectivity):
    """Zero-distance duplicate points must not break single linkage."""
    X = cp.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])

    cuml_agg = AgglomerativeClustering(
        n_clusters=2,
        affinity="euclidean",
        linkage="single",
        n_neighbors=3,
        connectivity=connectivity,
    )
    cuml_agg.fit(X)

    sk_agg = cluster.AgglomerativeClustering(
        n_clusters=2, affinity="euclidean", linkage="single"
    )
    sk_agg.fit(X.get())

    # Identical partitions (up to label permutation) give an ARI of 1.0.
    assert adjusted_rand_score(cuml_agg.labels_, sk_agg.labels_) == 1.0
@pytest.mark.parametrize("nrows", [100, 1000])
@pytest.mark.parametrize("ncols", [25, 50])
@pytest.mark.parametrize("nclusters", [1, 2, 10, 50])
@pytest.mark.parametrize("k", [3, 5, 15])
@pytest.mark.parametrize("connectivity", ["knn", "pairwise"])
def test_single_linkage_sklearn_compare(
    nrows, ncols, nclusters, k, connectivity
):
    """cuML single-linkage clustering must match sklearn on blob data."""
    X, y = make_blobs(
        int(nrows), ncols, nclusters, cluster_std=1.0, shuffle=False
    )

    cuml_agg = AgglomerativeClustering(
        n_clusters=nclusters,
        affinity="euclidean",
        linkage="single",
        n_neighbors=k,
        connectivity=connectivity,
    )
    cuml_agg.fit(X)

    sk_agg = cluster.AgglomerativeClustering(
        n_clusters=nclusters, affinity="euclidean", linkage="single"
    )
    sk_agg.fit(cp.asnumpy(X))

    # Cluster assignments must be exact even though the actual label
    # ids may differ between the two implementations.
    assert adjusted_rand_score(cuml_agg.labels_, sk_agg.labels_) == 1.0
    for attr in ("n_connected_components_", "n_leaves_", "n_clusters_"):
        assert getattr(cuml_agg, attr) == getattr(sk_agg, attr)
def test_invalid_inputs():
    """Constructor and fit must reject out-of-range hyperparameters."""
    invalid_ctor_kwargs = [
        {"affinity": "doesntexist"},
        {"linkage": "doesntexist"},
        {"connectivity": "doesntexist"},
        {"n_neighbors": 1},  # below the supported minimum
        {"n_neighbors": 1024},  # above the supported maximum
        {"n_clusters": 0},
    ]
    for kwargs in invalid_ctor_kwargs:
        with pytest.raises(ValueError):
            AgglomerativeClustering(**kwargs)

    # Requesting more clusters than samples is only detectable at fit time.
    with pytest.raises(ValueError):
        AgglomerativeClustering(n_clusters=500).fit(cp.ones((2, 5)))
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_meta_estimators.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.svm import SVC
from cuml.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from cuml.model_selection import train_test_split
from cuml.datasets import make_regression, make_classification
from cuml.testing.utils import ClassEnumerator
from cuml.model_selection import GridSearchCV
from cuml.pipeline import Pipeline, make_pipeline
import pytest
import cuml
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import("cupy")
def test_pipeline():
    """A StandardScaler + SVC pipeline should classify reasonably well."""
    X, y = make_classification(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    pipe = Pipeline(steps=[("scaler", StandardScaler()), ("svc", SVC())])
    # Pipeline.fit returns self, so fit and score can be chained.
    score = pipe.fit(X_train, y_train).score(X_test, y_test)
    assert score > 0.8
def test_gridsearchCV():
    """GridSearchCV over SVC hyperparameters picks the known-best combo."""
    iris = load_iris()
    param_grid = {"kernel": ("linear", "rbf"), "C": [1, 10]}
    search = GridSearchCV(SVC(), param_grid)
    search.fit(iris.data, iris.target)
    best = search.best_params_
    assert best["kernel"] == "rbf"
    assert best["C"] == 10
@pytest.fixture(scope="session")
def regression_dataset(request):
    """Session-wide train/test split of a tiny synthetic regression set."""
    data = make_regression(n_samples=10, n_features=5, random_state=0)
    return train_test_split(*data, random_state=0)
@pytest.fixture(scope="session")
def classification_dataset(request):
    """Session-wide train/test split of a tiny synthetic classification set."""
    data = make_classification(n_samples=10, n_features=5, random_state=0)
    return train_test_split(*data, random_state=0)
# Discover every estimator class exported by cuml so the parametrized
# pipeline tests below can look them up by class name.
models_config = ClassEnumerator(module=cuml)
models = models_config.get_models()
@pytest.mark.parametrize(
    "model_key",
    [
        "ElasticNet",
        "Lasso",
        "Ridge",
        "LinearRegression",
        "LogisticRegression",
        "MBSGDRegressor",
        "RandomForestRegressor",
        "KNeighborsRegressor",
    ],
)
@pytest.mark.parametrize("instantiation", ["Pipeline", "make_pipeline"])
def test_pipeline_with_regression(
    regression_dataset, model_key, instantiation
):
    """Regressors must work inside a pipeline built either way."""
    X_train, X_test, y_train, y_test = regression_dataset
    # RandomForest needs few bins for this tiny dataset.
    extra_kwargs = {"n_bins": 2} if model_key == "RandomForestRegressor" else {}
    model = models[model_key](**extra_kwargs)

    if instantiation == "Pipeline":
        pipe = Pipeline(steps=[("scaler", StandardScaler()), ("model", model)])
    elif instantiation == "make_pipeline":
        pipe = make_pipeline(StandardScaler(), model)

    pipe.fit(X_train, y_train)
    prediction = pipe.predict(X_test)
    assert isinstance(prediction, cupy.ndarray)
    _ = pipe.score(X_test, y_test)
@pytest.mark.parametrize(
    "model_key",
    ["MBSGDClassifier", "RandomForestClassifier", "KNeighborsClassifier"],
)
@pytest.mark.parametrize("instantiation", ["Pipeline", "make_pipeline"])
def test_pipeline_with_classification(
    classification_dataset, model_key, instantiation
):
    """Classifiers must fit and predict inside a pipeline built either way."""
    X_train, X_test, y_train, y_test = classification_dataset
    model_const = models[model_key]
    # RandomForest needs few bins for this tiny dataset.
    if model_key == "RandomForestClassifier":
        model = model_const(n_bins=2)
    else:
        model = model_const()
    if instantiation == "Pipeline":
        pipe = Pipeline(steps=[("scaler", StandardScaler()), ("model", model)])
    elif instantiation == "make_pipeline":
        pipe = make_pipeline(StandardScaler(), model)
    pipe.fit(X_train, y_train)
    prediction = pipe.predict(X_test)
    assert isinstance(prediction, cupy.ndarray)
    if model_key == "RandomForestClassifier":
        # BUGFIX: the two string fragments previously concatenated to
        # "supportedby" — a space was missing at the join.
        pytest.skip(
            "RandomForestClassifier is not yet supported "
            "by the Pipeline utility"
        )
    _ = pipe.score(X_test, y_test)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_device_selection.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from cuml.testing.test_preproc_utils import to_output_type
from cuml.testing.utils import array_equal
from cuml.cluster.hdbscan import HDBSCAN
from cuml.neighbors import NearestNeighbors
from cuml.metrics import trustworthiness
from cuml.metrics import adjusted_rand_score
from cuml.manifold import UMAP
from cuml.linear_model import (
ElasticNet,
Lasso,
LinearRegression,
LogisticRegression,
Ridge,
)
from cuml.internals.memory_utils import using_memory_type
from cuml.internals.mem_type import MemoryType
from cuml.decomposition import PCA, TruncatedSVD
from cuml.common.device_selection import DeviceType, using_device_type
from hdbscan import HDBSCAN as refHDBSCAN
from sklearn.neighbors import NearestNeighbors as skNearestNeighbors
from sklearn.linear_model import Ridge as skRidge
from sklearn.linear_model import ElasticNet as skElasticNet
from sklearn.linear_model import Lasso as skLasso
from sklearn.linear_model import LogisticRegression as skLogisticRegression
from sklearn.linear_model import LinearRegression as skLinearRegression
from sklearn.decomposition import PCA as skPCA
from sklearn.decomposition import TruncatedSVD as skTruncatedSVD
from sklearn.datasets import make_regression, make_blobs
from pytest_cases import fixture_union, fixture_plus
from importlib import import_module
import inspect
import pickle
from cuml.internals.safe_imports import gpu_only_import
import itertools as it
import pytest
import cuml
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cudf = gpu_only_import("cudf")
# ARM platforms are excluded from reference-UMAP comparisons
# (see https://github.com/rapidsai/cuml/issues/5441), so the CPU umap
# package is only imported on non-ARM machines.
IS_ARM = platform.processor() == "aarch64"
if not IS_ARM:
    from umap import UMAP as refUMAP
def assert_membership_vectors(cu_vecs, sk_vecs):
    """
    Compare cuML and reference membership vectors: the per-rank argsorted
    cluster indices must produce a high adjusted rand score.
    """
    if sk_vecs.shape == cu_vecs.shape:
        # NOTE(review): [::-1] reverses the sample axis, not each row's
        # sort order; it is applied to both arrays consistently, but
        # confirm the descending-rank intent ([:, ::-1] would flip rows).
        cu_ranked = np.argsort(cu_vecs)[::-1]
        sk_ranked = np.argsort(sk_vecs)[::-1]
        for rank in range(min(sk_vecs.shape[1], 10)):
            score = adjusted_rand_score(
                cu_ranked[:, rank], sk_ranked[:, rank]
            )
            assert score >= 0.85
@pytest.mark.parametrize(
    "input", [("cpu", DeviceType.host), ("gpu", DeviceType.device)]
)
def test_device_type(input):
    """using_device_type must set and then restore the global device type."""
    name, expected = input
    before = cuml.global_settings.device_type
    with using_device_type(name):
        assert cuml.global_settings.device_type == expected
    # Leaving the context restores the previous value.
    assert cuml.global_settings.device_type == before
def test_device_type_exception():
    """An unrecognized device-type string must raise ValueError."""
    with pytest.raises(ValueError), using_device_type("wrong_option"):
        pass
@pytest.mark.parametrize(
    "input",
    [
        ("device", MemoryType.device),
        ("host", MemoryType.host),
        ("managed", MemoryType.managed),
        ("mirror", MemoryType.mirror),
    ],
)
def test_memory_type(input):
    """using_memory_type must set and then restore the global memory type."""
    name, expected = input
    before = cuml.global_settings.memory_type
    with using_memory_type(name):
        assert cuml.global_settings.memory_type == expected
    # Leaving the context restores the previous value.
    assert cuml.global_settings.memory_type == before
def test_memory_type_exception():
    """An unrecognized memory-type string must raise ValueError."""
    with pytest.raises(ValueError), using_memory_type("wrong_option"):
        pass
def make_reg_dataset():
    """Build a float32 regression set split 1800 train / 200 test.

    Returns (X_train, y_train, X_test); test labels are never used by
    the callers, so they are dropped.
    """
    X, y = make_regression(
        n_samples=2000, n_features=20, n_informative=18, random_state=0
    )
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    return X[:1800], y[:1800], X[1800:]
def make_blob_dataset():
    """Build a float32 blob (clustering) set split 1800 train / 200 test.

    Returns (X_train, y_train, X_test); test labels are never used by
    the callers, so they are dropped.
    """
    X, y = make_blobs(
        n_samples=2000, n_features=20, centers=20, random_state=0
    )
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    return X[:1800], y[:1800], X[1800:]
# Module-level datasets shared by every fixture and test below.
X_train_reg, y_train_reg, X_test_reg = make_reg_dataset()
X_train_blob, y_train_blob, X_test_blob = make_blob_dataset()
def check_trustworthiness(cuml_embedding, test_data):
    """Assert the embedding's trustworthiness is close to the reference."""
    X_test = to_output_type(test_data["X_test"], "numpy")
    embedding = to_output_type(cuml_embedding, "numpy")
    score = trustworthiness(X_test, embedding, n_neighbors=10)
    # Allow a small tolerance below the reference score.
    assert score >= test_data["ref_trust"] - 0.02
def check_allclose(cuml_output, test_data):
    """Assert cuML output matches the reference within 15% relative tol."""
    expected = to_output_type(test_data["ref_y_test"], "numpy")
    actual = to_output_type(cuml_output, "numpy")
    np.testing.assert_allclose(expected, actual, rtol=0.15)
def check_allclose_without_sign(cuml_output, test_data):
    """Like check_allclose but ignores per-element sign (e.g. PCA axes)."""
    expected = to_output_type(test_data["ref_y_test"], "numpy")
    actual = to_output_type(cuml_output, "numpy")
    assert expected.shape == actual.shape
    np.testing.assert_allclose(np.abs(expected), np.abs(actual), rtol=0.15)
def check_nn(cuml_output, test_data):
    """Assert kneighbors distances/indices agree with the reference."""
    ref_dists = to_output_type(test_data["ref_y_test"][0], "numpy")
    ref_indices = to_output_type(test_data["ref_y_test"][1], "numpy")
    cu_dists = to_output_type(cuml_output[0], "numpy")
    cu_indices = to_output_type(cuml_output[1], "numpy")
    # Indices must match exactly; distances only within tolerance.
    np.testing.assert_allclose(ref_indices, cu_indices)
    np.testing.assert_allclose(ref_dists, cu_dists, rtol=0.15)
def fixture_generation_helper(params):
    """
    Expand a dict of parameter lists into pytest-cases fixture kwargs:
    session scope, one params dict per combination of values, and
    readable dash-joined ids.
    """
    names = sorted(params)
    combos = list(it.product(*(params[name] for name in names)))
    return {
        "scope": "session",
        "params": [dict(zip(names, combo)) for combo in combos],
        "ids": ["-".join(map(str, combo)) for combo in combos],
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "fit_intercept": [False, True],
        }
    )
)
def linreg_test_data(request):
    """Fixture: LinearRegression inputs plus sklearn reference predictions."""
    kwargs = {"fit_intercept": request.param["fit_intercept"]}

    ref_model = skLinearRegression(**kwargs)
    ref_model.fit(X_train_reg, y_train_reg)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_reg)
    else:
        y_train = to_output_type(y_train_reg, input_type)

    return {
        "cuEstimator": LinearRegression,
        "kwargs": kwargs,
        "infer_func": "predict",
        "assert_func": check_allclose,
        "X_train": to_output_type(X_train_reg, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_reg, input_type),
        "ref_y_test": ref_model.predict(X_test_reg),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "penalty": ["none", "l2"],
            "fit_intercept": [False, True],
        }
    )
)
def logreg_test_data(request):
    """Fixture: LogisticRegression inputs plus sklearn reference predictions."""
    kwargs = {
        "penalty": request.param["penalty"],
        "fit_intercept": request.param["fit_intercept"],
        "max_iter": 1000,
    }

    # Binarize the regression target at its median for classification.
    y_bin = (y_train_reg > np.median(y_train_reg)).astype(np.int32)

    ref_model = skLogisticRegression(**kwargs)
    ref_model.fit(X_train_reg, y_bin)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_bin)
    else:
        y_train = to_output_type(y_bin, input_type)

    return {
        "cuEstimator": LogisticRegression,
        "kwargs": kwargs,
        "infer_func": "predict",
        "assert_func": check_allclose,
        "X_train": to_output_type(X_train_reg, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_reg, input_type),
        "ref_y_test": ref_model.predict(X_test_reg),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "fit_intercept": [False, True],
            "selection": ["cyclic", "random"],
        }
    )
)
def lasso_test_data(request):
    """Fixture: Lasso inputs plus sklearn reference predictions."""
    kwargs = {
        "fit_intercept": request.param["fit_intercept"],
        "selection": request.param["selection"],
        "tol": 0.0001,
    }

    ref_model = skLasso(**kwargs)
    ref_model.fit(X_train_reg, y_train_reg)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_reg)
    else:
        y_train = to_output_type(y_train_reg, input_type)

    return {
        "cuEstimator": Lasso,
        "kwargs": kwargs,
        "infer_func": "predict",
        "assert_func": check_allclose,
        "X_train": to_output_type(X_train_reg, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_reg, input_type),
        "ref_y_test": ref_model.predict(X_test_reg),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "fit_intercept": [False, True],
            "selection": ["cyclic", "random"],
        }
    )
)
def elasticnet_test_data(request):
    """Fixture: ElasticNet inputs plus sklearn reference predictions."""
    kwargs = {
        "fit_intercept": request.param["fit_intercept"],
        "selection": request.param["selection"],
        "tol": 0.0001,
    }

    ref_model = skElasticNet(**kwargs)
    ref_model.fit(X_train_reg, y_train_reg)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_reg)
    else:
        y_train = to_output_type(y_train_reg, input_type)

    return {
        "cuEstimator": ElasticNet,
        "kwargs": kwargs,
        "infer_func": "predict",
        "assert_func": check_allclose,
        "X_train": to_output_type(X_train_reg, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_reg, input_type),
        "ref_y_test": ref_model.predict(X_test_reg),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "fit_intercept": [False, True],
        }
    )
)
def ridge_test_data(request):
    """Fixture: Ridge inputs plus sklearn reference predictions."""
    kwargs = {"fit_intercept": request.param["fit_intercept"], "solver": "svd"}

    ref_model = skRidge(**kwargs)
    ref_model.fit(X_train_reg, y_train_reg)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_reg)
    else:
        y_train = to_output_type(y_train_reg, input_type)

    return {
        "cuEstimator": Ridge,
        "kwargs": kwargs,
        "infer_func": "predict",
        "assert_func": check_allclose,
        "X_train": to_output_type(X_train_reg, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_reg, input_type),
        "ref_y_test": ref_model.predict(X_test_reg),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["cupy"],
            "n_components": [2, 16],
            "init": ["spectral", "random"],
        }
    )
)
def umap_test_data(request):
    """Fixture: UMAP inputs plus a reference trustworthiness score."""
    kwargs = {
        "n_neighbors": 12,
        "n_components": request.param["n_components"],
        "init": request.param["init"],
        "random_state": 42,
    }

    # todo: remove after https://github.com/rapidsai/cuml/issues/5441 is
    # fixed
    if IS_ARM:
        # refUMAP is not importable on ARM; use a zero reference score.
        ref_trust = 0.0
    else:
        ref_model = refUMAP(**kwargs)
        ref_model.fit(X_train_blob, y_train_blob)
        ref_embedding = ref_model.transform(X_test_blob)
        ref_trust = trustworthiness(
            X_test_blob, ref_embedding, n_neighbors=12
        )

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_blob)
    else:
        y_train = to_output_type(y_train_blob, input_type)

    return {
        "cuEstimator": UMAP,
        "kwargs": kwargs,
        "infer_func": "transform",
        "assert_func": check_trustworthiness,
        "X_train": to_output_type(X_train_blob, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_blob, input_type),
        "ref_trust": ref_trust,
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "n_components": [2, 8],
        }
    )
)
def pca_test_data(request):
    """Fixture: PCA inputs plus sklearn reference transform output."""
    kwargs = {
        "n_components": request.param["n_components"],
        "svd_solver": "full",
        "tol": 1e-07,
        "iterated_power": 15,
    }

    ref_model = skPCA(**kwargs)
    ref_model.fit(X_train_blob, y_train_blob)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_blob)
    else:
        y_train = to_output_type(y_train_blob, input_type)

    return {
        "cuEstimator": PCA,
        "kwargs": kwargs,
        "infer_func": "transform",
        "assert_func": check_allclose_without_sign,
        "X_train": to_output_type(X_train_blob, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_blob, input_type),
        "ref_y_test": ref_model.transform(X_test_blob),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "n_components": [2, 8],
        }
    )
)
def tsvd_test_data(request):
    """Fixture: TruncatedSVD inputs plus sklearn reference transform output."""
    kwargs = {
        "n_components": request.param["n_components"],
        "n_iter": 15,
        "tol": 1e-07,
    }

    ref_model = skTruncatedSVD(**kwargs)
    ref_model.fit(X_train_blob, y_train_blob)

    input_type = request.param["input_type"]
    # pandas/cudf targets become Series; other types use the converter.
    series_ctor = {"dataframe": pd.Series, "cudf": cudf.Series}.get(input_type)
    if series_ctor is not None:
        y_train = series_ctor(y_train_blob)
    else:
        y_train = to_output_type(y_train_blob, input_type)

    return {
        "cuEstimator": TruncatedSVD,
        "kwargs": kwargs,
        "infer_func": "transform",
        "assert_func": check_allclose_without_sign,
        "X_train": to_output_type(X_train_blob, input_type),
        "y_train": y_train,
        "X_test": to_output_type(X_test_blob, input_type),
        "ref_y_test": ref_model.transform(X_test_blob),
    }
@fixture_plus(
    **fixture_generation_helper(
        {
            "input_type": ["numpy", "dataframe", "cupy", "cudf", "numba"],
            "metric": ["euclidean", "cosine"],
            "n_neighbors": [3, 8],
            "return_distance": [True],
        }
    )
)
def nn_test_data(request):
    """Fixture: NearestNeighbors inputs plus sklearn reference kneighbors."""
    kwargs = {
        "metric": request.param["metric"],
        "n_neighbors": request.param["n_neighbors"],
    }

    ref_model = skNearestNeighbors(**kwargs)
    ref_model.fit(X_train_blob)

    input_type = request.param["input_type"]
    # NearestNeighbors is unsupervised: no y_train key in this fixture.
    return {
        "cuEstimator": NearestNeighbors,
        "kwargs": kwargs,
        "infer_func": "kneighbors",
        "infer_func_kwargs": {
            "return_distance": request.param["return_distance"]
        },
        "assert_func": check_nn,
        "X_train": to_output_type(X_train_blob, input_type),
        "X_test": to_output_type(X_test_blob, input_type),
        "ref_y_test": ref_model.kneighbors(X_test_blob),
    }
# Bundle the per-estimator fixtures into one parametrized "test_data"
# fixture consumed by the train/infer tests below (pytest-cases).
# NOTE(review): elasticnet_test_data is defined above but not listed
# here — confirm whether its omission is intentional.
fixture_union(
    "test_data",
    [
        "linreg_test_data",
        "logreg_test_data",
        "lasso_test_data",
        "ridge_test_data",
        "umap_test_data",
        "pca_test_data",
        "tsvd_test_data",
        "nn_test_data",
    ],
)
def test_train_cpu_infer_cpu(test_data):
    """Fit and infer entirely on CPU; output must match the reference."""
    estimator_cls = test_data["cuEstimator"]
    if estimator_cls is Lasso:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5298")
    if estimator_cls is UMAP and IS_ARM:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5441")
    model = estimator_cls(**test_data["kwargs"])
    with using_device_type("cpu"):
        fit_args = [test_data["X_train"]]
        if "y_train" in test_data:
            fit_args.append(test_data["y_train"])
        model.fit(*fit_args)
        infer = getattr(model, test_data["infer_func"])
        cuml_output = infer(
            test_data["X_test"], **test_data.get("infer_func_kwargs", {})
        )
    test_data["assert_func"](cuml_output, test_data)
def test_train_gpu_infer_cpu(test_data):
    """Fit on GPU, infer on CPU; output must match the reference."""
    estimator_cls = test_data["cuEstimator"]
    if estimator_cls is UMAP:
        pytest.skip("UMAP GPU training CPU inference not yet implemented")
    model = estimator_cls(**test_data["kwargs"])
    with using_device_type("gpu"):
        fit_args = [test_data["X_train"]]
        if "y_train" in test_data:
            fit_args.append(test_data["y_train"])
        model.fit(*fit_args)
    with using_device_type("cpu"):
        infer = getattr(model, test_data["infer_func"])
        cuml_output = infer(
            test_data["X_test"], **test_data.get("infer_func_kwargs", {})
        )
    test_data["assert_func"](cuml_output, test_data)
def test_train_cpu_infer_gpu(test_data):
    """Fit on CPU, infer on GPU; output must match the reference."""
    estimator_cls = test_data["cuEstimator"]
    if estimator_cls is UMAP and IS_ARM:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5441")
    model = estimator_cls(**test_data["kwargs"])
    with using_device_type("cpu"):
        fit_args = [test_data["X_train"]]
        if "y_train" in test_data:
            fit_args.append(test_data["y_train"])
        model.fit(*fit_args)
    with using_device_type("gpu"):
        infer = getattr(model, test_data["infer_func"])
        cuml_output = infer(
            test_data["X_test"], **test_data.get("infer_func_kwargs", {})
        )
    test_data["assert_func"](cuml_output, test_data)
def test_train_gpu_infer_gpu(test_data):
    """Fit and infer entirely on GPU; output must match the reference."""
    estimator_cls = test_data["cuEstimator"]
    if estimator_cls is UMAP and IS_ARM:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5441")
    model = estimator_cls(**test_data["kwargs"])
    with using_device_type("gpu"):
        fit_args = [test_data["X_train"]]
        if "y_train" in test_data:
            fit_args.append(test_data["y_train"])
        model.fit(*fit_args)
        infer = getattr(model, test_data["infer_func"])
        cuml_output = infer(
            test_data["X_test"], **test_data.get("infer_func_kwargs", {})
        )
    test_data["assert_func"](cuml_output, test_data)
def test_pickle_interop(tmp_path, test_data):
    """A GPU-trained model must infer on CPU after a pickle round-trip."""
    pickle_filepath = tmp_path / "model.pickle"
    estimator_cls = test_data["cuEstimator"]
    if estimator_cls is UMAP:
        pytest.skip("UMAP GPU training CPU inference not yet implemented")
    model = estimator_cls(**test_data["kwargs"])
    with using_device_type("gpu"):
        fit_args = [test_data["X_train"]]
        if "y_train" in test_data:
            fit_args.append(test_data["y_train"])
        model.fit(*fit_args)

    with open(pickle_filepath, "wb") as pf:
        pickle.dump(model, pf)
    # Drop the original so inference can only come from the unpickled copy.
    del model
    with open(pickle_filepath, "rb") as pf:
        restored = pickle.load(pf)

    with using_device_type("cpu"):
        infer = getattr(restored, test_data["infer_func"])
        cuml_output = infer(test_data["X_test"])
    test_data["assert_func"](cuml_output, test_data)
@pytest.mark.skip("Hyperparameters defaults understandably different")
@pytest.mark.parametrize(
    "estimator",
    [
        LinearRegression,
        LogisticRegression,
        Lasso,
        ElasticNet,
        Ridge,
        UMAP,
        PCA,
        TruncatedSVD,
        NearestNeighbors,
    ],
)
def test_hyperparams_defaults(estimator):
    """Compare shared hyperparameter defaults between cuML and CPU libs."""
    if estimator is UMAP and IS_ARM:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5441")
    model = estimator()
    cu_params = inspect.signature(model.__init__).parameters

    # Locate the equivalent CPU estimator class, preferring an explicit
    # import path declared on the cuML estimator.
    if hasattr(model, "_cpu_estimator_import_path"):
        module_path = model._cpu_estimator_import_path
    else:
        module_path = "sklearn" + model.__class__.__module__[4:]
    cpu_cls = getattr(import_module(module_path), model.__class__.__name__)
    cpu_params = inspect.signature(cpu_cls.__init__).parameters

    error_msg = "Different default values for hyperparameters:\n"
    mismatch_found = False
    for name in set(cu_params.keys()) & set(cpu_params.keys()):
        if cu_params[name].default != cpu_params[name].default:
            mismatch_found = True
            error_msg += (
                "\t{} with cuML default :"
                "'{}' and CPU default : '{}'"
                "\n".format(
                    name,
                    cu_params[name].default,
                    cpu_params[name].default,
                )
            )
    if mismatch_found:
        raise ValueError(error_msg)
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_linreg_methods(train_device, infer_device):
    """score() must roughly match sklearn for every device combination."""
    ref_output = (
        skLinearRegression()
        .fit(X_train_reg, y_train_reg)
        .score(X_train_reg, y_train_reg)
    )
    model = LinearRegression()
    with using_device_type(train_device):
        model.fit(X_train_reg, y_train_reg)
    with using_device_type(infer_device):
        output = model.score(X_train_reg, y_train_reg)
    assert abs(output - ref_output) <= 0.01
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
@pytest.mark.parametrize(
    "infer_func_name",
    ["decision_function", "predict_proba", "predict_log_proba", "score"],
)
def test_logreg_methods(train_device, infer_device, infer_func_name):
    """Logistic-regression inference methods must closely match sklearn."""
    # Binarize the regression target at its median for classification.
    y_bin = (y_train_reg > np.median(y_train_reg)).astype(np.int32)

    ref_model = skLogisticRegression()
    ref_model.fit(X_train_reg, y_bin)
    if infer_func_name == "score":
        ref_output = getattr(ref_model, infer_func_name)(X_train_reg, y_bin)
    else:
        ref_output = getattr(ref_model, infer_func_name)(X_test_reg)

    model = LogisticRegression()
    with using_device_type(train_device):
        model.fit(X_train_reg, y_bin)
    with using_device_type(infer_device):
        infer = getattr(model, infer_func_name)
        if infer_func_name == "score":
            output = infer(
                X_train_reg.astype(np.float64),
                y_bin.astype(np.float64),
            )
        else:
            output = infer(X_test_reg.astype(np.float64))

    if infer_func_name == "score":
        assert abs(output - ref_output) <= 0.01
    else:
        output = to_output_type(output, "numpy")
        # Only compare entries that are finite (log-probabilities can
        # produce -inf on both sides).
        finite = np.isfinite(output)
        np.testing.assert_allclose(
            ref_output[finite], output[finite], atol=0.1, rtol=0.15
        )
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_lasso_methods(train_device, infer_device):
    """Lasso score() must roughly match sklearn on every device combo."""
    ref_output = (
        skLasso()
        .fit(X_train_reg, y_train_reg)
        .score(X_train_reg, y_train_reg)
    )
    model = Lasso()
    with using_device_type(train_device):
        model.fit(X_train_reg, y_train_reg)
    with using_device_type(infer_device):
        output = model.score(X_train_reg, y_train_reg)
    assert abs(output - ref_output) <= 0.01
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_elasticnet_methods(train_device, infer_device):
    """ElasticNet score() must roughly match sklearn on every device combo."""
    ref_output = (
        skElasticNet()
        .fit(X_train_reg, y_train_reg)
        .score(X_train_reg, y_train_reg)
    )
    model = ElasticNet()
    with using_device_type(train_device):
        model.fit(X_train_reg, y_train_reg)
    with using_device_type(infer_device):
        output = model.score(X_train_reg, y_train_reg)
    assert abs(output - ref_output) <= 0.01
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_ridge_methods(train_device, infer_device):
    """Ridge score() must roughly match sklearn on every device combo."""
    ref_output = (
        skRidge()
        .fit(X_train_reg, y_train_reg)
        .score(X_train_reg, y_train_reg)
    )
    model = Ridge()
    with using_device_type(train_device):
        model.fit(X_train_reg, y_train_reg)
    with using_device_type(infer_device):
        output = model.score(X_train_reg, y_train_reg)
    assert abs(output - ref_output) <= 0.01
@pytest.mark.parametrize("device", ["cpu", "gpu"])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_methods(device):
    """fit_transform trustworthiness must be close to reference UMAP."""
    ref_embedding = refUMAP(n_neighbors=12).fit_transform(
        X_train_blob, y_train_blob
    )
    ref_trust = trustworthiness(X_train_blob, ref_embedding, n_neighbors=12)

    model = UMAP(n_neighbors=12)
    with using_device_type(device):
        embedding = model.fit_transform(X_train_blob, y_train_blob)
        trust = trustworthiness(X_train_blob, embedding, n_neighbors=12)
    assert abs(trust - ref_trust) <= 0.02
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_pca_methods(train_device, infer_device):
    """inverse_transform(fit_transform(X)) must reconstruct X closely."""
    rng = np.random.RandomState(0)
    X = rng.randn(500, 5) * 0.1 + np.array([3, 4, 2, 3, 5])

    model = PCA(n_components=3)
    with using_device_type(train_device):
        reduced = model.fit_transform(X)
    with using_device_type(infer_device):
        restored = model.inverse_transform(reduced)

    restored = to_output_type(restored, "numpy")
    np.testing.assert_allclose(X, restored, rtol=0.15)
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_tsvd_methods(train_device, infer_device):
    """TruncatedSVD inverse_transform must reconstruct X closely."""
    rng = np.random.RandomState(0)
    X = rng.randn(500, 5) * 0.1 + np.array([3, 4, 2, 3, 5])

    model = TruncatedSVD(n_components=3)
    with using_device_type(train_device):
        reduced = model.fit_transform(X)
    with using_device_type(infer_device):
        restored = model.inverse_transform(reduced)

    restored = to_output_type(restored, "numpy")
    np.testing.assert_allclose(X, restored, rtol=0.15)
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_nn_methods(train_device, infer_device):
    """kneighbors_graph must match sklearn for every device combination."""
    ref_graph = (
        skNearestNeighbors().fit(X_train_blob).kneighbors_graph(X_train_blob)
    )
    model = NearestNeighbors()
    with using_device_type(train_device):
        model.fit(X_train_blob)
    with using_device_type(infer_device):
        graph = model.kneighbors_graph(X_train_blob)
    # Compare as dense matrices since the sparse layouts may differ.
    np.testing.assert_allclose(ref_graph.todense(), graph.todense(), rtol=0.15)
@pytest.mark.parametrize("train_device", ["cpu", "gpu"])
@pytest.mark.parametrize("infer_device", ["cpu", "gpu"])
def test_hdbscan_methods(train_device, infer_device):
    """Compare cuml HDBSCAN labels, membership vectors, and approximate
    predictions against the reference hdbscan package across device pairs."""
    if train_device == "gpu" and infer_device == "cpu":
        pytest.skip("Can't transfer attributes to cpu for now")
    # Reference (CPU) model with matching hyper-parameters.
    ref_model = refHDBSCAN(
        prediction_data=True,
        approx_min_span_tree=False,
        max_cluster_size=0,
        min_cluster_size=30,
    )
    ref_trained_labels = ref_model.fit_predict(X_train_blob)
    from hdbscan.prediction import (
        all_points_membership_vectors as cpu_all_points_membership_vectors,
        approximate_predict as cpu_approximate_predict,
    )
    ref_membership = cpu_all_points_membership_vectors(ref_model)
    ref_labels, ref_probs = cpu_approximate_predict(ref_model, X_test_blob)
    model = HDBSCAN(
        prediction_data=True,
        approx_min_span_tree=False,
        max_cluster_size=0,
        min_cluster_size=30,
    )
    with using_device_type(train_device):
        trained_labels = model.fit_predict(X_train_blob)
    with using_device_type(infer_device):
        # NOTE(review): import is placed inside the device context —
        # presumably deliberate so resolution happens under that context.
        from cuml.cluster.hdbscan.prediction import (
            all_points_membership_vectors,
            approximate_predict,
        )
        membership = all_points_membership_vectors(model)
        labels, probs = approximate_predict(model, X_test_blob)
    # Cluster assignments and prediction outputs must closely agree.
    assert adjusted_rand_score(trained_labels, ref_trained_labels) >= 0.95
    assert_membership_vectors(membership, ref_membership)
    assert adjusted_rand_score(labels, ref_labels) >= 0.98
    assert array_equal(probs, ref_probs, unit_tol=0.001, total_tol=0.006)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_tsne.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from sklearn.manifold import TSNE as skTSNE
from sklearn import datasets
from sklearn.manifold import trustworthiness
from sklearn.datasets import make_blobs
from sklearn.neighbors import NearestNeighbors
from cuml.manifold import TSNE
from cuml.neighbors import NearestNeighbors as cuKNN
from cuml.metrics import pairwise_distances
from cuml.testing.utils import array_equal, stress_param
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
np = cpu_only_import("numpy")
scipy = cpu_only_import("scipy")
cupyx = gpu_only_import("cupyx")
# Silence the warning emitted when using the experimental 'fft' method.
pytestmark = pytest.mark.filterwarnings(
    "ignore:Method 'fft' is " "experimental::"
)
# Shared hyper-parameters used by the TSNE tests below.
DEFAULT_N_NEIGHBORS = 90
DEFAULT_PERPLEXITY = 30
# Datasets loaded once at import time and reused across tests.
tsne_datasets = {
    "digits": datasets.load_digits(),
}
def validate_embedding(X, Y, score=0.74, n_neighbors=DEFAULT_N_NEIGHBORS):
    """Assert that embedding ``Y`` of ``X`` is NaN-free and that its
    trustworthiness exceeds ``score``."""
    trust = trustworthiness(X, Y, n_neighbors=n_neighbors)
    print("Trust=%s" % trust)
    assert trust > score
    # NaNs would indicate a diverged (exploded) optimization.
    assert np.sum(np.isnan(Y)) == 0
@pytest.mark.parametrize("type_knn_graph", ["cuml", "sklearn"])
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne_knn_graph_used(test_datasets, type_knn_graph, method):
    """A supplied knn_graph must actually drive the embedding: a graph
    built from garbage data has to visibly degrade trustworthiness."""
    X = test_datasets.data
    neigh = cuKNN(n_neighbors=DEFAULT_N_NEIGHBORS, metric="euclidean").fit(X)
    knn_graph = neigh.kneighbors_graph(X, mode="distance").astype("float32")
    if type_knn_graph == "cuml":
        knn_graph = cupyx.scipy.sparse.csr_matrix(knn_graph)
    tsne = TSNE(
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        method=method,
        perplexity=DEFAULT_PERPLEXITY,
        learning_rate_method="none",
        min_grad_norm=1e-12,
    )
    # Perform tsne with normal knn_graph
    Y = tsne.fit_transform(X, True, knn_graph)
    trust_normal = trustworthiness(X, Y, n_neighbors=DEFAULT_N_NEIGHBORS)
    # Build a knn graph from constant (garbage) data of the same shape.
    X_garbage = np.ones(X.shape)
    knn_graph_garbage = neigh.kneighbors_graph(
        X_garbage, mode="distance"
    ).astype("float32")
    if type_knn_graph == "cuml":
        knn_graph_garbage = cupyx.scipy.sparse.csr_matrix(knn_graph_garbage)
    tsne = TSNE(
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        method=method,
        perplexity=DEFAULT_PERPLEXITY,
        learning_rate_method="none",
        min_grad_norm=1e-12,
    )
    # Perform tsne with garbage knn_graph
    Y = tsne.fit_transform(X, True, knn_graph_garbage)
    trust_garbage = trustworthiness(X, Y, n_neighbors=DEFAULT_N_NEIGHBORS)
    assert (trust_normal - trust_garbage) > 0.15
    # NOTE(review): the two repeats below re-run the same estimator to
    # exercise instance re-use — the repetition appears deliberate.
    Y = tsne.fit_transform(X, True, knn_graph_garbage)
    trust_garbage = trustworthiness(X, Y, n_neighbors=DEFAULT_N_NEIGHBORS)
    assert (trust_normal - trust_garbage) > 0.15
    Y = tsne.fit_transform(X, True, knn_graph_garbage)
    trust_garbage = trustworthiness(X, Y, n_neighbors=DEFAULT_N_NEIGHBORS)
    assert (trust_normal - trust_garbage) > 0.15
@pytest.mark.parametrize("type_knn_graph", ["cuml", "sklearn"])
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne_knn_parameters(test_datasets, type_knn_graph, method):
    """fit_transform must accept a precomputed KNN graph in CSR, COO and
    CSC layouts, from host (scipy) or device (cupy) memory."""
    X = test_datasets.data
    from sklearn.preprocessing import normalize

    X = normalize(X, norm="l1")
    neigh = cuKNN(n_neighbors=DEFAULT_N_NEIGHBORS, metric="euclidean").fit(X)
    knn_graph = neigh.kneighbors_graph(X, mode="distance").astype("float32")
    if type_knn_graph == "cuml":
        knn_graph = cupyx.scipy.sparse.csr_matrix(knn_graph)
    tsne = TSNE(
        n_components=2,
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        learning_rate_method="none",
        method=method,
        min_grad_norm=1e-12,
        perplexity=DEFAULT_PERPLEXITY,
    )
    # Exercise all three sparse layouts of the same graph in turn.
    for graph in (knn_graph, knn_graph.tocoo(), knn_graph.tocsc()):
        embedding = tsne.fit_transform(X, True, graph)
        validate_embedding(X, embedding)
@pytest.mark.parametrize(
    "precomputed_type", ["knn_graph", "tuple", "pairwise"]
)
@pytest.mark.parametrize("sparse_input", [False, True])
def test_tsne_precomputed_knn(precomputed_type, sparse_input):
    """TSNE must accept every supported precomputed_knn format: a sparse
    knn graph, a (neighbors, distances) tuple, or a dense pairwise
    distance matrix — for dense and sparse inputs alike."""
    data, labels = make_blobs(
        n_samples=2000, n_features=10, centers=5, random_state=0
    )
    data = data.astype(np.float32)
    if sparse_input:
        # Zero out ~10% of the entries, then store as CSR.
        sparsification = np.random.choice(
            [0.0, 1.0], p=[0.1, 0.9], size=data.shape
        )
        data = np.multiply(data, sparsification)
        data = scipy.sparse.csr_matrix(data)
    n_neighbors = DEFAULT_N_NEIGHBORS
    if precomputed_type == "knn_graph":
        nn = NearestNeighbors(n_neighbors=n_neighbors)
        nn.fit(data)
        precomputed_knn = nn.kneighbors_graph(data, mode="distance")
    elif precomputed_type == "tuple":
        nn = NearestNeighbors(n_neighbors=n_neighbors)
        nn.fit(data)
        precomputed_knn = nn.kneighbors(data, return_distance=True)
        # Swap (distances, indices) -> (indices, distances); presumably
        # the order cuml expects — confirm against the TSNE docs.
        precomputed_knn = (precomputed_knn[1], precomputed_knn[0])
    elif precomputed_type == "pairwise":
        precomputed_knn = pairwise_distances(data)
    model = TSNE(n_neighbors=n_neighbors, precomputed_knn=precomputed_knn)
    embedding = model.fit_transform(data)
    trust = trustworthiness(data, embedding, n_neighbors=n_neighbors)
    assert trust >= 0.92
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne(test_datasets, method):
    """
    This tests how TSNE handles a lot of input data across time.
    (1) Numpy arrays are passed in
    (2) Params are changed in the TSNE class
    (3) The class gets re-used across time
    (4) Trustworthiness is checked
    (5) Tests NAN in TSNE output for learning rate explosions
    (6) Tests verbosity
    """
    X = test_datasets.data
    tsne = TSNE(
        n_components=2,
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        # "none" selects a fixed learning rate — TODO confirm semantics.
        learning_rate_method="none",
        method=method,
        min_grad_norm=1e-12,
        perplexity=DEFAULT_PERPLEXITY,
    )
    Y = tsne.fit_transform(X)
    # validate_embedding also checks for NaNs (learning-rate explosions).
    validate_embedding(X, Y)
@pytest.mark.parametrize("nrows", [stress_param(2400000)])
@pytest.mark.parametrize("ncols", [stress_param(225)])
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne_large(nrows, ncols, method):
    """
    This tests how TSNE handles large input.

    Fix: ``make_blobs`` returns an ``(X, y)`` tuple; ``.astype`` must be
    applied to the feature matrix, not the tuple (the previous code called
    ``.astype`` on the tuple, which raises ``AttributeError``).
    """
    X, y = make_blobs(
        n_samples=nrows, centers=8, n_features=ncols, random_state=1
    )
    X = X.astype(np.float32)
    tsne = TSNE(
        random_state=1,
        exaggeration_iter=1,
        n_iter=2,
        method=method,
        min_grad_norm=1e-12,
    )
    Y = tsne.fit_transform(X)
    # A learning-rate explosion would surface as NaNs in the embedding.
    nans = np.sum(np.isnan(Y))
    assert nans == 0
def test_components_exception():
    """Constructing TSNE with n_components=3 must raise ValueError."""
    with pytest.raises(ValueError):
        TSNE(n_components=3)
@pytest.mark.parametrize("input_type", ["cupy", "scipy"])
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne_fit_transform_on_digits_sparse(input_type, method):
    """Fit TSNE on the digits data given as a host (scipy) or device
    (cupy) sparse CSR matrix and check trustworthiness."""
    digits = tsne_datasets["digits"].data
    if input_type == "cupy":
        sp_prefix = cupyx.scipy.sparse
    else:
        sp_prefix = scipy.sparse
    fitter = TSNE(
        n_components=2,
        random_state=1,
        method=method,
        min_grad_norm=1e-12,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        learning_rate_method="none",
        perplexity=DEFAULT_PERPLEXITY,
    )
    new_data = sp_prefix.csr_matrix(scipy.sparse.csr_matrix(digits)).astype(
        "float32"
    )
    embedding = fitter.fit_transform(new_data, convert_dtype=True)
    if input_type == "cupy":
        # Move the embedding back to host memory for scoring.
        embedding = embedding.get()
    trust = trustworthiness(digits, embedding, n_neighbors=DEFAULT_N_NEIGHBORS)
    assert trust >= 0.85
@pytest.mark.parametrize("type_knn_graph", ["cuml", "sklearn"])
@pytest.mark.parametrize("input_type", ["cupy", "scipy"])
@pytest.mark.parametrize("method", ["fft", "barnes_hut"])
def test_tsne_knn_parameters_sparse(type_knn_graph, input_type, method):
    """Sparse input combined with an explicit knn graph in CSR, COO and
    CSC layouts must all produce valid embeddings."""
    digits = tsne_datasets["digits"].data
    neigh = cuKNN(n_neighbors=DEFAULT_N_NEIGHBORS, metric="euclidean").fit(
        digits
    )
    knn_graph = neigh.kneighbors_graph(digits, mode="distance").astype(
        "float32"
    )
    if type_knn_graph == "cuml":
        knn_graph = cupyx.scipy.sparse.csr_matrix(knn_graph)
    # Choose host (scipy) or device (cupy) sparse container for the input.
    if input_type == "cupy":
        sp_prefix = cupyx.scipy.sparse
    else:
        sp_prefix = scipy.sparse
    tsne = TSNE(
        n_components=2,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        random_state=1,
        learning_rate_method="none",
        method=method,
        min_grad_norm=1e-12,
        perplexity=DEFAULT_PERPLEXITY,
    )
    new_data = sp_prefix.csr_matrix(scipy.sparse.csr_matrix(digits))
    Y = tsne.fit_transform(new_data, True, knn_graph)
    if input_type == "cupy":
        Y = Y.get()  # move embedding back to host for validation
    validate_embedding(digits, Y, 0.85)
    Y = tsne.fit_transform(new_data, True, knn_graph.tocoo())
    if input_type == "cupy":
        Y = Y.get()
    validate_embedding(digits, Y, 0.85)
    Y = tsne.fit_transform(new_data, True, knn_graph.tocsc())
    if input_type == "cupy":
        Y = Y.get()
    validate_embedding(digits, Y, 0.85)
@pytest.mark.parametrize(
    "metric",
    [
        "l2",
        "euclidean",
        "sqeuclidean",
        "cityblock",
        "l1",
        "manhattan",
        "minkowski",
        "chebyshev",
        "cosine",
        "correlation",
    ],
)
def test_tsne_distance_metrics(metric):
    """cuml TSNE trustworthiness should track sklearn's for each
    supported distance metric (exact method, dense input)."""
    data, labels = make_blobs(
        n_samples=1000, n_features=64, centers=5, random_state=42
    )
    tsne = TSNE(
        n_components=2,
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        method="exact",
        learning_rate_method="none",
        min_grad_norm=1e-12,
        perplexity=DEFAULT_PERPLEXITY,
        metric=metric,
    )
    sk_tsne = skTSNE(
        n_components=2,
        random_state=1,
        min_grad_norm=1e-12,
        method="exact",
        perplexity=DEFAULT_PERPLEXITY,
        metric=metric,
    )
    cuml_embedding = tsne.fit_transform(data)
    sk_embedding = sk_tsne.fit_transform(data)
    # NaNs would indicate a diverged optimization.
    nans = np.sum(np.isnan(cuml_embedding))
    cuml_trust = trustworthiness(data, cuml_embedding, metric=metric)
    sk_trust = trustworthiness(data, sk_embedding, metric=metric)
    assert cuml_trust > 0.85
    assert nans == 0
    assert array_equal(sk_trust, cuml_trust, 0.05, with_sign=True)
@pytest.mark.parametrize("method", ["fft", "barnes_hut", "exact"])
@pytest.mark.parametrize(
    "metric", ["l2", "euclidean", "cityblock", "l1", "manhattan", "cosine"]
)
def test_tsne_distance_metrics_on_sparse_input(method, metric):
    """cuml TSNE trustworthiness on sparse input should track sklearn's
    for each supported metric.

    The two previous sklearn-constructor branches differed only in
    ``method``; they are collapsed into a single call.
    """
    data, labels = make_blobs(
        n_samples=1000, n_features=64, centers=5, random_state=42
    )
    data_sparse = scipy.sparse.csr_matrix(data)
    cuml_tsne = TSNE(
        n_components=2,
        random_state=1,
        n_neighbors=DEFAULT_N_NEIGHBORS,
        method=method,
        learning_rate_method="none",
        min_grad_norm=1e-12,
        perplexity=DEFAULT_PERPLEXITY,
        metric=metric,
    )
    # sklearn has no "fft" method; "barnes_hut" is its closest reference.
    sk_method = "barnes_hut" if method == "fft" else method
    sk_tsne = skTSNE(
        n_components=2,
        random_state=1,
        min_grad_norm=1e-12,
        method=sk_method,
        perplexity=DEFAULT_PERPLEXITY,
        metric=metric,
        init="random",
    )
    cuml_embedding = cuml_tsne.fit_transform(data_sparse)
    # NaNs would indicate a diverged optimization.
    nans = np.sum(np.isnan(cuml_embedding))
    sk_embedding = sk_tsne.fit_transform(data_sparse)
    cu_trust = trustworthiness(data, cuml_embedding, metric=metric)
    sk_trust = trustworthiness(data, sk_embedding, metric=metric)
    assert cu_trust > 0.85
    assert nans == 0
    assert array_equal(sk_trust, cu_trust, 0.06, with_sign=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_label_encoder.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.common.exceptions import NotFittedError
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.preprocessing.LabelEncoder import LabelEncoder
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
def _df_to_similarity_mat(df):
arr = df.to_numpy().reshape(1, -1)
return np.pad(arr, [(arr.shape[1] - 1, 0), (0, 0)], "edge")
@pytest.mark.parametrize("length", [10, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_fit_transform(length, cardinality):
    """Encoding must preserve the equality structure of the labels."""
    labels = cudf.Series(np.random.choice(cardinality, (length,)))
    codes = LabelEncoder().fit_transform(labels)
    labels_mat = _df_to_similarity_mat(labels)
    codes_mat = _df_to_similarity_mat(codes)
    # Two entries get equal codes iff their original labels were equal.
    assert ((codes_mat == codes_mat.T) == (labels_mat == labels_mat.T)).all()
@pytest.mark.parametrize("length", [10, 100, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_transform(length, cardinality):
    """Try fitting and then encoding a small subset of the df"""
    df = cudf.Series(np.random.choice(cardinality, (length,)))
    le = LabelEncoder().fit(df)
    assert le._fitted
    # Encode only the first half of the data the encoder was fit on.
    subset = df.iloc[0 : df.shape[0] // 2]
    encoded = le.transform(subset)
    # The encoding must preserve the equality structure of the subset.
    subset_arr = _df_to_similarity_mat(subset)
    encoded_arr = _df_to_similarity_mat(encoded)
    assert (
        (encoded_arr == encoded_arr.T) == (subset_arr == subset_arr.T)
    ).all()
def test_labelencoder_unseen():
    """Try encoding a value that was not present during fitting"""
    le = LabelEncoder().fit(cudf.Series(np.random.choice(10, (10,))))
    assert le._fitted
    # -1 can never be produced by np.random.choice(10, ...), so it is
    # guaranteed to be unseen.
    with pytest.raises(KeyError):
        le.transform(cudf.Series([-1]))
def test_labelencoder_unfitted():
    """Try calling `.transform()` without fitting first"""
    encoder = LabelEncoder()
    assert not encoder._fitted
    data = cudf.Series(np.random.choice(10, (10,)))
    with pytest.raises(NotFittedError):
        encoder.transform(data)
@pytest.mark.parametrize("use_fit_transform", [False, True])
@pytest.mark.parametrize(
    "orig_label, ord_label, expected_reverted, bad_ord_label",
    [
        (
            cudf.Series(["a", "b", "c"]),
            cudf.Series([2, 1, 2, 0]),
            cudf.Series(["c", "b", "c", "a"]),
            cudf.Series([-1, 1, 2, 0]),
        ),
        (
            cudf.Series(["Tokyo", "Paris", "Austin"]),
            cudf.Series([0, 2, 0]),
            cudf.Series(["Austin", "Tokyo", "Austin"]),
            cudf.Series([0, 1, 2, 3]),
        ),
        (
            cudf.Series(["a", "b", "c1"]),
            cudf.Series([2, 1]),
            cudf.Series(["c1", "b"]),
            cudf.Series([0, 1, 2, 3]),
        ),
        (
            cudf.Series(["1.09", "0.09", ".09", "09"]),
            cudf.Series([0, 1, 2, 3]),
            cudf.Series([".09", "0.09", "09", "1.09"]),
            cudf.Series([0, 1, 2, 3, 4]),
        ),
    ],
)
def test_inverse_transform(
    orig_label, ord_label, expected_reverted, bad_ord_label, use_fit_transform
):
    """inverse_transform must map ordinal codes back to the original
    labels and reject codes outside the fitted range."""
    # prepare LabelEncoder
    le = LabelEncoder()
    if use_fit_transform:
        le.fit_transform(orig_label)
    else:
        le.fit(orig_label)
    assert le._fitted is True
    # test if inverse_transform is correct
    reverted = le.inverse_transform(ord_label)
    assert len(reverted) == len(expected_reverted)
    assert len(reverted) == len(reverted[reverted == expected_reverted])
    # test if it correctly raises ValueError on out-of-range codes
    with pytest.raises(ValueError, match="y contains previously unseen label"):
        le.inverse_transform(bad_ord_label)
def test_unfitted_inverse_transform():
    """Try calling `.inverse_transform()` without fitting first.

    Fix: the body previously called ``transform`` (duplicating
    ``test_labelencoder_unfitted``) instead of ``inverse_transform``,
    so the unfitted path of ``inverse_transform`` was never exercised.
    """
    df = cudf.Series(np.random.choice(10, (10,)))
    le = LabelEncoder()
    assert not le._fitted
    with pytest.raises(NotFittedError):
        le.inverse_transform(df)
@pytest.mark.parametrize(
    "empty, ord_label", [(cudf.Series([]), cudf.Series([2, 1]))]
)
def test_empty_input(empty, ord_label):
    """Fitting on an empty Series must succeed; every code is then unseen
    and fit_transform of empty input returns an empty result."""
    # prepare LabelEncoder
    le = LabelEncoder()
    le.fit(empty)
    assert le._fitted is True
    # test if it correctly raises ValueError (no label was ever seen)
    with pytest.raises(ValueError, match="y contains previously unseen label"):
        le.inverse_transform(ord_label)
    # check fit_transform()
    le = LabelEncoder()
    transformed = le.fit_transform(empty)
    assert le._fitted is True
    assert len(transformed) == 0
def test_masked_encode():
    """Encoding a filtered (masked) column must give the same codes as
    encoding the equivalent pre-filtered data built from scratch."""
    int_values = [3, 1, 1, 2, 1, 1, 1, 1, 6, 5]
    cat_values = ["a", "d", "b", "c", "d", "d", "d", "c", "b", "c"]
    df = cudf.DataFrame({"filter_col": int_values, "cat_col": cat_values})
    # Encode the categorical column of the masked frame.
    df_filter = df[df["filter_col"] == 1]
    df_filter["cat_col"] = LabelEncoder().fit_transform(df_filter["cat_col"])
    # Rebuild the same filtered data directly in Python and encode it.
    filtered_int_values = [
        int_values[i] for i in range(len(int_values)) if int_values[i] == 1
    ]
    filtered_cat_values = [
        cat_values[i] for i in range(len(int_values)) if int_values[i] == 1
    ]
    df_test = cudf.DataFrame(
        {"filter_col": filtered_int_values, "cat_col": filtered_cat_values}
    )
    df_test["cat_col"] = LabelEncoder().fit_transform(df_test["cat_col"])
    assert (df_filter["cat_col"].values == df_test["cat_col"].values).all()
def _array_to_similarity_mat(x):
arr = x.reshape(1, -1)
return np.pad(arr, [(arr.shape[1] - 1, 0), (0, 0)], "edge")
@pytest.mark.parametrize("length", [10, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
@pytest.mark.parametrize("dtype", ["cupy", "numpy"])
def test_labelencoder_fit_transform_cupy_numpy(length, cardinality, dtype):
    """Try encoding the cupy array"""
    x = cp.random.choice(cardinality, (length,))
    if dtype == "numpy":
        x = x.get()
    encoded = LabelEncoder().fit_transform(x)
    x_arr = _array_to_similarity_mat(x)
    encoded_arr = _array_to_similarity_mat(encoded.values)
    if dtype == "numpy":
        encoded_arr = encoded_arr.get()
    # Compare the symmetric equality structure of input and encoding.
    # (Previously the left comparison used the raw 1-D `x`, relying on
    # broadcasting; using `x_arr` is explicit and consistent with
    # test_labelencoder_fit_transform above.)
    assert ((encoded_arr == encoded_arr.T) == (x_arr == x_arr.T)).all()
@pytest.mark.parametrize("use_fit_transform", [False, True])
@pytest.mark.parametrize(
    "orig_label, ord_label, expected_reverted, bad_ord_label",
    [
        (
            cp.array([7, 5, 3, 1]),
            cp.array([2, 1, 2, 3, 0]),
            cp.array([5, 3, 5, 7, 1]),
            cp.array([0, 1, 2, 3, 4]),
        ),
        (
            np.array([1.09, 0.09, 0.09, 0.09]),
            np.array([1, 1, 0, 0, 1]),
            cp.array([1.09, 1.09, 0.09, 0.09, 1.09]),
            np.array([0, 1, 1, 1, 2]),
        ),
    ],
)
def test_inverse_transform_cupy_numpy(
    orig_label, ord_label, expected_reverted, bad_ord_label, use_fit_transform
):
    """inverse_transform must round-trip ordinal codes for cupy and numpy
    inputs and reject codes outside the fitted range."""
    # prepare LabelEncoder
    le = LabelEncoder()
    if use_fit_transform:
        le.fit_transform(orig_label)
    else:
        le.fit(orig_label)
    assert le._fitted is True
    # test if inverse_transform is correct
    reverted = le.inverse_transform(ord_label)
    assert len(reverted) == len(expected_reverted)
    assert len(reverted) == len(reverted[reverted == expected_reverted])
    # test if it correctly raises ValueError on out-of-range codes
    with pytest.raises(ValueError, match="y contains previously unseen label"):
        le.inverse_transform(bad_ord_label)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_umap.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please install UMAP before running the code
# use 'conda install -c conda-forge umap-learn' command to install it
import platform
import pytest
import copy
import joblib
from sklearn.metrics import adjusted_rand_score
from sklearn.manifold import trustworthiness
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn import datasets
from cuml.internals import logger
from cuml.metrics import pairwise_distances
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml.manifold.umap import UMAP as cuUMAP
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
scipy_sparse = cpu_only_import("scipy.sparse")
# umap-learn is only imported off aarch64; ARM runs skip the tests that
# need it (see https://github.com/rapidsai/cuml/issues/5441).
IS_ARM = platform.processor() == "aarch64"
if not IS_ARM:
    import umap
# Dataset identifiers consumed by test_umap_fit_transform_trust.
dataset_names = ["iris", "digits", "wine", "blobs"]
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_feats", [unit_param(20), quality_param(100), stress_param(1000)]
)
def test_blobs_cluster(nrows, n_feats):
    """UMAP on well-separated blobs must keep the clusters perfectly
    separable (adjusted Rand index of 1.0 after KMeans)."""
    data, labels = datasets.make_blobs(
        n_samples=nrows, n_features=n_feats, centers=5, random_state=0
    )
    embedding = cuUMAP().fit_transform(data, convert_dtype=True)
    # Scoring is skipped for the stress-sized run.
    if nrows < 500000:
        predicted = KMeans(5).fit_predict(embedding)
        assert adjusted_rand_score(labels, predicted) == 1.0
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_feats", [unit_param(10), quality_param(100), stress_param(1000)]
)
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_fit_transform_score(nrows, n_feats):
    """cuml UMAP clustering quality must track umap-learn's on blobs."""
    n_samples = nrows
    n_features = n_feats
    data, labels = make_blobs(
        n_samples=n_samples, n_features=n_features, centers=10, random_state=42
    )
    model = umap.UMAP(n_neighbors=10, min_dist=0.1)
    cuml_model = cuUMAP(n_neighbors=10, min_dist=0.01)
    embedding = model.fit_transform(data)
    cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)
    assert not np.isnan(embedding).any()
    assert not np.isnan(cuml_embedding).any()
    # Scoring is skipped for the stress-sized run.
    if nrows < 500000:
        cuml_score = adjusted_rand_score(
            labels, KMeans(10).fit_predict(cuml_embedding)
        )
        score = adjusted_rand_score(labels, KMeans(10).fit_predict(embedding))
        assert array_equal(score, cuml_score, 1e-2, with_sign=True)
def test_supervised_umap_trustworthiness_on_iris():
    """Supervised UMAP (labels supplied) must stay trustworthy on iris."""
    iris = datasets.load_iris()
    reducer = cuUMAP(n_neighbors=10, random_state=0, min_dist=0.01)
    embedding = reducer.fit_transform(
        iris.data, iris.target, convert_dtype=True
    )
    assert trustworthiness(iris.data, embedding, n_neighbors=10) >= 0.97
def test_semisupervised_umap_trustworthiness_on_iris():
    """Semi-supervised UMAP: a -1 label marks a sample as unlabelled."""
    iris = datasets.load_iris()
    target = iris.target.copy()
    # Hide the labels of samples 25..74.
    target[25:75] = -1
    reducer = cuUMAP(n_neighbors=10, random_state=0, min_dist=0.01)
    embedding = reducer.fit_transform(iris.data, target, convert_dtype=True)
    assert trustworthiness(iris.data, embedding, n_neighbors=10) >= 0.97
def test_umap_trustworthiness_on_iris():
    """Unsupervised UMAP must stay trustworthy on iris."""
    iris = datasets.load_iris()
    reducer = cuUMAP(n_neighbors=10, min_dist=0.01, random_state=0)
    embedding = reducer.fit_transform(iris.data, convert_dtype=True)
    assert trustworthiness(iris.data, embedding, n_neighbors=10) >= 0.97
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
def test_umap_transform_on_iris(target_metric):
    """Transforming unseen iris samples through a fitted UMAP must yield
    a NaN-free, trustworthy embedding."""
    iris = datasets.load_iris()
    # Random ~75/25 train/transform split with a fixed seed.
    iris_selection = np.random.RandomState(42).choice(
        [True, False], 150, replace=True, p=[0.75, 0.25]
    )
    data = iris.data[iris_selection]
    fitter = cuUMAP(
        n_neighbors=10,
        init="random",
        n_epochs=800,
        min_dist=0.01,
        random_state=42,
        target_metric=target_metric,
    )
    fitter.fit(data, convert_dtype=True)
    # Held-out samples are transformed, not re-fit.
    new_data = iris.data[~iris_selection]
    embedding = fitter.transform(new_data, convert_dtype=True)
    assert not np.isnan(embedding).any()
    trust = trustworthiness(new_data, embedding, n_neighbors=10)
    assert trust >= 0.85
@pytest.mark.parametrize("input_type", ["cupy", "scipy"])
@pytest.mark.parametrize("xform_method", ["fit", "fit_transform"])
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
def test_umap_transform_on_digits_sparse(
    target_metric, input_type, xform_method
):
    """Sparse digits input (host or device) must embed trustworthily via
    either fit+transform or fit_transform."""
    digits = datasets.load_digits()
    # Random ~75/25 train/transform split with a fixed seed.
    digits_selection = np.random.RandomState(42).choice(
        [True, False], 1797, replace=True, p=[0.75, 0.25]
    )
    if input_type == "cupy":
        sp_prefix = cupyx.scipy.sparse
    else:
        sp_prefix = scipy_sparse
    data = sp_prefix.csr_matrix(
        scipy_sparse.csr_matrix(digits.data[digits_selection])
    )
    fitter = cuUMAP(
        n_neighbors=15,
        verbose=logger.level_info,
        init="random",
        n_epochs=0,
        min_dist=0.01,
        random_state=42,
        target_metric=target_metric,
    )
    new_data = sp_prefix.csr_matrix(
        scipy_sparse.csr_matrix(digits.data[~digits_selection])
    )
    if xform_method == "fit":
        fitter.fit(data, convert_dtype=True)
        embedding = fitter.transform(new_data, convert_dtype=True)
    else:
        embedding = fitter.fit_transform(new_data, convert_dtype=True)
    if input_type == "cupy":
        # Move the embedding back to host memory for scoring.
        embedding = embedding.get()
    trust = trustworthiness(
        digits.data[~digits_selection], embedding, n_neighbors=15
    )
    assert trust >= 0.96
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
def test_umap_transform_on_digits(target_metric):
    """Transforming unseen digits through a fitted UMAP must yield a
    trustworthy embedding."""
    digits = datasets.load_digits()
    # Random ~75/25 train/transform split with a fixed seed.
    digits_selection = np.random.RandomState(42).choice(
        [True, False], 1797, replace=True, p=[0.75, 0.25]
    )
    data = digits.data[digits_selection]
    fitter = cuUMAP(
        n_neighbors=15,
        verbose=logger.level_debug,
        init="random",
        n_epochs=0,
        min_dist=0.01,
        random_state=42,
        target_metric=target_metric,
    )
    fitter.fit(data, convert_dtype=True)
    new_data = digits.data[~digits_selection]
    embedding = fitter.transform(new_data, convert_dtype=True)
    trust = trustworthiness(
        digits.data[~digits_selection], embedding, n_neighbors=15
    )
    assert trust >= 0.96
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
@pytest.mark.parametrize("name", dataset_names)
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_fit_transform_trust(name, target_metric):
    """cuml UMAP trustworthiness must track umap-learn's on several
    small datasets."""
    if name == "iris":
        bunch = datasets.load_iris()
        data, labels = bunch.data, bunch.target
    elif name == "digits":
        bunch = datasets.load_digits(n_class=5)
        data, labels = bunch.data, bunch.target
    elif name == "wine":
        bunch = datasets.load_wine()
        data, labels = bunch.data, bunch.target
    else:
        data, labels = make_blobs(
            n_samples=500, n_features=10, centers=10, random_state=42
        )
    ref_model = umap.UMAP(
        n_neighbors=10, min_dist=0.01, target_metric=target_metric
    )
    cu_model = cuUMAP(
        n_neighbors=10, min_dist=0.01, target_metric=target_metric
    )
    ref_embedding = ref_model.fit_transform(data)
    cu_embedding = cu_model.fit_transform(data, convert_dtype=True)
    ref_trust = trustworthiness(data, ref_embedding, n_neighbors=10)
    cu_trust = trustworthiness(data, cu_embedding, n_neighbors=10)
    assert array_equal(ref_trust, cu_trust, 1e-1, with_sign=True)
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
@pytest.mark.parametrize("name", [unit_param("digits")])
@pytest.mark.parametrize("nrows", [quality_param(5000), stress_param(500000)])
@pytest.mark.parametrize("n_feats", [quality_param(100), stress_param(1000)])
@pytest.mark.parametrize("should_downcast", [True])
@pytest.mark.parametrize("input_type", ["dataframe", "ndarray"])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_data_formats(
    input_type, should_downcast, nrows, n_feats, name, target_metric
):
    """fit_transform must return a numpy ndarray regardless of the input
    data format."""
    dtype = np.float32 if not should_downcast else np.float64
    if name == "digits":
        # use the digits dataset for unit test
        digits = datasets.load_digits(n_class=9)
        X = digits["data"].astype(dtype)
    else:
        X, y = datasets.make_blobs(
            n_samples=nrows, n_features=n_feats, random_state=0
        )
    # Named `model` (not `umap`) to avoid shadowing the module import.
    model = cuUMAP(n_neighbors=3, n_components=2, target_metric=target_metric)
    embeds = model.fit_transform(X)
    # isinstance is the idiomatic type check (type(...) == ... rejects
    # subclasses and is flagged by linters).
    assert isinstance(embeds, np.ndarray)
@pytest.mark.parametrize("target_metric", ["categorical", "euclidean"])
@pytest.mark.filterwarnings("ignore:(.*)connected(.*):UserWarning:sklearn[.*]")
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_fit_transform_score_default(target_metric):
    """With default hyper-parameters, cuml and umap-learn should cluster
    blob data comparably well (adjusted Rand index within 1e-2)."""
    data, labels = make_blobs(
        n_samples=500, n_features=20, centers=10, random_state=42
    )
    ref_model = umap.UMAP(target_metric=target_metric)
    cu_model = cuUMAP(target_metric=target_metric)
    ref_embedding = ref_model.fit_transform(data)
    cu_embedding = cu_model.fit_transform(data, convert_dtype=True)
    cu_score = adjusted_rand_score(
        labels, KMeans(10).fit_predict(cu_embedding)
    )
    ref_score = adjusted_rand_score(
        labels, KMeans(10).fit_predict(ref_embedding)
    )
    assert array_equal(ref_score, cu_score, 1e-2, with_sign=True)
def test_umap_fit_transform_against_fit_and_transform():
    """fit_transform followed by transform(same data) only reproduces the
    embedding when hash_input=True is set."""
    n_samples = 500
    n_features = 20
    data, labels = make_blobs(
        n_samples=n_samples, n_features=n_features, centers=10, random_state=42
    )
    """
    First test the default option does not hash the input
    """
    cuml_model = cuUMAP()
    ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)
    fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)
    # Without hashing, transform() recomputes and differs byte-wise.
    assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_same_input)
    """
    Next, test explicitly enabling feature hashes the input
    """
    cuml_model = cuUMAP(hash_input=True)
    ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)
    fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)
    # With hashing, the identical input short-circuits to the stored result.
    assert joblib.hash(ft_embedding) == joblib.hash(fit_embedding_same_input)
    fit_embedding_diff_input = cuml_model.transform(
        data[1:], convert_dtype=True
    )
    # A different input must not hit the hash shortcut.
    assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_diff_input)
@pytest.mark.parametrize(
    "n_components,random_state",
    [
        unit_param(2, None),
        unit_param(2, 8),
        unit_param(2, np.random.RandomState(42)),
        unit_param(21, None),
        unit_param(21, np.random.RandomState(42)),
        unit_param(25, 8),
        unit_param(50, None),
        stress_param(50, 8),
    ],
)
def test_umap_fit_transform_reproducibility(n_components, random_state):
    """A fixed random_state must make fit_transform bit-reproducible;
    without one, two runs must actually differ."""
    n_samples = 8000
    n_features = 200
    if random_state is None:
        n_components *= 2
    data, labels = make_blobs(
        n_samples=n_samples, n_features=n_features, centers=10, random_state=42
    )
    def get_embedding(n_components, random_state):
        # Fresh estimator per run so no state carries over.
        reducer = cuUMAP(
            init="random", n_components=n_components, random_state=random_state
        )
        return reducer.fit_transform(data, convert_dtype=True)
    # Copy the state so both runs start from an identical RNG.
    state = copy.copy(random_state)
    cuml_embedding1 = get_embedding(n_components, state)
    state = copy.copy(random_state)
    cuml_embedding2 = get_embedding(n_components, state)
    assert not np.isnan(cuml_embedding1).any()
    assert not np.isnan(cuml_embedding2).any()
    # Reproducibility threshold raised until intermittent failure is fixed
    # Ref: https://github.com/rapidsai/cuml/issues/1903
    mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))
    if random_state is not None:
        assert mean_diff == 0.0
    else:
        assert mean_diff > 0.5
@pytest.mark.parametrize(
    "n_components,random_state",
    [
        unit_param(2, None),
        unit_param(2, 8),
        unit_param(2, np.random.RandomState(42)),
        unit_param(21, None),
        unit_param(25, 8),
        unit_param(25, np.random.RandomState(42)),
        unit_param(50, None),
        stress_param(50, 8),
    ],
)
def test_umap_transform_reproducibility(n_components, random_state):
    """transform() on held-out rows must be reproducible when a
    random_state is given, and stochastic when it is None.
    """
    n_samples = 5000
    n_features = 200
    # When unseeded we expect divergence; more components presumably
    # amplifies the mean difference past the 0.5 threshold below
    # — TODO confirm intent with the original author.
    if random_state is None:
        n_components *= 2
    data, labels = make_blobs(
        n_samples=n_samples, n_features=n_features, centers=10, random_state=42
    )
    # Deterministic ~50/50 split into fit and transform subsets.
    selection = np.random.RandomState(42).choice(
        [True, False], n_samples, replace=True, p=[0.5, 0.5]
    )
    fit_data = data[selection]
    transform_data = data[~selection]
    def get_embedding(n_components, random_state):
        # Fresh model per run; fit on one half, transform the other.
        reducer = cuUMAP(
            init="random", n_components=n_components, random_state=random_state
        )
        reducer.fit(fit_data, convert_dtype=True)
        return reducer.transform(transform_data, convert_dtype=True)
    # A RandomState instance mutates as it is consumed, so each run must
    # start from an identical copy of the original state.
    state = copy.copy(random_state)
    cuml_embedding1 = get_embedding(n_components, state)
    state = copy.copy(random_state)
    cuml_embedding2 = get_embedding(n_components, state)
    assert not np.isnan(cuml_embedding1).any()
    assert not np.isnan(cuml_embedding2).any()
    # Reproducibility threshold raised until intermittent failure is fixed
    # Ref: https://github.com/rapidsai/cuml/issues/1903
    mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))
    if random_state is not None:
        # Seeded runs must match bit-for-bit.
        assert mean_diff == 0.0
    else:
        # Unseeded runs must actually differ.
        assert mean_diff > 0.5
def test_umap_fit_transform_trustworthiness_with_consistency_enabled():
    """Seeded (consistent) fit_transform must still embed iris with high
    trustworthiness."""
    iris = datasets.load_iris()
    reducer = cuUMAP(
        n_neighbors=10, min_dist=0.01, init="random", random_state=42
    )
    low_dim = reducer.fit_transform(iris.data, convert_dtype=True)
    assert trustworthiness(iris.data, low_dim, n_neighbors=10) >= 0.97
def test_umap_transform_trustworthiness_with_consistency_enabled():
    """Seeded (consistent) transform() of held-out rows must keep high
    trustworthiness."""
    iris = datasets.load_iris()
    # Deterministic ~50/50 split into fit and held-out subsets.
    mask = np.random.RandomState(42).choice(
        [True, False], iris.data.shape[0], replace=True, p=[0.5, 0.5]
    )
    held_out = iris.data[~mask]
    reducer = cuUMAP(
        n_neighbors=10, min_dist=0.01, init="random", random_state=42
    )
    reducer.fit(iris.data[mask], convert_dtype=True)
    low_dim = reducer.transform(held_out, convert_dtype=True)
    assert trustworthiness(held_out, low_dim, n_neighbors=10) >= 0.92
@pytest.mark.filterwarnings("ignore:(.*)zero(.*)::scipy[.*]|umap[.*]")
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_exp_decay_params():
    """cuML must derive the same exponential-decay curve parameters (a, b)
    from (min_dist, spread) as umap-learn, and honor explicit a/b values."""
    def compare_exp_decay_params(a=None, b=None, min_dist=0.1, spread=1.0):
        cuml_model = cuUMAP(a=a, b=b, min_dist=min_dist, spread=spread)
        state = cuml_model.__getstate__()
        cuml_a, cuml_b = state["a"], state["b"]
        skl_model = umap.UMAP(a=a, b=b, min_dist=min_dist, spread=spread)
        # umap-learn only resolves _a/_b during fit; a 1x1 input suffices.
        skl_model.fit(np.zeros((1, 1)))
        sklearn_a, sklearn_b = skl_model._a, skl_model._b
        # Bug fix: compare |cuml - sklearn|, not |cuml| - |sklearn|.
        # The previous form was one-sided and sign-blind: it passed whenever
        # cuML's value was merely smaller in magnitude, so genuine
        # mismatches (including sign flips) could go undetected.
        assert abs(cuml_a - sklearn_a) < 1e-6
        assert abs(cuml_b - sklearn_b) < 1e-6
    compare_exp_decay_params(min_dist=0.1, spread=1.0)
    compare_exp_decay_params(a=0.5, b=2.0)
    compare_exp_decay_params(a=0.5)
    compare_exp_decay_params(b=0.5)
    compare_exp_decay_params(min_dist=0.1, spread=10.0)
@pytest.mark.parametrize("n_neighbors", [5, 15])
def test_umap_knn_graph(n_neighbors):
    """A precomputed knn_graph (csr/coo/csc) must yield embeddings
    equivalent to UMAP building its own, for fit_transform and transform."""
    data, labels = datasets.make_blobs(
        n_samples=2000, n_features=10, centers=5, random_state=0
    )
    data = data.astype(np.float32)

    def embed_fit_transform(graph=None):
        # Fixed seed so runs with equivalent graphs are comparable.
        model = cuUMAP(random_state=42, init="random", n_neighbors=n_neighbors)
        return model.fit_transform(data, knn_graph=graph, convert_dtype=True)

    def embed_via_transform(graph=None):
        model = cuUMAP(random_state=42, init="random", n_neighbors=n_neighbors)
        model.fit(data, knn_graph=graph, convert_dtype=True)
        return model.transform(data, convert_dtype=True)

    def check_trustworthy(embedding):
        assert trustworthiness(data, embedding, n_neighbors=n_neighbors) >= 0.92

    def check_close(e1, e2):
        mean_diff = np.mean(np.abs(e1 - e2))
        print("mean diff: %s" % mean_diff)
        assert mean_diff < 1.0

    neigh = NearestNeighbors(n_neighbors=n_neighbors)
    neigh.fit(data)
    knn_graph = neigh.kneighbors_graph(data, mode="distance")

    fit_embs = [
        embed_fit_transform(None),
        embed_fit_transform(knn_graph.tocsr()),
        embed_fit_transform(knn_graph.tocoo()),
        embed_fit_transform(knn_graph.tocsc()),
    ]
    trans_embs = [
        embed_via_transform(knn_graph.tocsr()),
        embed_via_transform(knn_graph.tocoo()),
        embed_via_transform(knn_graph.tocsc()),
    ]
    for embedding in fit_embs + trans_embs:
        check_trustworthy(embedding)
    # All sparse formats must agree within each code path.
    check_close(fit_embs[1], fit_embs[2])
    check_close(fit_embs[2], fit_embs[3])
    check_close(trans_embs[0], trans_embs[1])
    check_close(trans_embs[1], trans_embs[2])
@pytest.mark.parametrize(
    "precomputed_type", ["knn_graph", "tuple", "pairwise"]
)
@pytest.mark.parametrize("sparse_input", [False, True])
def test_umap_precomputed_knn(precomputed_type, sparse_input):
    """UMAP must accept precomputed neighbors as a sparse knn graph, an
    (indices, distances) tuple, or a dense pairwise-distance matrix."""
    data, labels = make_blobs(
        n_samples=2000, n_features=10, centers=5, random_state=0
    )
    data = data.astype(np.float32)
    if sparse_input:
        # Zero out ~10% of entries and switch to a CSR representation.
        sparsification = np.random.choice(
            [0.0, 1.0], p=[0.1, 0.9], size=data.shape
        )
        data = scipy_sparse.csr_matrix(np.multiply(data, sparsification))
    n_neighbors = 8
    if precomputed_type == "pairwise":
        precomputed_knn = pairwise_distances(data)
    else:
        nn = NearestNeighbors(n_neighbors=n_neighbors)
        nn.fit(data)
        if precomputed_type == "knn_graph":
            precomputed_knn = nn.kneighbors_graph(data, mode="distance")
        else:
            # "tuple" form is (indices, distances).
            dists, inds = nn.kneighbors(data, return_distance=True)
            precomputed_knn = (inds, dists)
    model = cuUMAP(n_neighbors=n_neighbors, precomputed_knn=precomputed_knn)
    embedding = model.fit_transform(data)
    assert trustworthiness(data, embedding, n_neighbors=n_neighbors) >= 0.92
def correctness_sparse(a, b, atol=0.1, rtol=0.2, threshold=0.95):
    """Return True when the fraction of reference non-zeros in ``a`` that
    ``b`` reproduces within ``atol + rtol * |b|`` reaches ``threshold``."""
    ref_zeros = (a == 0).sum()
    ref_nonzeros = a.size - ref_zeros
    within_tol = (cp.abs(a - b) <= (atol + rtol * cp.abs(b))).sum()
    # Entries that are zero in the reference are discounted from the count.
    return (within_tol - ref_zeros) / ref_nonzeros >= threshold
@pytest.mark.parametrize("n_rows", [200, 800])
@pytest.mark.parametrize("n_features", [8, 32])
@pytest.mark.parametrize("n_neighbors", [8, 16])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_fuzzy_simplicial_set(n_rows, n_features, n_neighbors):
    """cuML's fuzzy simplicial set (graph_) must closely match umap-learn's
    on the same blob data."""
    X, _ = make_blobs(
        n_samples=n_rows,
        centers=30,
        n_features=n_features,
        random_state=42,
    )
    cu_model = cuUMAP(n_neighbors=n_neighbors)
    cu_model.fit(X)
    cu_fss_graph = cu_model.graph_.todense()
    ref_model = umap.UMAP(n_neighbors=n_neighbors)
    ref_model.fit(X)
    ref_fss_graph = cp.sparse.coo_matrix(ref_model.graph_).todense()
    assert correctness_sparse(
        ref_fss_graph, cu_fss_graph, atol=0.1, rtol=0.2, threshold=0.95
    )
@pytest.mark.parametrize(
    "metric,supported",
    [
        ("l2", True),
        ("euclidean", True),
        ("sqeuclidean", True),
        ("l1", True),
        ("manhattan", True),
        ("minkowski", True),
        ("chebyshev", True),
        ("cosine", True),
        ("correlation", True),
        ("jaccard", False),
        ("hamming", True),
        ("canberra", True),
    ],
)
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_distance_metrics_fit_transform_trust(metric, supported):
    """For each metric, cuML's trustworthiness must track umap-learn's;
    unsupported metrics must raise NotImplementedError."""
    data, labels = make_blobs(
        n_samples=1000, n_features=64, centers=5, random_state=42
    )
    if metric == "jaccard":
        # Jaccard is only defined on boolean data.
        data = data >= 0
    umap_model = umap.UMAP(
        n_neighbors=10, min_dist=0.01, metric=metric, init="random"
    )
    cuml_model = cuUMAP(
        n_neighbors=10, min_dist=0.01, metric=metric, init="random"
    )
    if not supported:
        with pytest.raises(NotImplementedError):
            cuml_model.fit_transform(data)
        return
    ref_trust = trustworthiness(
        data, umap_model.fit_transform(data), n_neighbors=10, metric=metric
    )
    cu_trust = trustworthiness(
        data, cuml_model.fit_transform(data), n_neighbors=10, metric=metric
    )
    assert array_equal(ref_trust, cu_trust, 0.05, with_sign=True)
@pytest.mark.parametrize(
    "metric,supported,umap_learn_supported",
    [
        ("l2", True, False),
        ("euclidean", True, True),
        ("sqeuclidean", True, False),
        ("l1", True, True),
        ("manhattan", True, True),
        ("minkowski", True, True),
        ("chebyshev", True, True),
        ("cosine", True, True),
        ("correlation", True, True),
        ("jaccard", True, True),
        ("hamming", True, True),
        ("canberra", True, True),
    ],
)
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_umap_distance_metrics_fit_transform_trust_on_sparse_input(
    metric, supported, umap_learn_supported
):
    """On sparse (CSR) input, cuML's trustworthiness must track umap-learn's
    for metrics both libraries support; metrics cuML does not support must
    raise NotImplementedError.
    """
    data, labels = make_blobs(
        n_samples=1000, n_features=64, centers=5, random_state=42
    )
    # Deterministic mask: ~25% of rows (where the mask is False) are kept.
    data_selection = np.random.RandomState(42).choice(
        [True, False], 1000, replace=True, p=[0.75, 0.25]
    )
    if metric == "jaccard":
        # Jaccard is only defined on boolean data.
        data = data >= 0
    new_data = scipy_sparse.csr_matrix(data[~data_selection])
    # Reference run first, only when umap-learn supports this metric on
    # sparse input; its trust score is consumed at the end of the test.
    if umap_learn_supported:
        umap_model = umap.UMAP(
            n_neighbors=10, min_dist=0.01, metric=metric, init="random"
        )
        umap_embedding = umap_model.fit_transform(new_data)
        umap_trust = trustworthiness(
            data[~data_selection],
            umap_embedding,
            n_neighbors=10,
            metric=metric,
        )
    cuml_model = cuUMAP(
        n_neighbors=10, min_dist=0.01, metric=metric, init="random"
    )
    # Unsupported metrics must fail loudly; nothing further to check.
    if not supported:
        with pytest.raises(NotImplementedError):
            cuml_model.fit_transform(new_data)
        return
    cuml_embedding = cuml_model.fit_transform(new_data)
    cuml_trust = trustworthiness(
        data[~data_selection], cuml_embedding, n_neighbors=10, metric=metric
    )
    # Cross-library comparison only where a reference score exists.
    if umap_learn_supported:
        assert array_equal(umap_trust, cuml_trust, 0.05, with_sign=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_dbscan.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.datasets import make_blobs
from sklearn.cluster import DBSCAN as skDBSCAN
from cuml.testing.utils import (
get_pattern,
unit_param,
quality_param,
stress_param,
array_equal,
assert_dbscan_equal,
)
from cuml import DBSCAN as cuDBSCAN
from cuml.testing.utils import get_handle
import pytest
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
assert_raises = cpu_only_import_from("numpy.testing", "assert_raises")
@pytest.mark.parametrize("max_mbytes_per_batch", [1e3, None])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "ncols", [unit_param(20), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
    "out_dtype",
    [
        unit_param("int32"),
        unit_param(np.int32),
        unit_param("int64"),
        unit_param(np.int64),
        quality_param("int32"),
        stress_param("int32"),
    ],
)
def test_dbscan(
    datatype, use_handle, nrows, ncols, max_mbytes_per_batch, out_dtype
):
    """cuML DBSCAN must match sklearn's brute-force DBSCAN on tight blobs
    and honor the requested output label dtype."""
    # Shrink or skip the stress configuration on small GPUs.
    if nrows == 500000 and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            nrows = nrows * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test. "
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    X, y = make_blobs(
        n_samples=nrows,
        cluster_std=0.01,
        n_features=ncols,
        random_state=0,
    )
    handle, stream = get_handle(use_handle)
    eps = 1
    cuml_dbscan = cuDBSCAN(
        handle=handle,
        eps=eps,
        min_samples=2,
        max_mbytes_per_batch=max_mbytes_per_batch,
        output_type="numpy",
    )
    cu_labels = cuml_dbscan.fit_predict(X, out_dtype=out_dtype)
    # The sklearn cross-check is skipped at stress scale.
    if nrows < 500000:
        sk_dbscan = skDBSCAN(eps=1, min_samples=2, algorithm="brute")
        sk_labels = sk_dbscan.fit_predict(X)
        # Core points must agree exactly.
        assert array_equal(
            cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
        )
        # Labels must be equivalent up to cluster renumbering.
        assert_dbscan_equal(
            sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
        )
    if out_dtype in ("int32", np.int32):
        assert cu_labels.dtype == np.int32
    elif out_dtype in ("int64", np.int64):
        assert cu_labels.dtype == np.int64
@pytest.mark.parametrize(
    "max_mbytes_per_batch",
    [unit_param(1), quality_param(1e2), stress_param(None)],
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(10000)]
)
@pytest.mark.parametrize("out_dtype", ["int32", "int64"])
def test_dbscan_precomputed(datatype, nrows, max_mbytes_per_batch, out_dtype):
    """metric='precomputed' on a dense distance matrix must match sklearn."""
    # 2-D data keeps the pairwise distance matrix cheap to build.
    X, y = make_blobs(
        n_samples=nrows, cluster_std=0.01, n_features=2, random_state=0
    )
    X_dist = pairwise_distances(X).astype(datatype)
    eps = 1
    cu_model = cuDBSCAN(
        eps=eps,
        min_samples=2,
        metric="precomputed",
        max_mbytes_per_batch=max_mbytes_per_batch,
        output_type="numpy",
    )
    cu_labels = cu_model.fit_predict(X_dist, out_dtype=out_dtype)
    sk_model = skDBSCAN(
        eps=eps, min_samples=2, metric="precomputed", algorithm="brute"
    )
    sk_labels = sk_model.fit_predict(X_dist)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Label equivalence is checked against the original coordinates.
    assert_dbscan_equal(
        sk_labels, cu_labels, X, cu_model.core_sample_indices_, eps
    )
@pytest.mark.parametrize(
    "max_mbytes_per_batch",
    [unit_param(1), quality_param(1e2), stress_param(None)],
)
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(10000)]
)
@pytest.mark.parametrize("out_dtype", ["int32", "int64"])
def test_dbscan_cosine(nrows, max_mbytes_per_batch, out_dtype):
    """DBSCAN with the cosine metric must match sklearn's brute-force run."""
    X, y = make_blobs(
        n_samples=nrows, cluster_std=0.01, n_features=2, random_state=0
    )
    eps = 0.1
    cu_model = cuDBSCAN(
        eps=eps,
        min_samples=5,
        metric="cosine",
        max_mbytes_per_batch=max_mbytes_per_batch,
        output_type="numpy",
    )
    cu_labels = cu_model.fit_predict(X, out_dtype=out_dtype)
    sk_model = skDBSCAN(
        eps=eps, min_samples=5, metric="cosine", algorithm="brute"
    )
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels, cu_labels, X, cu_model.core_sample_indices_, eps
    )
@pytest.mark.parametrize("name", ["noisy_moons", "blobs", "no_structure"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
# Vary the eps to get a range of core point counts
@pytest.mark.parametrize("eps", [0.05, 0.1, 0.5])
def test_dbscan_sklearn_comparison(name, nrows, eps):
    """cuML DBSCAN must match sklearn on standard synthetic patterns
    across a range of eps values.

    Cleanup: the old ``default_base``/``params`` dictionaries were built
    but never read (eps comes straight from the parametrization), so the
    dead code has been removed.
    """
    # Shrink or skip the stress configuration on small GPUs.
    if nrows == 500000 and name == "blobs" and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            nrows = nrows * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    pat = get_pattern(name, nrows)
    X, y = pat[0]
    X = StandardScaler().fit_transform(X)
    cuml_dbscan = cuDBSCAN(eps=eps, min_samples=5, output_type="numpy")
    cu_labels = cuml_dbscan.fit_predict(X)
    # The sklearn cross-check is skipped at stress scale.
    if nrows < 500000:
        sk_dbscan = skDBSCAN(eps=eps, min_samples=5)
        sk_labels = sk_dbscan.fit_predict(X)
        # Core points must agree exactly.
        assert array_equal(
            cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
        )
        # Labels must be equivalent up to cluster renumbering.
        assert_dbscan_equal(
            sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
        )
@pytest.mark.parametrize("name", ["noisy_moons", "blobs", "no_structure"])
def test_dbscan_default(name):
    """cuML DBSCAN built with all-default constructor arguments must match
    sklearn run with the corresponding eps."""
    default_base = {
        "quantile": 0.3,
        "eps": 0.5,
        "damping": 0.9,
        "preference": -200,
        "n_neighbors": 10,
        "n_clusters": 2,
    }
    pat = get_pattern(name, 500)
    # Pattern-specific overrides are layered on top of the defaults.
    params = default_base.copy()
    params.update(pat[1])
    X, y = pat[0]
    X = StandardScaler().fit_transform(X)
    cu_model = cuDBSCAN(output_type="numpy")
    cu_labels = cu_model.fit_predict(X)
    sk_model = skDBSCAN(eps=params["eps"], min_samples=5)
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels,
        cu_labels,
        X,
        cu_model.core_sample_indices_,
        params["eps"],
    )
@pytest.mark.xfail(strict=True, raises=ValueError)
def test_dbscan_out_dtype_fails_invalid_input():
    """An unrecognized out_dtype must raise ValueError."""
    X, _ = make_blobs(n_samples=500)
    cuDBSCAN(output_type="numpy").fit_predict(X, out_dtype="bad_input")
def test_core_point_prop1():
    """Star shape with a two-point tail:
        .
    . . . . .
        .
    One core point (the intersection); the two rightmost points are not
    reachable from it, so we expect one cluster plus two noise points.
    cuML must agree with sklearn on both labels and core points."""
    params = {"eps": 1.1, "min_samples": 4}
    X = np.array(
        [[0, 0], [1, 0], [1, 1], [1, -1], [2, 0], [3, 0], [4, 0]],
        dtype=np.float32,
    )
    cu_model = cuDBSCAN(**params)
    cu_labels = cu_model.fit_predict(X)
    sk_model = skDBSCAN(**params)
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels,
        cu_labels,
        X,
        cu_model.core_sample_indices_,
        params["eps"],
    )
def test_core_point_prop2():
    """Two disjoint stars:
        .         .
    . . . . . . . . . .
        .         .
    Two core points that cannot reach each other, so two star-shaped
    clusters are expected. cuML must agree with sklearn."""
    params = {"eps": 1.1, "min_samples": 4}
    X = np.array(
        [
            [0, 0],
            [1, 0],
            [1, 1],
            [1, -1],
            [2, 0],
            [3, 0],
            [4, 0],
            [4, 1],
            [4, -1],
            [5, 0],
        ],
        dtype=np.float32,
    )
    cu_model = cuDBSCAN(**params)
    cu_labels = cu_model.fit_predict(X)
    sk_model = skDBSCAN(**params)
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels,
        cu_labels,
        X,
        cu_model.core_sample_indices_,
        params["eps"],
    )
def test_core_point_prop3():
    """Two stars sharing a link point:
        .       .
    . . . . . . .
        .       .
    Two mutually unreachable core points, hence two clusters. The shared
    link point's label depends on core-point processing order, so the
    original author noted it is excluded from the sklearn comparison.
    NOTE(review): kept the original TODO — the claim may not match the
    actual comparison performed below."""
    params = {"eps": 1.1, "min_samples": 4}
    # TODO: the above text does not correspond to the actual test!
    X = np.array(
        [
            [0, 0],
            [1, 0],
            [1, 1],
            [1, -1],
            [3, 0],
            [4, 0],
            [4, 1],
            [4, -1],
            [5, 0],
            [2, 0],
        ],
        dtype=np.float32,
    )
    cu_model = cuDBSCAN(**params)
    cu_labels = cu_model.fit_predict(X)
    sk_model = skDBSCAN(**params)
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels,
        cu_labels,
        X,
        cu_model.core_sample_indices_,
        params["eps"],
    )
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("out_dtype", ["int32", np.int32, "int64", np.int64])
@pytest.mark.parametrize("n_samples", [unit_param(500), stress_param(5000)])
def test_dbscan_propagation(datatype, use_handle, out_dtype, n_samples):
    """Label propagation through one wide, diffuse blob must match sklearn
    for every dtype / handle / out_dtype combination."""
    X, y = make_blobs(
        n_samples,
        centers=1,
        cluster_std=8.0,
        center_box=(-100.0, 100.0),
        random_state=8,
    )
    X = X.astype(datatype)
    handle, stream = get_handle(use_handle)
    eps = 0.5
    cu_model = cuDBSCAN(
        handle=handle, eps=eps, min_samples=5, output_type="numpy"
    )
    cu_labels = cu_model.fit_predict(X, out_dtype=out_dtype)
    sk_model = skDBSCAN(eps=eps, min_samples=5)
    sk_labels = sk_model.fit_predict(X)
    # Core points must agree exactly.
    assert array_equal(
        cu_model.core_sample_indices_, sk_model.core_sample_indices_
    )
    # Labels must be equivalent up to cluster renumbering.
    assert_dbscan_equal(
        sk_labels, cu_labels, X, cu_model.core_sample_indices_, eps
    )
def test_dbscan_no_calc_core_point_indices():
    """core_sample_indices_ must be None when its computation is disabled
    via calc_core_sample_indices=False."""
    eps = 1.1
    pat = get_pattern("noisy_moons", 1000)
    X, y = pat[0]
    X = StandardScaler().fit_transform(X)
    model = cuDBSCAN(
        eps=eps,
        min_samples=5,
        output_type="numpy",
        calc_core_sample_indices=False,
    )
    model.fit_predict(X)
    # The attribute must not have been populated.
    assert model.core_sample_indices_ is None
def test_dbscan_on_empty_array():
    """Fitting DBSCAN on an empty array must raise ValueError."""
    assert_raises(ValueError, cuDBSCAN().fit, np.array([]))
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_tfidf.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.feature_extraction.text import TfidfTransformer as SkTfidfTransfo
from cuml.feature_extraction.text import TfidfTransformer
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
# data_ids correspond to data, order is important
data_ids = ["base_case", "diag", "empty_feature", "123", "empty_doc"]
# Term-count matrices (documents x terms) exercising edge cases:
data = [
    # "base_case": a typical small count matrix
    np.array(
        [
            [0, 1, 1, 1, 0, 0, 1, 0, 1],
            [0, 2, 0, 1, 0, 1, 1, 0, 1],
            [1, 0, 0, 1, 1, 0, 1, 1, 1],
            [0, 1, 1, 1, 0, 0, 1, 0, 1],
        ]
    ),
    # "diag": lower-triangular pattern
    np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]]),
    # "empty_feature": last column is all zeros
    np.array([[1, 1, 0], [1, 1, 0], [1, 0, 0]]),
    # "123": single feature with varying counts
    np.array([[1], [2], [3]]),
    # "empty_doc": last row (document) is all zeros
    np.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]]),
]
@pytest.mark.parametrize("data", data, ids=data_ids)
@pytest.mark.parametrize("norm", ["l1", "l2", None])
@pytest.mark.parametrize("use_idf", [True, False])
@pytest.mark.parametrize("smooth_idf", [True, False])
@pytest.mark.parametrize("sublinear_tf", [True, False])
@pytest.mark.filterwarnings(
    "ignore:divide by zero(.*):RuntimeWarning:" "sklearn[.*]"
)
def test_tfidf_transformer(data, norm, use_idf, smooth_idf, sublinear_tf):
    """cuML TfidfTransformer must match sklearn for every option combo."""
    kwargs = dict(
        norm=norm,
        use_idf=use_idf,
        smooth_idf=smooth_idf,
        sublinear_tf=sublinear_tf,
    )
    res = TfidfTransformer(**kwargs).fit_transform(cp.array(data)).todense()
    ref = SkTfidfTransfo(**kwargs).fit_transform(data).todense()
    cp.testing.assert_array_almost_equal(res, ref)
@pytest.mark.parametrize("norm", ["l1", "l2", None])
@pytest.mark.parametrize("use_idf", [True, False])
@pytest.mark.parametrize("smooth_idf", [True, False])
@pytest.mark.parametrize("sublinear_tf", [True, False])
def test_tfidf_transformer_copy(norm, use_idf, smooth_idf, sublinear_tf):
    """copy=False must transform the sparse input in place."""
    if use_idf:
        pytest.xfail(
            "cupyx.scipy.sparse.csr does not support inplace multiply."
        )
    data_gpu = cupyx.scipy.sparse.csr_matrix(
        cp.array([[0, 1, 1, 1], [0, 2, 0, 1]], dtype=cp.float64, order="F")
    )
    transformer = TfidfTransformer(
        norm=norm,
        use_idf=use_idf,
        smooth_idf=smooth_idf,
        sublinear_tf=sublinear_tf,
    )
    out = transformer.fit_transform(data_gpu, copy=False)
    # In place: the input buffer must now equal the returned result.
    cp.testing.assert_array_almost_equal(data_gpu.todense(), out.todense())
def test_tfidf_transformer_sparse():
    """CSC and CSR inputs must produce identical transforms."""
    base = cupyx.scipy.sparse.rand(10, 2000, dtype=np.float64, random_state=123)
    from_csc = (
        TfidfTransformer()
        .fit_transform(cupyx.scipy.sparse.csc_matrix(base))
        .todense()
    )
    from_csr = (
        TfidfTransformer()
        .fit_transform(cupyx.scipy.sparse.csr_matrix(base))
        .todense()
    )
    cp.testing.assert_array_almost_equal(from_csc, from_csr)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_metrics.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from cuml.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import v_measure_score as sklearn_v_measure_score
from scipy.special import rel_entr as scipy_kl_divergence
from sklearn.metrics import pairwise_distances as sklearn_pairwise_distances
from cuml.metrics import (
pairwise_distances,
sparse_pairwise_distances,
PAIRWISE_DISTANCE_METRICS,
PAIRWISE_DISTANCE_SPARSE_METRICS,
)
from sklearn.metrics import (
precision_recall_curve as sklearn_precision_recall_curve,
)
from sklearn.metrics import roc_auc_score as sklearn_roc_auc_score
from cuml.metrics import log_loss
from cuml.metrics import precision_recall_curve
from cuml.metrics import roc_auc_score
from cuml.common.sparsefuncs import csr_row_normalize_l1
from cuml.common import has_scipy
from sklearn.metrics import mean_squared_log_error as sklearn_msle
from sklearn.metrics import mean_absolute_error as sklearn_mae
from cuml.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.metrics import mean_squared_error as sklearn_mse
from cuml.metrics.regression import (
mean_squared_error,
mean_squared_log_error,
mean_absolute_error,
)
from cuml.model_selection import train_test_split
from cuml.metrics.cluster import entropy
from cuml.metrics import kl_divergence as cu_kl_divergence
from cuml.metrics import hinge_loss as cuml_hinge
from cuml import LogisticRegression as cu_log
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.cluster import silhouette_samples as sk_silhouette_samples
from sklearn.metrics.cluster import silhouette_score as sk_silhouette_score
from sklearn.metrics.cluster import mutual_info_score as sk_mutual_info_score
from sklearn.metrics.cluster import completeness_score as sk_completeness_score
from sklearn.metrics.cluster import homogeneity_score as sk_homogeneity_score
from sklearn.metrics.cluster import adjusted_rand_score as sk_ars
from sklearn.metrics import log_loss as sklearn_log_loss
from sklearn.metrics import accuracy_score as sk_acc_score
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import hinge_loss as sk_hinge
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.testing.utils import (
get_handle,
get_pattern,
array_equal,
unit_param,
quality_param,
stress_param,
generate_random_labels,
score_labeling_with_handle,
)
from cuml.metrics.cluster import silhouette_samples as cu_silhouette_samples
from cuml.metrics.cluster import silhouette_score as cu_silhouette_score
from cuml.metrics import accuracy_score as cu_acc_score
from cuml.metrics.cluster import adjusted_rand_score as cu_ars
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.internals.safe_imports import cpu_only_import
import pytest
import random
from itertools import chain, permutations
from functools import partial
import cuml
import cuml.internals.logger as logger
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
cuda = gpu_only_import_from("numba", "cuda")
assert_almost_equal = cpu_only_import_from(
"numpy.testing", "assert_almost_equal"
)
scipy_pairwise_distances = cpu_only_import_from("scipy.spatial", "distance")
IS_ARM = platform.processor() == "aarch64"
@pytest.fixture(scope="module")
def random_state():
    """Module-scoped random seed, logged at debug level so that failing
    runs can be reproduced."""
    # Bug fix: random.randint requires integer bounds. Passing the float
    # 1e6 is deprecated since Python 3.10 and raises on 3.12+, so use the
    # integer literal 10**6 instead.
    random_state = random.randint(0, 10**6)
    with logger.set_level(logger.level_debug):
        logger.debug("Random seed: {}".format(random_state))
    return random_state
@pytest.fixture(
    scope="module",
    params=(
        {
            "n_clusters": 2,
            "n_features": 2,
            "label_type": "int64",
            "data_type": "float32",
        },
        {
            "n_clusters": 5,
            "n_features": 1000,
            "label_type": "int32",
            "data_type": "float64",
        },
    ),
)
def labeled_clusters(request, random_state):
    """Blob data with mild cluster overlap, cast to the parametrized
    data/label dtypes."""
    cfg = request.param
    data, labels = make_blobs(
        n_samples=1000,
        n_features=cfg["n_features"],
        random_state=random_state,
        centers=cfg["n_clusters"],
        center_box=(-1, 1),
        cluster_std=1.5,  # Allow some cluster overlap
    )
    return data.astype(cfg["data_type"]), labels.astype(cfg["label_type"])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("use_handle", [True, False])
def test_r2_score(datatype, use_handle):
    """r2_score on device arrays must match the known value for this data."""
    y_true = np.array([0.1, 0.2, 0.3, 0.4, 0.5], dtype=datatype)
    y_pred = np.array([0.12, 0.22, 0.32, 0.42, 0.52], dtype=datatype)
    handle, stream = get_handle(use_handle)
    score = cuml.metrics.r2_score(
        cuda.to_device(y_true), cuda.to_device(y_pred), handle=handle
    )
    np.testing.assert_almost_equal(score, 0.98, decimal=7)
def test_sklearn_search():
    """cuML estimators must plug into sklearn's GridSearchCV machinery."""
    import numpy as np
    from cuml import Ridge as cumlRidge
    import cudf
    from sklearn import datasets
    from sklearn.model_selection import train_test_split, GridSearchCV

    diabetes = datasets.load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data,
        diabetes.target,
        test_size=0.2,
        shuffle=False,
        random_state=1,
    )
    cu_clf = cumlRidge(
        alpha=np.array([1.0]),
        fit_intercept=True,
        normalize=False,
        solver="eig",
    )
    # GridSearchCV requires the estimator to expose score().
    assert getattr(cu_clf, "score", False)
    grid = GridSearchCV(cu_clf, {"alpha": np.logspace(-3, -1, 10)}, cv=5)
    gdf_data = cudf.DataFrame(X_train)
    gdf_train = cudf.DataFrame(dict(train=y_train))
    grid.fit(gdf_data, gdf_train.train)
    assert grid.best_params_ == {"alpha": 0.1}
@pytest.mark.parametrize(
    "nrows", [unit_param(30), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "ncols", [unit_param(10), quality_param(100), stress_param(200)]
)
@pytest.mark.parametrize(
    "n_info", [unit_param(7), quality_param(50), stress_param(100)]
)
@pytest.mark.parametrize("datatype", [np.float32])
def test_accuracy(nrows, ncols, n_info, datatype):
    """cuML accuracy_score must agree with sklearn's on RF predictions."""
    # 80/20 train/test split.
    train_rows = np.int32(nrows * 0.8)
    X, y = make_classification(
        n_samples=nrows,
        n_features=ncols,
        n_clusters_per_class=1,
        n_informative=n_info,
        random_state=123,
        n_classes=5,
    )
    X_train = np.asarray(X[:train_rows]).astype(datatype)
    y_train = np.asarray(y[:train_rows]).astype(np.int32)
    X_test = np.asarray(X[train_rows:]).astype(datatype)
    y_test = np.asarray(y[train_rows:]).astype(np.int32)
    # Handle with multiple streams for the forest fit.
    handle, stream = get_handle(True, n_streams=8)
    rf_model = curfc(
        max_features=1.0,
        n_bins=8,
        split_criterion=0,
        min_samples_leaf=2,
        n_estimators=40,
        handle=handle,
        max_leaves=-1,
        max_depth=16,
    )
    rf_model.fit(X_train, y_train)
    cu_predict = rf_model.predict(X_test)
    # Both implementations must score the same predictions identically.
    assert array_equal(
        cu_acc_score(y_test, cu_predict), sk_acc_score(y_test, cu_predict)
    )
# Datasets used by the rand-index test below; "blobs" and "varied" are
# known to disagree with the reference and are therefore xfailed.
_passing_datasets = ["noisy_circles", "noisy_moons", "aniso"]
_xfail_datasets = ["blobs", "varied"]
dataset_names = _passing_datasets + [
    pytest.param(name, marks=pytest.mark.xfail) for name in _xfail_datasets
]
@pytest.mark.parametrize("name", dataset_names)
@pytest.mark.parametrize(
    "nrows", [unit_param(20), quality_param(5000), stress_param(500000)]
)
def test_rand_index_score(name, nrows):
    """cuML adjusted rand score agrees with sklearn on KMeans labelings."""
    default_base = {
        "quantile": 0.3,
        "eps": 0.3,
        "damping": 0.9,
        "preference": -200,
        "n_neighbors": 10,
        "n_clusters": 3,
    }
    pat = get_pattern(name, nrows)
    # Pattern-specific params override the defaults.
    params = {**default_base, **pat[1]}
    X, y = pat[0]
    X = StandardScaler().fit_transform(X)
    cu_y_pred = cuml.KMeans(n_clusters=params["n_clusters"]).fit_predict(X)
    cu_score = cu_ars(y, cu_y_pred)
    cu_score_using_sk = sk_ars(y, cp.asnumpy(cu_y_pred))
    assert array_equal(cu_score, cu_score_using_sk)
@pytest.mark.parametrize(
    "metric", ("cityblock", "cosine", "euclidean", "l1", "sqeuclidean")
)
@pytest.mark.parametrize("chunk_divider", [1, 3, 5])
@pytest.mark.skipif(
    IS_ARM,
    reason="Test fails unexpectedly on ARM. "
    "github.com/rapidsai/cuml/issues/5025",
)
def test_silhouette_score_batched(metric, chunk_divider, labeled_clusters):
    """Batched cuML silhouette score matches sklearn to 2 decimals."""
    X, labels = labeled_clusters
    chunk = int(X.shape[0] / chunk_divider)
    gpu_score = cu_silhouette_score(X, labels, metric=metric, chunksize=chunk)
    ref_score = sk_silhouette_score(X, labels, metric=metric)
    assert_almost_equal(gpu_score, ref_score, decimal=2)
@pytest.mark.parametrize(
    "metric", ("cityblock", "cosine", "euclidean", "l1", "sqeuclidean")
)
@pytest.mark.parametrize("chunk_divider", [1, 3, 5])
def test_silhouette_samples_batched(metric, chunk_divider, labeled_clusters):
    """Batched cuML silhouette samples match sklearn's per-sample scores.

    After rounding to 3 decimals, at most 0.5% of the entries may differ
    between cuML and sklearn, and no differing entry may deviate by more
    than 1e-1.

    BUG FIX: the previous version computed ``cp.all(diff)``, which returns
    a 0-d array whose shape has length 0, so both ``if len(...) > 0``
    guards were always False and neither assertion could ever fire. The
    differing elements are now counted explicitly.
    """
    X, labels = labeled_clusters
    cuml_scores = cu_silhouette_samples(
        X, labels, metric=metric, chunksize=int(X.shape[0] / chunk_divider)
    )
    sk_scores = sk_silhouette_samples(X, labels, metric=metric)
    cu_trunc = cp.around(cuml_scores, decimals=3)
    sk_trunc = cp.around(sk_scores, decimals=3)
    diff = cp.absolute(cu_trunc - sk_trunc) > 0
    n_diff = int(cp.count_nonzero(diff))
    # 0.5% elements allowed to be different
    assert n_diff <= 0.005 * X.shape[0]
    # different elements should not differ more than 1e-1
    if n_diff > 0:
        tolerance_ok = cp.absolute(cu_trunc[diff] - sk_trunc[diff]) <= 1e-1
        assert bool(cp.all(tolerance_ok))
@pytest.mark.xfail
def test_silhouette_score_batched_non_monotonic():
    """Silhouette samples with non-contiguous / non-monotonic label values."""

    def _compare(vecs, labels):
        cuml_samples = cu_silhouette_samples(X=vecs, labels=labels)
        sk_samples = sk_silhouette_samples(X=vecs, labels=labels)
        assert array_equal(cuml_samples, sk_samples)

    _compare(
        np.array(
            [
                [0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0],
                [2.0, 2.0, 2.0],
                [10.0, 10.0, 10.0],
            ]
        ),
        np.array([0, 0, 1, 3]),
    )
    _compare(
        np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [10.0, 10.0, 10.0]]),
        np.array([1, 1, 3]),
    )
def score_homogeneity(ground_truth, predictions, use_handle):
    """Evaluate cuML's homogeneity score through the handle-aware helper."""
    metric_fn = cuml.metrics.homogeneity_score
    return score_labeling_with_handle(
        metric_fn, ground_truth, predictions, use_handle, dtype=np.int32
    )
def score_completeness(ground_truth, predictions, use_handle):
    """Evaluate cuML's completeness score through the handle-aware helper."""
    metric_fn = cuml.metrics.completeness_score
    return score_labeling_with_handle(
        metric_fn, ground_truth, predictions, use_handle, dtype=np.int32
    )
def score_mutual_info(ground_truth, predictions, use_handle):
    """Evaluate cuML's mutual information via the handle-aware helper."""
    metric_fn = cuml.metrics.mutual_info_score
    return score_labeling_with_handle(
        metric_fn, ground_truth, predictions, use_handle, dtype=np.int32
    )
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [1, 1, 0, 0]), ([0, 0, 1, 1], [0, 0, 1, 1])]
)
def test_homogeneity_perfect_labeling(use_handle, data):
    """A perfect labeling must score a homogeneity of 1."""
    ground_truth, predictions = data
    score = score_homogeneity(ground_truth, predictions, use_handle)
    assert_almost_equal(score, 1.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [0, 0, 1, 2]), ([0, 0, 1, 1], [0, 1, 2, 3])]
)
def test_homogeneity_non_perfect_labeling(use_handle, data):
    """Splitting classes into extra clusters keeps homogeneity at 1."""
    ground_truth, predictions = data
    score = score_homogeneity(ground_truth, predictions, use_handle)
    assert_almost_equal(score, 1.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [0, 1, 0, 1]), ([0, 0, 1, 1], [0, 0, 0, 0])]
)
def test_homogeneity_non_homogeneous_labeling(use_handle, data):
    """Clusters mixing samples from different classes score homogeneity 0."""
    ground_truth, predictions = data
    score = score_homogeneity(ground_truth, predictions, use_handle)
    assert_almost_equal(score, 0.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("input_range", [[0, 1000], [-1000, 1000]])
def test_homogeneity_score_big_array(use_handle, input_range):
    """cuML homogeneity matches sklearn on a large random labeling."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(*input_range, int(10e4), dtype=np.int32)
    )
    cuml_score = score_homogeneity(a, b, use_handle)
    sklearn_score = sk_homogeneity_score(a, b)
    np.testing.assert_almost_equal(cuml_score, sklearn_score, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "input_range", [[0, 2], [-5, 20], [int(-10e2), int(10e2)]]
)
def test_homogeneity_completeness_symmetry(use_handle, input_range):
    """homogeneity(a, b) must equal completeness(b, a)."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(*input_range, int(10e3), dtype=np.int32)
    )
    np.testing.assert_almost_equal(
        score_homogeneity(a, b, use_handle),
        score_completeness(b, a, use_handle),
        decimal=4,
    )
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "input_labels",
    [
        ([0, 0, 1, 1], [1, 1, 0, 0]),
        ([0, 0, 1, 1], [0, 0, 1, 1]),
        ([0, 0, 1, 1], [0, 0, 1, 2]),
        ([0, 0, 1, 1], [0, 1, 2, 3]),
        ([0, 0, 1, 1], [0, 1, 0, 1]),
        ([0, 0, 1, 1], [0, 0, 0, 0]),
    ],
)
def test_mutual_info_score(use_handle, input_labels):
    """cuML mutual information matches sklearn on fixed tiny labelings."""
    ground_truth, predictions = input_labels
    score = score_mutual_info(ground_truth, predictions, use_handle)
    ref = sk_mutual_info_score(ground_truth, predictions)
    np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("input_range", [[0, 1000], [-1000, 1000]])
def test_mutual_info_score_big_array(use_handle, input_range):
    """cuML mutual information matches sklearn on a large random labeling."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(*input_range, int(10e4), dtype=np.int32)
    )
    cuml_score = score_mutual_info(a, b, use_handle)
    sklearn_score = sk_mutual_info_score(a, b)
    np.testing.assert_almost_equal(cuml_score, sklearn_score, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("n", [14])
def test_mutual_info_score_range_equal_samples(use_handle, n):
    """Mutual information when the label range equals the sample count."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(-n, n, n, dtype=np.int32)
    )
    cuml_score = score_mutual_info(a, b, use_handle)
    sklearn_score = sk_mutual_info_score(a, b)
    np.testing.assert_almost_equal(cuml_score, sklearn_score, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("input_range", [[0, 19], [0, 2], [-5, 20]])
@pytest.mark.parametrize("n_samples", [129, 258])
def test_mutual_info_score_many_blocks(use_handle, input_range, n_samples):
    """Sample counts spanning several GPU blocks still match sklearn."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(*input_range, n_samples, dtype=np.int32)
    )
    cuml_score = score_mutual_info(a, b, use_handle)
    sklearn_score = sk_mutual_info_score(a, b)
    np.testing.assert_almost_equal(cuml_score, sklearn_score, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [1, 1, 0, 0]), ([0, 0, 1, 1], [0, 0, 1, 1])]
)
def test_completeness_perfect_labeling(use_handle, data):
    """A perfect labeling must score a completeness of 1."""
    ground_truth, predictions = data
    score = score_completeness(ground_truth, predictions, use_handle)
    np.testing.assert_almost_equal(score, 1.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [0, 0, 0, 0]), ([0, 1, 2, 3], [0, 0, 1, 1])]
)
def test_completeness_non_perfect_labeling(use_handle, data):
    """Assigning all members of each class to one cluster stays complete."""
    ground_truth, predictions = data
    score = score_completeness(ground_truth, predictions, use_handle)
    np.testing.assert_almost_equal(score, 1.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "data", [([0, 0, 1, 1], [0, 1, 0, 1]), ([0, 0, 0, 0], [0, 1, 2, 3])]
)
def test_completeness_non_complete_labeling(use_handle, data):
    """Splitting class members across clusters scores completeness 0."""
    ground_truth, predictions = data
    score = score_completeness(ground_truth, predictions, use_handle)
    np.testing.assert_almost_equal(score, 0.0, decimal=4)
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize("input_range", [[0, 1000], [-1000, 1000]])
def test_completeness_score_big_array(use_handle, input_range):
    """cuML completeness matches sklearn on a large random labeling."""
    a, b, _, _ = generate_random_labels(
        lambda rd: rd.randint(*input_range, int(10e4), dtype=np.int32)
    )
    cuml_score = score_completeness(a, b, use_handle)
    sklearn_score = sk_completeness_score(a, b)
    np.testing.assert_almost_equal(cuml_score, sklearn_score, decimal=4)
def test_regression_metrics():
    """MSE / MSLE / MAE on a sequence where every prediction is off by 1."""
    y_true = np.arange(50, dtype=int)
    y_pred = y_true + 1
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.0)
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.0)
    # MSLE must equal the MSE computed in log(1 + x) space.
    msle_ref = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred))
    assert_almost_equal(mean_squared_log_error(y_true, y_pred), msle_ref)
@pytest.mark.parametrize("n_samples", [50, stress_param(500000)])
@pytest.mark.parametrize(
    "y_dtype", [np.int32, np.int64, np.float32, np.float64]
)
@pytest.mark.parametrize(
    "pred_dtype", [np.int32, np.int64, np.float32, np.float64]
)
@pytest.mark.parametrize("function", ["mse", "mae", "msle"])
def test_regression_metrics_random_with_mixed_dtypes(
    n_samples, y_dtype, pred_dtype, function
):
    """cuML regression metrics match sklearn for every y/pred dtype mix."""
    y_true, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 1000, n_samples).astype(y_dtype)
    )
    y_pred, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 1000, n_samples).astype(pred_dtype)
    )
    metric_pairs = {
        "mse": (mean_squared_error, sklearn_mse),
        "mae": (mean_absolute_error, sklearn_mae),
        "msle": (mean_squared_log_error, sklearn_msle),
    }
    cuml_reg, sklearn_reg = metric_pairs[function]
    res = cuml_reg(y_true, y_pred, multioutput="raw_values")
    ref = sklearn_reg(y_true, y_pred, multioutput="raw_values")
    cp.testing.assert_array_almost_equal(res, ref, decimal=2)
@pytest.mark.parametrize("function", ["mse", "mse_not_squared", "mae", "msle"])
def test_regression_metrics_at_limits(function):
    """Every regression metric is zero for identical all-zero inputs."""
    metric_by_name = {
        "mse": mean_squared_error,
        "mse_not_squared": partial(mean_squared_error, squared=False),
        "mae": mean_absolute_error,
        "msle": mean_squared_log_error,
    }
    y_true = np.zeros(1, dtype=float)
    y_pred = np.zeros(1, dtype=float)
    assert_almost_equal(metric_by_name[function](y_true, y_pred), 0.00, decimal=2)
@pytest.mark.parametrize(
    "inputs",
    [
        ([-1.0], [-1.0]),
        ([1.0, 2.0, 3.0], [1.0, -2.0, 3.0]),
        ([1.0, -2.0, 3.0], [1.0, 2.0, 3.0]),
    ],
)
def test_mean_squared_log_error_exceptions(inputs):
    """MSLE must reject negative values in either argument."""
    y_true, y_pred = (np.array(values) for values in inputs)
    with pytest.raises(ValueError):
        mean_squared_log_error(y_true, y_pred)
def test_multioutput_regression():
    """Multioutput regression metrics against hand-computed references."""
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    expected = (1.0 + 2.0 / 3) / 4.0
    assert_almost_equal(mean_squared_error(y_true, y_pred), expected)
    assert_almost_equal(
        mean_squared_error(y_true, y_pred, squared=False), 0.645, decimal=2
    )
    assert_almost_equal(
        mean_squared_log_error(y_true, y_pred), 0.200, decimal=2
    )
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    assert_almost_equal(mean_absolute_error(y_true, y_pred), expected)
def test_regression_metrics_multioutput_array():
    """Multioutput regression metrics with raw_values and weighted outputs.

    Checks per-column results (``multioutput="raw_values"``), a weighted
    combination of the columns, integer inputs, and MSLE's equivalence to
    MSE computed in log(1 + x) space.
    """
    # Per-column (raw_values) MSE/MAE against hand-computed references.
    y_true = np.array([[1, 2], [2.5, -1], [4.5, 3], [5, 7]], dtype=float)
    y_pred = np.array([[1, 1], [2, -1], [5, 4], [5, 6.5]], dtype=float)
    mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
    mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
    cp.testing.assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    cp.testing.assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    # Weighted combination of output columns (and the RMSE variant).
    weights = np.array([0.4, 0.6], dtype=float)
    msew = mean_squared_error(y_true, y_pred, multioutput=weights)
    rmsew = mean_squared_error(
        y_true, y_pred, multioutput=weights, squared=False
    )
    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(rmsew, 0.62, decimal=2)
    # Integer inputs: constant error of 1 in both columns.
    y_true = np.array([[0, 0]] * 4, dtype=int)
    y_pred = np.array([[1, 1]] * 4, dtype=int)
    mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
    mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
    cp.testing.assert_array_almost_equal(mse, [1.0, 1.0], decimal=2)
    cp.testing.assert_array_almost_equal(mae, [1.0, 1.0], decimal=2)
    # Per-column MSLE equals per-column MSE of log(1 + x).
    y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
    y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
    msle = mean_squared_log_error(y_true, y_pred, multioutput="raw_values")
    msle2 = mean_squared_error(
        np.log(1 + y_true), np.log(1 + y_pred), multioutput="raw_values"
    )
    cp.testing.assert_array_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize("function", ["mse", "mae"])
def test_regression_metrics_custom_weights(function):
    """Sample-weighted cuML regression metrics agree with sklearn."""
    y_true = np.array([1, 2, 2.5, -1], dtype=float)
    y_pred = np.array([1, 1, 2, -1], dtype=float)
    weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=float)
    if function == "mse":
        cuml_reg, sklearn_reg = mean_squared_error, sklearn_mse
    else:
        cuml_reg, sklearn_reg = mean_absolute_error, sklearn_mae
    score = cuml_reg(y_true, y_pred, sample_weight=weights)
    ref = sklearn_reg(y_true, y_pred, sample_weight=weights)
    assert_almost_equal(score, ref, decimal=2)
def test_mse_vs_msle_custom_weights():
    """Weighted MSLE equals weighted MSE computed in log(1 + x) space."""
    y_true = np.array([0.5, 2, 7, 6], dtype=float)
    y_pred = np.array([0.5, 1, 8, 8], dtype=float)
    weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=float)
    msle = mean_squared_log_error(y_true, y_pred, sample_weight=weights)
    log_true, log_pred = np.log(1 + y_true), np.log(1 + y_pred)
    msle_ref = mean_squared_error(log_true, log_pred, sample_weight=weights)
    assert_almost_equal(msle, msle_ref, decimal=2)
@pytest.mark.parametrize("use_handle", [True, False])
def test_entropy(use_handle):
    """Entropy of fair and biased coin labelings, in base 2 and base e."""
    handle, stream = get_handle(use_handle)
    # A fair coin carries exactly one bit of entropy.
    fair_coin = np.array([0, 1], dtype=np.int32)
    assert_almost_equal(entropy(fair_coin, base=2.0, handle=handle), 1.0)
    # A 9:1 biased coin is less uncertain.
    biased_coin = np.array(([0] * 9) + [1], dtype=np.int32)
    assert_almost_equal(
        entropy(biased_coin, base=2.0, handle=handle), 0.468995593
    )
    # Same distribution measured in nats (base e).
    assert_almost_equal(
        entropy(biased_coin, handle=handle), 0.32508297339144826
    )
@pytest.mark.parametrize("n_samples", [50, stress_param(500000)])
@pytest.mark.parametrize("base", [None, 2, 10, 50])
@pytest.mark.parametrize("use_handle", [True, False])
def test_entropy_random(n_samples, base, use_handle):
    """cuML entropy of a random labeling matches scipy's."""
    if not has_scipy():
        pytest.skip("Skipping test_entropy_random because Scipy is missing")
    from scipy.stats import entropy as sp_entropy

    handle, stream = get_handle(use_handle)
    clustering, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 1000, n_samples)
    )
    # scipy consumes (unnormalized) probabilities, cuML the labeling itself.
    label_counts = np.bincount(clustering)
    sp_S = sp_entropy(label_counts, base=base)
    S = entropy(np.array(clustering, dtype=np.int32), base, handle=handle)
    assert_almost_equal(S, sp_S, decimal=2)
def test_confusion_matrix():
    """3-class confusion matrix on a tiny hand-checked example."""
    y_true = cp.array([2, 0, 2, 2, 0, 1])
    y_pred = cp.array([0, 0, 2, 2, 0, 2])
    expected = cp.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
    cp.testing.assert_array_equal(confusion_matrix(y_true, y_pred), expected)
def test_confusion_matrix_binary():
    """Binary confusion matrix unpacks to (tn, fp, fn, tp) via ravel()."""
    y_true = cp.array([0, 1, 0, 1])
    y_pred = cp.array([1, 1, 1, 0])
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    cp.testing.assert_array_equal(
        cp.array([0, 2, 1, 1]), cp.array([tn, fp, fn, tp])
    )
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32])
@pytest.mark.parametrize("problem_type", ["binary", "multiclass"])
def test_confusion_matrix_random(n_samples, dtype, problem_type):
    """cuML confusion matrix matches sklearn on random labelings.

    Float32 labels exercise the ``convert_dtype`` path.
    """
    upper_range = 2 if problem_type == "binary" else 1000
    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype)
    )
    # Fix: `True if ... else False` was a redundant conditional.
    convert_dtype = dtype == np.float32
    cm = confusion_matrix(y_true, y_pred, convert_dtype=convert_dtype)
    ref = sk_confusion_matrix(y_true, y_pred)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize(
    "normalize, expected_results",
    [
        ("true", 0.333333333),
        ("pred", 0.333333333),
        ("all", 0.1111111111),
        (None, 2),
    ],
)
def test_confusion_matrix_normalize(normalize, expected_results):
    """Each normalization mode yields a constant-valued matrix here."""
    y_test = cp.array([0, 1, 2] * 6)
    # All 6 permutations of (0, 1, 2) flattened give a perfectly uniform
    # confusion matrix, so every entry is identical after normalization.
    y_pred = cp.array(list(chain(*permutations([0, 1, 2]))))
    cm = confusion_matrix(y_test, y_pred, normalize=normalize)
    cp.testing.assert_allclose(cm, cp.array(expected_results))
@pytest.mark.parametrize("labels", [(0, 1), (2, 1), (2, 1, 4, 7), (2, 20)])
def test_confusion_matrix_multiclass_subset_labels(labels):
    """Restricting or reordering the label set matches sklearn."""
    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 3, 10).astype(np.int32)
    )
    ref = sk_confusion_matrix(y_true, y_pred, labels=labels)
    cm = confusion_matrix(
        y_true, y_pred, labels=cp.array(labels, dtype=np.int32)
    )
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("weights_dtype", ["int", "float"])
def test_confusion_matrix_random_weights(n_samples, dtype, weights_dtype):
    """Sample-weighted confusion matrix matches sklearn."""
    y_true, y_pred, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 10, n_samples).astype(dtype)
    )
    weight_rng = np.random.RandomState(0)
    if weights_dtype == "int":
        sample_weight = weight_rng.randint(0, 10, n_samples)
    else:
        sample_weight = weight_rng.rand(n_samples)
    cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    ref = sk_confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
def test_roc_auc_score():
    """cuML ROC AUC equals sklearn's on two small fixed examples."""
    cases = [
        (np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8])),
        (np.array([0, 0, 1, 1, 0]), np.array([0.8, 0.4, 0.4, 0.8, 0.8])),
    ]
    for y_true, y_pred in cases:
        assert_almost_equal(
            roc_auc_score(y_true, y_pred),
            sklearn_roc_auc_score(y_true, y_pred),
        )
@pytest.mark.parametrize("n_samples", [50, 500000])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
def test_roc_auc_score_random(n_samples, dtype):
    """cuML ROC AUC matches sklearn on random binary labels/scores."""
    y_true, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 2, n_samples).astype(dtype)
    )
    y_pred, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 1000, n_samples).astype(dtype)
    )
    cuml_auc = roc_auc_score(y_true, y_pred)
    sklearn_auc = sklearn_roc_auc_score(y_true, y_pred)
    assert_almost_equal(cuml_auc, sklearn_auc)
def test_roc_auc_score_at_limits():
    """ROC AUC must reject single-class and continuous y_true inputs."""
    y_pred = np.array([0.0, 0.5, 1.0], dtype=float)

    single_class = np.array([0.0, 0.0, 0.0], dtype=float)
    err_msg = (
        "roc_auc_score cannot be used when "
        "only one class present in y_true. ROC AUC score "
        "is not defined in that case."
    )
    with pytest.raises(ValueError, match=err_msg):
        roc_auc_score(single_class, y_pred)

    continuous = np.array([0.0, 0.5, 1.0], dtype=float)
    err_msg = "Continuous format of y_true " "is not supported."
    with pytest.raises(ValueError, match=err_msg):
        roc_auc_score(continuous, y_pred)
@pytest.mark.skip(
    reason="shape discrepancy with sklearn 1.2, see "
    "https://github.com/rapidsai/cuml/issues/5164"
)
def test_precision_recall_curve():
    """cuML precision-recall curve matches sklearn on a fixed example.

    Fix: the skip ``reason`` previously concatenated the version and the
    URL with no separator ("...sklearn 1.2https://...").
    """
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    (
        precision_using_sk,
        recall_using_sk,
        thresholds_using_sk,
    ) = sklearn_precision_recall_curve(y_true, y_score)
    precision, recall, thresholds = precision_recall_curve(y_true, y_score)
    assert array_equal(precision, precision_using_sk)
    assert array_equal(recall, recall_using_sk)
    assert array_equal(thresholds, thresholds_using_sk)
def test_precision_recall_curve_at_limits():
    """precision_recall_curve rejects all-zero and continuous y_true."""
    y_pred = np.array([0.0, 0.5, 1.0], dtype=float)

    all_zero = np.array([0.0, 0.0, 0.0], dtype=float)
    err_msg = (
        "precision_recall_curve cannot be used when " "y_true is all zero."
    )
    with pytest.raises(ValueError, match=err_msg):
        precision_recall_curve(all_zero, y_pred)

    continuous = np.array([0.0, 0.5, 1.0], dtype=float)
    err_msg = "Continuous format of y_true " "is not supported."
    with pytest.raises(ValueError, match=err_msg):
        precision_recall_curve(continuous, y_pred)
@pytest.mark.skip(
    reason="shape discrepancy with sklearn 1.2, see "
    "https://github.com/rapidsai/cuml/issues/5164"
)
@pytest.mark.parametrize("n_samples", [50, 500000])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
def test_precision_recall_curve_random(n_samples, dtype):
    """cuML precision-recall curve matches sklearn on random inputs.

    Fix: the skip ``reason`` previously concatenated the version and the
    URL with no separator ("...sklearn 1.2https://...").
    """
    y_true, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 2, n_samples).astype(dtype)
    )
    y_score, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 1000, n_samples).astype(dtype)
    )
    (
        precision_using_sk,
        recall_using_sk,
        thresholds_using_sk,
    ) = sklearn_precision_recall_curve(y_true, y_score)
    precision, recall, thresholds = precision_recall_curve(y_true, y_score)
    assert array_equal(precision, precision_using_sk)
    assert array_equal(recall, recall_using_sk)
    assert array_equal(thresholds, thresholds_using_sk)
def test_log_loss():
    """cuML log loss equals sklearn's on two small fixed examples."""
    cases = [
        (np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8])),
        (np.array([0, 0, 1, 1, 0]), np.array([0.8, 0.4, 0.4, 0.8, 0.8])),
    ]
    for y_true, y_pred in cases:
        assert_almost_equal(
            log_loss(y_true, y_pred), sklearn_log_loss(y_true, y_pred)
        )
@pytest.mark.parametrize("n_samples", [500, 500000])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
def test_log_loss_random(n_samples, dtype):
    """cuML log loss matches sklearn on random 10-class problems."""
    y_true, _, _, _ = generate_random_labels(
        lambda rng: rng.randint(0, 10, n_samples).astype(dtype)
    )
    # Class-probability matrix: one column per class.
    y_pred, _, _, _ = generate_random_labels(
        lambda rng: rng.rand(n_samples, 10)
    )
    assert_almost_equal(
        log_loss(y_true, y_pred), sklearn_log_loss(y_true, y_pred)
    )
def test_log_loss_at_limits():
    """log_loss rejects shape mismatches and non-integer labels."""
    y_pred = np.array([0.0, 0.5, 1.0], dtype=float)

    three_classes = np.array([0.0, 1.0, 2.0], dtype=float)
    err_msg = "The shape of y_pred doesn't " "match the number of classes"
    with pytest.raises(ValueError, match=err_msg):
        log_loss(three_classes, y_pred)

    continuous = np.array([0.0, 0.5, 1.0], dtype=float)
    err_msg = "'y_true' can only have integer values"
    with pytest.raises(ValueError, match=err_msg):
        log_loss(continuous, y_pred)
def naive_kl_divergence_dist(X, Y):
    """Reference KL-divergence distance matrix between rows of X and Y."""

    def _row_distance(xi, yj):
        # Zero entries in yj would make the divergence undefined.
        contributions = np.where(yj != 0, scipy_kl_divergence(xi, yj), 0.0)
        return np.sum(contributions)

    return 0.5 * np.array(
        [[_row_distance(xi, yj) for yj in Y] for xi in X]
    )
def ref_dense_pairwise_dist(X, Y=None, metric=None, convert_dtype=False):
    """Reference dense pairwise distances.

    Uses sklearn where possible; falls back to scipy / naive helpers for
    the metrics sklearn does not support.
    """
    if Y is None:
        Y = X
    special_metrics = {
        "hellinger": naive_hellinger,
        "kldivergence": naive_kl_divergence_dist,
    }
    if metric == "jensenshannon":
        return scipy_pairwise_distances.cdist(X, Y, "jensenshannon")
    if metric in special_metrics:
        return special_metrics[metric](X, Y)
    return sklearn_pairwise_distances(X, Y, metric)
def prep_dense_array(array, metric, col_major=0):
    """Return the input prepared for a metric, in the requested layout.

    Distribution metrics require rows that sum to one, so those inputs are
    L1-normalized first; col_major selects Fortran (column-major) order.
    """
    distribution_metrics = ("hellinger", "jensenshannon", "kldivergence")
    if metric in distribution_metrics:
        out = preprocessing.normalize(array, norm="l1")
    else:
        out = array
    return np.asfortranarray(out) if col_major else out
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS.keys())
@pytest.mark.parametrize(
    "matrix_size", [(5, 4), (1000, 3), (2, 10), (500, 400)]
)
@pytest.mark.parametrize("is_col_major", [True, False])
def test_pairwise_distances(metric: str, matrix_size, is_col_major):
    """Exercise the pairwise_distances helper against dense references.

    Covers: single input, Y identical to X, Y with a different row count,
    mixed fp32/fp64 precision, integer input via convert_dtype, and
    case-sensitivity of the metric name.

    Fix: np.asfarray was removed in NumPy 2.0; np.asarray with an explicit
    dtype is the documented replacement.
    """
    rng = np.random.RandomState(0)
    compare_precision = 2 if metric == "nan_euclidean" else 4
    # Compare to sklearn, single input
    X = prep_dense_array(
        rng.random_sample(matrix_size), metric=metric, col_major=is_col_major
    )
    S = pairwise_distances(X, metric=metric)
    S2 = ref_dense_pairwise_dist(X, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Compare to sklearn, double input with same dimensions
    Y = X
    S = pairwise_distances(X, Y, metric=metric)
    S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Compare single and double inputs to each other
    S = pairwise_distances(X, metric=metric)
    S2 = pairwise_distances(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Compare to sklearn, with Y dim != X dim
    Y = prep_dense_array(
        rng.random_sample((2, matrix_size[1])),
        metric=metric,
        col_major=is_col_major,
    )
    S = pairwise_distances(X, Y, metric=metric)
    S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Change precision of one parameter
    Y = np.asarray(Y, dtype=np.float32)
    S = pairwise_distances(X, Y, metric=metric)
    S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # For fp32, compare at 2 decimals, (5 places less than the ~7 max)
    compare_precision = 2
    # Change precision of both parameters to float
    X = np.asarray(X, dtype=np.float32)
    Y = np.asarray(Y, dtype=np.float32)
    S = pairwise_distances(X, Y, metric=metric)
    S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Test sending an int type with convert_dtype=True
    if metric != "kldivergence":
        Y = prep_dense_array(
            rng.randint(10, size=Y.shape),
            metric=metric,
            col_major=is_col_major,
        )
        S = pairwise_distances(X, Y, metric=metric, convert_dtype=True)
        S2 = ref_dense_pairwise_dist(X, Y, metric=metric, convert_dtype=True)
        cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Test that uppercase on the metric name throws an error.
    with pytest.raises(ValueError):
        pairwise_distances(X, Y, metric=metric.capitalize())
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS.keys())
@pytest.mark.parametrize(
    "matrix_size",
    [
        unit_param((1000, 100)),
        quality_param((2000, 1000)),
        stress_param((10000, 10000)),
    ],
)
def test_pairwise_distances_sklearn_comparison(metric: str, matrix_size):
    """Compare pairwise_distances to the CPU references at larger sizes.

    The reference computation is skipped above 2M elements to keep the
    CPU-side run time manageable.

    Fixes: np.asfarray was removed in NumPy 2.0 (replaced by np.asarray
    with an explicit dtype); removed a leftover debug print().
    """
    rng = np.random.RandomState(1)
    element_count = matrix_size[0] * matrix_size[1]
    X = prep_dense_array(
        rng.random_sample(matrix_size), metric=metric, col_major=0
    )
    Y = prep_dense_array(
        rng.random_sample(matrix_size), metric=metric, col_major=0
    )
    # For fp64, compare at 10 decimals, (5 places less than the ~15 max)
    compare_precision = 10
    # Compare to sklearn, fp64
    S = pairwise_distances(X, Y, metric=metric)
    if element_count <= 2000000:
        S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
        cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # For fp32, compare at 4 decimals, (3 places less than the ~7 max)
    compare_precision = 4
    X = np.asarray(X, dtype=np.float32)
    Y = np.asarray(Y, dtype=np.float32)
    # Compare to sklearn, fp32
    S = pairwise_distances(X, Y, metric=metric)
    if element_count <= 2000000:
        S2 = ref_dense_pairwise_dist(X, Y, metric=metric)
        cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS.keys())
def test_pairwise_distances_one_dimension_order(metric: str):
    """Single-row inputs in every C/F memory-order combination.

    A size of 1 in either operand can break memory-order handling, so both
    "X has one row" and "Y has one row" are exercised for all four C/F
    order combinations.

    Fix: the second half of the previous version claimed to "switch which
    input has single dimension" but rebuilt the same (1, 4) / (10, 4)
    shapes; it now actually switches to (10, 4) / (1, 4).
    """
    rng = np.random.RandomState(2)
    # For fp64, compare at 13 decimals, (2 places less than the ~15 max)
    compare_precision = 13

    def _check_all_orders(x_rows, y_rows):
        # Compare cuML to the reference for every C/F-order combination.
        Xc = prep_dense_array(
            rng.random_sample((x_rows, 4)), metric=metric, col_major=0
        )
        Yc = prep_dense_array(
            rng.random_sample((y_rows, 4)), metric=metric, col_major=0
        )
        Xf = np.asfortranarray(Xc)
        Yf = np.asfortranarray(Yc)
        for A in (Xc, Xf):
            for B in (Yc, Yf):
                S = pairwise_distances(A, B, metric=metric)
                S2 = ref_dense_pairwise_dist(A, B, metric=metric)
                cp.testing.assert_array_almost_equal(
                    S, S2, decimal=compare_precision
                )

    _check_all_orders(1, 10)
    # Switch which input has the single dimension
    _check_all_orders(10, 1)
@pytest.mark.parametrize("metric", ["haversine"])
def test_pairwise_distances_unsuppored_metrics(metric):
    """Metrics cuML does not support must raise ValueError."""
    rng = np.random.RandomState(3)
    data = rng.random_sample((5, 4))
    with pytest.raises(ValueError):
        pairwise_distances(data, metric=metric)
def test_pairwise_distances_exceptions():
    """Invalid dtypes, mismatched dtypes/shapes and bad metric names raise.

    Fix: np.asfarray was removed in NumPy 2.0; np.asarray with an explicit
    dtype is the documented replacement.
    """
    rng = np.random.RandomState(4)
    X_int = rng.randint(10, size=(5, 4))
    X_double = rng.random_sample((5, 4))
    X_float = np.asarray(X_double, dtype=np.float32)
    X_bool = rng.choice([True, False], size=(5, 4))
    # Test int inputs (only float/double accepted at this time)
    with pytest.raises(TypeError):
        pairwise_distances(X_int, metric="euclidean")
    # Test second int inputs (should not have an exception with
    # convert_dtype=True)
    pairwise_distances(X_double, X_int, metric="euclidean")
    # Test bool inputs (only float/double accepted at this time)
    with pytest.raises(TypeError):
        pairwise_distances(X_bool, metric="euclidean")
    # Test sending different types with convert_dtype=False
    with pytest.raises(TypeError):
        pairwise_distances(
            X_double, X_float, metric="euclidean", convert_dtype=False
        )
    # Invalid metric name
    with pytest.raises(ValueError):
        pairwise_distances(X_double, metric="Not a metric")
    # Invalid dimensions
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((5, 7))
    with pytest.raises(ValueError):
        pairwise_distances(X, Y, metric="euclidean")
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("use_global", [True, False])
def test_pairwise_distances_output_types(input_type, output_type, use_global):
    """The result container must follow the requested output_type."""
    state = np.random.RandomState(5)
    X = state.random_sample((100, 100))
    Y = state.random_sample((100, 100))

    if input_type == "cudf":
        X = cudf.DataFrame(X)
        Y = cudf.DataFrame(Y)
    elif input_type == "cupy":
        X = cp.asarray(X)
        Y = cp.asarray(Y)

    # When exercising the global manager, pass None so the context wins.
    explicit_type = None if use_global else output_type

    with cuml.using_output_type(output_type):
        result = pairwise_distances(
            X, Y, metric="euclidean", output_type=explicit_type
        )

        if output_type == "input":
            assert isinstance(result, type(X))
        elif output_type == "cudf":
            assert isinstance(result, cudf.DataFrame)
        elif output_type == "numpy":
            assert isinstance(result, np.ndarray)
        elif output_type == "cupy":
            assert isinstance(result, cp.ndarray)
def naive_inner(X, Y, metric=None):
    """Reference inner-product "distance": X times Y transposed.

    `metric` is accepted only so the signature matches the other
    reference-distance helpers; it is ignored.
    """
    return X @ Y.T
def naive_hellinger(X, Y, metric=None):
    """Reference Hellinger distance via Euclidean distance in sqrt-space."""
    root_X = np.sqrt(X)
    root_Y = np.sqrt(Y)
    euclidean = sklearn_pairwise_distances(root_X, root_Y, metric="euclidean")
    return euclidean / np.sqrt(2)
def prepare_sparse_data(size0, size1, dtype, density, metric):
    """Build a random CSR matrix; L1-normalize rows for the hellinger metric."""
    matrix = cupyx.scipy.sparse.random(
        size0, size1, dtype=dtype, random_state=123, density=density
    ).tocsr()
    if metric == "hellinger":
        # Hellinger expects each row to be a probability distribution.
        matrix = csr_row_normalize_l1(matrix)
    return matrix
def ref_sparse_pairwise_dist(X, Y=None, metric=None):
    """Reference distances: sklearn where possible, naive helpers otherwise."""
    if Y is None:
        Y = X
    # sklearn accepts sparse input only for this subset of metrics;
    # densify for everything else before handing the data over.
    sparse_capable = {
        "cityblock",
        "cosine",
        "euclidean",
        "l1",
        "l2",
        "manhattan",
        "haversine",
    }
    if metric not in sparse_capable:
        X = X.todense()
        Y = Y.todense()
    # Move device data to host for the CPU reference implementations.
    X = X.get()
    Y = Y.get()
    if metric == "inner_product":
        return naive_inner(X, Y, metric)
    if metric == "hellinger":
        return naive_hellinger(X, Y)
    return sklearn_pairwise_distances(X, Y, metric)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_SPARSE_METRICS.keys())
@pytest.mark.parametrize(
    "matrix_size, density", [((3, 3), 0.7), ((5, 40), 0.2)]
)
# ignoring boolean conversion warning for both cuml and sklearn
@pytest.mark.filterwarnings("ignore:(.*)converted(.*)::")
def test_sparse_pairwise_distances_corner_cases(
    metric: str, matrix_size, density: float
):
    """Exercise sparse_pairwise_distances on small/degenerate inputs.

    Fix: the "double input" and "Y dim != X dim" sections previously
    called the dense ``pairwise_distances`` helper; this test targets the
    sparse API, so every comparison now goes through
    ``sparse_pairwise_distances``.
    """
    # For fp64, compare at 7 decimals, (5 places less than the ~15 max)
    compare_precision = 7

    # Compare to sklearn, single input
    X = prepare_sparse_data(
        matrix_size[0], matrix_size[1], cp.float64, density, metric
    )
    S = sparse_pairwise_distances(X, metric=metric)
    S2 = ref_sparse_pairwise_dist(X, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # Compare to sklearn, double input with same dimensions
    Y = X
    S = sparse_pairwise_distances(X, Y, metric=metric)
    S2 = ref_sparse_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # Compare to sklearn, with Y dim != X dim
    Y = prepare_sparse_data(2, matrix_size[1], cp.float64, density, metric)
    S = sparse_pairwise_distances(X, Y, metric=metric)
    S2 = ref_sparse_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # Change precision of one parameter, should work (convert_dtype=True)
    Y = Y.astype(cp.float32)
    S = sparse_pairwise_distances(X, Y, metric=metric)
    S2 = ref_sparse_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # For fp32, compare at 3 decimals, (4 places less than the ~7 max)
    compare_precision = 3

    # Change precision of both parameters to float
    X = prepare_sparse_data(
        matrix_size[0], matrix_size[1], cp.float32, density, metric
    )
    Y = prepare_sparse_data(
        matrix_size[0], matrix_size[1], cp.float32, density, metric
    )
    S = sparse_pairwise_distances(X, Y, metric=metric)
    S2 = ref_sparse_pairwise_dist(X, Y, metric=metric)
    cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # Test sending an int type (convert_dtype=True)
    if metric != "hellinger":
        compare_precision = 2
        Y = Y * 100
        Y.data = Y.data.astype(cp.int32)
        S = sparse_pairwise_distances(X, Y, metric=metric)
        S2 = ref_sparse_pairwise_dist(X, Y, metric=metric)
        cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)

    # Test that uppercase on the metric name throws an error.
    with pytest.raises(ValueError):
        sparse_pairwise_distances(X, Y, metric=metric.capitalize())
def test_sparse_pairwise_distances_exceptions():
    """Invalid dtypes, metric names and shape mismatches must raise."""
    if not has_scipy():
        pytest.skip(
            "Skipping sparse_pairwise_distances_exceptions "
            "if Scipy is missing"
        )
    from scipy import sparse

    # Build an "integer" sparse matrix by scaling a random float matrix
    # and then overwriting its dtype attribute.
    # NOTE(review): assigning cp.int32 to a scipy matrix's `.dtype` relies
    # on scipy accepting the attribute write; the data buffer itself is
    # not converted — confirm this is the intended way to trigger the
    # integer-rejection path.
    X_int = (
        sparse.random(5, 4, dtype=np.float32, random_state=123, density=0.3)
        * 10
    )
    X_int.dtype = cp.int32
    X_bool = sparse.random(5, 4, dtype=bool, random_state=123, density=0.3)
    X_double = cupyx.scipy.sparse.random(
        5, 4, dtype=cp.float64, random_state=123, density=0.3
    )
    X_float = cupyx.scipy.sparse.random(
        5, 4, dtype=cp.float32, random_state=123, density=0.3
    )

    # Test int inputs (only float/double accepted at this time)
    with pytest.raises(TypeError):
        sparse_pairwise_distances(X_int, metric="euclidean")

    # Test second int inputs (should not have an exception with
    # convert_dtype=True)
    sparse_pairwise_distances(X_double, X_int, metric="euclidean")

    # Test bool inputs (only float/double accepted at this time)
    with pytest.raises(TypeError):
        sparse_pairwise_distances(X_bool, metric="euclidean")

    # Test sending different types with convert_dtype=False
    with pytest.raises(TypeError):
        sparse_pairwise_distances(
            X_double, X_float, metric="euclidean", convert_dtype=False
        )

    # Invalid metric name
    with pytest.raises(ValueError):
        sparse_pairwise_distances(X_double, metric="Not a metric")

    # Invalid dimensions: Y has a different number of columns than X.
    X = cupyx.scipy.sparse.random(5, 4, dtype=np.float32, random_state=123)
    Y = cupyx.scipy.sparse.random(5, 7, dtype=np.float32, random_state=123)
    with pytest.raises(ValueError):
        sparse_pairwise_distances(X, Y, metric="euclidean")
@pytest.mark.parametrize(
    "metric",
    [
        metric
        if metric != "hellinger"
        else pytest.param(
            metric,
            marks=pytest.mark.xfail(
                reason="intermittent failure (Issue #4354)"
            ),
        )
        for metric in PAIRWISE_DISTANCE_SPARSE_METRICS.keys()
    ],
)
@pytest.mark.parametrize(
    "matrix_size,density",
    [
        unit_param((1000, 100), 0.4),
        unit_param((20, 10000), 0.01),
        quality_param((2000, 1000), 0.05),
        stress_param((10000, 10000), 0.01),
    ],
)
# ignoring boolean conversion warning for both cuml and sklearn
@pytest.mark.filterwarnings("ignore:(.*)converted(.*)::")
def test_sparse_pairwise_distances_sklearn_comparison(
    metric: str, matrix_size, density: float
):
    """Compare sparse_pairwise_distances to the reference on larger inputs."""
    n_elements = matrix_size[0] * matrix_size[1]
    # Beyond this size the CPU reference is skipped.
    reference_limit = 2000000

    lhs = prepare_sparse_data(
        matrix_size[0], matrix_size[1], cp.float64, density, metric
    )
    rhs = prepare_sparse_data(
        matrix_size[0], matrix_size[1], cp.float64, density, metric
    )

    # fp64: compare at 9 decimals (6 fewer than the ~15 available).
    distances = sparse_pairwise_distances(lhs, rhs, metric=metric)
    if n_elements <= reference_limit:
        expected = ref_sparse_pairwise_dist(lhs, rhs, metric=metric)
        cp.testing.assert_array_almost_equal(distances, expected, decimal=9)

    # fp32: compare at 3 decimals (4 fewer than the ~7 available).
    lhs = lhs.astype(np.float32)
    rhs = rhs.astype(np.float32)
    distances = sparse_pairwise_distances(lhs, rhs, metric=metric)
    if n_elements <= reference_limit:
        expected = ref_sparse_pairwise_dist(lhs, rhs, metric=metric)
        cp.testing.assert_array_almost_equal(distances, expected, decimal=3)
@pytest.mark.parametrize("input_type", ["numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
def test_sparse_pairwise_distances_output_types(input_type, output_type):
    """The result container must follow the globally configured output type."""
    if not has_scipy():
        pytest.skip("Skipping sparse_pairwise_distances if Scipy is missing")
    import scipy

    if input_type == "cupy":
        lhs = cupyx.scipy.sparse.random(
            100, 100, dtype=cp.float64, random_state=123
        )
        rhs = cupyx.scipy.sparse.random(
            100, 100, dtype=cp.float64, random_state=456
        )
    else:
        lhs = scipy.sparse.random(100, 100, dtype=np.float64, random_state=123)
        rhs = scipy.sparse.random(100, 100, dtype=np.float64, random_state=456)

    expected_container = {
        "cudf": cudf.DataFrame,
        "numpy": np.ndarray,
        "cupy": cp.ndarray,
    }

    # Drive the output type solely through the global manager.
    with cuml.using_output_type(output_type):
        result = sparse_pairwise_distances(lhs, rhs, metric="euclidean")
        assert isinstance(result, expected_container[output_type])
@pytest.mark.xfail(
    reason="Temporarily disabling this test. " "See rapidsai/cuml#3569"
)
@pytest.mark.parametrize(
    "nrows, ncols, n_info",
    [
        unit_param(30, 10, 7),
        quality_param(5000, 100, 50),
        stress_param(500000, 200, 100),
    ],
)
@pytest.mark.parametrize("input_type", ["cudf", "cupy"])
@pytest.mark.parametrize("n_classes", [2, 5])
def test_hinge_loss(nrows, ncols, n_info, input_type, n_classes):
    """cuML hinge loss must agree with sklearn's on the same decisions."""
    n_train = np.int32(nrows * 0.8)
    X, y = make_classification(
        n_samples=nrows,
        n_features=ncols,
        n_clusters_per_class=1,
        n_informative=n_info,
        random_state=123,
        n_classes=n_classes,
    )
    if input_type == "cudf":
        X = cudf.DataFrame(X)
        y = cudf.Series(y)
    elif input_type == "cupy":
        X = cp.asarray(X)
        y = cp.asarray(y)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=n_train, shuffle=True
    )
    classifier = cu_log()
    classifier.fit(X_train, y_train)
    decisions = classifier.decision_function(X_test)
    cu_loss = cuml_hinge(y_test, decisions.T, labels=cp.unique(y))

    # Move everything to host numpy before calling sklearn.
    if input_type == "cudf":
        y_test = y_test.to_numpy()
        y = y.to_numpy()
        decisions = cp.asnumpy(decisions.values)
    elif input_type == "cupy":
        y = cp.asnumpy(y)
        y_test = cp.asnumpy(y_test)
        decisions = cp.asnumpy(decisions)

    sk_loss = sk_hinge(y_test, decisions.T, labels=np.unique(y))

    # The two losses must match closely.
    cp.testing.assert_array_almost_equal(cu_loss, sk_loss)
@pytest.mark.parametrize(
    "nfeatures",
    [
        unit_param(10),
        unit_param(300),
        unit_param(30000),
        stress_param(500000000),
    ],
)
@pytest.mark.parametrize("input_type", ["cudf", "cupy"])
@pytest.mark.parametrize("dtypeP", [cp.float32, cp.float64])
@pytest.mark.parametrize("dtypeQ", [cp.float32, cp.float64])
def test_kl_divergence(nfeatures, input_type, dtypeP, dtypeQ):
    """cuML KL divergence must match scipy's entropy on two distributions."""
    if not has_scipy():
        pytest.skip("Skipping test_kl_divergence because Scipy is missing")
    from scipy.stats import entropy as sp_entropy

    rng = np.random.RandomState(5)
    # Draw two random vectors and normalize them into distributions.
    P = rng.random_sample((nfeatures))
    Q = rng.random_sample((nfeatures))
    P /= P.sum()
    Q /= Q.sum()
    reference = sp_entropy(P, Q)

    if input_type == "cudf":
        P = cudf.DataFrame(P, dtype=dtypeP)
        Q = cudf.DataFrame(Q, dtype=dtypeQ)
    elif input_type == "cupy":
        P = cp.asarray(P, dtype=dtypeP)
        Q = cp.asarray(Q, dtype=dtypeQ)

    if dtypeP == dtypeQ:
        result = cu_kl_divergence(P, Q, convert_dtype=False)
    else:
        # Mixed precision must raise without conversion, then succeed
        # once dtype conversion is allowed.
        with pytest.raises(TypeError):
            cu_kl_divergence(P, Q, convert_dtype=False)
        result = cu_kl_divergence(P, Q)

    cp.testing.assert_array_almost_equal(result, reference)
def test_mean_squared_error():
    """A column vector and its squeezed form must compare as identical."""
    column = np.array([[1], [2], [3]])
    flat = column.squeeze()
    assert mean_squared_error(column, flat) == 0
    assert mean_squared_error(flat, column) == 0
def test_mean_squared_error_cudf_series():
    """cudf.Series inputs must give the same MSE as their raw device arrays."""
    lhs = cudf.Series([1.1, 2.2, 3.3, 4.4])
    rhs = cudf.Series([0.1, 0.2, 0.3, 0.4])
    assert mean_squared_error(lhs, rhs) == mean_squared_error(
        lhs.values, rhs.values
    )
@pytest.mark.parametrize("beta", [0.0, 0.5, 1.0, 2.0])
def test_v_measure_score(beta):
    """cuML v-measure must agree with sklearn for every beta."""
    truth = np.array([0, 0, 1, 1], dtype=np.int32)
    guess = np.array([1, 0, 1, 1], dtype=np.int32)
    score = v_measure_score(truth, guess, beta=beta)
    expected = sklearn_v_measure_score(truth, guess, beta=beta)
    assert_almost_equal(score, expected)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_kneighbors_regressor.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import array_equal
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import cpu_only_import_from
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_random_state
from sklearn.datasets import make_blobs
from cuml.neighbors import KNeighborsRegressor as cuKNN
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
assert_array_almost_equal = cpu_only_import_from(
"numpy.testing", "assert_array_almost_equal"
)
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
def test_kneighbors_regressor(
    n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0
):
    """KNN regression on a smooth target must stay within tolerance."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Target: the Euclidean norm of each sample, normalized to [0, 1].
    y = np.sqrt((X**2).sum(1))
    y /= y.max()
    expected = y[:n_test_pts]

    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X, y)
    # Perturb queries slightly so they are not exact training points.
    jitter = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    predicted = model.predict(X[:n_test_pts] + jitter)
    assert np.all(abs(predicted - expected) < 0.3)
def test_kneighborsRegressor_multioutput_uniform_weight():
    """Multi-output prediction must equal the mean of each point's neighbors."""
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    model = cuKNN()
    model.fit(X_train, y_train)

    neighbor_ids = model.kneighbors(X_test, return_distance=False).astype(
        np.int32
    )
    # Uniform weights -> the prediction is the plain neighbor average.
    manual_pred = np.array(
        [np.mean(y_train[ids], axis=0) for ids in neighbor_ids]
    )
    model_pred = model.predict(X_test)

    assert model_pred.shape[0] == y_test.shape[0]
    assert manual_pred.shape == y_test.shape
    assert_array_almost_equal(model_pred, manual_pred)
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("nrows", [1000, 10000])
@pytest.mark.parametrize("ncols", [50, 100])
@pytest.mark.parametrize("n_neighbors", [2, 5, 10])
@pytest.mark.parametrize("n_clusters", [2, 5, 10])
def test_score(nrows, ncols, n_neighbors, n_clusters, datatype):
    """Score on tight, well-separated blobs should be near-perfect."""
    # make_blobs produces clean neighborhoods to average over.
    features, labels = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        cluster_std=0.01,
        n_features=ncols,
        random_state=0,
    )
    features = features.astype(np.float32)
    labels = labels.astype(np.float32)

    if datatype == "dataframe":
        features = cudf.DataFrame(features)
        labels = cudf.DataFrame(labels.reshape(nrows, 1))

    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(features, labels)
    assert model.score(features, labels) >= 0.9999
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_score_dtype(dtype):
    """Predictions must preserve the training dtype and score ~1 on blobs."""
    features, labels = make_blobs(
        n_samples=1000,
        centers=2,
        cluster_std=0.01,
        n_features=50,
        random_state=0,
    )
    features = features.astype(dtype)
    labels = labels.astype(dtype)

    model = cuKNN(n_neighbors=5)
    model.fit(features, labels)

    predictions = model.predict(features)
    assert predictions.dtype == dtype
    assert model.score(features, labels) >= 0.9999
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
def test_predict_multioutput(input_type, output_type):
    """Multi-output predict must honor output_type and recover exact labels."""
    X = np.array([[0, 0, 1, 0], [1, 0, 1, 0]]).astype(np.float32)
    y = np.array([[15, 2], [5, 4]]).astype(np.int32)

    if input_type == "cudf":
        X = cudf.DataFrame(X)
        y = cudf.DataFrame(y)
    elif input_type == "cupy":
        X = cp.asarray(X)
        y = cp.asarray(y)

    model = cuKNN(n_neighbors=1, output_type=output_type)
    model.fit(X, y)
    prediction = model.predict(X)

    expected_container = {
        "cudf": cudf.DataFrame,
        "numpy": np.ndarray,
        "cupy": cp.ndarray,
    }[output_type]
    assert isinstance(prediction, expected_container)

    # With one neighbor and the training set as queries, labels round-trip.
    assert array_equal(prediction.astype(np.int32), y)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_dataset_generator_types.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.datasets import (
make_arima,
make_blobs,
make_classification,
make_regression,
)
import cuml
import pytest
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
numba = gpu_only_import("numba")
np = cpu_only_import("numpy")
TEST_OUTPUT_TYPES = (
(None, (cp.ndarray, cp.ndarray)), # Default is cupy if None is used
("numpy", (np.ndarray, np.ndarray)),
("cupy", (cp.ndarray, cp.ndarray)),
(
"numba",
(
numba.cuda.devicearray.DeviceNDArrayBase,
numba.cuda.devicearray.DeviceNDArrayBase,
),
),
("cudf", (cudf.DataFrame, cudf.Series)),
)
GENERATORS = (make_blobs, make_classification, make_regression)
@pytest.mark.parametrize("generator", GENERATORS)
@pytest.mark.parametrize("output_str,output_types", TEST_OUTPUT_TYPES)
def test_xy_output_type(generator, output_str, output_types):
    """Each generated array must match the configured output type."""
    with cuml.using_output_type(output_str):
        outputs = generator(n_samples=10, random_state=0)

    # Check X and y against their expected container types.
    for value, expected in zip(outputs, output_types):
        assert isinstance(value, expected)
@pytest.mark.parametrize("output_str,output_types", TEST_OUTPUT_TYPES)
def test_time_series_label_output_type(output_str, output_types):
    """make_arima's series output must match the configured output type."""
    with cuml.using_output_type(output_str):
        series = make_arima(n_obs=10, random_state=0)[0]
    # Index 1 of output_types is the expected series/label container.
    assert isinstance(series, output_types[1])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_sgd.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_blobs
from cuml.solvers import SGD as cumlSGD
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
@pytest.mark.parametrize("lrate", ["constant", "invscaling", "adaptive"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("penalty", ["none", "l1", "l2", "elasticnet"])
@pytest.mark.parametrize("loss", ["hinge", "log", "squared_loss"])
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
def test_sgd(dtype, lrate, penalty, loss, datatype):
    """SGD must classify two well-separated blobs for every configuration."""
    X, y = make_blobs(n_samples=100, n_features=3, centers=2, random_state=0)
    X = X.astype(dtype)
    y = y.astype(dtype)

    # Margin-based losses expect labels in {-1, +1}.
    if loss in ("hinge", "squared_loss"):
        y[y == 0] = -1

    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
    if datatype == "dataframe":
        X_train = cudf.DataFrame(X_train)
        X_test = cudf.DataFrame(X_test)
        y_train = cudf.Series(y_train)

    solver = cumlSGD(
        learning_rate=lrate,
        eta0=0.005,
        epochs=2000,
        fit_intercept=True,
        batch_size=4096,
        tol=0.0,
        penalty=penalty,
        loss=loss,
        power_t=0.4,
    )
    solver.fit(X_train, y_train)
    predictions = solver.predict(X_test)

    if datatype == "dataframe":
        assert isinstance(predictions, cudf.Series)
        predictions = predictions.to_numpy()
    else:
        assert isinstance(predictions, np.ndarray)

    # Map raw outputs back to class labels. Exact accuracy is not the
    # point — only that the loss drove outputs toward the right classes.
    if loss == "log":
        predictions[predictions < 0.5] = 0
        predictions[predictions >= 0.5] = 1
    elif loss == "squared_loss":
        predictions[predictions < 0] = -1
        predictions[predictions >= 0] = 1

    assert np.array_equal(predictions, y_test)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
def test_sgd_default(dtype, datatype):
    """SGD with default settings (squared loss) must separate two blobs."""
    X, y = make_blobs(n_samples=100, n_features=3, centers=2, random_state=0)
    X = X.astype(dtype)
    y = y.astype(dtype)

    # The default loss is squared_loss, which expects {-1, +1} labels.
    y[y == 0] = -1

    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
    if datatype == "dataframe":
        X_train = cudf.DataFrame(X_train)
        X_test = cudf.DataFrame(X_test)
        y_train = cudf.Series(y_train)

    solver = cumlSGD()
    solver.fit(X_train, y_train)
    predictions = solver.predict(X_test)

    if datatype == "dataframe":
        assert isinstance(predictions, cudf.Series)
        predictions = predictions.to_numpy()
    else:
        assert isinstance(predictions, np.ndarray)

    # Threshold raw regression outputs back to the two class labels;
    # only the sign of the prediction matters here.
    predictions[predictions < 0] = -1
    predictions[predictions >= 0] = 1
    assert np.array_equal(predictions, y_test)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_naive_bayes.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
import math
from sklearn.naive_bayes import GaussianNB as skGNB
from sklearn.naive_bayes import ComplementNB as skComplementNB
from sklearn.naive_bayes import CategoricalNB as skCNB
from sklearn.naive_bayes import BernoulliNB as skBNB
from sklearn.naive_bayes import MultinomialNB as skNB
from numpy.testing import assert_array_almost_equal, assert_raises
from numpy.testing import assert_allclose, assert_array_equal
from cuml.datasets import make_classification
from cuml.internals.input_utils import sparse_scipy_to_cp
from cuml.naive_bayes import GaussianNB
from cuml.naive_bayes import ComplementNB
from cuml.naive_bayes import CategoricalNB
from cuml.naive_bayes import BernoulliNB
from cuml.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@pytest.mark.parametrize("x_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_sparse_integral_dtype_fails(x_dtype, y_dtype, nlp_20news):
    """Integer-typed sparse features must be rejected by fit and predict."""
    X, y = nlp_20news
    X = X.astype(x_dtype)
    y = y.astype(y_dtype)

    classifier = MultinomialNB()

    # Integer features are rejected at fit time...
    with pytest.raises(ValueError):
        classifier.fit(X, y)

    # ...while float features train fine...
    X = X.astype(cp.float32)
    classifier.fit(X, y)

    # ...and integer features are rejected again at predict time.
    X = X.astype(x_dtype)
    with pytest.raises(ValueError):
        classifier.predict(X)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64, cp.int32])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial_basic_fit_predict_dense_numpy(
    x_dtype, y_dtype, nlp_20news
):
    """
    Cupy Test

    Fix: the reference predictions were taken from the cuML model
    (``model``) instead of the fitted sklearn model (``modelsk``), so the
    sklearn comparison was never actually exercised.
    """
    X, y = nlp_20news
    n_rows = 500
    n_cols = 10000

    X = sparse_scipy_to_cp(X, cp.float32).tocsr()[:n_rows, :n_cols]
    y = y[:n_rows].astype(y_dtype)

    model = MultinomialNB()
    # Train on a dense, contiguous host copy with the requested dtype.
    model.fit(np.ascontiguousarray(cp.asnumpy(X.todense()).astype(x_dtype)), y)
    y_hat = model.predict(X).get()

    modelsk = skNB()
    modelsk.fit(X.get(), y.get())
    y_sk = modelsk.predict(X.get())

    assert_allclose(y_hat, y_sk)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.float32, cp.float64])
def test_multinomial_partial_fit(x_dtype, y_dtype, nlp_20news):
    """Feed the 20news corpus in chunks via partial_fit and check accuracy."""
    chunk_size = 500
    X, y = nlp_20news
    X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
    y = y.astype(y_dtype)
    X = X.tocsr()
    model = MultinomialNB()
    classes = np.unique(y)

    total_fit = 0

    for i in range(math.ceil(X.shape[0] / chunk_size)):
        upper = i * chunk_size + chunk_size
        # `upper == -1` marks the final (possibly short) chunk; the slice
        # then runs to the end of the matrix.
        if upper > X.shape[0]:
            upper = -1

        if upper > 0:
            x = X[i * chunk_size : upper]
            y_c = y[i * chunk_size : upper]
        else:
            x = X[i * chunk_size :]
            y_c = y[i * chunk_size :]

        # `classes` must be passed so the first call can allocate
        # per-class state.
        model.partial_fit(x, y_c, classes=classes)

        total_fit += upper - (i * chunk_size)

        if upper == -1:
            break

    y_hat = model.predict(X)

    y_hat = cp.asnumpy(y_hat)
    y = cp.asnumpy(y)

    assert accuracy_score(y, y_hat) >= 0.924
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
def test_multinomial(x_dtype, y_dtype, nlp_20news):
    """MultinomialNB must track sklearn's probabilities and score on 20news."""
    X, y = nlp_20news
    cu_X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
    cu_y = y.astype(y_dtype)
    cu_X = cu_X.tocsr()
    y = y.get()

    gpu_model = MultinomialNB()
    cpu_model = skNB()

    gpu_model.fit(cu_X, cu_y)
    cpu_model.fit(X, y)

    gpu_log_proba = gpu_model.predict_log_proba(cu_X).get()
    cpu_log_proba = cpu_model.predict_log_proba(X)
    gpu_proba = gpu_model.predict_proba(cu_X).get()
    cpu_proba = cpu_model.predict_proba(X)

    gpu_score = gpu_model.score(cu_X, cu_y)
    cpu_score = cpu_model.score(X, y)

    y_hat = cp.asnumpy(gpu_model.predict(cu_X))
    cu_y = cp.asnumpy(cu_y)

    # Allow a small tolerance around the sklearn score.
    THRES = 1e-4

    assert_allclose(gpu_log_proba, cpu_log_proba, atol=1e-2, rtol=1e-2)
    assert_allclose(gpu_proba, cpu_proba, atol=1e-6, rtol=1e-2)
    assert cpu_score - THRES <= gpu_score <= cpu_score + THRES
    assert accuracy_score(y, y_hat) >= 0.924
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("is_sparse", [True, False])
def test_bernoulli(x_dtype, y_dtype, is_sparse, nlp_20news):
    """BernoulliNB must match sklearn's fitted state, probabilities, score."""
    X, y = nlp_20news
    n_rows = 500
    n_cols = 20000

    X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
    y = y.astype(y_dtype)
    X = X.tocsr()[:n_rows, :n_cols]
    y = y[:n_rows]
    if not is_sparse:
        X = X.todense()

    cpu_model = skBNB()
    gpu_model = BernoulliNB()

    cpu_model.fit(X.get(), y.get())
    gpu_model.fit(X, y)

    cpu_score = cpu_model.score(X.get(), y.get())
    gpu_score = gpu_model.score(X, y)
    gpu_log_proba = gpu_model.predict_log_proba(X).get()
    cpu_log_proba = cpu_model.predict_log_proba(X.get())

    # Allow a small tolerance around the sklearn score.
    THRES = 1e-3

    assert_array_equal(cpu_model.class_count_, gpu_model.class_count_.get())
    assert_allclose(
        cpu_model.class_log_prior_, gpu_model.class_log_prior_.get(), 1e-6
    )
    assert_allclose(gpu_log_proba, cpu_log_proba, atol=1e-2, rtol=1e-2)
    assert cpu_score - THRES <= gpu_score <= cpu_score + THRES
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.float32, cp.float64])
def test_bernoulli_partial_fit(x_dtype, y_dtype, nlp_20news):
    """Chunked partial_fit must produce the same predictions as sklearn's."""
    chunk_size = 500
    n_rows = 1500
    X, y = nlp_20news
    X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
    y = y.astype(y_dtype)[:n_rows]
    X = X.tocsr()[:n_rows]

    model = BernoulliNB()
    modelsk = skBNB()

    classes = np.unique(y)

    for i in range(math.ceil(X.shape[0] / chunk_size)):
        upper = i * chunk_size + chunk_size
        # `upper == -1` marks the final (possibly short) chunk; the slice
        # then runs to the end of the data.
        if upper > X.shape[0]:
            upper = -1

        if upper > 0:
            x = X[i * chunk_size : upper]
            y_c = y[i * chunk_size : upper]
        else:
            x = X[i * chunk_size :]
            y_c = y[i * chunk_size :]

        # Feed the identical chunk to both implementations; `classes` is
        # required so the first call can allocate per-class state.
        model.partial_fit(x, y_c, classes=classes)
        modelsk.partial_fit(x.get(), y_c.get(), classes=classes.get())
        if upper == -1:
            break

    y_hat = model.predict(X).get()
    y_sk = modelsk.predict(X.get())

    assert_allclose(y_hat, y_sk)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("is_sparse", [True, False])
@pytest.mark.parametrize("norm", [True, False])
def test_complement(x_dtype, y_dtype, is_sparse, norm, nlp_20news):
    """ComplementNB must match sklearn's fitted state, probabilities, score."""
    X, y = nlp_20news
    n_rows = 500
    n_cols = 20000

    X = sparse_scipy_to_cp(X, x_dtype).astype(x_dtype)
    y = y.astype(y_dtype)
    X = X.tocsr()[:n_rows, :n_cols]
    y = y[:n_rows]
    if not is_sparse:
        X = X.todense()

    cpu_model = skComplementNB(norm=norm)
    gpu_model = ComplementNB(norm=norm)

    cpu_model.fit(X.get(), y.get())
    gpu_model.fit(X, y)

    cpu_score = cpu_model.score(X.get(), y.get())
    gpu_score = gpu_model.score(X, y)
    gpu_log_proba = gpu_model.predict_log_proba(X).get()
    cpu_log_proba = cpu_model.predict_log_proba(X.get())

    # Allow a small tolerance around the sklearn score.
    THRES = 1e-3

    assert_array_equal(cpu_model.class_count_, gpu_model.class_count_.get())
    assert_allclose(
        cpu_model.class_log_prior_, gpu_model.class_log_prior_.get(), 1e-6
    )
    assert_allclose(gpu_log_proba, cpu_log_proba, atol=1e-2, rtol=1e-2)
    assert cpu_score - THRES <= gpu_score <= cpu_score + THRES
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.float32, cp.float64])
@pytest.mark.parametrize("norm", [True, False])
def test_complement_partial_fit(x_dtype, y_dtype, norm):
    """Chunked partial_fit must track sklearn's ComplementNB probabilities."""
    chunk_size = 500
    n_rows, n_cols = 1500, 100
    weights = [0.6, 0.2, 0.15, 0.05]
    # fp32 accumulates more rounding error, so compare more loosely.
    rtol = 1e-3 if x_dtype == cp.float32 else 1e-6

    X, y = make_classification(
        n_rows,
        n_cols,
        n_classes=len(weights),
        weights=weights,
        dtype=x_dtype,
        n_informative=9,
        random_state=1,
    )
    X -= X.min(0)  # Make all inputs positive
    y = y.astype(y_dtype)

    model = ComplementNB(norm=norm)
    modelsk = skComplementNB(norm=norm)
    classes = np.unique(y)

    for i in range(math.ceil(X.shape[0] / chunk_size)):
        upper = i * chunk_size + chunk_size
        # `upper == -1` marks the final (possibly short) chunk; the slice
        # then runs to the end of the data.
        if upper > X.shape[0]:
            upper = -1

        if upper > 0:
            x = X[i * chunk_size : upper]
            y_c = y[i * chunk_size : upper]
        else:
            x = X[i * chunk_size :]
            y_c = y[i * chunk_size :]

        # Feed the identical chunk to both implementations; `classes` is
        # required so the first call can allocate per-class state.
        model.partial_fit(x, y_c, classes=classes)
        modelsk.partial_fit(x.get(), y_c.get(), classes=classes.get())
        if upper == -1:
            break

    y_hat = model.predict_proba(X).get()
    y_sk = modelsk.predict_proba(X.get())
    assert_allclose(y_hat, y_sk, rtol=rtol)
def test_gaussian_basic():
    """GaussianNB on six separable points must match sklearn exactly."""
    # Data is just 6 separable points in the plane.
    X = cp.array(
        [
            [-2, -1, -1],
            [-1, -1, -1],
            [-1, -2, -1],
            [1, 1, 1],
            [1, 2, 1],
            [2, 1, 1],
        ],
        dtype=cp.float32,
    )
    y = cp.array([1, 1, 1, 2, 2, 2])

    reference = skGNB()
    reference.fit(X.get(), y.get())

    model = GaussianNB()
    model.fit(X, y)

    # Fitted per-class means and variances must agree with sklearn.
    assert_array_almost_equal(model.theta_.get(), reference.theta_, 6)
    assert_array_almost_equal(model.sigma_.get(), reference.var_, 6)

    predictions = model.predict(X)
    proba = model.predict_proba(X)
    log_proba = model.predict_log_proba(X)
    proba_sk = reference.predict_proba(X.get())
    log_proba_sk = reference.predict_log_proba(X.get())

    assert_array_equal(predictions.get(), y.get())
    assert_array_almost_equal(proba.get(), proba_sk, 8)
    assert_allclose(log_proba.get(), log_proba_sk, atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize("x_dtype", [cp.float32, cp.float64])
@pytest.mark.parametrize(
    "y_dtype", [cp.int32, cp.int64, cp.float32, cp.float64]
)
@pytest.mark.parametrize("is_sparse", [True, False])
def test_gaussian_fit_predict(x_dtype, y_dtype, is_sparse, nlp_20news):
    """
    Cupy Test
    """
    X, y = nlp_20news
    model = GaussianNB()
    n_rows = 500
    n_cols = 50000

    X = sparse_scipy_to_cp(X, x_dtype)
    X = X.tocsr()[:n_rows, :n_cols]

    if is_sparse:
        y = y.astype(y_dtype)[:n_rows]
        model.fit(X, y)
    else:
        # Dense path: train on a contiguous host copy of the data.
        X = X.todense()
        y = y[:n_rows].astype(y_dtype)
        model.fit(np.ascontiguousarray(cp.asnumpy(X).astype(x_dtype)), y)

    predictions = cp.asnumpy(model.predict(X))
    y = cp.asnumpy(y)

    assert accuracy_score(y, predictions) >= 0.99
def test_gaussian_partial_fit(nlp_20news):
    """Fit GaussianNB incrementally in chunks and check in-sample accuracy.

    Also verifies ``partial_fit`` error handling:
    * a label outside ``classes`` raises ``ValueError``
    * omitting ``classes`` on the first call raises ``ValueError``

    Note: the original version kept a ``total_fit`` accumulator that was
    never asserted and was miscomputed on the final (partial) chunk, where
    ``upper`` is -1; it has been removed.
    """
    chunk_size = 250
    n_rows = 1500
    n_cols = 60000
    x_dtype, y_dtype = cp.float32, cp.int32

    X, y = nlp_20news

    X = sparse_scipy_to_cp(X, x_dtype).tocsr()[:n_rows, :n_cols]
    y = y.astype(y_dtype)[:n_rows]

    model = GaussianNB()
    classes = np.unique(y)

    # Feed the data chunk by chunk; the final (possibly short) chunk is
    # signalled by upper == -1 and consumes the remainder of the rows.
    for i in range(math.ceil(X.shape[0] / chunk_size)):
        upper = i * chunk_size + chunk_size
        if upper > X.shape[0]:
            upper = -1

        if upper > 0:
            x = X[i * chunk_size : upper]
            y_c = y[i * chunk_size : upper]
        else:
            x = X[i * chunk_size :]
            y_c = y[i * chunk_size :]
        model.partial_fit(x, y_c, classes=classes)
        if upper == -1:
            break

    y_hat = model.predict(X)
    y_hat = cp.asnumpy(y_hat)
    y = cp.asnumpy(y)

    assert accuracy_score(y, y_hat) >= 0.99

    # Test whether label mismatch between target y and classes raises an Error
    assert_raises(
        ValueError, GaussianNB().partial_fit, X, y, classes=cp.array([0, 1])
    )
    # Raise because classes is required on first call of partial_fit
    assert_raises(ValueError, GaussianNB().partial_fit, X, y)
@pytest.mark.parametrize("priors", [None, "balanced", "unbalanced"])
@pytest.mark.parametrize("var_smoothing", [1e-5, 1e-7, 1e-9])
def test_gaussian_parameters(priors, var_smoothing, nlp_20news):
    """Check GaussianNB against sklearn across priors and var_smoothing.

    Compares the fitted ``epsilon_`` and the predicted labels.
    """
    x_dtype = cp.float32
    y_dtype = cp.int32
    nrows = 150
    ncols = 20000

    X, y = nlp_20news

    X = sparse_scipy_to_cp(X[:nrows], x_dtype).todense()[:, :ncols]
    y = y.astype(y_dtype)[:nrows]

    # turn the string parametrization into concrete prior vectors
    # (20 classes in the 20newsgroups dataset)
    if priors == "balanced":
        priors = cp.array([1 / 20] * 20)
    elif priors == "unbalanced":
        priors = cp.linspace(0.01, 0.09, 20)

    model = GaussianNB(priors=priors, var_smoothing=var_smoothing)
    model_sk = skGNB(
        priors=priors.get() if priors is not None else None,
        var_smoothing=var_smoothing,
    )
    model.fit(X, y)
    model_sk.fit(X.get(), y.get())

    y_hat = model.predict(X)
    y_hat_sk = model_sk.predict(X.get())
    y_hat = cp.asnumpy(y_hat)
    y = cp.asnumpy(y)

    assert_allclose(model.epsilon_.get(), model_sk.epsilon_, rtol=1e-4)
    assert_array_equal(y_hat, y_hat_sk)
@pytest.mark.parametrize("x_dtype", [cp.int32, cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("is_sparse", [True, False])
def test_categorical(x_dtype, y_dtype, is_sparse, nlp_20news):
    """Compare CategoricalNB with sklearn's CategoricalNB.

    Checks class counts, class log priors, log probabilities and
    accuracy score (within a small absolute threshold).
    """
    if x_dtype == cp.int32 and is_sparse:
        pytest.skip("Sparse matrices with integers dtype are not supported")
    X, y = nlp_20news
    n_rows = 500
    n_cols = 400

    X = sparse_scipy_to_cp(X, dtype=cp.float32)
    X = X.tocsr()[:n_rows, :n_cols]
    y = y.astype(y_dtype)[:n_rows]
    if not is_sparse:
        X = X.todense()
    X = X.astype(x_dtype)

    cuml_model = CategoricalNB()
    cuml_model.fit(X, y)
    cuml_score = cuml_model.score(X, y)
    cuml_proba = cuml_model.predict_log_proba(X).get()

    # sklearn needs dense host arrays
    X = X.todense().get() if is_sparse else X.get()
    y = y.get()
    sk_model = skCNB()
    sk_model.fit(X, y)
    sk_score = sk_model.score(X, y)
    sk_proba = sk_model.predict_log_proba(X)

    THRES = 1e-3

    assert_array_equal(sk_model.class_count_, cuml_model.class_count_.get())
    assert_allclose(
        sk_model.class_log_prior_, cuml_model.class_log_prior_.get(), 1e-6
    )
    assert_allclose(cuml_proba, sk_proba, atol=1e-2, rtol=1e-2)
    assert sk_score - THRES <= cuml_score <= sk_score + THRES
@pytest.mark.parametrize("x_dtype", [cp.int32, cp.float32, cp.float64])
@pytest.mark.parametrize("y_dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("is_sparse", [True, False])
def test_categorical_partial_fit(x_dtype, y_dtype, is_sparse, nlp_20news):
    """Fit CategoricalNB in chunks via partial_fit and check its score.

    The score is compared against a hard-coded reference value
    (``expected_score``) rather than a freshly trained sklearn model.
    """
    if x_dtype == cp.int32 and is_sparse:
        pytest.skip("Sparse matrices with integers dtype are not supported")
    n_rows = 5000
    n_cols = 500
    chunk_size = 1000
    # reference in-sample score for this slice of 20newsgroups
    expected_score = 0.1040

    X, y = nlp_20news

    X = sparse_scipy_to_cp(X, "float32").tocsr()[:n_rows, :n_cols]
    if is_sparse:
        X.data = X.data.astype(x_dtype)
    else:
        X = X.todense().astype(x_dtype)
    y = y.astype(y_dtype)[:n_rows]

    model = CategoricalNB()
    classes = np.unique(y)

    # Feed the data chunk by chunk; the final (possibly short) chunk is
    # signalled by upper == -1.
    for i in range(math.ceil(X.shape[0] / chunk_size)):
        upper = i * chunk_size + chunk_size
        if upper > X.shape[0]:
            upper = -1

        if upper > 0:
            x = X[i * chunk_size : upper]
            y_c = y[i * chunk_size : upper]
        else:
            x = X[i * chunk_size :]
            y_c = y[i * chunk_size :]
        model.partial_fit(x, y_c, classes=classes)
        if upper == -1:
            break

    cuml_score = model.score(X, y)
    THRES = 1e-4
    assert expected_score - THRES <= cuml_score <= expected_score + THRES
@pytest.mark.parametrize("class_prior", [None, "balanced", "unbalanced"])
@pytest.mark.parametrize("alpha", [0.1, 0.5, 1.5])
@pytest.mark.parametrize("fit_prior", [False, True])
@pytest.mark.parametrize("is_sparse", [False, True])
def test_categorical_parameters(
    class_prior, alpha, fit_prior, is_sparse, nlp_20news
):
    """Check CategoricalNB against sklearn for various hyper-parameters.

    Compares predicted labels exactly and log probabilities within rtol.
    """
    x_dtype = cp.float32
    y_dtype = cp.int32
    nrows = 2000
    ncols = 500

    X, y = nlp_20news

    X = sparse_scipy_to_cp(X, x_dtype).tocsr()[:nrows, :ncols]
    if not is_sparse:
        X = X.todense()
    y = y.astype(y_dtype)[:nrows]

    # turn the string parametrization into concrete prior vectors
    # (20 classes in the 20newsgroups dataset)
    if class_prior == "balanced":
        class_prior = np.array([1 / 20] * 20)
    elif class_prior == "unbalanced":
        class_prior = np.linspace(0.01, 0.09, 20)

    model = CategoricalNB(
        class_prior=class_prior, alpha=alpha, fit_prior=fit_prior
    )
    model_sk = skCNB(class_prior=class_prior, alpha=alpha, fit_prior=fit_prior)
    model.fit(X, y)
    y_hat = model.predict(X).get()
    y_log_prob = model.predict_log_proba(X).get()

    # sklearn needs dense host arrays
    X = X.todense().get() if is_sparse else X.get()
    model_sk.fit(X, y.get())
    y_hat_sk = model_sk.predict(X)
    y_log_prob_sk = model_sk.predict_log_proba(X)

    assert_allclose(y_log_prob, y_log_prob_sk, rtol=1e-4)
    assert_array_equal(y_hat, y_hat_sk)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_simpl_set.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from cuml.manifold.umap import (
simplicial_set_embedding as cu_simplicial_set_embedding,
)
from cuml.manifold.umap import fuzzy_simplicial_set as cu_fuzzy_simplicial_set
from cuml.neighbors import NearestNeighbors
from cuml.manifold.umap import UMAP
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.datasets import make_blobs
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
IS_ARM = platform.processor() == "aarch64"
if not IS_ARM:
from umap.umap_ import (
simplicial_set_embedding as ref_simplicial_set_embedding,
)
from umap.umap_ import fuzzy_simplicial_set as ref_fuzzy_simplicial_set
import umap.distances as dist
def correctness_dense(a, b, rtol=0.1, threshold=0.95):
    """Return True when at least ``threshold`` of ``a``'s entries match
    ``b`` within relative tolerance ``rtol``."""
    within_tol = cp.abs(a - b) <= (rtol * cp.abs(b))
    fraction_correct = within_tol.sum() / a.size
    return fraction_correct >= threshold
def correctness_sparse(a, b, atol=0.1, rtol=0.2, threshold=0.95):
    """Return True when at least ``threshold`` of the reference non-zeros
    of ``a`` are matched by ``b`` within ``atol + rtol * |b|``."""
    zeros_in_ref = (a == 0).sum()
    nonzeros_in_ref = a.size - zeros_in_ref
    matches = (cp.abs(a - b) <= (atol + rtol * cp.abs(b))).sum()
    return (matches - zeros_in_ref) / nonzeros_in_ref >= threshold
@pytest.mark.parametrize("n_rows", [800, 5000])
@pytest.mark.parametrize("n_features", [8, 32])
@pytest.mark.parametrize("n_neighbors", [8, 16])
@pytest.mark.parametrize("precomputed_nearest_neighbors", [False, True])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_fuzzy_simplicial_set(
    n_rows, n_features, n_neighbors, precomputed_nearest_neighbors
):
    """Compare cuML's fuzzy_simplicial_set graph with umap-learn's.

    Optionally feeds both implementations the same precomputed kNN
    graph; requires >=95% of the reference non-zeros to match within
    tolerance (see ``correctness_sparse``).
    """
    n_clusters = 30
    random_state = 42
    metric = "euclidean"

    X, _ = make_blobs(
        n_samples=n_rows,
        centers=n_clusters,
        n_features=n_features,
        random_state=random_state,
    )

    if precomputed_nearest_neighbors:
        nn = NearestNeighbors(n_neighbors=n_neighbors, metric=metric)
        nn.fit(X)
        knn_dists, knn_indices = nn.kneighbors(
            X, n_neighbors, return_distance=True
        )
        cu_fss_graph = cu_fuzzy_simplicial_set(
            X,
            n_neighbors,
            random_state,
            metric,
            knn_indices=knn_indices,
            knn_dists=knn_dists,
        )
        # the reference implementation runs on host arrays
        knn_indices = knn_indices.get()
        knn_dists = knn_dists.get()
        ref_fss_graph = ref_fuzzy_simplicial_set(
            X,
            n_neighbors,
            random_state,
            metric,
            knn_indices=knn_indices,
            knn_dists=knn_dists,
        )[0].tocoo()
    else:
        cu_fss_graph = cu_fuzzy_simplicial_set(
            X, n_neighbors, random_state, metric
        )
        X = X.get()
        ref_fss_graph = ref_fuzzy_simplicial_set(
            X, n_neighbors, random_state, metric
        )[0].tocoo()

    # densify both graphs on device for element-wise comparison
    cu_fss_graph = cu_fss_graph.todense()
    ref_fss_graph = cp.sparse.coo_matrix(ref_fss_graph).todense()
    assert correctness_sparse(
        ref_fss_graph, cu_fss_graph, atol=0.1, rtol=0.2, threshold=0.95
    )
@pytest.mark.parametrize("n_rows", [800, 5000])
@pytest.mark.parametrize("n_features", [8, 32])
@pytest.mark.parametrize("n_neighbors", [8, 16])
@pytest.mark.parametrize("n_components", [2, 5])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_simplicial_set_embedding(
    n_rows, n_features, n_neighbors, n_components
):
    """Compare cuML's simplicial_set_embedding with umap-learn's.

    Each implementation embeds its own fuzzy simplicial set graph of the
    same blob data; >=95% of embedding entries must match within 10%
    relative tolerance (see ``correctness_dense``).
    """
    n_clusters = 30
    random_state = 42
    metric = "euclidean"
    initial_alpha = 1.0
    # curve parameters derived from spread=1.0, min_dist=0.1
    a, b = UMAP.find_ab_params(1.0, 0.1)
    gamma = 0
    negative_sample_rate = 5
    n_epochs = 500
    init = "random"
    metric = "euclidean"
    metric_kwds = {}
    densmap = False
    densmap_kwds = {}
    output_dens = False
    output_metric = "euclidean"
    output_metric_kwds = {}

    X, _ = make_blobs(
        n_samples=n_rows,
        centers=n_clusters,
        n_features=n_features,
        random_state=random_state,
    )
    X = X.get()

    # reference pipeline (host): graph + embedding from umap-learn
    ref_fss_graph = ref_fuzzy_simplicial_set(
        X, n_neighbors, random_state, metric
    )[0]
    ref_embedding = ref_simplicial_set_embedding(
        X,
        ref_fss_graph,
        n_components,
        initial_alpha,
        a,
        b,
        gamma,
        negative_sample_rate,
        n_epochs,
        init,
        np.random.RandomState(random_state),
        dist.named_distances_with_gradients[metric],
        metric_kwds,
        densmap,
        densmap_kwds,
        output_dens,
        output_metric=output_metric,
        output_metric_kwds=output_metric_kwds,
    )[0]

    # cuML pipeline (device): graph + embedding
    cu_fss_graph = cu_fuzzy_simplicial_set(
        X, n_neighbors, random_state, metric
    )

    cu_embedding = cu_simplicial_set_embedding(
        X,
        cu_fss_graph,
        n_components,
        initial_alpha,
        a,
        b,
        gamma,
        negative_sample_rate,
        n_epochs,
        init,
        random_state,
        metric,
        metric_kwds,
        output_metric=output_metric,
        output_metric_kwds=output_metric_kwds,
    )

    ref_embedding = cp.array(ref_embedding)
    assert correctness_dense(
        ref_embedding, cu_embedding, rtol=0.1, threshold=0.95
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_logger.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from contextlib import redirect_stdout
import cuml.internals.logger as logger
from io import StringIO, TextIOWrapper, BytesIO
def test_logger():
    """Smoke-test every logging helper plus the level/pattern managers."""
    for log_fn, message in (
        (logger.trace, "This is a trace message"),
        (logger.debug, "This is a debug message"),
        (logger.info, "This is an info message"),
        (logger.warn, "This is a warn message"),
        (logger.error, "This is a error message"),
        (logger.critical, "This is a critical message"),
    ):
        log_fn(message)

    # level_warn must enable warn and suppress info
    with logger.set_level(logger.level_warn):
        assert logger.should_log_for(logger.level_warn)
        assert not logger.should_log_for(logger.level_info)

    # a custom pattern must not break logging
    with logger.set_pattern("%v"):
        logger.info("This is an info message")
def test_redirected_logger():
    """Each log level's output must land on a redirected sys.stdout."""
    captured = StringIO()

    with logger.set_level(logger.level_trace):
        # We do not test trace because CUML_LOG_TRACE is not compiled by
        # default
        cases = (
            (logger.debug, "This is a debug message"),
            (logger.info, "This is an info message"),
            (logger.warn, "This is a warn message"),
            (logger.error, "This is an error message"),
            (logger.critical, "This is a critical message"),
        )
        for log_fn, message in cases:
            with redirect_stdout(captured):
                log_fn(message)
            assert message in captured.getvalue()

    # Check that logging does not error with sys.stdout of None
    with redirect_stdout(None):
        logger.debug("This is a debug message")
def test_log_flush():
    """Messages reach the underlying byte buffer only after flush()."""
    raw_buffer = BytesIO()
    wrapped_stdout = TextIOWrapper(raw_buffer)

    with logger.set_level(logger.level_trace):
        message = "This is a debug message"
        with redirect_stdout(wrapped_stdout):
            logger.debug(message)
            # still sitting in the TextIOWrapper's buffer
            assert message not in raw_buffer.getvalue().decode("utf-8")
            logger.flush()
            assert message in raw_buffer.getvalue().decode("utf-8")

    # Check that logging flush does not error with sys.stdout of None
    with redirect_stdout(None):
        logger.flush()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_serialize.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distributed.protocol.serialize import serialize as ser
from cuml.naive_bayes.naive_bayes import MultinomialNB
import pickle
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
def test_naive_bayes_cuda():
    """
    Assuming here that the Dask serializers are well-tested.
    This test-case is only validating that the Naive Bayes class
    actually gets registered w/ `dask` and `cuda` serializers.
    """
    model = MultinomialNB()

    X = cupyx.scipy.sparse.random(1, 5)
    y = cp.array([0])

    model.fit(X, y)

    # Unfortunately, Dask has no `unregister` function and Pytest
    # shares the same process so cannot test the base-state here.
    for serializer_name in ("cuda", "dask", "pickle"):
        header, _ = ser(model, serializers=[serializer_name])
        assert header["serializer"] == serializer_name
def test_cupy_sparse_patch():
    """Round-trip a cupyx sparse matrix through pickle.

    The deserialized matrix must carry its own cuSPARSE descriptor
    handle rather than the original object's — presumably this guards
    cuML's pickle patch for cupy sparse matrices (TODO confirm intent
    once the upstream issue below is resolved).
    """
    sp = cupyx.scipy.sparse.random(50, 2, format="csr")
    pickled = pickle.dumps(sp)
    sp_deser = pickle.loads(pickled)

    # Using internal API pieces only until
    # https://github.com/cupy/cupy/issues/3061
    # is fixed.
    assert sp_deser._descr.descriptor != sp._descr.descriptor
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_fil.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.ensemble import (
GradientBoostingClassifier,
GradientBoostingRegressor,
RandomForestClassifier,
RandomForestRegressor,
ExtraTreesClassifier,
ExtraTreesRegressor,
)
from sklearn.datasets import make_classification, make_regression
from cuml.internals.import_utils import has_xgboost
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml import ForestInference
from math import ceil
import os
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
# from cuml.internals.import_utils import has_lightgbm
if has_xgboost():
import xgboost as xgb
# pytestmark = pytest.mark.skip
def simulate_data(
    m,
    n,
    k=2,
    n_informative="auto",
    random_state=None,
    classification=True,
    bias=0.0,
):
    """Create a synthetic dataset with ``m`` rows and ``n`` columns.

    Classification data has ``k`` classes; regression data is a single
    target with the given ``bias``. ``n_informative="auto"`` uses a
    fifth of the columns. Returns ``(features, labels)`` as float32
    arrays, with labels flattened to 1-D.
    """
    informative = n // 5 if n_informative == "auto" else n_informative

    if classification:
        features, labels = make_classification(
            n_samples=m,
            n_features=n,
            n_informative=informative,
            n_redundant=n - informative,
            n_classes=k,
            random_state=random_state,
        )
    else:
        features, labels = make_regression(
            n_samples=m,
            n_features=n,
            n_informative=informative,
            n_targets=1,
            bias=bias,
            random_state=random_state,
        )

    X = np.c_[features].astype(np.float32)
    y = np.c_[labels].astype(np.float32).flatten()
    return X, y
# absolute tolerance for FIL predict_proba
# False is binary classification, True is multiclass
proba_atol = {False: 3e-7, True: 3e-6}
def _build_and_save_xgboost(
    model_path,
    X_train,
    y_train,
    classification=True,
    num_rounds=5,
    n_classes=2,
    xgboost_params=None,
):
    """Train a small XGBoost model and save it to ``model_path``.

    Parameters
    ----------
    model_path : str
        Destination file; the serialization format follows the extension.
    X_train, y_train : array-like
        Training data and labels.
    classification : bool, default True
        Train a (binary or multiclass) classifier instead of a regressor.
    num_rounds : int, default 5
        Number of boosting rounds.
    n_classes : int, default 2
        Number of classes when ``classification`` is True.
    xgboost_params : dict, optional
        Extra booster parameters that override the defaults.
        (Was a mutable ``{}`` default — replaced with a None sentinel.)

    Returns
    -------
    xgb.Booster
        The trained booster (already written to disk).
    """
    dtrain = xgb.DMatrix(X_train, label=y_train)

    # instantiate params
    params = {"eval_metric": "error", "max_depth": 25}

    # learning task params
    if classification:
        if n_classes == 2:
            params["objective"] = "binary:logistic"
        else:
            params["num_class"] = n_classes
            params["objective"] = "multi:softprob"
    else:
        params["objective"] = "reg:squarederror"
        params["base_score"] = 0.0

    params.update(xgboost_params or {})

    bst = xgb.train(params, dtrain, num_rounds)
    bst.save_model(model_path)
    return bst
@pytest.mark.parametrize(
    "n_rows", [unit_param(1000), quality_param(10000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_columns", [unit_param(30), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
    "num_rounds",
    [unit_param(1), unit_param(5), quality_param(50), stress_param(90)],
)
@pytest.mark.parametrize("n_classes", [2, 5, 25])
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_fil_classification(
    n_rows, n_columns, num_rounds, n_classes, tmp_path
):
    """Train an XGBoost classifier, load it into FIL and compare outputs.

    FIL accuracy must match XGBoost within 0.01; class predictions must
    agree element-wise and probabilities within ``proba_atol``.
    """
    # settings
    classification = True  # change this to false to use regression
    random_state = np.random.RandomState(43210)

    X, y = simulate_data(
        n_rows,
        n_columns,
        n_classes,
        random_state=random_state,
        classification=classification,
    )
    # identify shape and indices
    n_rows, n_columns = X.shape
    train_size = 0.80

    X_train, X_validation, y_train, y_validation = train_test_split(
        X, y, train_size=train_size, random_state=0
    )

    model_path = os.path.join(tmp_path, "xgb_class.model")

    bst = _build_and_save_xgboost(
        model_path,
        X_train,
        y_train,
        num_rounds=num_rounds,
        classification=classification,
        n_classes=n_classes,
    )

    dvalidation = xgb.DMatrix(X_validation, label=y_validation)

    if n_classes == 2:
        # binary: XGBoost returns P(class 1); build the 2-column proba
        xgb_preds = bst.predict(dvalidation)
        xgb_preds_int = np.around(xgb_preds)
        xgb_proba = np.stack([1 - xgb_preds, xgb_preds], axis=1)
    else:
        # multiclass: XGBoost returns per-class probabilities directly
        xgb_proba = bst.predict(dvalidation)
        xgb_preds_int = xgb_proba.argmax(axis=1)
    xgb_acc = accuracy_score(y_validation, xgb_preds_int)

    fm = ForestInference.load(
        model_path, algo="auto", output_class=True, threshold=0.50
    )
    fil_preds = np.asarray(fm.predict(X_validation))
    fil_proba = np.asarray(fm.predict_proba(X_validation))
    fil_acc = accuracy_score(y_validation, fil_preds)

    assert fil_acc == pytest.approx(xgb_acc, abs=0.01)
    assert array_equal(fil_preds, xgb_preds_int)
    np.testing.assert_allclose(
        fil_proba, xgb_proba, atol=proba_atol[n_classes > 2]
    )
@pytest.mark.parametrize(
    "n_rows", [unit_param(1000), quality_param(10000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "n_columns", [unit_param(20), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
    "num_rounds", [unit_param(5), quality_param(10), stress_param(90)]
)
@pytest.mark.parametrize(
    "max_depth", [unit_param(3), unit_param(7), stress_param(11)]
)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_fil_regression(n_rows, n_columns, num_rounds, tmp_path, max_depth):
    """Train an XGBoost regressor, load it into FIL and compare outputs.

    FIL's MSE must match XGBoost's within 0.01 and the raw predictions
    must agree within 1e-3.
    """
    # settings (no-op self-assignments of n_rows/n_columns removed)
    classification = False  # regression data
    random_state = np.random.RandomState(43210)

    X, y = simulate_data(
        n_rows,
        n_columns,
        random_state=random_state,
        classification=classification,
        bias=10.0,
    )
    # identify shape and indices
    n_rows, n_columns = X.shape
    train_size = 0.80

    X_train, X_validation, y_train, y_validation = train_test_split(
        X, y, train_size=train_size, random_state=0
    )

    model_path = os.path.join(tmp_path, "xgb_reg.model")
    bst = _build_and_save_xgboost(
        model_path,
        X_train,
        y_train,
        classification=classification,
        num_rounds=num_rounds,
        xgboost_params={"max_depth": max_depth},
    )

    dvalidation = xgb.DMatrix(X_validation, label=y_validation)
    xgb_preds = bst.predict(dvalidation)

    xgb_mse = mean_squared_error(y_validation, xgb_preds)
    fm = ForestInference.load(model_path, algo="auto", output_class=False)
    fil_preds = np.asarray(fm.predict(X_validation))
    fil_preds = np.reshape(fil_preds, np.shape(xgb_preds))
    fil_mse = mean_squared_error(y_validation, fil_preds)

    assert fil_mse == pytest.approx(xgb_mse, abs=0.01)
    assert np.allclose(fil_preds, xgb_preds, 1e-3)
@pytest.mark.parametrize("n_rows", [1000])
@pytest.mark.parametrize("n_columns", [30])
# Skip depth 20 for dense tests
@pytest.mark.parametrize(
    "max_depth,storage_type",
    [(2, False), (2, True), (10, False), (10, True), (20, True)],
)
# When n_classes=25, fit a single estimator only to reduce test time
@pytest.mark.parametrize(
    "n_classes,model_class,n_estimators,precision",
    [
        (2, GradientBoostingClassifier, 1, "native"),
        (2, GradientBoostingClassifier, 10, "native"),
        (2, RandomForestClassifier, 1, "native"),
        (5, RandomForestClassifier, 1, "native"),
        (2, RandomForestClassifier, 10, "native"),
        (5, RandomForestClassifier, 10, "native"),
        (2, ExtraTreesClassifier, 1, "native"),
        (2, ExtraTreesClassifier, 10, "native"),
        (5, GradientBoostingClassifier, 1, "native"),
        (5, GradientBoostingClassifier, 10, "native"),
        (25, GradientBoostingClassifier, 1, "native"),
        (25, RandomForestClassifier, 1, "native"),
        (2, RandomForestClassifier, 10, "float32"),
        (2, RandomForestClassifier, 10, "float64"),
        (5, RandomForestClassifier, 10, "float32"),
        (5, RandomForestClassifier, 10, "float64"),
    ],
)
def test_fil_skl_classification(
    n_rows,
    n_columns,
    n_estimators,
    max_depth,
    n_classes,
    storage_type,
    precision,
):
    """Load sklearn tree-ensemble classifiers into FIL and compare.

    Accuracy must be within a per-case threshold of sklearn's; binary
    predictions must match exactly and probabilities within
    ``proba_atol``.
    """
    # settings
    classification = True  # change this to false to use regression
    random_state = np.random.RandomState(43210)

    X, y = simulate_data(
        n_rows,
        n_columns,
        n_classes,
        random_state=random_state,
        classification=classification,
    )
    # identify shape and indices
    train_size = 0.80

    X_train, X_validation, y_train, y_validation = train_test_split(
        X, y, train_size=train_size, random_state=0
    )

    init_kwargs = {
        "n_estimators": n_estimators,
        "max_depth": max_depth,
    }
    if model_class in [RandomForestClassifier, ExtraTreesClassifier]:
        init_kwargs["max_features"] = 0.3
        init_kwargs["n_jobs"] = -1
    else:
        # model_class == GradientBoostingClassifier
        init_kwargs["init"] = "zero"

    skl_model = model_class(**init_kwargs, random_state=random_state)
    skl_model.fit(X_train, y_train)

    skl_preds = skl_model.predict(X_validation)
    skl_preds_int = np.around(skl_preds)
    skl_proba = skl_model.predict_proba(X_validation)

    skl_acc = accuracy_score(y_validation, skl_preds_int)

    algo = "NAIVE" if storage_type else "BATCH_TREE_REORG"

    fm = ForestInference.load_from_sklearn(
        skl_model,
        algo=algo,
        output_class=True,
        threshold=0.50,
        storage_type=storage_type,
        precision=precision,
    )
    fil_preds = np.asarray(fm.predict(X_validation))
    fil_preds = np.reshape(fil_preds, np.shape(skl_preds_int))

    fil_acc = accuracy_score(y_validation, fil_preds)
    # fil_acc is within p99 error bars of skl_acc (diff == 0.017 +- 0.012)
    # however, some tests have a delta as big as 0.04.
    # sklearn uses float64 thresholds, while FIL uses float32
    # TODO(levsnv): once FIL supports float64 accuracy, revisit thresholds
    threshold = 1e-5 if n_classes == 2 else 0.1
    assert fil_acc == pytest.approx(skl_acc, abs=threshold)

    if n_classes == 2:
        assert array_equal(fil_preds, skl_preds_int)
        fil_proba = np.asarray(fm.predict_proba(X_validation))
        fil_proba = np.reshape(fil_proba, np.shape(skl_proba))
        np.testing.assert_allclose(
            fil_proba, skl_proba, atol=proba_atol[n_classes > 2]
        )
@pytest.mark.parametrize("n_rows", [1000])
@pytest.mark.parametrize("n_columns", [20])
@pytest.mark.parametrize(
    "n_classes,model_class,n_estimators",
    [
        (1, GradientBoostingRegressor, 1),
        (1, GradientBoostingRegressor, 10),
        (1, RandomForestRegressor, 1),
        (1, RandomForestRegressor, 10),
        (5, RandomForestRegressor, 1),
        (5, RandomForestRegressor, 10),
        (1, ExtraTreesRegressor, 1),
        (1, ExtraTreesRegressor, 10),
        (5, GradientBoostingRegressor, 10),
    ],
)
@pytest.mark.parametrize("max_depth", [2, 10, 20])
@pytest.mark.parametrize("storage_type", [False, True])
@pytest.mark.skip("https://github.com/rapidsai/cuml/issues/5138")
def test_fil_skl_regression(
    n_rows,
    n_columns,
    n_classes,
    model_class,
    n_estimators,
    max_depth,
    storage_type,
):
    """Load sklearn tree-ensemble regressors into FIL and compare MSE.

    Currently skipped (see linked issue); tolerances below were loosened
    while the underlying discrepancy is investigated.
    """
    # skip depth 20 for dense tests
    if max_depth == 20 and not storage_type:
        return

    # settings
    random_state = np.random.RandomState(43210)

    X, y = simulate_data(
        n_rows,
        n_columns,
        n_classes,
        random_state=random_state,
        classification=False,
    )
    # identify shape and indices
    train_size = 0.80

    X_train, X_validation, y_train, y_validation = train_test_split(
        X, y, train_size=train_size, random_state=0
    )

    init_kwargs = {
        "n_estimators": n_estimators,
        "max_depth": max_depth,
    }
    if model_class in [RandomForestRegressor, ExtraTreesRegressor]:
        init_kwargs["max_features"] = 0.3
        init_kwargs["n_jobs"] = -1
    else:
        # model_class == GradientBoostingRegressor
        init_kwargs["init"] = "zero"

    skl_model = model_class(**init_kwargs)

    skl_model.fit(X_train, y_train)

    skl_preds = skl_model.predict(X_validation)

    skl_mse = mean_squared_error(y_validation, skl_preds)

    algo = "NAIVE" if storage_type else "BATCH_TREE_REORG"

    fm = ForestInference.load_from_sklearn(
        skl_model, algo=algo, output_class=False, storage_type=storage_type
    )
    fil_preds = np.asarray(fm.predict(X_validation))
    fil_preds = np.reshape(fil_preds, np.shape(skl_preds))

    fil_mse = mean_squared_error(y_validation, fil_preds)

    # NOTE(wphicks): Tolerance has been temporarily increased from 1.e-6/1e-4
    # to 1e-4/1e-2. This is too high of a tolerance for this test, but we will
    # use it to unblock CI while investigating the underlying issue.
    # https://github.com/rapidsai/cuml/issues/5138
    assert fil_mse <= skl_mse * (1.0 + 1e-4) + 1e-2
    # NOTE(wphicks): Tolerance has been temporarily increased from 1.2e-3 to
    # 1.2e-2. This test began failing CI due to the previous tolerance more
    # regularly, and while the root cause is under investigation
    # (https://github.com/rapidsai/cuml/issues/5138), the tolerance has simply
    # been reduced. Combined with the above assertion, this is still a very
    # reasonable threshold.
    assert np.allclose(fil_preds, skl_preds, 1.2e-2)
@pytest.fixture(scope="session", params=["binary", "json"])
def small_classifier_and_preds(tmpdir_factory, request):
    """Session fixture: a small trained XGBoost binary classifier.

    Parametrized over the binary and JSON model formats. Returns
    ``(model_path, model_type, X, xgb_preds)`` where ``xgb_preds`` are
    in-sample XGBoost predictions on ``X``.
    """
    X, y = simulate_data(500, 10, random_state=43210, classification=True)

    ext = "json" if request.param == "json" else "model"
    model_type = "xgboost_json" if request.param == "json" else "xgboost"
    model_path = str(
        tmpdir_factory.mktemp("models").join(f"small_class.{ext}")
    )
    bst = _build_and_save_xgboost(model_path, X, y)

    # just do within-sample since it's not an accuracy test
    dtrain = xgb.DMatrix(X, label=y)
    xgb_preds = bst.predict(dtrain)

    return (model_path, model_type, X, xgb_preds)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize(
    "algo",
    [
        "AUTO",
        "NAIVE",
        "TREE_REORG",
        "BATCH_TREE_REORG",
        "auto",
        "naive",
        "tree_reorg",
        "batch_tree_reorg",
    ],
)
def test_output_algos(algo, small_classifier_and_preds):
    """FIL class predictions must match XGBoost for every algo spelling."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fil_model = ForestInference.load(
        model_path,
        model_type=model_type,
        algo=algo,
        output_class=True,
        threshold=0.50,
    )

    expected = np.around(xgb_preds)
    predicted = np.asarray(fil_model.predict(X)).reshape(expected.shape)
    assert np.allclose(predicted, expected, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize("precision", ["native", "float32", "float64"])
def test_precision_xgboost(precision, small_classifier_and_preds):
    """FIL class predictions must match XGBoost at every precision."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fil_model = ForestInference.load(
        model_path,
        model_type=model_type,
        output_class=True,
        threshold=0.50,
        precision=precision,
    )

    expected = np.around(xgb_preds)
    predicted = np.asarray(fil_model.predict(X)).reshape(expected.shape)
    assert np.allclose(predicted, expected, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize(
    "storage_type", [False, True, "auto", "dense", "sparse", "sparse8"]
)
def test_output_storage_type(storage_type, small_classifier_and_preds):
    """FIL predictions must match XGBoost for every storage_type value."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fm = ForestInference.load(
        model_path,
        model_type=model_type,
        output_class=True,
        storage_type=storage_type,
        threshold=0.50,
    )

    xgb_preds_int = np.around(xgb_preds)
    fil_preds = np.asarray(fm.predict(X))
    fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))

    assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize("storage_type", ["dense", "sparse"])
@pytest.mark.parametrize("blocks_per_sm", [1, 2, 3, 4])
def test_output_blocks_per_sm(
    storage_type, blocks_per_sm, small_classifier_and_preds
):
    """FIL predictions must be unaffected by the blocks_per_sm setting."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fm = ForestInference.load(
        model_path,
        model_type=model_type,
        output_class=True,
        storage_type=storage_type,
        threshold=0.50,
        blocks_per_sm=blocks_per_sm,
    )

    xgb_preds_int = np.around(xgb_preds)
    fil_preds = np.asarray(fm.predict(X))
    fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))

    assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
@pytest.mark.parametrize("threads_per_tree", [2, 4, 8, 16, 32, 64, 128, 256])
def test_threads_per_tree(threads_per_tree, small_classifier_and_preds):
    """Predictions and probabilities must be unaffected by threads_per_tree."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fm = ForestInference.load(
        model_path,
        model_type=model_type,
        output_class=True,
        storage_type="auto",
        threshold=0.50,
        threads_per_tree=threads_per_tree,
        n_items=1,
    )

    fil_preds = np.asarray(fm.predict(X))
    fil_proba = np.asarray(fm.predict_proba(X))

    # binary model: build the 2-column probability matrix from P(class 1)
    xgb_proba = np.stack([1 - xgb_preds, xgb_preds], axis=1)
    np.testing.assert_allclose(fil_proba, xgb_proba, atol=proba_atol[False])

    xgb_preds_int = np.around(xgb_preds)
    fil_preds = np.reshape(fil_preds, np.shape(xgb_preds_int))
    assert np.allclose(fil_preds, xgb_preds_int, 1e-3)
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_print_forest_shape(small_classifier_and_preds):
    """shape_str must contain every expected summary section."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fil_model = ForestInference.load(
        model_path,
        model_type=model_type,
        output_class=True,
        compute_shape_str=True,
    )

    expected_fragments = (
        "model size",
        " MB",
        "Depth histogram:",
        "Leaf depth",
        "Depth histogram fingerprint",
        "Avg nodes per tree",
    )
    for fragment in expected_fragments:
        assert fragment in fil_model.shape_str
@pytest.mark.parametrize("output_class", [True, False])
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_thresholding(output_class, small_classifier_and_preds):
    """output_class=True yields hard 0/1 labels; False leaves raw scores."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    fil_model = ForestInference.load(
        model_path,
        model_type=model_type,
        algo="TREE_REORG",
        output_class=output_class,
        threshold=0.50,
    )

    preds = np.asarray(fil_model.predict(X))
    n_non_binary = ((preds != 0.0) & (preds != 1.0)).sum()
    if output_class:
        assert n_non_binary == 0
    else:
        assert n_non_binary > 0
@pytest.mark.skipif(has_xgboost() is False, reason="need to install xgboost")
def test_output_args(small_classifier_and_preds):
    """predict() must accept a plain numpy array and return raw scores
    matching the XGBoost reference output."""
    model_path, model_type, X, xgb_preds = small_classifier_and_preds
    forest = ForestInference.load(
        model_path,
        model_type=model_type,
        algo="TREE_REORG",
        output_class=False,
        threshold=0.50,
    )
    fil_preds = forest.predict(np.asarray(X))
    fil_preds = np.reshape(fil_preds, np.shape(xgb_preds))
    assert array_equal(fil_preds, xgb_preds, 1e-3)
def to_categorical(features, n_categorical, invalid_frac, random_state):
    """Build two views of *features* for categorical-model testing.

    Returns a pandas DataFrame whose first ``n_categorical`` source columns
    are re-encoded as pandas Categorical dtype (LightGBM needs a DataFrame
    to recognize and fit on categorical columns), and a numpy matrix with
    the same column layout in which ``invalid_frac`` of the categorical
    entries are shifted out of the fitted category range — that second
    output exercises invalid-category handling at prediction time only.
    """
    work = features.copy()  # never mutate the caller's matrix
    rng = np.random.default_rng(hash(random_state))  # allows RandomState too
    n_features = work.shape[1]
    # Rescale the leading columns to [0, 1] column-wise, then bin them into
    # roughly n_bins integer categories.  axis=0 reduces across rows, i.e.
    # within each column.
    cat_block = work[:, :n_categorical]
    cat_block = cat_block - cat_block.min(axis=0, keepdims=True)
    cat_block /= cat_block.max(axis=0, keepdims=True)
    n_bins = 100
    cat_block = (cat_block * n_bins).astype(int)
    # Shuffle which output column each source column lands in, so the
    # categorical and numerical columns end up interleaved.
    col_order = rng.choice(n_features, n_features, replace=False, shuffle=True)
    columns = {}
    for src in range(n_features):
        if src < n_categorical:
            vals = cat_block[:, src]
            columns[col_order[src]] = pd.Series(
                pd.Categorical(vals, categories=np.unique(vals))
            )
        else:
            columns[col_order[src]] = pd.Series(work[:, src])
    fit_df = pd.DataFrame(columns)
    # Poison a fraction of categorical cells (predict matrix only) by
    # shifting them past the largest fitted category.
    bad = rng.choice(
        a=cat_block.size,
        size=ceil(cat_block.size * invalid_frac),
        replace=False,
        shuffle=False,
    )
    cat_block.flat[bad] += n_bins
    # Assemble the numpy matrix and apply the same column permutation.
    predict_matrix = np.concatenate(
        [cat_block, work[:, n_categorical:]], axis=1
    )
    predict_matrix[:, col_order] = predict_matrix
    return fit_df, predict_matrix
@pytest.mark.parametrize("num_classes", [2, 5])
@pytest.mark.parametrize("n_categorical", [0, 5])
@pytest.mark.skip(reason="Causing CI to hang.")
# @pytest.mark.skipif(has_lightgbm() is False,
#                     reason="need to install lightgbm")
def test_lightgbm(tmp_path, num_classes, n_categorical):
    """FIL must reproduce LightGBM predictions for binary and multi-class
    models, with and without categorical features (including invalid
    categories injected only into the predict-time matrix)."""
    import lightgbm as lgb
    # Problem sizing: the categorical path uses all-informative features;
    # otherwise scale feature count with the number of classes.
    if n_categorical > 0:
        n_features = 10
        n_rows = 1000
        n_informative = n_features
    else:
        n_features = 10 if num_classes == 2 else 50
        n_rows = 500
        n_informative = "auto"
    X, y = simulate_data(
        n_rows,
        n_features,
        num_classes,
        n_informative=n_informative,
        random_state=43210,
        classification=True,
    )
    if n_categorical > 0:
        # X_fit is a DataFrame with Categorical columns (what LightGBM
        # fits on); X_predict is numpy with some invalid categories.
        X_fit, X_predict = to_categorical(
            X,
            n_categorical=n_categorical,
            invalid_frac=0.1,
            random_state=43210,
        )
    else:
        X_fit, X_predict = X, X
    train_data = lgb.Dataset(X_fit, label=y)
    num_round = 5
    model_path = str(os.path.join(tmp_path, "lgb.model"))
    if num_classes == 2:
        # Binary case: train through the low-level lgb.train API.
        param = {
            "objective": "binary",
            "metric": "binary_logloss",
            "num_class": 1,
        }
        bst = lgb.train(param, train_data, num_round)
        bst.save_model(model_path)
        fm = ForestInference.load(
            model_path,
            algo="TREE_REORG",
            output_class=True,
            model_type="lightgbm",
        )
        # binary classification
        gbm_proba = bst.predict(X_predict)
        fil_proba = fm.predict_proba(X_predict)[:, 1]
        gbm_preds = (gbm_proba > 0.5).astype(float)
        fil_preds = fm.predict(X_predict)
        assert array_equal(gbm_preds, fil_preds)
        np.testing.assert_allclose(
            gbm_proba, fil_proba, atol=proba_atol[num_classes > 2]
        )
    else:
        # multi-class classification, via the sklearn-style wrapper
        lgm = lgb.LGBMClassifier(
            objective="multiclass",
            boosting_type="gbdt",
            n_estimators=num_round,
        )
        lgm.fit(X_fit, y)
        lgm.booster_.save_model(model_path)
        lgm_preds = lgm.predict(X_predict).astype(int)
        fm = ForestInference.load(
            model_path,
            algo="TREE_REORG",
            output_class=True,
            model_type="lightgbm",
        )
        # Sanity-check the reference itself: wrapper predict == argmax of
        # raw booster probabilities.
        assert array_equal(
            lgm.booster_.predict(X_predict).argmax(axis=1), lgm_preds
        )
        assert array_equal(lgm_preds, fm.predict(X_predict))
        # lightgbm uses float64 thresholds, while FIL uses float32
        np.testing.assert_allclose(
            lgm.predict_proba(X_predict),
            fm.predict_proba(X_predict),
            atol=proba_atol[num_classes > 2],
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_kneighbors_classifier.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import array_equal
from cuml.internals.safe_imports import cpu_only_import
from sklearn.datasets import make_blobs
from sklearn.neighbors import KNeighborsClassifier as skKNN
from cuml.neighbors import KNeighborsClassifier as cuKNN
import cuml
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cp = gpu_only_import("cupy")
def _build_train_test_data(X, y, datatype, train_ratio=0.9):
train_selection = np.random.RandomState(42).choice(
[True, False],
X.shape[0],
replace=True,
p=[train_ratio, 1.0 - train_ratio],
)
X_train = X[train_selection]
y_train = y[train_selection]
X_test = X[~train_selection]
y_test = y[~train_selection]
if datatype == "dataframe":
X_train = cudf.DataFrame(X_train)
y_train = cudf.DataFrame(y_train.reshape(y_train.shape[0], 1))
X_test = cudf.DataFrame(X_test)
y_test = cudf.DataFrame(y_test.reshape(y_test.shape[0], 1))
return X_train, X_test, y_train, y_test
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("nrows", [1000, 20000])
@pytest.mark.parametrize("ncols", [50, 100])
@pytest.mark.parametrize("n_neighbors", [2, 5, 10])
@pytest.mark.parametrize("n_clusters", [2, 5, 10])
def test_neighborhood_predictions(
    nrows, ncols, n_neighbors, n_clusters, datatype
):
    """On tight, well-separated blobs KNN must recover the blob labels."""
    X, y = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        n_features=ncols,
        cluster_std=0.01,
        random_state=0,
    )
    X_train, X_test, y_train, y_test = _build_train_test_data(
        X.astype(np.float32), y, datatype
    )
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    if datatype == "dataframe":
        # cudf input must produce cudf output.
        assert isinstance(preds, cudf.Series)
        assert array_equal(
            preds.to_frame().astype(np.int32), y_test.astype(np.int32)
        )
    else:
        assert isinstance(preds, np.ndarray)
        assert array_equal(preds.astype(np.int32), y_test.astype(np.int32))
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("nrows", [1000, 20000])
@pytest.mark.parametrize("ncols", [50, 100])
@pytest.mark.parametrize("n_neighbors", [2, 5, 10])
@pytest.mark.parametrize("n_clusters", [2, 5, 10])
def test_score(nrows, ncols, n_neighbors, n_clusters, datatype):
    """score() on near-noiseless blobs should be essentially perfect."""
    X, y = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        n_features=ncols,
        random_state=0,
        cluster_std=0.01,
    )
    splits = _build_train_test_data(X.astype(np.float32), y, datatype)
    X_train, X_test, y_train, y_test = splits
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    # Allow a sliver of misclassification from boundary ties.
    assert model.score(X_test, y_test) >= (1.0 - 0.004)
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("nrows", [1000, 20000])
@pytest.mark.parametrize("ncols", [50, 100])
@pytest.mark.parametrize("n_neighbors", [2, 5, 10])
@pytest.mark.parametrize("n_clusters", [2, 5, 10])
def test_predict_proba(nrows, ncols, n_neighbors, n_clusters, datatype):
    """predict_proba must return per-class rows that sum to one and whose
    argmax reproduces the blob labels."""
    X, y = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        n_features=ncols,
        cluster_std=0.01,
        random_state=0,
    )
    X_train, X_test, y_train, y_test = _build_train_test_data(
        X.astype(np.float32), y, datatype
    )
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    proba = model.predict_proba(X_test)
    if datatype == "dataframe":
        assert isinstance(proba, cudf.DataFrame)
        proba = proba.to_numpy()
        y_test = y_test.to_numpy().reshape(y_test.shape[0])
    else:
        assert isinstance(proba, np.ndarray)
    predicted_labels = np.argmax(proba, axis=1)
    assert array_equal(
        predicted_labels.astype(np.int32), y_test.astype(np.int32)
    )
    # Probability rows must be normalized.
    assert array_equal(proba.sum(axis=1), np.ones(y_test.shape[0]))
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
def test_predict_proba_large_n_classes(datatype):
    """Probability rows must stay normalized even with 10k classes."""
    nrows = 10000
    ncols = 100
    n_neighbors = 10
    n_clusters = 10000
    X, y = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        n_features=ncols,
        cluster_std=0.01,
        random_state=0,
    )
    X_train, X_test, y_train, y_test = _build_train_test_data(
        X.astype(np.float32), y, datatype
    )
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    proba = model.predict_proba(X_test)
    if datatype == "dataframe":
        proba = proba.to_numpy()
    # Each row sums to ~1, so the grand total equals the test row count.
    assert np.rint(np.sum(proba)) == len(y_test)
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
def test_predict_large_n_classes(datatype):
    """Label prediction must stay exact with 1000 distinct classes."""
    nrows = 10000
    ncols = 100
    n_neighbors = 2
    n_clusters = 1000
    X, y = make_blobs(
        n_samples=nrows,
        centers=n_clusters,
        n_features=ncols,
        cluster_std=0.01,
        random_state=0,
    )
    X_train, X_test, y_train, y_test = _build_train_test_data(
        X.astype(np.float32), y, datatype
    )
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    if datatype == "dataframe":
        preds = preds.to_numpy()
        y_test = y_test.to_numpy().ravel()
    assert array_equal(preds.astype(np.int32), y_test.astype(np.int32))
@pytest.mark.parametrize("n_samples", [100])
@pytest.mark.parametrize("n_features", [40])
@pytest.mark.parametrize("n_neighbors", [4])
@pytest.mark.parametrize("n_query", [100])
def test_predict_non_gaussian(n_samples, n_features, n_neighbors, n_query):
    """cuml must agree exactly with scikit-learn on uniform (non-blob)
    data with random labels."""
    np.random.seed(123)
    # Draw order matters for reproducibility: train X, train y, test X.
    host_train_X = pd.DataFrame(
        np.random.uniform(0, 1, (n_samples, n_features))
    )
    host_train_y = pd.DataFrame(np.random.randint(0, 5, (n_samples, 1)))
    host_test_X = pd.DataFrame(np.random.uniform(0, 1, (n_query, n_features)))
    # Reference result from scikit-learn on the host copies.
    sk_model = skKNN(algorithm="brute", n_neighbors=n_neighbors, n_jobs=1)
    sk_model.fit(host_train_X, host_train_y.values.ravel())
    expected = sk_model.predict(host_test_X)
    # cuml result from device copies of the same data.
    cu_model = cuKNN(n_neighbors=n_neighbors)
    cu_model.fit(
        cudf.DataFrame.from_pandas(host_train_X),
        cudf.DataFrame.from_pandas(host_train_y),
    )
    with cuml.using_output_type("numpy"):
        got = cu_model.predict(cudf.DataFrame.from_pandas(host_test_X))
    assert np.array_equal(got, expected)
@pytest.mark.parametrize("n_classes", [2, 5])
@pytest.mark.parametrize("n_rows", [1000])
@pytest.mark.parametrize("n_cols", [25, 50])
@pytest.mark.parametrize("n_neighbors", [3, 5])
@pytest.mark.parametrize("datatype", ["numpy", "dataframe"])
def test_nonmonotonic_labels(n_classes, n_rows, n_cols, datatype, n_neighbors):
    """KNN must handle label sets that are not 0..k-1 (here 0, 5, 10, ...)."""
    X, y = make_blobs(
        n_samples=n_rows,
        centers=n_classes,
        n_features=n_cols,
        cluster_std=0.01,
        random_state=0,
    )
    # Remap labels i -> 5 * i so they stay valid but non-contiguous.
    remapped = np.arange(0, n_classes * 5, 5)
    for label in range(n_classes):
        y[y == label] = remapped[label]
    X_train, X_test, y_train, y_test = _build_train_test_data(
        X.astype(np.float32), y, datatype
    )
    model = cuKNN(n_neighbors=n_neighbors)
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    if datatype == "dataframe":
        assert isinstance(preds, cudf.Series)
        preds = preds.to_frame().to_numpy().reshape(preds.shape[0])
        y_test = y_test.to_numpy().reshape(y_test.shape[0])
    assert array_equal(preds.astype(np.int32), y_test.astype(np.int32))
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
def test_predict_multioutput(input_type, output_type):
    """Multi-target prediction must round-trip the labels and honor the
    requested output container type."""
    X = np.array([[0, 0, 1, 0], [1, 0, 1, 0]]).astype(np.float32)
    y = np.array([[15, 2], [5, 4]]).astype(np.int32)
    if input_type == "cudf":
        X, y = cudf.DataFrame(X), cudf.DataFrame(y)
    elif input_type == "cupy":
        X, y = cp.asarray(X), cp.asarray(y)
    model = cuKNN(n_neighbors=1, output_type=output_type)
    model.fit(X, y)
    preds = model.predict(X)
    container_for = {
        "cudf": cudf.DataFrame,
        "numpy": np.ndarray,
        "cupy": cp.ndarray,
    }
    assert isinstance(preds, container_for[output_type])
    # With k=1 on the training points, predictions echo the labels.
    assert array_equal(preds.astype(np.int32), y)
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
def test_predict_proba_multioutput(input_type, output_type):
    """predict_proba on multi-target data returns one probability matrix
    per output, each in the requested container type."""
    X = np.array([[0, 0, 1, 0], [1, 0, 1, 0]]).astype(np.float32)
    y = np.array([[15, 2], [5, 4]]).astype(np.int32)
    if input_type == "cudf":
        X, y = cudf.DataFrame(X), cudf.DataFrame(y)
    elif input_type == "cupy":
        X, y = cp.asarray(X), cp.asarray(y)
    # One-hot rows: with k=1 each training point votes only for its label.
    expected = (
        np.array([[0.0, 1.0], [1.0, 0.0]]).astype(np.float32),
        np.array([[1.0, 0.0], [0.0, 1.0]]).astype(np.float32),
    )
    model = cuKNN(n_neighbors=1, output_type=output_type)
    model.fit(X, y)
    proba = model.predict_proba(X)
    assert isinstance(proba, tuple)
    container_for = {
        "cudf": cudf.DataFrame,
        "numpy": np.ndarray,
        "cupy": cp.ndarray,
    }
    for matrix in proba:
        assert isinstance(matrix, container_for[output_type])
    assert array_equal(proba[0].astype(np.float32), expected[0])
    assert array_equal(proba[1].astype(np.float32), expected[1])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_kernel_ridge.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import as_type
from hypothesis.extra.numpy import arrays
from hypothesis import given, settings, assume, strategies as st
from sklearn.kernel_ridge import KernelRidge as sklKernelRidge
import inspect
import math
import pytest
from sklearn.metrics.pairwise import pairwise_kernels as skl_pairwise_kernels
from cuml.metrics import pairwise_kernels, PAIRWISE_KERNEL_FUNCTIONS
from cuml import KernelRidge as cuKernelRidge
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
linalg = gpu_only_import_from("cupy", "linalg")
np = cpu_only_import("numpy")
cuda = gpu_only_import_from("numba", "cuda")
def gradient_norm(model, X, y, K, sw=None):
    """Return the L2 norm of the kernel-ridge loss gradient at the fitted
    dual coefficients.

    For a convex problem this should be ~0 when the solver has converged.
    All inputs are promoted to float64 cupy arrays for a high-precision
    check.

    Parameters
    ----------
    model : fitted KernelRidge estimator exposing ``dual_coef_`` and a
        per-target ``alpha`` sequence
    X, y : training data and targets
    K : precomputed kernel matrix over X
    sw : optional sample weight — None (ones), a scalar, or per-row array
    """
    if sw is None:
        sw = cp.ones(X.shape[0])
    else:
        sw = cp.atleast_1d(cp.array(sw, dtype=np.float64))

    X = cp.array(X, dtype=np.float64)
    y = cp.array(y, dtype=np.float64)
    K = cp.array(K, dtype=np.float64)
    betas = cp.array(
        as_type("cupy", model.dual_coef_), dtype=np.float64
    ).reshape(y.shape)

    # Initialise to NaN so a zero-iteration loop below is detectable.
    # (np.nan: the np.NAN alias was removed in NumPy 2.0.)
    grads = cp.full_like(y, np.nan)
    for i, (beta, target, current_alpha) in enumerate(
        zip(betas.T, y.T, model.alpha)
    ):
        # Gradient of the weighted least-squares + ridge objective w.r.t.
        # the dual coefficients of target column i.  (The original also
        # zeroed grads[:, i] first — a dead store, since the next line
        # overwrites it.)
        grads[:, i] = -cp.dot(K * sw, target)
        grads[:, i] += cp.dot(cp.dot(K * sw, K), beta)
        grads[:, i] += cp.dot(K * current_alpha, beta)
    return linalg.norm(grads)
def test_pairwise_kernels_basic():
    """Smoke-test pairwise_kernels argument handling: builtin metrics,
    keyword validation/filtering, custom numba device kernels (valid and
    malformed), and the 'precomputed' passthrough."""
    X = np.zeros((4, 4))
    # standard kernel with no argument
    pairwise_kernels(X, metric="chi2")
    pairwise_kernels(X, metric="linear")
    # standard kernel with correct kwd argument
    pairwise_kernels(X, metric="chi2", gamma=1.0)
    # standard kernel with incorrect kwd argument
    with pytest.raises(
        ValueError, match="kwds contains arguments not used by kernel function"
    ):
        pairwise_kernels(X, metric="linear", wrong_parameter_name=1.0)
    # standard kernel with filtered kwd argument: filter_params=True must
    # silently drop the unknown keyword instead of raising
    pairwise_kernels(
        X, metric="rbf", filter_params=True, wrong_parameter_name=1.0
    )
    # incorrect function type: a plain python callable is rejected
    def non_numba_kernel(x, y):
        return x.dot(y)
    with pytest.raises(
        TypeError, match="Kernel function should be a numba device function."
    ):
        pairwise_kernels(X, metric=non_numba_kernel)
    # correct function type: a cuda device function with a defaulted kwarg
    @cuda.jit(device=True)
    def numba_kernel(x, y, special_argument=3.0):
        return 1 + 2
    pairwise_kernels(X, metric=numba_kernel)
    pairwise_kernels(X, metric=numba_kernel, special_argument=1.0)
    # malformed function: fewer than the mandatory (x, y) parameters
    @cuda.jit(device=True)
    def bad_numba_kernel(x):
        return 1 + 2
    with pytest.raises(
        ValueError, match="Expected at least two arguments to kernel function."
    ):
        pairwise_kernels(X, metric=bad_numba_kernel)
    # malformed function 2 - extra parameter with no default value
    @cuda.jit(device=True)
    def bad_numba_kernel2(x, y, z):
        return 1 + 2
    with pytest.raises(
        ValueError,
        match="Extra kernel parameters "
        "must be passed as keyword arguments.",
    ):
        pairwise_kernels(X, metric=bad_numba_kernel2)
    # Precomputed: the input matrix is returned as the kernel itself
    assert np.allclose(X, pairwise_kernels(X, metric="precomputed"))
@cuda.jit(device=True)
def custom_kernel(x, y, custom_arg=5.0):
    """RBF-like user-defined device kernel for the custom-metric tests
    (the +0.1 offset distinguishes it from the builtin rbf kernel)."""
    sum = 0.0
    for i in range(len(x)):
        sum += (x[i] - y[i]) ** 2
    return math.exp(-custom_arg * sum) + 0.1
# All builtin kernel names (sorted for deterministic test order) plus the
# custom numba device kernel above.
test_kernels = sorted(PAIRWISE_KERNEL_FUNCTIONS.keys()) + [custom_kernel]
@st.composite
def kernel_arg_strategy(draw):
    """Hypothesis strategy yielding (kernel, kwargs) pairs.

    Picks one of the test kernels and, for each optional parameter beyond
    the mandatory (x, y) pair, randomly decides whether to draw a value or
    leave the default in place.
    """
    kernel = draw(st.sampled_from(test_kernels))
    kernel_func = (
        PAIRWISE_KERNEL_FUNCTIONS[kernel]
        if isinstance(kernel, str)
        else kernel
    )
    # Inspect the function and generate some arguments
    # (numba-wrapped kernels expose the raw function as .py_func).
    py_func = (
        kernel_func.py_func if hasattr(kernel_func, "py_func") else kernel_func
    )
    # Skip the first two parameters: the (x, y) vector pair.
    all_func_kwargs = list(inspect.signature(py_func).parameters.values())[2:]
    param = {}
    for arg in all_func_kwargs:
        # 50% chance we generate this parameter or leave it as default
        if draw(st.booleans()):
            continue
        if isinstance(arg.default, float) or arg.default is None:
            param[arg.name] = draw(st.floats(0.0, 5.0))
        if isinstance(arg.default, int):
            param[arg.name] = draw(st.integers(1, 5))
    return (kernel, param)
@st.composite
def array_strategy(draw):
    """Hypothesis strategy producing an (X, Y) pair of kernel inputs.

    X is always generated; Y is either a second matrix with the same
    column count or None (the single-input pairwise path).  Both are
    converted to a randomly chosen container type.
    """
    X_m = draw(st.integers(1, 20))
    X_n = draw(st.integers(1, 10))
    dtype = draw(st.sampled_from([np.float64, np.float32]))
    X = draw(
        arrays(
            dtype=dtype,
            shape=(X_m, X_n),
            elements=st.floats(0, 5, width=32),
        )
    )
    if draw(st.booleans()):
        Y_m = draw(st.integers(1, 20))
        Y = draw(
            arrays(
                dtype=dtype,
                shape=(Y_m, X_n),
                elements=st.floats(0, 5, width=32),
            )
        )
    else:
        Y = None
    type = draw(st.sampled_from(["numpy", "cupy", "cudf", "pandas"]))
    if type == "cudf":
        # Single-row inputs are rejected for cudf — presumably a conversion
        # limitation; TODO confirm why 1-row frames are excluded.
        assume(X_m > 1)
        if Y is not None:
            assume(Y_m > 1)
    return as_type(type, X, Y)
@given(kernel_arg_strategy(), array_strategy())
@settings(deadline=None)
@pytest.mark.skip("https://github.com/rapidsai/cuml/issues/5177")
def test_pairwise_kernels(kernel_arg, XY):
    """Property test: cuml pairwise_kernels must agree with scikit-learn
    for every kernel/argument combination and input container."""
    X, Y = XY
    kernel, args = kernel_arg
    if kernel == "cosine":
        # this kernel is very unstable for both sklearn/cuml near zero,
        # so restrict inputs to be comfortably positive
        assume(as_type("numpy", X).min() > 0.1)
        if Y is not None:
            assume(as_type("numpy", Y).min() > 0.1)
    K = pairwise_kernels(X, Y, metric=kernel, **args)
    # sklearn needs the plain python function, not the numba device wrapper
    skl_kernel = kernel.py_func if hasattr(kernel, "py_func") else kernel
    K_sklearn = skl_pairwise_kernels(
        *as_type("numpy", X, Y), metric=skl_kernel, **args
    )
    assert np.allclose(as_type("numpy", K), K_sklearn, atol=0.01, rtol=0.01)
@st.composite
def estimator_array_strategy(draw):
    """Hypothesis strategy for KernelRidge estimator inputs.

    Produces (X, y, X_test, alpha, sample_weight, dtype) where y is an
    exact linear function of X (so a fit is always meaningful), alpha is
    one regularization value per target, and sample_weight is None, a
    scalar, or a per-row array.
    """
    X_m = draw(st.integers(5, 20))
    X_n = draw(st.integers(2, 10))
    dtype = draw(st.sampled_from([np.float64, np.float32]))
    rs = np.random.RandomState(draw(st.integers(1, 10)))
    X = rs.rand(X_m, X_n).astype(dtype)
    X_test = rs.rand(draw(st.integers(5, 20)), X_n).astype(dtype)
    n_targets = draw(st.integers(1, 3))
    # Random linear map defining the targets.
    a = draw(
        arrays(
            dtype=dtype,
            shape=(X_n, n_targets),
            elements=st.floats(0, 5, width=32),
        )
    )
    y = X.dot(a)
    # Lower bound keeps alpha strictly positive (float32-representable).
    alpha = draw(
        arrays(
            dtype=dtype,
            shape=(n_targets),
            elements=st.floats(0.0010000000474974513, 5, width=32),
        )
    )
    sample_weight = draw(
        st.one_of(
            [
                st.just(None),
                st.floats(0.1, 1.5),
                arrays(
                    dtype=np.float64, shape=X_m, elements=st.floats(0.1, 5)
                ),
            ]
        )
    )
    type = draw(st.sampled_from(["numpy", "cupy", "cudf", "pandas"]))
    return (*as_type(type, X, y, X_test, alpha, sample_weight), dtype)
@given(
    kernel_arg_strategy(),
    estimator_array_strategy(),
    st.floats(1.0, 5.0),
    st.integers(1, 5),
    st.floats(1.0, 5.0),
)
@settings(deadline=None)
def test_estimator(kernel_arg, arrays, gamma, degree, coef0):
    """Property test for cuml KernelRidge: the fit must actually converge
    (near-zero gradient norm, checked in float64 only) and predictions
    must agree with scikit-learn's KernelRidge."""
    kernel, args = kernel_arg
    X, y, X_test, alpha, sample_weight, dtype = arrays
    model = cuKernelRidge(
        kernel=kernel,
        alpha=alpha,
        gamma=gamma,
        degree=degree,
        coef0=coef0,
        kernel_params=args,
    )
    # sklearn gets the raw python function when the kernel is numba-wrapped
    skl_kernel = kernel.py_func if hasattr(kernel, "py_func") else kernel
    skl_model = sklKernelRidge(
        kernel=skl_kernel,
        alpha=as_type("numpy", alpha),
        gamma=gamma,
        degree=degree,
        coef0=coef0,
        kernel_params=args,
    )
    if kernel == "chi2" or kernel == "additive_chi2":
        # X must be positive
        X = (X - as_type("numpy", X).min()) + 1.0
    model.fit(X, y, sample_weight)
    pred = model.predict(X_test).get()
    if dtype == np.float64:
        # For a convex optimisation problem we should arrive at gradient norm 0
        # If the solution has converged correctly
        K = model._get_kernel(X)
        grad_norm = gradient_norm(
            model, *as_type("cupy", X, y, K, sample_weight)
        )
        assert grad_norm < 0.1
    try:
        skl_model.fit(*as_type("numpy", X, y, sample_weight))
    except np.linalg.LinAlgError:
        # sklearn can fail to fit multiclass models
        # with singular kernel matrices; discard such examples
        assume(False)
    skl_pred = skl_model.predict(as_type("numpy", X_test))
    assert np.allclose(
        as_type("numpy", pred), skl_pred, atol=1e-2, rtol=1e-2
    )
def test_precomputed():
    """Fitting on a precomputed kernel must match fitting on raw features."""
    rs = np.random.RandomState(23)
    X = rs.normal(size=(10, 10))
    y = rs.normal(size=10)
    K = pairwise_kernels(X)
    # Fit one model on the raw features and one on the precomputed kernel.
    direct = cuKernelRidge()
    direct.fit(X, y)
    precomputed = cuKernelRidge(kernel="precomputed")
    precomputed.fit(K, y)
    assert np.allclose(precomputed.dual_coef_, direct.dual_coef_)
    assert np.allclose(
        precomputed.predict(K), direct.predict(X), atol=1e-5, rtol=1e-5
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_prims.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import
from cuml.prims.label import make_monotonic
from cuml.prims.label import invert_labels
from cuml.prims.label import check_labels
from cuml.testing.utils import array_equal
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@pytest.mark.parametrize("arr_type", ["np", "cp"])
@pytest.mark.parametrize("dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("copy", [True, False])
def test_monotonic_validate_invert_labels(arr_type, dtype, copy):
    """End-to-end check of the label prims: make_monotonic remaps arbitrary
    labels onto 0..k-1, check_labels validates a candidate class set, and
    invert_labels restores the original values — honoring copy vs in-place
    semantics for device arrays."""
    arr = np.array([0, 15, 10, 50, 20, 50], dtype=dtype)
    original = arr.copy()
    if arr_type == "cp":
        arr = cp.asarray(arr, dtype=dtype)
    arr_orig = arr.copy()
    monotonic, mapped_classes = make_monotonic(arr, copy=copy)
    cp.cuda.Stream.null.synchronize()
    # Sorted unique labels {0, 10, 15, 20, 50} map to ranks {0, 1, 2, 3, 4}.
    assert array_equal(monotonic, np.array([0, 2, 1, 4, 3, 4]))
    # We only care about in-place updating if data is on device
    if arr_type == "cp":
        if copy:
            assert array_equal(arr_orig, arr)
        else:
            assert array_equal(arr, monotonic)
    # A class set that does not cover all labels must fail validation.
    wrong_classes = cp.asarray([0, 1, 2], dtype=dtype)
    val_labels = check_labels(monotonic, classes=wrong_classes)
    cp.cuda.Stream.null.synchronize()
    assert not val_labels
    correct_classes = cp.asarray([0, 1, 2, 3, 4], dtype=dtype)
    val_labels = check_labels(monotonic, classes=correct_classes)
    cp.cuda.Stream.null.synchronize()
    assert val_labels
    if arr_type == "cp":
        monotonic_copy = monotonic.copy()
    # Map the monotonic ranks back to the original label values.
    inverted = invert_labels(
        monotonic,
        classes=cp.asarray([0, 10, 15, 20, 50], dtype=dtype),
        copy=copy,
    )
    cp.cuda.Stream.null.synchronize()
    if arr_type == "cp":
        if copy:
            assert array_equal(monotonic_copy, monotonic)
        else:
            assert array_equal(monotonic, arr_orig)
    assert array_equal(inverted, original)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_nearest_neighbors.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
from cuml.common import has_scipy
import cuml
import sklearn
from cuml.internals.safe_imports import cpu_only_import_from
from numpy.testing import assert_array_equal, assert_allclose
from cuml.internals.safe_imports import cpu_only_import
import pytest
import math
from cuml.testing.utils import (
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml.neighbors import NearestNeighbors as cuKNN
from sklearn.neighbors import NearestNeighbors as skKNN
from cuml.datasets import make_blobs
from sklearn.metrics import pairwise_distances
from cuml.metrics import pairwise_distances as cuPW
from cuml.internals import logger
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
cudf = gpu_only_import("cudf")
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
isspmatrix_csr = cpu_only_import_from("scipy.sparse", "isspmatrix_csr")
pytestmark = pytest.mark.filterwarnings(
"ignore:((.|\n)*)#4020((.|\n)*):" "UserWarning:cuml[.*]"
)
def predict(neigh_ind, _y, n_neighbors):
    """Majority-vote classification from a neighbor-index matrix.

    Returns (labels, confidence) where confidence is the fraction of the
    n_neighbors votes won by the majority label in each row.
    """
    import scipy.stats as stats
    neigh_ind = neigh_ind.astype(np.int32)
    # scipy.stats.mode operates on host arrays only; copy off the device.
    if isinstance(_y, cp.ndarray):
        _y = _y.get()
    if isinstance(neigh_ind, cp.ndarray):
        neigh_ind = neigh_ind.get()
    winners, votes = stats.mode(_y[neigh_ind], axis=1)
    return winners.ravel(), votes.ravel() * 1.0 / n_neighbors
def valid_metrics(algo="brute", cuml_algo=None):
    """Sorted list of metrics supported by BOTH cuml and scikit-learn for
    *algo* (haversine is excluded — it has its own dedicated test)."""
    if cuml_algo is None:
        cuml_algo = algo
    sk_supported = sklearn.neighbors.VALID_METRICS[algo]
    common = sorted(
        m
        for m in cuml.neighbors.VALID_METRICS[cuml_algo]
        if m in sk_supported
    )
    common.remove("haversine")  # This is tested on its own
    return common
def valid_metrics_sparse(algo="brute", cuml_algo=None):
    """Sorted list of sparse metrics supported by both libraries.

    scikit-learn's sparse list omits some metrics cuml supports on sparse
    inputs (even metrics which are implicitly sparse, such as jaccard and
    dice, which assume boolean inputs), so the sklearn dense and sparse
    metric sets are unioned — a sparse input can always be densified for
    the scikit-learn reference.
    """
    if cuml_algo is None:
        cuml_algo = algo
    sk_supported = set(sklearn.neighbors.VALID_METRICS_SPARSE[algo])
    sk_supported.update(sklearn.neighbors.VALID_METRICS[algo])
    return sorted(
        m
        for m in cuml.neighbors.VALID_METRICS_SPARSE[cuml_algo]
        if m in sk_supported
    )
def metric_p_combinations():
    """Yield (metric, p) pairs: p=2 for every shared metric, plus p=3 for
    the Minkowski-family metrics where p actually matters."""
    for metric in valid_metrics():
        p_values = (2, 3) if metric in ("minkowski", "lp") else (2,)
        for p in p_values:
            yield metric, p
@pytest.mark.parametrize("datatype", ["dataframe", "numpy"])
@pytest.mark.parametrize("metric_p", metric_p_combinations())
@pytest.mark.parametrize("nrows", [1000, stress_param(10000)])
@pytest.mark.skipif(
    not has_scipy(),
    reason="Skipping test_self_neighboring" " because Scipy is missing",
)
def test_self_neighboring(datatype, metric_p, nrows):
    """Test that searches using an indexed vector itself return sensible
    results for that vector: each point's nearest neighbor must be itself
    at distance ~0.

    For L2-derived metrics, this specifically exercises the slow
    high-precision mode (two_pass_precision) used to correct for
    approximation errors in L2 computation during NN searches.
    """
    # NOTE: the redundant runtime `if not has_scipy(): pytest.skip(...)`
    # was removed — the skipif decorator above already enforces it.
    ncols = 1000
    n_clusters = 10
    n_neighbors = 3
    # NOTE(review): `p` is unpacked but never passed to cuKNN, so the p=3
    # parametrization currently exercises nothing extra — confirm intent.
    metric, p = metric_p
    X, y = make_blobs(
        n_samples=nrows, centers=n_clusters, n_features=ncols, random_state=0
    )
    if datatype == "dataframe":
        X = cudf.DataFrame(X)
    knn_cu = cuKNN(metric=metric, n_neighbors=n_neighbors)
    knn_cu.fit(X)
    neigh_dist, neigh_ind = knn_cu.kneighbors(
        X,
        n_neighbors=n_neighbors,
        return_distance=True,
        two_pass_precision=True,
    )
    if datatype == "dataframe":
        assert isinstance(neigh_ind, cudf.DataFrame)
        neigh_ind = neigh_ind.to_numpy()
        neigh_dist = neigh_dist.to_numpy()
    else:
        assert isinstance(neigh_ind, cp.ndarray)
        neigh_ind = neigh_ind.get()
        neigh_dist = neigh_dist.get()
    # First column = nearest neighbor of each query; must be the query
    # itself, at (numerically) zero distance.
    neigh_ind = neigh_ind[:, 0]
    neigh_dist = neigh_dist[:, 0]
    assert_array_equal(
        neigh_ind,
        np.arange(0, neigh_dist.shape[0]),
    )
    assert_allclose(
        neigh_dist,
        np.zeros(neigh_dist.shape, dtype=neigh_dist.dtype),
        atol=1e-4,
    )
@pytest.mark.parametrize(
    "nrows,ncols,n_neighbors,n_clusters",
    [
        (500, 128, 10, 2),
        (4301, 128, 10, 2),
        (1000, 128, 50, 2),
        (2233, 1024, 2, 10),
        stress_param(10000, 1024, 50, 10),
    ],
)
@pytest.mark.parametrize(
    "algo,datatype",
    [("brute", "dataframe"), ("ivfflat", "numpy"), ("ivfpq", "dataframe")],
)
def test_neighborhood_predictions(
    nrows, ncols, n_neighbors, n_clusters, datatype, algo
):
    """Majority vote over the returned neighbors must recover blob labels
    for each index algorithm and input container."""
    if not has_scipy():
        pytest.skip(
            "Skipping test_neighborhood_predictions because "
            + "Scipy is missing"
        )
    X, y = make_blobs(
        n_samples=nrows, centers=n_clusters, n_features=ncols, random_state=0
    )
    if datatype == "dataframe":
        X = cudf.DataFrame(X)
    index = cuKNN(algorithm=algo)
    index.fit(X)
    neigh_ind = index.kneighbors(
        X, n_neighbors=n_neighbors, return_distance=False
    )
    # Release the index eagerly; the stress configurations are large.
    del index
    gc.collect()
    if datatype == "dataframe":
        assert isinstance(neigh_ind, cudf.DataFrame)
        neigh_ind = neigh_ind.to_numpy()
    else:
        assert isinstance(neigh_ind, cp.ndarray)
    labels, _ = predict(neigh_ind, y, n_neighbors)
    assert array_equal(labels, y)
@pytest.mark.parametrize(
    "nlist,nrows,ncols,n_neighbors",
    [
        (4, 10000, 128, 8),
        (8, 100, 512, 8),
        (8, 10000, 512, 16),
    ],
)
def test_ivfflat_pred(nrows, ncols, n_neighbors, nlist):
    """IVF-Flat ANN search must be accurate enough to recover blob labels."""
    X, y = make_blobs(
        n_samples=nrows, centers=5, n_features=ncols, random_state=0
    )
    index = cuKNN(
        algorithm="ivfflat",
        algo_params={"nlist": nlist, "nprobe": nlist * 0.5},
    )
    index.fit(X)
    neigh_ind = index.kneighbors(
        X, n_neighbors=n_neighbors, return_distance=False
    )
    del index
    gc.collect()
    labels, _ = predict(neigh_ind, y, n_neighbors)
    assert array_equal(labels, y)
@pytest.mark.parametrize("nlist", [8])
@pytest.mark.parametrize("M", [32])
@pytest.mark.parametrize("n_bits", [4])
@pytest.mark.parametrize("usePrecomputedTables", [False, True])
@pytest.mark.parametrize("nrows", [4000])
@pytest.mark.parametrize("ncols", [64, 512])
@pytest.mark.parametrize("n_neighbors", [8])
def test_ivfpq_pred(
    nrows, ncols, n_neighbors, nlist, M, n_bits, usePrecomputedTables
):
    """IVF-PQ ANN search must stay accurate enough to recover blob labels."""
    if ncols == 512:
        pytest.skip("https://github.com/rapidsai/cuml/issues/5603")
    X, y = make_blobs(
        n_samples=nrows, centers=5, n_features=ncols, random_state=0
    )
    index = cuKNN(
        algorithm="ivfpq",
        algo_params={
            "nlist": nlist,
            "nprobe": int(nlist * 0.2),
            "M": M,
            "n_bits": n_bits,
            "usePrecomputedTables": usePrecomputedTables,
        },
    )
    index.fit(X)
    neigh_ind = index.kneighbors(
        X, n_neighbors=n_neighbors, return_distance=False
    )
    del index
    gc.collect()
    labels, _ = predict(neigh_ind, y, n_neighbors)
    assert array_equal(labels, y)
@pytest.mark.parametrize(
    "algo, metric",
    [
        (algo, metric)
        for algo in ["brute", "ivfflat", "ivfpq"]
        for metric in [
            "l2",
            "euclidean",
            "sqeuclidean",
            "cosine",
            "correlation",
        ]
        if metric in cuml.neighbors.VALID_METRICS[algo]
    ],
)
def test_ann_distances_metrics(algo, metric):
    """Compare kneighbors distances against sklearn for every valid
    (algorithm, metric) combination.

    Bug fix: the function previously ended with
    ``return array_equal(sk_dist, cu_dist)``. pytest ignores return
    values, so the comparison was silently skipped; it is now asserted.
    """
    X, y = make_blobs(n_samples=500, centers=2, n_features=128, random_state=0)
    cu_knn = cuKNN(algorithm=algo, metric=metric)
    cu_knn.fit(X)
    cu_dist, cu_ind = cu_knn.kneighbors(
        X, n_neighbors=10, return_distance=True
    )
    # Free the GPU index before running the sklearn reference.
    del cu_knn
    gc.collect()
    X = X.get()
    sk_knn = skKNN(metric=metric)
    sk_knn.fit(X)
    sk_dist, sk_ind = sk_knn.kneighbors(
        X, n_neighbors=10, return_distance=True
    )
    # Was `return`, which pytest discards; assert so failures surface.
    assert array_equal(sk_dist, cu_dist)
def test_return_dists():
    """kneighbors returns a bare index array or a (distances, indices) tuple
    depending on return_distance."""
    n_samples, n_feats, k = 50, 50, 5
    X, y = make_blobs(n_samples=n_samples, n_features=n_feats, random_state=0)

    model = cuKNN()
    model.fit(X)

    # Without distances: a single (n_samples, k) array of neighbor indices.
    indices_only = model.kneighbors(X, k, return_distance=False)
    assert not isinstance(indices_only, tuple)
    assert indices_only.shape == (n_samples, k)

    # With distances: a two-element tuple.
    pair = model.kneighbors(X, k, return_distance=True)
    assert isinstance(pair, tuple)
    assert len(pair) == 2
@pytest.mark.parametrize("input_type", ["dataframe", "ndarray"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(70000)]
)
@pytest.mark.parametrize("n_feats", [unit_param(3), stress_param(1000)])
@pytest.mark.parametrize("k", [unit_param(3), stress_param(50)])
@pytest.mark.parametrize("metric", valid_metrics())
def test_knn_separate_index_search(input_type, nrows, n_feats, k, metric):
    """Fit on one slice of the data and query a disjoint slice; distances
    and indices must agree with sklearn's reference implementation."""
    X, _ = make_blobs(n_samples=nrows, n_features=n_feats, random_state=0)
    X_index = X[:100]
    X_search = X[101:]
    p = 5  # Testing 5-norm of the minkowski metric only
    knn_sk = skKNN(metric=metric, p=p)  # Testing
    knn_sk.fit(X_index.get())
    D_sk, I_sk = knn_sk.kneighbors(X_search.get(), k)
    # Keep the original cupy index data for the output-type revert check.
    X_orig = X_index
    if input_type == "dataframe":
        X_index = cudf.DataFrame(X_index)
        X_search = cudf.DataFrame(X_search)
    knn_cu = cuKNN(metric=metric, p=p)
    knn_cu.fit(X_index)
    D_cuml, I_cuml = knn_cu.kneighbors(X_search, k)
    if input_type == "dataframe":
        # Output container type must mirror the input type.
        assert isinstance(D_cuml, cudf.DataFrame)
        assert isinstance(I_cuml, cudf.DataFrame)
        D_cuml_np = D_cuml.to_numpy()
        I_cuml_np = I_cuml.to_numpy()
    else:
        assert isinstance(D_cuml, cp.ndarray)
        assert isinstance(I_cuml, cp.ndarray)
        D_cuml_np = D_cuml.get()
        I_cuml_np = I_cuml.get()
    with cuml.using_output_type("numpy"):
        # Assert the cuml model was properly reverted
        np.testing.assert_allclose(
            knn_cu._fit_X, X_orig.get(), atol=1e-3, rtol=1e-3
        )
    if metric == "braycurtis":
        diff = D_cuml_np - D_sk
        # Braycurtis has a few differences, but this is computed by FAISS.
        # So long as the indices all match below, the small discrepancy
        # should be okay.
        assert len(diff[diff > 1e-2]) / X_search.shape[0] < 0.06
    else:
        np.testing.assert_allclose(D_cuml_np, D_sk, atol=1e-3, rtol=1e-3)
    # NOTE(review): `.all() == .all()` compares two truthiness scalars, not
    # the index arrays element-wise — this is a much weaker check than it
    # looks; consider np.array_equal if ties are deterministic.
    assert I_cuml_np.all() == I_sk.all()
@pytest.mark.parametrize("input_type", ["dataframe", "ndarray"])
@pytest.mark.parametrize("nrows", [unit_param(500), stress_param(70000)])
@pytest.mark.parametrize("n_feats", [unit_param(3), stress_param(1000)])
@pytest.mark.parametrize(
    "k", [unit_param(3), unit_param(35), stress_param(50)]
)
@pytest.mark.parametrize("metric", valid_metrics())
def test_knn_x_none(input_type, nrows, n_feats, k, metric):
    """kneighbors(X=None) must query the training data itself (excluding
    each point as its own neighbor), matching sklearn's behavior."""
    X, _ = make_blobs(n_samples=nrows, n_features=n_feats, random_state=0)
    p = 5  # Testing 5-norm of the minkowski metric only
    knn_sk = skKNN(metric=metric, p=p)  # Testing
    knn_sk.fit(X.get())
    D_sk, I_sk = knn_sk.kneighbors(X=None, n_neighbors=k)
    # Keep the original cupy data for the revert check below.
    X_orig = X
    if input_type == "dataframe":
        X = cudf.DataFrame(X)
    knn_cu = cuKNN(metric=metric, p=p, output_type="numpy")
    knn_cu.fit(X)
    D_cuml, I_cuml = knn_cu.kneighbors(X=None, n_neighbors=k)
    # Assert the cuml model was properly reverted
    cp.testing.assert_allclose(knn_cu._fit_X, X_orig, atol=1e-5, rtol=1e-4)
    # Allow a max relative diff of 10% and absolute diff of 1%
    cp.testing.assert_allclose(D_cuml, D_sk, atol=5e-2, rtol=1e-1)
    # NOTE(review): `.all() == .all()` compares truthiness scalars, not the
    # arrays element-wise — a much weaker check than it appears.
    assert I_cuml.all() == I_sk.all()
def test_knn_fit_twice():
    """
    Refitting must not fail: NearestNeighbors has to free the Cython
    allocated heap memory of the first index when fit() is invoked a
    second time.
    """
    X, y = make_blobs(n_samples=1000, n_features=50, random_state=0)

    model = cuKNN()
    for _ in range(2):
        model.fit(X)
    model.kneighbors(X, 5)

    del model
@pytest.mark.parametrize("input_type", ["ndarray"])
@pytest.mark.parametrize("nrows", [unit_param(500), stress_param(70000)])
@pytest.mark.parametrize("n_feats", [unit_param(20), stress_param(1000)])
def test_nn_downcast_fails(input_type, nrows, n_feats):
    """fit() with convert_dtype=False must reject float64 input instead of
    silently downcasting it."""
    from sklearn.datasets import make_blobs as skmb

    # make_blobs here yields float64 host data, which cannot be consumed
    # without an explicit dtype conversion.
    X, y = skmb(n_samples=nrows, n_features=n_feats, random_state=0)
    knn_cu = cuKNN()
    # NOTE(review): input_type is only parametrized as "ndarray", so this
    # dataframe branch is currently dead code.
    if input_type == "dataframe":
        X_pd = pd.DataFrame({"fea%d" % i: X[0:, i] for i in range(X.shape[1])})
        X_cudf = cudf.DataFrame.from_pandas(X_pd)
        knn_cu.fit(X_cudf, convert_dtype=True)
    with pytest.raises(Exception):
        knn_cu.fit(X, convert_dtype=False)
    # Test fit() fails when downcast corrupted data
    X = np.array([[np.finfo(np.float32).max]], dtype=np.float64)
    knn_cu = cuKNN()
    with pytest.raises(Exception):
        knn_cu.fit(X, convert_dtype=False)
@pytest.mark.parametrize(
    "input_type,mode,output_type,as_instance",
    [
        ("dataframe", "connectivity", "cupy", True),
        ("dataframe", "connectivity", None, True),
        ("dataframe", "distance", "numpy", True),
        ("ndarray", "connectivity", "cupy", False),
        ("ndarray", "distance", "numpy", False),
    ],
)
@pytest.mark.parametrize("nrows", [unit_param(100), stress_param(1000)])
@pytest.mark.parametrize("n_feats", [unit_param(5), stress_param(100)])
@pytest.mark.parametrize("p", [2, 5])
@pytest.mark.parametrize(
    "k", [unit_param(3), unit_param(35), stress_param(30)]
)
@pytest.mark.parametrize("metric", valid_metrics())
def test_knn_graph(
    input_type, mode, output_type, as_instance, nrows, n_feats, p, k, metric
):
    """kneighbors_graph (free function and method forms) must produce CSR
    output with the same shape/structure as sklearn's, in the requested
    output container type."""
    X, _ = make_blobs(n_samples=nrows, n_features=n_feats, random_state=0)
    # Build the sklearn reference graph on host data.
    if as_instance:
        sparse_sk = sklearn.neighbors.kneighbors_graph(
            X.get(), k, mode=mode, metric=metric, p=p, include_self="auto"
        )
    else:
        knn_sk = skKNN(metric=metric, p=p)
        knn_sk.fit(X.get())
        sparse_sk = knn_sk.kneighbors_graph(X.get(), k, mode=mode)
    if input_type == "dataframe":
        X = cudf.DataFrame(X)
    with cuml.using_output_type(output_type):
        if as_instance:
            sparse_cu = cuml.neighbors.kneighbors_graph(
                X, k, mode=mode, metric=metric, p=p, include_self="auto"
            )
        else:
            knn_cu = cuKNN(metric=metric, p=p)
            knn_cu.fit(X)
            sparse_cu = knn_cu.kneighbors_graph(X, k, mode=mode)
    # Structural comparison only: CSR component shapes and dense shape.
    assert np.array_equal(sparse_sk.data.shape, sparse_cu.data.shape)
    assert np.array_equal(sparse_sk.indices.shape, sparse_cu.indices.shape)
    assert np.array_equal(sparse_sk.indptr.shape, sparse_cu.indptr.shape)
    assert np.array_equal(sparse_sk.toarray().shape, sparse_cu.toarray().shape)
    # output_type None defaults to the device (cupy) representation here.
    if output_type == "cupy" or output_type is None:
        assert cupyx.scipy.sparse.isspmatrix_csr(sparse_cu)
    else:
        assert isspmatrix_csr(sparse_cu)
@pytest.mark.parametrize(
    "distance_dims", [("euclidean", 2), ("euclidean", 3), ("haversine", 2)]
)
@pytest.mark.parametrize("n_neighbors", [4, 25])
@pytest.mark.parametrize("nrows", [unit_param(10000), stress_param(70000)])
def test_nearest_neighbors_rbc(distance_dims, n_neighbors, nrows):
    """Random-ball-cover (rbc) results must match exact brute-force results
    for euclidean (2D/3D) and haversine (2D) metrics."""
    distance, dims = distance_dims
    X, y = make_blobs(
        n_samples=nrows,
        centers=25,
        shuffle=True,
        n_features=dims,
        cluster_std=3.0,
        random_state=42,
    )
    knn_cu = cuKNN(metric=distance, algorithm="rbc")
    knn_cu.fit(X)
    # Query only half the rows against the full index.
    query_rows = int(nrows / 2)
    rbc_d, rbc_i = knn_cu.kneighbors(
        X[:query_rows, :], n_neighbors=n_neighbors
    )
    if distance == "euclidean":
        # Need to use unexpanded euclidean distance
        pw_dists = cuPW(X, metric="l2")
        brute_i = cp.argsort(pw_dists, axis=1)[:query_rows, :n_neighbors]
        brute_d = cp.sort(pw_dists, axis=1)[:query_rows, :n_neighbors]
    else:
        knn_cu_brute = cuKNN(metric=distance, algorithm="brute")
        knn_cu_brute.fit(X)
        brute_d, brute_i = knn_cu_brute.kneighbors(
            X[:query_rows, :], n_neighbors=n_neighbors
        )
    # Distances must match exactly.
    assert len(brute_d[brute_d != rbc_d]) == 0
    # All the distances match so allow a couple mismatched indices
    # through from potential non-determinism in exact matching
    # distances
    assert len(brute_i[brute_i != rbc_i]) <= 3
@pytest.mark.parametrize("metric", valid_metrics_sparse())
@pytest.mark.parametrize(
    "nrows,ncols,density,n_neighbors,batch_size_index,batch_size_query",
    [
        (1, 10, 0.8, 1, 10, 10),
        (10, 35, 0.8, 4, 10, 20000),
        (40, 35, 0.5, 4, 20000, 10),
        (35, 35, 0.8, 4, 20000, 20000),
    ],
)
@pytest.mark.filterwarnings("ignore:(.*)converted(.*)::")
def test_nearest_neighbors_sparse(
    metric,
    nrows,
    ncols,
    density,
    n_neighbors,
    batch_size_index,
    batch_size_query,
):
    """Sparse (CSR) brute-force kneighbors must match sklearn across
    metrics and index/query batch sizes."""
    # Cannot request more neighbors than there are index rows.
    if nrows == 1 and n_neighbors > 1:
        return
    a = cupyx.scipy.sparse.random(
        nrows, ncols, format="csr", density=density, random_state=35
    )
    b = cupyx.scipy.sparse.random(
        nrows, ncols, format="csr", density=density, random_state=38
    )
    if metric == "jaccard":
        # Jaccard is defined on binary data; binarize then restore float32.
        a = a.astype("bool").astype("float32")
        b = b.astype("bool").astype("float32")
    logger.set_level(logger.level_debug)
    nn = cuKNN(
        metric=metric,
        p=2.0,
        n_neighbors=n_neighbors,
        algorithm="brute",
        output_type="numpy",
        verbose=logger.level_debug,
        algo_params={
            "batch_size_index": batch_size_index,
            "batch_size_query": batch_size_query,
        },
    )
    nn.fit(a)
    cuD, cuI = nn.kneighbors(b)
    # sklearn cannot handle every metric on sparse input; densify for those.
    if metric not in sklearn.neighbors.VALID_METRICS_SPARSE["brute"]:
        a = a.todense()
        b = b.todense()
    sknn = skKNN(
        metric=metric,
        p=2.0,
        n_neighbors=n_neighbors,
        algorithm="brute",
        n_jobs=-1,
    )
    sk_X = a.get()
    sknn.fit(sk_X)
    skD, skI = sknn.kneighbors(b.get())
    # For some reason, this will occasionally fail w/ a single
    # mismatched element in CI. Allowing the single mismatch for now.
    cp.testing.assert_allclose(cuD, skD, atol=1e-5, rtol=1e-5)
    # Jaccard & Chebyshev have a high potential for mismatched indices
    # due to duplicate distances. We can ignore the indices in this case.
    if metric not in ["jaccard", "chebyshev"]:
        # The actual neighbors returned in the presence of duplicate distances
        # is non-deterministic. If we got to this point, the distances all
        # match between cuml and sklearn. We set a reasonable threshold
        # (.5% in this case) to allow differences from non-determinism.
        diffs = abs(cuI - skI)
        assert (len(diffs[diffs > 0]) / len(np.ravel(skI))) <= 0.005
@pytest.mark.parametrize("n_neighbors", [1, 5, 6])
def test_haversine(n_neighbors):
    """Haversine kneighbors distances on real (lat, lon) points must match
    a pairwise-distance reference sorted per row."""
    # (latitude, longitude) pairs in degrees.
    hoboken_nj = [40.745255, -74.034775]
    port_hueneme_ca = [34.155834, -119.202789]
    auburn_ny = [42.933334, -76.566666]
    league_city_tx = [29.499722, -95.089722]
    tallahassee_fl = [30.455000, -84.253334]
    aurora_il = [41.763889, -88.29001]
    data = np.array(
        [
            hoboken_nj,
            port_hueneme_ca,
            auburn_ny,
            league_city_tx,
            tallahassee_fl,
            aurora_il,
        ]
    )
    # Haversine expects radians, not degrees.
    data = data * math.pi / 180
    pw_dists = pairwise_distances(data, metric="haversine")
    cunn = cuKNN(
        metric="haversine", n_neighbors=n_neighbors, algorithm="brute"
    )
    dists, inds = cunn.fit(data).kneighbors(data)
    argsort = np.argsort(pw_dists, axis=1)
    # NOTE(review): only distances are verified; `inds` is never checked.
    for i in range(pw_dists.shape[0]):
        cpu_ordered = pw_dists[i, argsort[i]]
        cp.testing.assert_allclose(
            cpu_ordered[:n_neighbors], dists[i], atol=1e-4, rtol=1e-4
        )
@pytest.mark.xfail(raises=RuntimeError)
def test_haversine_fails_high_dimensions():
    """Haversine is only defined for two dimensions; a 3-feature input is
    expected to raise RuntimeError."""
    points = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]])
    model = cuKNN(metric="haversine", n_neighbors=2, algorithm="brute")
    model.fit(points).kneighbors(points)
def test_n_jobs_parameter_passthrough():
    """set_params must tolerate sklearn's n_jobs parameter without raising."""
    model = cuKNN()
    model.set_params(n_jobs=12)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_trustworthiness.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.internals.safe_imports import cpu_only_import
import platform
import pytest
from sklearn.manifold import trustworthiness as sklearn_trustworthiness
from cuml.metrics import trustworthiness as cuml_trustworthiness
from sklearn.datasets import make_blobs
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
IS_ARM = platform.processor() == "aarch64"
if not IS_ARM:
from umap import UMAP
@pytest.mark.parametrize("input_type", ["ndarray", "dataframe"])
@pytest.mark.parametrize("n_samples", [150, 500])
@pytest.mark.parametrize("n_features", [10, 100])
@pytest.mark.parametrize("n_components", [2, 8])
@pytest.mark.parametrize("batch_size", [128, 1024])
@pytest.mark.skipif(
    IS_ARM, reason="https://github.com/rapidsai/cuml/issues/5441"
)
def test_trustworthiness(
    input_type, n_samples, n_features, n_components, batch_size
):
    """cuml trustworthiness of a UMAP embedding must match sklearn's score
    for both ndarray and cudf inputs and multiple batch sizes."""
    centers = round(n_samples * 0.4)
    X, y = make_blobs(
        n_samples=n_samples,
        centers=centers,
        n_features=n_features,
        random_state=32,
    )
    # Embed with the host UMAP implementation, then score the embedding.
    X_embedded = UMAP(
        n_components=n_components, random_state=32
    ).fit_transform(X)
    X = X.astype(np.float32)
    X_embedded = X_embedded.astype(np.float32)
    sk_score = sklearn_trustworthiness(X, X_embedded)
    if input_type == "dataframe":
        X = cudf.DataFrame(X)
        X_embedded = cudf.DataFrame(X_embedded)
    cu_score = cuml_trustworthiness(X, X_embedded, batch_size=batch_size)
    # Allow a small numeric drift between the two implementations.
    assert abs(cu_score - sk_score) <= 1e-3
def test_trustworthiness_invalid_input():
    """Requesting more neighbors than the data supports must raise ValueError."""
    data, _ = make_blobs(n_samples=10, centers=1, n_features=2, random_state=32)
    with pytest.raises(ValueError):
        cuml_trustworthiness(data, data, n_neighbors=50)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_kernel_density.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.testing.utils import as_type
from sklearn.model_selection import GridSearchCV
import pytest
from hypothesis.extra.numpy import arrays
from hypothesis import given, settings, assume, strategies as st
from cuml.neighbors import KernelDensity, VALID_KERNELS, logsumexp_kernel
from cuml.common.exceptions import NotFittedError
from sklearn.metrics import pairwise_distances as skl_pairwise_distances
from sklearn.neighbors._ball_tree import kernel_norm
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
# not in log probability space
def compute_kernel_naive(Y, X, kernel, metric, h, sample_weight):
    """Naive CPU reference for kernel density values.

    Returns probabilities in linear (not log) space: the normalized,
    sample-weighted average of the kernel applied to pairwise distances
    between Y (query) and X (training) under `metric` with bandwidth `h`.
    Raises ValueError for an unrecognized kernel name.
    """
    d = skl_pairwise_distances(Y, X, metric)
    norm = kernel_norm(h, X.shape[1], kernel)
    # Kernel profile as a function of distance d and bandwidth h.
    profiles = {
        "gaussian": lambda: np.exp(-0.5 * (d * d) / (h * h)),
        "tophat": lambda: d < h,
        "epanechnikov": lambda: (1.0 - (d * d) / (h * h)) * (d < h),
        "exponential": lambda: np.exp(-d / h),
        "linear": lambda: (1 - d / h) * (d < h),
        "cosine": lambda: np.cos(0.5 * np.pi * d / h) * (d < h),
    }
    if kernel not in profiles:
        raise ValueError("kernel not recognized")
    k = profiles[kernel]()
    return norm * np.average(k, -1, sample_weight)
@st.composite
def array_strategy(draw):
    """Hypothesis strategy yielding an (X, X_test, sample_weight) triple.

    Arrays are generated as numpy and converted to a drawn container type
    (numpy/cupy/cudf/pandas); sample_weight may be None.
    """
    n = draw(st.integers(1, 100))
    m = draw(st.integers(1, 100))
    dtype = draw(st.sampled_from([np.float64, np.float32]))
    # Fixed seed: hypothesis controls variability through the drawn shapes.
    rng = np.random.RandomState(34)
    X = rng.randn(n, m).astype(dtype)
    n_test = draw(st.integers(1, 100))
    X_test = rng.randn(n_test, m).astype(dtype)
    if draw(st.booleans()):
        sample_weight = None
    else:
        sample_weight = draw(
            arrays(
                dtype=np.float64,
                shape=n,
                elements=st.floats(0.1, 2.0),
            )
        )
    type = draw(st.sampled_from(["numpy", "cupy", "cudf", "pandas"]))
    if type == "cupy":
        # Presumably single-row arrays are problematic for the cupy
        # conversion path — TODO confirm why n, n_test > 1 is required.
        assume(n > 1 and n_test > 1)
    return as_type(type, X, X_test, sample_weight)
# Distance metrics drawn by the KDE property test below.
metrics_strategy = st.sampled_from(
    ["euclidean", "manhattan", "chebyshev", "minkowski", "hamming", "canberra"]
)
@settings(deadline=None)
@given(
    array_strategy(),
    st.sampled_from(VALID_KERNELS),
    metrics_strategy,
    st.floats(0.2, 10),
)
def test_kernel_density(arrays, kernel, metric, bandwidth):
    """Property test: KernelDensity scores match the naive CPU reference,
    and sample() draws points consistent with the fitted kernel."""
    X, X_test, sample_weight = arrays
    # Host copies for the numpy reference computation.
    X_np, X_test_np, sample_weight_np = as_type("numpy", *arrays)
    if kernel == "cosine":
        # cosine is numerically unstable at high dimensions
        # for both cuml and sklearn
        assume(X.shape[1] <= 20)
    kde = KernelDensity(kernel=kernel, metric=metric, bandwidth=bandwidth).fit(
        X, sample_weight=sample_weight
    )
    cuml_prob = kde.score_samples(X)
    cuml_prob_test = kde.score_samples(X_test)
    # Only compare against the reference in double precision.
    if X_np.dtype == np.float64:
        ref_prob = compute_kernel_naive(
            X_np, X_np, kernel, metric, bandwidth, sample_weight_np
        )
        ref_prob_test = compute_kernel_naive(
            X_test_np, X_np, kernel, metric, bandwidth, sample_weight_np
        )
        tol = 1e-3
        # score_samples returns log probabilities; exponentiate to compare.
        assert np.allclose(
            np.exp(as_type("numpy", cuml_prob)),
            ref_prob,
            rtol=tol,
            atol=tol,
            equal_nan=True,
        )
        assert np.allclose(
            np.exp(as_type("numpy", cuml_prob_test)),
            ref_prob_test,
            rtol=tol,
            atol=tol,
            equal_nan=True,
        )
    if kernel in ["gaussian", "tophat"] and metric == "euclidean":
        sample = kde.sample(100, random_state=32).get()
        nearest = skl_pairwise_distances(sample, X_np, metric=metric)
        nearest = nearest.min(axis=1)
        if kernel == "gaussian":
            from scipy.stats import chi

            # The euclidean distance of each sample from its cluster
            # follows a chi distribution (not squared) with DoF=dimension
            # and scale = bandwidth
            # Fail the test if the largest observed distance
            # is vanishingly unlikely
            assert chi.sf(nearest.max(), X.shape[1], scale=bandwidth) > 1e-8
        elif kernel == "tophat":
            # Tophat samples are strictly within one bandwidth of the data.
            assert np.all(nearest <= bandwidth)
    else:
        with pytest.raises(
            NotImplementedError,
            match=r"Only \['gaussian', 'tophat'\] kernels,"
            " and the euclidean metric are supported.",
        ):
            kde.sample(100)
def test_logaddexp():
    """logsumexp_kernel must agree with numpy's logaddexp row-reduction
    on both degenerate (all-zero) and general inputs."""
    out = np.zeros(2)
    for rows in ([[0.0, 0.0], [0.0, 0.0]], [[3.0, 1.0], [0.2, 0.7]]):
        X = np.array(rows)
        logsumexp_kernel.forall(out.size)(X, out)
        assert np.allclose(out, np.logaddexp.reduce(X, axis=1))
def test_metric_params():
    """metric_params must be forwarded: minkowski with p=1 and p=2 should
    produce different density estimates."""
    X = np.array([[0.0, 1.0], [2.0, 0.5]])
    kde_l1 = KernelDensity(metric="minkowski", metric_params={"p": 1.0}).fit(X)
    kde_l2 = KernelDensity(metric="minkowski", metric_params={"p": 2.0}).fit(X)
    assert not np.allclose(kde_l1.score_samples(X), kde_l2.score_samples(X))
def test_grid_search():
    """KernelDensity must be usable as an estimator inside GridSearchCV."""
    rng = np.random.RandomState(3)
    data = rng.normal(size=(30, 5))
    search = GridSearchCV(
        KernelDensity(), {"bandwidth": np.logspace(-1, 1, 20)}
    )
    search.fit(data)
def test_not_fitted():
    """score, sample, and score_samples must raise NotFittedError before fit."""
    rng = np.random.RandomState(3)
    kde = KernelDensity()
    data = rng.normal(size=(30, 5))
    for method in (kde.score, kde.sample, kde.score_samples):
        with pytest.raises(NotFittedError):
            method(data)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_pca.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.exceptions import NotFittedError
from sklearn.datasets import make_blobs
from sklearn.decomposition import PCA as skPCA
from sklearn.datasets import make_multilabel_classification
from sklearn import datasets
from cuml.testing.utils import (
get_handle,
array_equal,
unit_param,
quality_param,
stress_param,
)
from cuml import PCA as cuPCA
import pytest
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("input_type", ["ndarray"])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "name", [unit_param(None), quality_param("digits"), stress_param("blobs")]
)
def test_pca_fit(datatype, input_type, name, use_handle):
    """Fitted PCA attributes (singular values, components, explained
    variance) must match sklearn's, up to component sign."""
    if name == "blobs":
        pytest.skip("fails when using blobs dataset")
        X, y = make_blobs(n_samples=500000, n_features=1000, random_state=0)
    elif name == "digits":
        X, _ = datasets.load_digits(return_X_y=True)
    else:
        X, Y = make_multilabel_classification(
            n_samples=500,
            n_classes=2,
            n_labels=1,
            allow_unlabeled=False,
            random_state=1,
        )
    skpca = skPCA(n_components=2)
    skpca.fit(X)
    handle, stream = get_handle(use_handle)
    cupca = cuPCA(n_components=2, handle=handle)
    cupca.fit(X)
    # Ensure async GPU work is finished before reading attributes.
    cupca.handle.sync()
    for attr in [
        "singular_values_",
        "components_",
        "explained_variance_",
        "explained_variance_ratio_",
    ]:
        # Component vectors are sign-ambiguous; compare magnitudes only.
        with_sign = False if attr in ["components_"] else True
        print(attr)
        print(getattr(cupca, attr))
        print(getattr(skpca, attr))
        cuml_res = getattr(cupca, attr)
        skl_res = getattr(skpca, attr)
        assert array_equal(cuml_res, skl_res, 1e-3, with_sign=with_sign)
@pytest.mark.parametrize("n_samples", [200])
@pytest.mark.parametrize("n_features", [100, 300])
@pytest.mark.parametrize("sparse", [True, False])
def test_pca_defaults(n_samples, n_features, sparse):
    """Default-constructed PCA must pick the same solver and output shape
    as sklearn's default PCA, for dense and sparse input."""
    # FIXME: Disable the case True-300-200 due to flaky test
    if sparse and n_features == 300 and n_samples == 200:
        pytest.xfail("Skipping the case True-300-200 due to flaky test")
    if sparse:
        X = cupyx.scipy.sparse.random(
            n_samples,
            n_features,
            density=0.03,
            dtype=cp.float32,
            random_state=10,
        )
    else:
        X, Y = make_multilabel_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_classes=2,
            n_labels=1,
            random_state=1,
        )
    cupca = cuPCA()
    cupca.fit(X)
    curesult = cupca.transform(X)
    cupca.handle.sync()
    if sparse:
        # sklearn's PCA needs a dense host array.
        X = X.toarray().get()
    skpca = skPCA()
    skpca.fit(X)
    skresult = skpca.transform(X)
    assert skpca.svd_solver == cupca.svd_solver
    assert cupca.components_.shape[0] == skpca.components_.shape[0]
    assert curesult.shape == skresult.shape
    assert array_equal(curesult, skresult, 1e-3, with_sign=False)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("input_type", ["ndarray"])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "name", [unit_param(None), quality_param("iris"), stress_param("blobs")]
)
def test_pca_fit_then_transform(datatype, input_type, name, use_handle):
    """fit() followed by transform() must match sklearn's projection
    (up to sign); the blobs stress case only checks that it runs."""
    blobs_n_samples = 500000
    # Scale the stress dataset down on smaller GPUs if allowed.
    if name == "blobs" and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            blobs_n_samples = int(blobs_n_samples * pytest.max_gpu_memory / 32)
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    if name == "blobs":
        X, y = make_blobs(
            n_samples=blobs_n_samples, n_features=1000, random_state=0
        )
    elif name == "iris":
        iris = datasets.load_iris()
        X = iris.data
    else:
        X, Y = make_multilabel_classification(
            n_samples=500,
            n_classes=2,
            n_labels=1,
            allow_unlabeled=False,
            random_state=1,
        )
    # Skip the sklearn reference on the huge blobs dataset.
    if name != "blobs":
        skpca = skPCA(n_components=2)
        skpca.fit(X)
        Xskpca = skpca.transform(X)
    handle, stream = get_handle(use_handle)
    cupca = cuPCA(n_components=2, handle=handle)
    cupca.fit(X)
    X_cupca = cupca.transform(X)
    cupca.handle.sync()
    if name != "blobs":
        assert array_equal(X_cupca, Xskpca, 1e-3, with_sign=False)
        assert Xskpca.shape[0] == X_cupca.shape[0]
        assert Xskpca.shape[1] == X_cupca.shape[1]
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("input_type", ["ndarray"])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "name", [unit_param(None), quality_param("iris"), stress_param("blobs")]
)
def test_pca_fit_transform(datatype, input_type, name, use_handle):
    """Single-call fit_transform() must match sklearn's projection
    (up to sign); the blobs stress case only checks that it runs."""
    blobs_n_samples = 500000
    # Scale the stress dataset down on smaller GPUs if allowed.
    if name == "blobs" and pytest.max_gpu_memory < 32:
        if pytest.adapt_stress_test:
            blobs_n_samples = int(blobs_n_samples * pytest.max_gpu_memory / 32)
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    if name == "blobs":
        X, y = make_blobs(
            n_samples=blobs_n_samples, n_features=1000, random_state=0
        )
    elif name == "iris":
        iris = datasets.load_iris()
        X = iris.data
    else:
        X, Y = make_multilabel_classification(
            n_samples=500,
            n_classes=2,
            n_labels=1,
            allow_unlabeled=False,
            random_state=1,
        )
    # Skip the sklearn reference on the huge blobs dataset.
    if name != "blobs":
        skpca = skPCA(n_components=2)
        Xskpca = skpca.fit_transform(X)
    handle, stream = get_handle(use_handle)
    cupca = cuPCA(n_components=2, handle=handle)
    X_cupca = cupca.fit_transform(X)
    cupca.handle.sync()
    if name != "blobs":
        assert array_equal(X_cupca, Xskpca, 1e-3, with_sign=False)
        assert Xskpca.shape[0] == X_cupca.shape[0]
        assert Xskpca.shape[1] == X_cupca.shape[1]
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("input_type", ["ndarray"])
@pytest.mark.parametrize("use_handle", [True, False])
@pytest.mark.parametrize(
    "name", [unit_param(None), quality_param("quality"), stress_param("blobs")]
)
@pytest.mark.parametrize("nrows", [unit_param(500), quality_param(5000)])
def test_pca_inverse_transform(datatype, input_type, name, use_handle, nrows):
    """inverse_transform(transform(X)) must reconstruct X when the dropped
    component carries (almost) no variance."""
    if name == "blobs":
        pytest.skip("fails when using blobs dataset")
        X, y = make_blobs(n_samples=500000, n_features=1000, random_state=0)
    else:
        rng = np.random.RandomState(0)
        n, p = nrows, 3
        X = rng.randn(n, p)  # spherical data
        X[:, 1] *= 0.00001  # make middle component relatively small
        X += [3, 4, 2]  # make a large mean
    handle, stream = get_handle(use_handle)
    cupca = cuPCA(n_components=2, handle=handle)
    X_cupca = cupca.fit_transform(X)
    input_gdf = cupca.inverse_transform(X_cupca)
    cupca.handle.sync()
    # With only the tiny middle component dropped, reconstruction is
    # nearly lossless.
    assert array_equal(input_gdf, X, 5e-5, with_sign=True)
@pytest.mark.parametrize("nrows", [4000, 7000])
@pytest.mark.parametrize("ncols", [2500, stress_param(20000)])
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("return_sparse", [True, False])
@pytest.mark.parametrize("cupy_input", [True, False])
def test_sparse_pca_inputs(nrows, ncols, whiten, return_sparse, cupy_input):
    """PCA with all components kept must round-trip sparse input through
    transform/inverse_transform, for device and host sparse matrices."""
    # Scale the stress dataset down on smaller GPUs if allowed.
    if ncols == 20000 and pytest.max_gpu_memory < 48:
        if pytest.adapt_stress_test:
            ncols = int(ncols * pytest.max_gpu_memory / 48)
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    if return_sparse:
        pytest.skip("Loss of information in converting to cupy sparse csr")
    X = cupyx.scipy.sparse.random(
        nrows, ncols, density=0.07, dtype=cp.float32, random_state=10
    )
    # Host-side sparse input exercises the conversion path.
    if not (cupy_input):
        X = X.get()
    p_sparse = cuPCA(n_components=ncols, whiten=whiten)
    p_sparse.fit(X)
    t_sparse = p_sparse.transform(X)
    i_sparse = p_sparse.inverse_transform(
        t_sparse, return_sparse=return_sparse
    )
    if return_sparse:
        assert isinstance(i_sparse, cupyx.scipy.sparse.csr_matrix)
        assert array_equal(
            i_sparse.todense(), X.todense(), 1e-1, with_sign=True
        )
    else:
        if cupy_input:
            assert isinstance(i_sparse, cp.ndarray)
        assert array_equal(i_sparse, X.todense(), 1e-1, with_sign=True)
def test_exceptions():
    """transform and inverse_transform on an unfitted PCA must raise
    NotFittedError."""
    data = cp.random.random((10, 10))
    with pytest.raises(NotFittedError):
        cuPCA().transform(data)
    with pytest.raises(NotFittedError):
        cuPCA().inverse_transform(data)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_ordinal_encoder.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
import numpy as np
import pandas as pd
import pytest
from sklearn.preprocessing import OrdinalEncoder as skOrdinalEncoder
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.preprocessing import OrdinalEncoder
DataFrame = gpu_only_import_from("cudf", "DataFrame")
@pytest.fixture
def test_sample():
    """Small mixed-dtype cudf frame: one string column, one integer column."""
    return DataFrame({"cat": ["M", "F", "F"], "num": [1, 3, 2]})
def test_ordinal_encoder_df(test_sample) -> None:
    """Encoding of a cudf DataFrame: codes are consistent across calls and
    inverse_transform round-trips exactly."""
    X = test_sample
    enc = OrdinalEncoder()
    enc.fit(X)
    Xt = enc.transform(X)
    # A second frame using only already-seen categories must get the same
    # codes as the fitting frame.
    X_1 = DataFrame({"cat": ["F", "F"], "num": [1, 2]})
    Xt_1 = enc.transform(X_1)
    assert Xt_1.iloc[0, 0] == Xt.iloc[1, 0]
    assert Xt_1.iloc[1, 0] == Xt.iloc[1, 0]
    assert Xt_1.iloc[0, 1] == Xt.iloc[0, 1]
    assert Xt_1.iloc[1, 1] == Xt.iloc[2, 1]
    inv_Xt = enc.inverse_transform(Xt)
    inv_Xt_1 = enc.inverse_transform(Xt_1)
    assert inv_Xt.equals(X)
    assert inv_Xt_1.equals(X_1)
    assert enc.n_features_in_ == 2
def test_ordinal_encoder_array() -> None:
    """Encoding of a device array: codes are consistent across calls and
    inverse_transform round-trips numerically."""
    X = DataFrame({"A": [4, 1, 1], "B": [1, 3, 2]}).values
    enc = OrdinalEncoder()
    enc.fit(X)
    Xt = enc.transform(X)
    # A second array using only already-seen values must get the same codes.
    X_1 = DataFrame({"A": [1, 1], "B": [1, 2]}).values
    Xt_1 = enc.transform(X_1)
    assert Xt_1[0, 0] == Xt[1, 0]
    assert Xt_1[1, 0] == Xt[1, 0]
    assert Xt_1[0, 1] == Xt[0, 1]
    assert Xt_1[1, 1] == Xt[2, 1]
    inv_Xt = enc.inverse_transform(Xt)
    inv_Xt_1 = enc.inverse_transform(Xt_1)
    cp.testing.assert_allclose(X, inv_Xt)
    cp.testing.assert_allclose(X_1, inv_Xt_1)
    assert enc.n_features_in_ == 2
def test_ordinal_array() -> None:
    """Encoding a cupy array must match sklearn's OrdinalEncoder on the
    equivalent host array."""
    data = cp.arange(32).reshape(16, 2)
    got = OrdinalEncoder().fit(data).transform(data)

    host_data = cp.asnumpy(data)
    expected = skOrdinalEncoder().fit(host_data).transform(host_data)

    cp.testing.assert_allclose(got, expected)
def test_output_type(test_sample) -> None:
    """transform() must honor the requested output_type container, and
    default to mirroring the input type."""
    X = test_sample
    enc = OrdinalEncoder(output_type="cupy").fit(X)
    assert isinstance(enc.transform(X), cp.ndarray)
    enc = OrdinalEncoder(output_type="cudf").fit(X)
    assert isinstance(enc.transform(X), DataFrame)
    enc = OrdinalEncoder(output_type="pandas").fit(X)
    assert isinstance(enc.transform(X), pd.DataFrame)
    enc = OrdinalEncoder(output_type="numpy").fit(X)
    assert isinstance(enc.transform(X), np.ndarray)
    # output_type == "input"
    enc = OrdinalEncoder().fit(X)
    assert isinstance(enc.transform(X), DataFrame)
def test_feature_names(test_sample) -> None:
    """A fitted encoder records the input column names, in order."""
    encoder = OrdinalEncoder().fit(test_sample)
    assert encoder.feature_names_in_ == ["cat", "num"]
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_handle_unknown(as_array: bool) -> None:
    """Unknown categories: handle_unknown='error' must raise on transform,
    'ignore' must encode the unseen value as NaN.

    Bug fix: the array branch previously called ``np.isnan(...)`` and
    discarded the result, so it asserted nothing; it now asserts.
    """
    X = DataFrame({"data": [0, 1]})
    Y = DataFrame({"data": [3, 1]})  # 3 was never seen during fit
    if as_array:
        X = X.values
        Y = Y.values

    enc = OrdinalEncoder(handle_unknown="error")
    enc = enc.fit(X)
    with pytest.raises(KeyError):
        enc.transform(Y)

    enc = OrdinalEncoder(handle_unknown="ignore")
    enc = enc.fit(X)
    encoded = enc.transform(Y)
    if as_array:
        # Was `np.isnan(encoded[0, 0])` with the result dropped.
        assert np.isnan(encoded[0, 0])
    else:
        assert pd.isna(encoded.iloc[0, 0])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_strategies.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.array import CumlArray
from cuml.internals.safe_imports import cpu_only_import, gpu_only_import
from cuml.testing.strategies import (
create_cuml_array_input,
cuml_array_dtypes,
cuml_array_input_types,
cuml_array_inputs,
cuml_array_orders,
cuml_array_shapes,
regression_datasets,
split_datasets,
standard_classification_datasets,
standard_datasets,
standard_regression_datasets,
)
from cuml.testing.utils import normalized_shape, series_squeezed_shape
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import floating_dtypes, integer_dtypes
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
@given(
    input_type=cuml_array_input_types(),
    dtype=cuml_array_dtypes(),
    shape=cuml_array_shapes(),
    order=cuml_array_orders(),
)
@settings(deadline=None)
def test_cuml_array_input_elements(input_type, dtype, shape, order):
    """Arrays produced by create_cuml_array_input honor the requested
    dtype, shape, and memory layout for every input container type."""
    input_array = create_cuml_array_input(input_type, dtype, shape, order)
    assert input_array.dtype == dtype
    if input_type == "series":
        # Series inputs squeeze away singleton dimensions.
        assert input_array.shape == series_squeezed_shape(shape)
    else:
        assert input_array.shape == normalized_shape(shape)
    layout_flag = f"{order}_CONTIGUOUS"
    if input_type == "series":
        # A series exposes layout flags on its underlying values array.
        assert input_array.values.flags[layout_flag]
    else:
        assert input_array.flags[layout_flag]
@given(cuml_array_inputs())
@settings(deadline=None)
def test_cuml_array_inputs(array_input):
    """CumlArray round-trips arbitrary generated inputs to both cupy
    and numpy outputs without altering the values (NaNs included)."""
    array = CumlArray(data=array_input)
    assert cp.array_equal(
        cp.asarray(array_input), array.to_output("cupy"), equal_nan=True
    )
    assert np.array_equal(
        cp.asnumpy(array_input), array.to_output("numpy"), equal_nan=True
    )
@given(standard_datasets())
def test_standard_datasets_default(dataset):
    """Default standard_datasets() strategy stays within its documented
    size bounds (at most 200 samples / 200 features)."""
    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    # y may be a scalar (ndim 0), a vector, or a column matrix.
    assert (y.ndim == 0) or (y.ndim in (1, 2) and y.shape[0] <= 200)
@given(
    standard_datasets(
        dtypes=floating_dtypes(sizes=(32,)),
        n_samples=st.integers(10, 20),
        n_features=st.integers(30, 40),
    )
)
def test_standard_datasets(dataset):
    """standard_datasets() respects explicitly supplied sample and
    feature count ranges."""
    X, y = dataset
    assert X.ndim == 2
    assert 10 <= X.shape[0] <= 20
    assert 30 <= X.shape[1] <= 40
    assert 10 <= y.shape[0] <= 20
    assert y.shape[1] == 1
@given(split_datasets(standard_datasets()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_split_datasets(split_dataset):
    """split_datasets() yields train/test partitions whose combined
    sizes match the underlying dataset bounds."""
    X_train, X_test, y_train, y_test = split_dataset
    assert X_train.ndim == X_test.ndim == 2
    # Both splits keep the same feature count.
    assert X_train.shape[1] == X_test.shape[1]
    assert 2 <= (len(X_train) + len(X_test)) <= 200
    assert y_train.ndim == y_test.ndim
    assert y_train.ndim in (0, 1, 2)
    assert (y_train.ndim == 0) or (2 <= (len(y_train) + len(y_test)) <= 200)
@given(standard_regression_datasets())
def test_standard_regression_datasets_default(dataset):
    """Default regression strategy bounds sizes and keeps X/y dtypes
    consistent with one another."""
    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    assert (y.ndim == 0) or (y.ndim in (1, 2) and y.shape[0] <= 200)
    assert X.dtype == y.dtype
@given(
    standard_regression_datasets(
        dtypes=floating_dtypes(sizes=64),
        n_samples=st.integers(min_value=2, max_value=200),
        n_features=st.integers(min_value=1, max_value=200),
        n_informative=st.just(10),
        random_state=0,
    )
)
def test_standard_regression_datasets(dataset):
    """With a fixed random_state the strategy reproduces exactly what
    sklearn's make_regression generates for the same shape."""
    from sklearn.datasets import make_regression

    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    assert (y.ndim == 1 and y.shape[0] <= 200) or y.ndim == 0
    assert X.dtype == y.dtype
    # Regenerate the same dataset directly via sklearn and compare.
    X_cmp, y_cmp = make_regression(
        n_samples=X.shape[0], n_features=X.shape[1], random_state=0
    )
    assert X.dtype.type == X_cmp.dtype.type
    assert X.ndim == X_cmp.ndim
    assert X.shape == X_cmp.shape
    assert y.dtype.type == y_cmp.dtype.type
    assert y.ndim == y_cmp.ndim
    assert y.shape == y_cmp.shape
    assert (X == X_cmp).all()
    assert (y == y_cmp).all()
@given(regression_datasets())
def test_regression_datasets(dataset):
    """General regression_datasets() strategy stays within the standard
    size bounds."""
    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    assert (y.ndim == 0) or (y.ndim in (1, 2) and y.shape[0] <= 200)
@given(split_datasets(regression_datasets()))
@settings(
    suppress_health_check=[HealthCheck.too_slow, HealthCheck.data_too_large]
)
def test_split_regression_datasets(split_dataset):
    """Splitting a regression dataset preserves dimensionality and the
    overall sample-count bound."""
    X_train, X_test, y_train, y_test = split_dataset
    assert X_train.ndim == X_test.ndim == 2
    assert y_train.ndim == y_test.ndim
    assert y_train.ndim in (0, 1, 2)
    assert 2 <= (len(X_train) + len(X_test)) <= 200
@given(standard_classification_datasets())
def test_standard_classification_datasets_default(dataset):
    """Default classification strategy bounds sizes and yields floating
    features with integer labels."""
    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    assert (y.ndim == 0) or (y.ndim in (1, 2) and y.shape[0] <= 200)
    assert np.issubdtype(X.dtype, np.floating)
    assert np.issubdtype(y.dtype, np.integer)
@given(
    standard_classification_datasets(
        dtypes=floating_dtypes(sizes=64),
        n_samples=st.integers(min_value=2, max_value=200),
        n_features=st.integers(min_value=4, max_value=200),
        n_informative=st.just(2),
        n_redundant=st.just(2),
        random_state=0,
        labels_dtypes=integer_dtypes(sizes=64),
    )
)
def test_standard_classification_datasets(dataset):
    """With a fixed random_state the strategy reproduces exactly what
    sklearn's make_classification generates for the same shape."""
    from sklearn.datasets import make_classification

    X, y = dataset
    assert X.ndim == 2
    assert X.shape[0] <= 200
    assert X.shape[1] <= 200
    assert (y.ndim == 1 and y.shape[0] <= 200) or y.ndim == 0
    assert np.issubdtype(X.dtype, np.floating)
    assert np.issubdtype(y.dtype, np.integer)
    # Regenerate the same dataset directly via sklearn and compare.
    X_cmp, y_cmp = make_classification(
        n_samples=X.shape[0],
        n_features=X.shape[1],
        random_state=0,
    )
    assert X.dtype.type == X_cmp.dtype.type
    assert X.ndim == X_cmp.ndim
    assert X.shape == X_cmp.shape
    assert y.dtype.type == y_cmp.dtype.type
    assert y.ndim == y_cmp.ndim
    assert y.shape == y_cmp.shape
    assert (X == X_cmp).all()
    assert (y == y_cmp).all()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_utils.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hypothesis.extra.numpy import (
array_shapes,
arrays,
floating_dtypes,
integer_dtypes,
)
from hypothesis import target
from hypothesis import strategies as st
from hypothesis import given, note
from cuml.testing.utils import array_equal, assert_array_equal
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@given(
    arrays(
        dtype=st.one_of(floating_dtypes(), integer_dtypes()),
        shape=array_shapes(),
    ),
    st.floats(1e-4, 1.0),
)
@pytest.mark.filterwarnings("ignore:invalid value encountered in subtract")
def test_array_equal_same_array(array, tol):
    """array_equal must always report an array as equal to itself,
    regardless of the tolerance."""
    equal = array_equal(array, array, tol)
    note(equal)
    difference = equal.compute_difference()
    if np.isfinite(difference):
        # Guide hypothesis toward larger differences for better coverage.
        target(float(np.abs(difference)))
    # The result object must be truthy and compare equal to True.
    assert equal
    assert equal == True  # noqa: E712
    assert bool(equal) is True
    assert_array_equal(array, array, tol)
@given(
    arrays=array_shapes().flatmap(
        lambda shape: st.tuples(
            arrays(
                dtype=st.one_of(floating_dtypes(), integer_dtypes()),
                shape=shape,
            ),
            arrays(
                dtype=st.one_of(floating_dtypes(), integer_dtypes()),
                shape=shape,
            ),
        )
    ),
    unit_tol=st.floats(1e-4, 1.0),
    with_sign=st.booleans(),
)
@pytest.mark.filterwarnings("ignore:invalid value encountered in subtract")
def test_array_equal_two_arrays(arrays, unit_tol, with_sign):
    """array_equal on two independent arrays agrees with a manually
    computed tolerance check and is symmetric in its arguments."""
    array_a, array_b = arrays
    equal = array_equal(array_a, array_b, unit_tol, with_sign=with_sign)
    # The comparison must be symmetric: equal(a, b) == equal(b, a).
    equal_flipped = array_equal(
        array_b, array_a, unit_tol, with_sign=with_sign
    )
    note(equal)
    difference = equal.compute_difference()
    # Reference check: with with_sign=False, compare absolute values.
    a, b = (
        (array_a, array_b) if with_sign else (np.abs(array_a), np.abs(array_b))
    )
    # Equal iff the fraction of elements beyond unit_tol is below 1e-4.
    expect_equal = np.sum(np.abs(a - b) > unit_tol) / array_a.size < 1e-4
    if expect_equal:
        assert_array_equal(array_a, array_b, unit_tol, with_sign=with_sign)
        # The result must behave like True under every comparison form.
        assert equal
        assert bool(equal) is True
        assert equal == True  # noqa: E712
        assert True == equal  # noqa: E712
        assert equal != False  # noqa: E712
        assert False != equal  # noqa: E712
        assert equal_flipped
        assert bool(equal_flipped) is True
        assert equal_flipped == True  # noqa: E712
        assert True == equal_flipped  # noqa: E712
        assert equal_flipped != False  # noqa: E712
        assert False != equal_flipped  # noqa: E712
    else:
        with pytest.raises(AssertionError):
            assert_array_equal(array_a, array_b, unit_tol, with_sign=with_sign)
        # The result must behave like False under every comparison form.
        assert not equal
        assert bool(equal) is not True
        assert equal != True  # noqa: E712
        assert True != equal  # noqa: E712
        assert equal == False  # noqa: E712
        assert False == equal  # noqa: E712
        assert difference != 0
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_stationarity.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TODO: update!
from cuml.tsa import stationarity
from statsmodels.tsa import stattools
import warnings
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
###############################################################################
# Helpers and reference functions #
###############################################################################
def prepare_data(y, d, D, s):
    """Apply simple and seasonal differencing to a batch of series.

    Parameters
    ----------
    y : ndarray of shape (n_obs, batch_size)
        Batched time series, one series per column.
    d : int
        Simple differencing order (d + D <= 2 supported).
    D : int
        Seasonal differencing order.
    s : int
        Seasonal period.

    Returns
    -------
    ndarray of shape (n_obs - d - s * D, batch_size)
        The differenced series.
    """
    n_obs, batch_size = y.shape
    # First differencing pass: seasonal lag when D is set, otherwise an
    # ordinary lag-1 difference when d is set, otherwise no-op.
    first_lag = s if D else (1 if d else 0)
    # A second lag-1 pass is needed only when d + D == 2.
    second_lag = 1 if d + D == 2 else 0
    out = np.zeros((n_obs - d - s * D, batch_size), dtype=y.dtype)
    for col in range(batch_size):
        series = y[:, col]
        if first_lag:
            series = series[first_lag:] - series[:-first_lag]
        if second_lag:
            series = series[second_lag:] - series[:-second_lag]
        out[:, col] = series
    return out
def kpss_ref(y):
    """Wrapper around statsmodels' KPSS test.

    Runs the test independently on each column of ``y`` and returns a
    boolean array where True means the series is considered stationary
    (p-value above the 0.05 significance threshold).
    """
    batch_size = y.shape[1]
    test_results = np.zeros(batch_size, dtype=bool)
    for i in range(batch_size):
        # statsmodels emits interpolation warnings for extreme
        # p-values; silence them since only the threshold matters.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            _, pval, *_ = stattools.kpss(
                y[:, i], regression="c", nlags="legacy"
            )
        test_results[i] = pval > 0.05
    return test_results
# Dispatch tables mapping a test-type name to the cuML implementation
# under test and its reference counterpart, respectively.
cuml_tests = {
    "kpss": stationarity.kpss_test,
}
ref_tests = {
    "kpss": kpss_ref,
}
###############################################################################
# Tests #
###############################################################################
@pytest.mark.parametrize("batch_size", [25, 100])
@pytest.mark.parametrize("n_obs", [50, 130])
@pytest.mark.parametrize("dD", [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1)])
@pytest.mark.parametrize("s", [4, 12])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("test_type", ["kpss"])
def test_stationarity(batch_size, n_obs, dD, s, dtype, test_type):
    """Test stationarity tests against a reference implementation.

    Synthesizes a batch of series mixing linear/quadratic trends and
    seasonal patterns, then checks that the cuML test (which differences
    internally) agrees element-wise with the reference test run on
    pre-differenced data.
    """
    d, D = dD

    # Fix seed for stability
    np.random.seed(42)

    # Generate seasonal patterns with random walks
    pattern = np.zeros((s, batch_size))
    pattern[0, :] = np.random.uniform(-1.0, 1.0, batch_size)
    for i in range(1, s):
        pattern[i, :] = pattern[i - 1, :] + np.random.uniform(
            -1.0, 1.0, batch_size
        )
    pattern /= s

    # Decide for each series whether to include a linear and/or quadratic
    # trend and/or a seasonal pattern
    linear_mask = np.random.choice([False, True], batch_size, p=[0.50, 0.50])
    quadra_mask = np.random.choice([False, True], batch_size, p=[0.75, 0.25])
    season_mask = np.random.choice([False, True], batch_size, p=[0.75, 0.25])

    # Generate coefficients for the linear, quadratic and seasonal terms,
    # taking into account the masks computed above and avoiding coefficients
    # close to zero
    linear_coef = (
        linear_mask
        * np.random.choice([-1.0, 1.0], batch_size)
        * np.random.uniform(0.2, 2.0, batch_size)
    )
    quadra_coef = (
        quadra_mask
        * np.random.choice([-1.0, 1.0], batch_size)
        * np.random.uniform(0.2, 2.0, batch_size)
    )
    season_coef = season_mask * np.random.uniform(0.4, 0.8, batch_size)

    # Generate the data: trend + seasonal pattern + Gaussian noise
    x = np.linspace(0.0, 2.0, n_obs)
    offset = np.random.uniform(-2.0, 2.0, batch_size)
    y = np.zeros((n_obs, batch_size), order="F", dtype=dtype)
    for i in range(n_obs):
        y[i, :] = (
            offset[:]
            + linear_coef[:] * x[i]
            + quadra_coef[:] * x[i] * x[i]
            + season_coef[:] * pattern[i % s, :]
            + np.random.normal(0.0, 0.2, batch_size)
        )

    # Call the cuML function
    test_cuml = cuml_tests[test_type](y, d, D, s)

    # Compute differenced data and call the reference function
    y_diff = prepare_data(y, d, D, s)
    test_ref = ref_tests[test_type](y_diff)

    np.testing.assert_array_equal(test_cuml, test_ref)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_adapters.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from sklearn.preprocessing import normalize as sk_normalize
from cuml.testing.test_preproc_utils import assert_allclose
from cuml.thirdparty_adapters.sparsefuncs_fast import (
csr_mean_variance_axis0,
csc_mean_variance_axis0,
_csc_mean_variance_axis0,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
)
from sklearn.utils._mask import _get_mask as sk_get_mask
from cuml.thirdparty_adapters.adapters import (
check_array,
_get_mask as cu_get_mask,
_masked_column_median,
_masked_column_mean,
_masked_column_mode,
)
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
cpx = gpu_only_import("cupyx")
np = cpu_only_import("numpy")
coo_matrix = gpu_only_import_from("cupyx.scipy.sparse", "coo_matrix")
stats = cpu_only_import_from("scipy", "stats")
IS_ARM = platform.processor() == "aarch64"
@pytest.fixture(scope="session", params=["zero", "one", "nan"])
def mask_dataset(request, random_seed):
    """Build a random integer-valued matrix with ~30% of its entries
    replaced by a mask value (0, 1, or NaN depending on the param).

    Returns (mask_value, host_copy, device_array).
    """
    cp.random.seed(random_seed)
    randint = cp.random.randint(30, size=(500, 20))
    # Cast to float so NaN can be stored as a mask value.
    randint = randint.astype(cp.float64)
    if request.param == "zero":
        mask_value = 0
    elif request.param == "one":
        mask_value = 1
    else:
        mask_value = cp.nan
    # Overwrite 30% of the entries (chosen without replacement) with
    # the mask value.
    random_loc = cp.random.choice(
        randint.size, int(randint.size * 0.3), replace=False
    )
    randint.ravel()[random_loc] = mask_value
    return mask_value, randint.get(), randint
@pytest.fixture(scope="session", params=["cupy-csr", "cupy-csc"])
def sparse_random_dataset(request, random_seed):
    """Build a random dense matrix with ~30% zeros plus its sparse
    (CSR or CSC) counterpart.

    Returns (X_host_dense, X_device_dense, X_host_sparse, X_device_sparse).
    """
    cp.random.seed(random_seed)
    X = cp.random.rand(100, 10)
    # Zero out 30% of the entries so the sparse representation is
    # meaningfully sparse.
    random_loc = cp.random.choice(X.size, int(X.size * 0.3), replace=False)
    X.ravel()[random_loc] = 0
    if request.param == "cupy-csr":
        X_sparse = cpx.scipy.sparse.csr_matrix(X)
    elif request.param == "cupy-csc":
        X_sparse = cpx.scipy.sparse.csc_matrix(X)
    return X.get(), X, X_sparse.get(), X_sparse
@pytest.mark.skipif(
    IS_ARM,
    reason="Test fails unexpectedly on ARM. "
    "github.com/rapidsai/cuml/issues/5100",
)
def test_check_array():
    """Exercise each validation option of the check_array adapter:
    sparsity acceptance, dtype conversion, memory order, finiteness,
    dimensionality, and minimum sample/feature counts."""
    # accept_sparse
    arr = coo_matrix((3, 4), dtype=cp.float64)
    check_array(arr, accept_sparse=True)
    with pytest.raises(ValueError):
        check_array(arr, accept_sparse=False)

    # dtype: matching dtype passes through, mismatching dtype converts
    arr = cp.array([[1, 2]], dtype=cp.int64)
    check_array(arr, dtype=cp.int64, copy=False)
    arr = cp.array([[1, 2]], dtype=cp.float32)
    new_arr = check_array(arr, dtype=cp.int64)
    assert new_arr.dtype == cp.int64

    # order: output layout follows the requested order
    arr = cp.array([[1, 2]], dtype=cp.int64, order="F")
    new_arr = check_array(arr, order="F")
    assert new_arr.flags.f_contiguous
    new_arr = check_array(arr, order="C")
    assert new_arr.flags.c_contiguous

    # force_all_finite: inf rejected only when enforcement is on
    arr = cp.array([[1, cp.inf]])
    check_array(arr, force_all_finite=False)
    with pytest.raises(ValueError):
        check_array(arr, force_all_finite=True)

    # ensure_2d: 1-D input rejected when 2-D is required
    arr = cp.array([1, 2], dtype=cp.float32)
    check_array(arr, ensure_2d=False)
    with pytest.raises(ValueError):
        check_array(arr, ensure_2d=True)

    # ensure_2d: a proper 2-D array passes
    arr = cp.array([[1, 2, 3], [4, 5, 6]], dtype=cp.float32)
    check_array(arr, ensure_2d=True)

    # ensure_min_samples
    arr = cp.array([[1, 2]], dtype=cp.float32)
    check_array(arr, ensure_min_samples=1)
    with pytest.raises(ValueError):
        check_array(arr, ensure_min_samples=2)

    # ensure_min_features
    arr = cp.array([[]], dtype=cp.float32)
    check_array(arr, ensure_min_features=0)
    with pytest.raises(ValueError):
        check_array(arr, ensure_min_features=1)
def test_csr_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """Per-column mean/variance of a CSR matrix matches numpy's
    NaN-aware reference on the dense host copy."""
    X_np, _, _, X_sparse = sparse_random_dataset
    if X_sparse.format != "csr":
        pytest.skip("Skip non CSR matrices")
    means, variances = csr_mean_variance_axis0(X_sparse)
    ref_means = np.nanmean(X_np, axis=0)
    ref_variances = np.nanvar(X_np, axis=0)
    assert_allclose(means, ref_means)
    assert_allclose(variances, ref_variances)
def test_csc_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """Per-column mean/variance of a CSC matrix matches numpy's
    NaN-aware reference on the dense host copy."""
    X_np, _, _, X_sparse = sparse_random_dataset
    if X_sparse.format != "csc":
        pytest.skip("Skip non CSC matrices")
    means, variances = csc_mean_variance_axis0(X_sparse)
    ref_means = np.nanmean(X_np, axis=0)
    ref_variances = np.nanvar(X_np, axis=0)
    assert_allclose(means, ref_means)
    assert_allclose(variances, ref_variances)
def test__csc_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """The private CSC helper also reports per-column NaN counts in
    addition to means and variances."""
    X_np, _, _, X_sparse = sparse_random_dataset
    if X_sparse.format != "csc":
        pytest.skip("Skip non CSC matrices")
    means, variances, counts_nan = _csc_mean_variance_axis0(X_sparse)
    ref_means = np.nanmean(X_np, axis=0)
    ref_variances = np.nanvar(X_np, axis=0)
    ref_counts_nan = np.isnan(X_np).sum(axis=0)
    assert_allclose(means, ref_means)
    assert_allclose(variances, ref_variances)
    assert_allclose(counts_nan, ref_counts_nan)
def test_inplace_csr_row_normalize_l1(failure_logger, sparse_random_dataset):
    """In-place L1 row normalization of a CSR matrix matches sklearn's
    normalize(norm='l1') on the dense host copy."""
    X_np, _, _, X_sparse = sparse_random_dataset
    if X_sparse.format != "csr":
        pytest.skip("Skip non CSR matrices")
    inplace_csr_row_normalize_l1(X_sparse)
    X_np = sk_normalize(X_np, norm="l1", axis=1)
    assert_allclose(X_sparse, X_np)
def test_inplace_csr_row_normalize_l2(failure_logger, sparse_random_dataset):
    """In-place L2 row normalization of a CSR matrix matches sklearn's
    normalize(norm='l2') on the dense host copy."""
    X_np, _, _, X_sparse = sparse_random_dataset
    if X_sparse.format != "csr":
        pytest.skip("Skip non CSR matrices")
    inplace_csr_row_normalize_l2(X_sparse)
    X_np = sk_normalize(X_np, norm="l2", axis=1)
    assert_allclose(X_sparse, X_np)
def test_get_mask(failure_logger, mask_dataset):
    """cuML's _get_mask agrees with sklearn's _get_mask for every mask
    value (0, 1, NaN)."""
    mask_value, X_np, X = mask_dataset
    cu_mask = cu_get_mask(X, value_to_mask=mask_value)
    sk_mask = sk_get_mask(X_np, value_to_mask=mask_value)
    assert_allclose(cu_mask, sk_mask)
def test_masked_column_median(failure_logger, mask_dataset):
    """Column-wise masked median matches np.median computed over only
    the unmasked entries of each column."""
    mask_value, X_np, X = mask_dataset
    median = _masked_column_median(X, mask_value).get()
    # Invert sklearn's mask so True marks entries to KEEP.
    mask = ~sk_get_mask(X_np, value_to_mask=mask_value)
    n_columns = X.shape[1]
    for i in range(n_columns):
        column_mask = mask[:, i]
        column_median = np.median(X_np[:, i][column_mask])
        assert column_median == median[i]
def test_masked_column_mean(failure_logger, mask_dataset):
    """Column-wise masked mean matches np.mean computed over only the
    unmasked entries of each column."""
    mask_value, X_np, X = mask_dataset
    mean = _masked_column_mean(X, mask_value).get()
    # Invert sklearn's mask so True marks entries to KEEP.
    mask = ~sk_get_mask(X_np, value_to_mask=mask_value)
    n_columns = X.shape[1]
    for i in range(n_columns):
        column_mask = mask[:, i]
        column_mean = np.mean(X_np[:, i][column_mask])
        assert column_mean == mean[i]
def test_masked_column_mode(failure_logger, mask_dataset):
    """Column-wise masked mode matches scipy.stats.mode computed over
    only the unmasked entries of each column."""
    mask_value, X_np, X = mask_dataset
    mode = _masked_column_mode(X, mask_value).get()
    # Invert sklearn's mask so True marks entries to KEEP.
    mask = ~sk_get_mask(X_np, value_to_mask=mask_value)
    n_columns = X.shape[1]
    for i in range(n_columns):
        column_mask = mask[:, i]
        column_mode = stats.mode(X_np[:, i][column_mask], keepdims=True)[0][0]
        assert column_mode == mode[i]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_mbsgd_regressor.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.model_selection import train_test_split
from cuml.datasets import make_regression
from sklearn.linear_model import SGDRegressor
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.metrics import r2_score
from cuml.linear_model import MBSGDRegressor as cumlMBSGRegressor
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.fixture(
    scope="module",
    params=[
        unit_param([500, 20, 10, np.float32]),
        unit_param([500, 20, 10, np.float64]),
        quality_param([5000, 100, 50, np.float32]),
        quality_param([5000, 100, 50, np.float64]),
        stress_param([500000, 1000, 500, np.float32]),
        stress_param([500000, 1000, 500, np.float64]),
    ],
    ids=[
        "500-20-10-f32",
        "500-20-10-f64",
        "5000-100-50-f32",
        "5000-100-50-f64",
        "500000-1000-500-f32",
        "500000-1000-500-f64",
    ],
)
def make_dataset(request):
    """Generate a regression dataset and an 80/20 train/test split.

    Params encode (n_rows, n_cols, n_informative, dtype). The large
    float64 stress case is shrunk or skipped on GPUs with < 32 GB.
    """
    nrows, ncols, n_info, datatype = request.param
    if (
        nrows == 500000
        and datatype == np.float64
        and pytest.max_gpu_memory < 32
    ):
        if pytest.adapt_stress_test:
            # Scale the row count down proportionally to available memory.
            nrows = nrows * pytest.max_gpu_memory // 32
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )
    X, y = make_regression(
        n_samples=nrows, n_informative=n_info, n_features=ncols, random_state=0
    )
    X = cp.array(X).astype(datatype)
    y = cp.array(y).astype(datatype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, random_state=10
    )
    return nrows, datatype, X_train, X_test, y_train, y_test
@pytest.mark.parametrize(
    # Grouped those tests to reduce the total number of individual tests
    # while still keeping good coverage of the different features of MBSGD
    ("lrate", "penalty"),
    [
        ("constant", None),
        ("invscaling", "l1"),
        ("adaptive", "l2"),
        ("constant", "elasticnet"),
    ],
)
@pytest.mark.filterwarnings("ignore:Maximum::sklearn[.*]")
def test_mbsgd_regressor_vs_skl(lrate, penalty, make_dataset):
    """Check cuML's MBSGD regressor R^2 score stays close to
    scikit-learn's SGDRegressor trained with matching hyperparameters."""
    nrows, datatype, X_train, X_test, y_train, y_test = make_dataset

    # Run the sklearn comparison only on the non-stress dataset sizes.
    if nrows < 500000:
        cu_mbsgd_regressor = cumlMBSGRegressor(
            learning_rate=lrate,
            eta0=0.005,
            epochs=100,
            fit_intercept=True,
            batch_size=2,
            tol=0.0,
            penalty=penalty,
        )
        cu_mbsgd_regressor.fit(X_train, y_train)
        cu_pred = cu_mbsgd_regressor.predict(X_test)
        cu_r2 = r2_score(
            cp.asnumpy(cu_pred), cp.asnumpy(y_test), convert_dtype=datatype
        )

        skl_sgd_regressor = SGDRegressor(
            learning_rate=lrate,
            eta0=0.005,
            max_iter=100,
            fit_intercept=True,
            tol=0.0,
            penalty=penalty,
            random_state=0,
        )
        skl_sgd_regressor.fit(cp.asnumpy(X_train), cp.asnumpy(y_train).ravel())
        skl_pred = skl_sgd_regressor.predict(cp.asnumpy(X_test))
        skl_r2 = r2_score(skl_pred, cp.asnumpy(y_test), convert_dtype=datatype)

        # Allow a small gap since the two implementations differ in
        # batching and update order.
        assert abs(cu_r2 - skl_r2) <= 0.021
@pytest.mark.parametrize(
    # Grouped those tests to reduce the total number of individual tests
    # while still keeping good coverage of the different features of MBSGD
    ("lrate", "penalty"),
    [
        ("constant", "none"),
        ("invscaling", "l1"),
        ("adaptive", "l2"),
        ("constant", "elasticnet"),
    ],
)
def test_mbsgd_regressor(lrate, penalty, make_dataset):
    """MBSGD regressor reaches an acceptable absolute R^2 across
    learning-rate schedules and penalties."""
    nrows, datatype, X_train, X_test, y_train, y_test = make_dataset
    cu_mbsgd_regressor = cumlMBSGRegressor(
        learning_rate=lrate,
        eta0=0.005,
        epochs=100,
        fit_intercept=True,
        batch_size=nrows / 100,
        tol=0.0,
        penalty=penalty,
    )
    cu_mbsgd_regressor.fit(X_train, y_train)
    cu_pred = cu_mbsgd_regressor.predict(X_test)
    cu_r2 = r2_score(cu_pred, y_test, convert_dtype=datatype)
    assert cu_r2 >= 0.88
def test_mbsgd_regressor_default(make_dataset):
    """MBSGD regressor with default hyperparameters fits the synthetic
    regression dataset well (R^2 > 0.9)."""
    nrows, datatype, X_train, X_test, y_train, y_test = make_dataset
    cu_mbsgd_regressor = cumlMBSGRegressor(batch_size=nrows / 100)
    cu_mbsgd_regressor.fit(X_train, y_train)
    cu_pred = cu_mbsgd_regressor.predict(X_test)
    cu_r2 = r2_score(
        cp.asnumpy(cu_pred), cp.asnumpy(y_test), convert_dtype=datatype
    )
    assert cu_r2 > 0.9
def test_mbsgd_regressor_set_params():
    """set_params must have the same effect as passing the parameters
    to the constructor."""
    x = np.linspace(0, 1, 50)
    y = x * 2

    # Baseline: default construction.
    model = cumlMBSGRegressor()
    model.fit(x, y)
    coef_before = model.coef_

    # Parameters supplied at construction time.
    model = cumlMBSGRegressor(eta0=0.1, fit_intercept=False)
    model.fit(x, y)
    coef_after = model.coef_

    # Same parameters supplied via set_params.
    model = cumlMBSGRegressor()
    model.set_params(**{"eta0": 0.1, "fit_intercept": False})
    model.fit(x, y)
    coef_test = model.coef_

    # The parameters must actually change the fit, and both ways of
    # setting them must produce identical coefficients.
    assert coef_before != coef_after
    assert coef_after == coef_test
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tests/test_make_regression.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note: this isn't a strict test, the goal is to test the Cython interface
# and cover all the parameters.
# See the C++ test for an actual correction test
import cuml
import pytest
# Testing parameters: each list below is one axis of the parametrization
# grid for test_make_regression; the full cartesian product is run.
dtype = ["single", "double"]
n_samples = [100, 100000]
n_features = [10, 100]
n_informative = [7]
n_targets = [1, 3]
shuffle = [True, False]
coef = [True, False]
effective_rank = [None, 6]  # None => full-rank input matrix
random_state = [None, 1234]
bias = [-4.0]
noise = [3.5]
@pytest.mark.parametrize("dtype", dtype)
@pytest.mark.parametrize("n_samples", n_samples)
@pytest.mark.parametrize("n_features", n_features)
@pytest.mark.parametrize("n_informative", n_informative)
@pytest.mark.parametrize("n_targets", n_targets)
@pytest.mark.parametrize("shuffle", shuffle)
@pytest.mark.parametrize("coef", coef)
@pytest.mark.parametrize("effective_rank", effective_rank)
@pytest.mark.parametrize("random_state", random_state)
@pytest.mark.parametrize("bias", bias)
@pytest.mark.parametrize("noise", noise)
def test_make_regression(
    dtype,
    n_samples,
    n_features,
    n_informative,
    n_targets,
    shuffle,
    coef,
    effective_rank,
    random_state,
    bias,
    noise,
):
    """Smoke-test cuml.make_regression over the full parameter grid,
    checking only output shapes (correctness is covered by C++ tests,
    per the file header note)."""
    result = cuml.make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_targets=n_targets,
        bias=bias,
        effective_rank=effective_rank,
        noise=noise,
        shuffle=shuffle,
        coef=coef,
        random_state=random_state,
        dtype=dtype,
    )

    # With coef=True a third element (the ground-truth coefficients)
    # is returned.
    if coef:
        out, values, coefs = result
    else:
        out, values = result

    assert out.shape == (n_samples, n_features), "out shape mismatch"
    assert values.shape == (n_samples, n_targets), "values shape mismatch"
    if coef:
        assert coefs.shape == (n_features, n_targets), "coefs shape mismatch"
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/endog_hourly_earnings_by_industry_missing_exog.csv | ,Forestry and Mining,Manufacturing,"Electricity, Gas, Water and Waste Services",Construction,Wholesale Trade,Retail Trade,Accommodation and Food Services,"Transport, Postal and Warehousing",Information Media and Telecommunications,Financial and Insurance Services,"Rental, Hiring and Real Estate Services","Professional, Scientific, Technical, Administrative and Support Services",Public Administration and Safety,Health Care and Social Assistance
0,13.65,12.11,13.65,11.38,13.44,9.5,9.71,12.35,17.14,13.83,12.61,14.79,15.19,13.68
1,13.851591792200573,,,11.78678800254696,13.867070571786073,,,13.0909155671951,17.812435117728164,14.82753845996605,13.295474886760168,,15.970541016832259,14.403721755556253
2,13.932186081902552,12.355023621386243,14.73108527800528,12.198193625189077,,10.140409682929818,10.624890778487318,13.635226975795222,18.36749014305581,15.400909822132215,13.913703912697493,,16.5780963891776,14.681660977669893
3,14.270795333322575,12.389220656198583,15.037142876939978,12.529790889900646,,10.411711219132206,10.758341117348921,14.053573457078269,18.776523723975927,15.931418721560089,14.526344870973595,16.746351574028473,16.802179355432582,
4,14.456451844523633,,15.44068996229278,,15.358240546466158,10.897833721372711,11.264642978217099,14.367647763213672,19.131335743321515,,14.648792834011681,16.795357183854268,,14.389451990191542
5,,,15.835638324049468,13.025904835817773,,10.814465936104252,,14.627823362307268,19.163520039723746,,13.845282074885532,15.854124630510897,15.740833420283229,13.78885365892225
6,14.755192726717508,13.391837153802191,16.10703989305711,13.393799246358679,15.743149821626593,10.871785353725251,11.25806724331454,14.793080078737047,19.099177482803615,15.671361692069091,,,,13.304967855532414
7,,13.460685177131175,16.28128400492666,,15.764061922522005,10.624436687056946,,14.393242984219937,18.76028382617809,15.403333480182564,13.184251121142342,15.340403812554172,15.631810694537654,13.72564647530384
8,15.111423095329485,13.860052744866836,,13.504369773165179,15.647866101549015,10.559756132924411,10.781764606923604,,,15.117207774393604,13.130494698085222,15.486256968345458,,14.787248466064424
9,15.509152289611192,14.06848872408872,16.461348841513022,13.312529586519855,15.451333166356427,,10.40638565085023,13.745025043328631,18.41955609445053,15.23175800991669,,16.012411559233954,,15.168791033576886
10,15.99905474034535,13.865012060228567,16.587632105308824,13.292312577028328,15.386623659307197,,10.190870942631053,13.751970213173598,18.531171516576446,15.72297198478351,13.998908635452404,16.72975951816983,,
11,16.510549491152634,,16.467664550151046,13.07488546826914,,9.558430839241527,10.054838707843363,13.913547294520743,19.01756504479315,16.352285753089898,14.740697910529839,17.439374575451623,,
12,,14.130868119227609,,12.923728527569113,,9.335576201739169,10.019043516690937,14.218635909450446,19.396699451288576,17.27975613593806,,17.718110781473023,17.76047886322311,15.907114431636783
13,,14.150731910715734,16.28633645384955,12.593797428271577,,9.148404113495584,,14.647586412250444,20.094244516699277,17.895337095821482,15.599364719121331,17.639608655548603,17.341301647078755,14.80755974830567
14,,,16.106656700032303,,14.560129339332676,,10.411476175242253,15.050114363658523,20.369120706454208,18.396085468372462,15.52625269520014,17.18359370446417,16.73204748507204,14.299466177489194
15,16.223870619910233,13.70934880732469,15.81362202578458,,,9.172505474264355,10.752094658709545,,20.693143663634118,18.385372675770192,,16.827440101500745,,14.36343693548252
16,,13.801490498350368,,11.893342197371068,,,11.216826705552865,,20.567511512606718,18.082536889458,,16.051596779045568,,
17,15.471557194866138,13.668462742553087,15.46862785139451,11.74453937774285,14.710541524792731,9.780577148513332,,15.95041834511198,20.519649957730785,,,16.153367914663377,16.409359490870646,15.488010058690753
18,15.375498390845612,13.26291061275795,,,14.845142084689208,,12.00148005213932,15.896990406354737,19.93629090947301,17.3297862648617,,,,
19,15.539709223208575,13.177760452229116,15.38397799817525,11.70900403358093,15.168771394772293,10.352467812568,12.244499204115552,15.790451785461007,19.5920308828934,17.087343960951127,14.196487328272848,17.012128821795294,17.619713024493418,16.27321801625822
20,15.69446493790059,,15.429866861658358,11.938756591185607,,,12.314705122501344,,,,,17.749446096990667,17.937073947586256,16.22194775081549
21,15.240135253277526,13.161378607244325,,12.11813041571459,15.819371568103742,10.995682668360207,12.258713127760142,15.229731329617602,19.02350540163146,17.681672934656827,15.411103727888927,,17.85853675262889,15.381718553472455
22,15.197180666001987,12.896791867830677,,12.340908626863227,16.255649396257507,11.16628018649039,,,,17.99707961824398,16.170386608740145,18.63097893045504,17.50223160782623,14.34404055935509
23,15.666147849912862,12.685747756627947,,12.587553918226797,,11.067057786079852,11.634538821359905,,,,16.55284117684216,18.37411463966689,16.773365163491086,
24,15.997664193841038,12.771520533403976,15.883838619523702,12.975801540819567,16.540876626842746,,11.426089469830575,14.221864563946376,19.28565698097457,19.228503059556587,16.545635697312466,,16.25419287668816,15.129004646018375
25,,12.737231365318934,,,16.976759104893066,10.78712389990577,11.175162483958083,14.190942829585476,19.92891758074776,20.44726321869338,16.541098174285437,17.405716978386923,,15.702683341424121
26,15.861219136218843,12.575778232239756,16.40821550833602,,16.998852838617662,10.609594306346992,10.980124469555697,14.522717276261574,20.43841609404956,,,,16.759573747052517,16.60969775357549
27,15.624856020184954,,16.6097391443485,13.973413242676422,,10.223372576700045,,,21.0150430628985,20.857495389596775,,17.17308743127811,17.501152586860183,17.291765990953927
28,16.004222665660773,12.751472033735043,,,17.088009131580808,9.970208810437839,10.98630789707783,,21.204875571165708,,15.345740540642215,17.604157861823353,18.139016848716075,17.21497951345778
29,16.360242220272596,12.752746935952635,17.329568388187358,14.695218134492185,17.067929818699444,9.781482314984832,11.241424375054338,15.957546333166437,21.402172850119477,20.231005212809258,15.68148060711822,18.701898715684482,18.49614296407842,
30,16.21387127467223,,17.7655885372505,14.979521275547857,16.868629438983746,9.603184175410224,11.566447395816509,16.402285419067706,21.11057521387587,20.02375158869258,16.07398515418231,19.459458459965642,18.536355526373136,16.06531535352705
31,,12.729248489477106,18.252748100480247,15.10550412117673,16.852515742373786,9.361553170729106,12.276183482162534,17.11698636747496,21.22976443478051,19.846439731689753,16.8288311525825,20.080059424126155,,15.80283440155813
32,16.19789387429227,12.975890026762595,18.74322017599655,15.249647631981372,16.79528490276848,9.539894201922701,12.692064826506279,17.2060588352331,20.808403215301606,19.88639489521436,17.50995215650415,20.08764173033968,17.744790863904786,16.111077495046775
33,16.22028069913035,,,,16.56362615534087,9.736965011833718,13.121395859905862,16.952863243408935,20.627530768721506,20.87621265038233,18.24114337855649,,17.163482508801106,16.705274097802207
34,,13.085743367995606,20.032451257981293,15.217544584566355,16.442837726647383,,13.497251503849032,16.709041697982173,20.40092819833503,21.555942829389064,18.668094954557187,19.595137806854765,16.993194760471493,
35,16.640759916691465,13.277375264259197,20.246157973409787,,16.414567019393985,10.392240569631083,13.677464388481305,,20.52970926387039,21.979620413485033,18.505790106315708,18.99814989302875,,18.271807600264626
36,,13.478296977492253,20.383672706388467,15.039944043769497,16.244876914423898,10.84507896967941,13.881282153129611,15.931779204620836,20.667357601690828,22.35831805478115,,18.561786750174598,18.21484793792997,
37,17.055223754580084,13.74657331513443,20.479546406199255,15.051099087200612,16.412810921411136,11.293840421450092,13.652643787749549,15.711393559341623,21.428873809116848,22.847021849424287,17.643462873903157,18.96791975085005,19.04812786325131,18.00770732065041
38,16.954960380099607,,,14.921442524650805,16.599585349075184,11.557217680347001,13.369557753021928,15.672036252105396,22.08424121637337,23.331504308037175,17.258136951830803,,19.314931298471475,
39,17.250810487412092,14.085552569295736,21.31104859145886,,16.948480655945556,11.609406542761098,13.08968006026147,,,23.27431880554524,17.365548563119532,20.56164781571207,19.633937754783584,16.973161317306587
40,17.113532526841887,14.38065279629912,,14.541340620808118,17.33544168698889,11.714447269537395,,15.797014076583102,23.113328415685963,22.6351979321252,17.53771817259869,21.19111657080765,19.313838972090643,16.88528819722389
41,17.273816286639146,,21.714902738460655,,17.460333178748254,11.739372323095271,12.371354413201876,16.044942075863236,,22.418687760841166,18.256232357579826,,18.5463289641368,17.017614842438775
42,16.582276000843713,14.696385609707297,20.99960455771748,14.764434058310693,18.088738382459926,11.805778740677587,,17.03032407732933,23.223057961742313,,19.092169433861056,21.850813926939836,18.443025234761727,
43,15.849444212061595,14.860365522450357,,14.753515993654123,18.124140139501215,11.339634486279655,11.900095717096024,17.355666629438762,23.121050905424408,21.86247728032324,19.224628796820753,20.794627447501078,18.514207040432677,18.70024313132518
44,15.27576645047528,15.320616487461226,22.301589778217586,14.642907698316534,18.590289580195478,11.14934226197248,11.906918629013393,17.585475193197365,22.50357017216377,22.42019843886321,19.62091128255854,20.168473970476658,18.450511457715322,19.071563663555693
45,15.571596782253444,,22.619624538415895,15.074473950648414,18.653550857164607,10.742295550354001,12.49354322395374,17.363276600339248,21.95825654811762,,,,,18.536309231362477
46,16.66719427183882,15.557018303060953,,15.336213816763681,,10.460344483734895,12.908732192447417,17.914854246360115,21.932220235526916,24.089636631165103,19.862668150212503,20.054846807713446,20.447042618530524,17.983305272619187
47,16.03272039346193,15.817175190964967,22.273948414324124,15.592385058917381,18.72217654961037,10.21472132416064,13.372529517434845,17.825060814769373,,24.945329184343414,19.263388873745605,20.234109866176293,20.943438419699625,
48,15.878237417743936,16.15174981724681,22.225679675496103,,18.950075302536465,10.121040863029524,13.984147430927006,,22.188881435943383,,18.968166540505802,21.094017204097774,,17.358204932136985
49,16.31370778950919,16.188500374352987,22.36436079531547,15.918698766348298,19.331714722254855,10.144981964722703,,17.012734162244552,,25.567491883235835,18.998233646681324,22.131303670355962,20.37452548297793,17.529168222938615
50,16.328994503024127,,23.047652281526343,16.412880267270662,19.327806096393772,10.209173886834998,14.591312440422534,16.870327141732965,,25.151801583424604,19.09358904416378,,19.977983810232317,18.626350977009476
51,16.96386247091328,16.561604858171208,23.132632172670487,16.771185342545433,,,14.776927433631474,16.43491938997211,,25.089869767886448,,23.022361478867225,19.47282313748951,19.189676970412055
52,17.657980873074603,16.815632128222063,23.355513859026686,16.80813728803338,19.148372638785563,,,16.962486162240566,24.52905656492544,24.742324051956086,20.119032954691022,22.87524670263121,19.235492914443842,
53,17.710926462124604,16.94723426746753,22.77139837434785,17.12896528221741,,,14.625830443982585,,,24.279144933608002,20.468818172001505,21.93790165825381,19.92122724526191,
54,17.932187792346262,16.766482008417448,23.824071033742477,17.82052276434323,18.58129773033712,11.63760657149422,14.40468705652544,,,24.07247939265653,,21.746791242362942,21.212176895099244,19.466632935626034
55,,17.02394811517415,,18.152104823892675,18.063484739491194,11.95510720877985,,17.633787064828706,25.454766883472814,,,21.976229288214732,,18.996601633753862
56,18.407202393137414,16.900694520385542,,17.886113780071355,18.009253413403545,12.25533949944767,13.743415569336863,17.66040992522217,24.439442219249543,,21.824381402973884,,21.721663876203685,18.580126201927815
57,18.499541863965845,,23.83817914168924,18.168531568801686,18.294305807028138,12.602880581672464,13.74624384020795,18.20365126888522,24.247790369878835,,,22.73596620166741,21.528403190557437,18.545476636547296
58,,17.07856825710435,23.90551254023352,,18.357261796763886,,13.713296024851553,18.96843491960251,23.980921704976623,,21.41816837722393,23.533797431281393,21.50513998823391,19.296998224925463
59,,,24.285649518273416,18.24168031770849,18.84888909256807,12.821410512218494,13.727997291703362,19.189288198415518,23.986533759970595,28.38561159794283,,24.15126644177114,21.46975971522927,
60,19.726087035025795,,23.703435152898418,18.05333043770335,,,,19.21337791343667,,28.700985722230676,20.338165948464855,24.091632566776166,20.612878227758525,21.005883256509247
61,,,24.69261225651034,,,,,19.267047638632196,24.83045708710418,29.75143488758314,21.11997307119624,,21.187051145316737,21.41773919396587
62,19.995929881514716,16.699060279979033,25.146070831147174,17.958278166068904,19.61914897599683,12.459004517783642,14.536940189116962,,25.532556260362355,,21.662963710049823,24.58750520343232,21.75210246530295,21.38334230745425
63,20.497591560370303,16.734138198690765,25.406131521664935,17.779170374704705,19.898095690664995,12.051230405436923,14.839550731541058,19.129258148569917,26.11357794050113,28.093278515131207,21.63315913899587,23.256151004622385,,20.78545252821467
64,,16.69978248997389,26.454851767393652,17.802675454050917,,11.670588179026606,15.52155059708487,18.779012816347073,26.574039745175032,28.251295337050703,,23.209282362887212,23.404269831445287,20.455057052783133
65,21.059414399081263,16.72934178493705,,,21.056313602827252,11.398151397735806,,19.38867664197427,27.45949306660313,,23.587902255017724,23.64316988242037,23.519607296058776,20.666924131697026
66,21.37759415006057,16.856076792146453,27.217082392695964,18.01756151493173,21.70599783573949,11.689148490020484,16.282929784889973,18.49321510028085,27.928544082288525,,,,23.845291869964207,21.25459275093894
67,,16.883087069141187,27.8762182357895,18.00529742778539,22.23299901700501,11.60869114551357,,19.054766171232604,,28.384338539067027,,,23.575687095029227,22.26407886180237
68,,16.983241413053133,28.555837628541866,17.876717452006172,22.20521772862452,11.608729407788429,16.275452439635796,20.16624148187153,27.7237340665492,29.44521210890627,23.4972973661789,,,
69,24.849767677596628,17.159113433608127,28.15120495097779,,22.578127918108695,,16.43932385719536,19.862737642450988,,30.652999349279003,23.336847344443964,26.71285841578283,,23.540006985792726
70,,,,18.64125702500245,22.765181197267374,12.285362319359294,,,27.642487869180943,,23.491747387643667,27.65106758298586,,24.11351491428843
71,24.133813704441792,17.196490147449122,,19.06526123383819,,12.685826767352129,16.037145783270336,,27.424661351589144,32.48035488878264,23.3168147760208,27.332019035177307,23.74313316948091,
72,24.21027090371882,17.52118670002999,28.715667461627902,19.644663870283804,,,,22.121161867346384,27.23491084348189,32.685301217174995,,26.75566112769298,24.37939195845465,
73,23.75879803605197,17.647912965565116,28.625634325067445,,22.294199070305364,,15.860994618294951,22.055020395192106,27.587982388194263,32.991220230046096,24.489477039785008,26.539507902546546,25.46257567120528,23.127909220661756
74,23.67882788678878,17.657073756617397,29.029928688150573,20.60193858517074,22.46739059862349,14.063268508860913,15.618677707041277,22.681631186993123,28.10917072181593,31.75923178044594,25.23726046557517,26.23400813803925,26.22726838707944,
75,24.159878143305637,18.078570054672575,29.502797928780684,,,14.476102595067202,,,,31.745879207097587,25.603334526919085,25.9663258976724,26.81195494657682,24.541977453257402
76,24.65155621411613,18.41180123388161,29.939742245325682,21.54306449988818,22.354332993435467,14.489940610178635,16.14634908245693,23.152153883330744,,30.859906236562804,25.93216350649512,26.415852312334756,26.46617142958987,25.814472415749652
77,25.013563151515392,,29.5172953810659,22.392398660764044,22.304190746784766,14.88317339664858,16.877370427684426,22.83580350735958,30.879712185316443,,26.35204794583786,27.655905628652068,25.753691781736272,
78,,18.698649590352154,29.562764144467955,22.827495246529704,22.60401292160853,,17.179600275628168,23.247840360461893,31.618542557952846,32.77535782975501,26.293184753582853,,,27.050939374333044
79,26.197853010585654,,29.793937109789145,23.104942296080896,23.171993323956755,14.744140776130486,17.94565588869854,22.552668013473806,31.7214022064877,34.00573324867393,25.853145405702424,29.468730333670663,25.60097934425267,26.96073171152589
80,27.380028290923008,19.183563670995653,30.638774111200075,23.543668714122607,,14.47858561599292,18.342814184753543,22.994223793266446,31.855879490396024,34.82580040793662,25.77833840788696,30.10374383269659,,26.391829021531287
81,27.81231823069846,19.760444473509402,30.385088909314987,23.355632618252535,,,18.702914332613528,22.83324849043029,,35.78064636746854,25.471904866097244,29.73637863009134,27.462944972445285,26.0764944965911
82,,20.04640821235519,,,24.059233349837488,,19.101540068463787,24.03116568971664,30.84816281935988,36.54689818504657,26.483235316951447,29.448914770848795,,25.74921701018323
83,,20.008283569740986,30.190825752875785,,,,19.755032047285805,,31.07723491431308,36.487960412213994,26.590236329204323,28.8278656081488,29.014646936902277,26.67331560890523
84,25.502302454602887,20.562766465667032,30.452444265676892,23.744915275100784,23.707438743003053,13.137509210859115,19.264223133213886,24.320519876037537,30.63628309849687,36.0254555236954,26.594034950316818,28.257519994211492,28.518395317470052,27.452327325067984
85,24.98794903469184,20.86649426043948,30.189447275227156,23.702779333149962,24.38083938962448,13.342681283865911,19.314306908601367,,29.899727722416877,,27.84098431187967,28.38623504666513,28.456597635889285,27.761532246733413
86,24.845617395496213,21.08612127230849,30.14477816891343,,24.622166422847084,13.468559682627623,18.83086016132365,,,34.7520045993564,28.783803339705965,29.327774660718184,28.095856393360037,
87,24.975947903258024,21.43839391426747,30.689852322332705,23.40522989047162,25.02821476658986,13.77177566394805,18.59264731211536,25.6004725031517,31.12801786780056,,28.596999421154976,30.225696146703147,27.574605637577918,28.113191879854224
88,,21.790223762602924,31.394500672733532,,24.890717225469206,14.144732766007824,18.582342804648768,25.189615494007423,32.04254488801376,34.05001498159384,,30.703074304091057,27.50334645443357,27.906752452872883
89,25.54751628074122,22.078756916232816,,,25.367976238958263,14.557740459851596,,25.020401897240475,31.862737859584932,35.20417548622854,28.451159841496754,30.583205519360252,28.49564501814814,27.404560966229297
90,26.560372285078834,22.441438089172223,,23.38738938385884,,15.032614261837429,17.917282088007717,,32.511765645690375,,29.09612815401034,31.620221493503493,29.23645536381405,26.808448117928055
91,,22.556067996747686,31.271012751790312,23.315859477353488,25.068411126946085,15.417305776945842,18.08450806077097,24.892374975737827,33.73246257269199,36.95764327124107,28.256297557749672,31.418579060919292,30.128555708622592,27.488432436281695
92,,,34.572113419973014,22.847111578729052,24.767010726891833,15.811001259018141,,25.116813865965792,33.97866962705299,38.214626749107296,28.431151306018794,31.53716422390082,30.38174281304598,28.36685077524534
93,27.117983350005378,22.974444126582963,35.340538848156605,23.41097756623431,24.729836576882796,16.20907046417267,,24.626896136404437,32.61408744294142,,29.94675106935023,29.88041496850416,,29.37686520065353
94,26.13006272750542,23.42596988081832,33.778810336863174,23.294018041509705,25.06999976730927,,19.575180659076135,24.625512889143184,,39.98674412640572,31.960126131890853,30.9445532093056,29.66697037433533,29.288583947087055
95,28.151894557143997,23.25505330056562,34.27905665525281,23.299114423153114,,16.314662097420715,20.019266239129706,25.031999034138526,32.93860466764186,,,,,29.332919796443235
96,,23.701821683344587,34.833327842258065,23.63528979406735,24.5470457509538,,20.646561737159086,25.85629253863722,33.340664934576346,39.31618581303483,31.08799702043318,,,
97,27.298801802302354,23.4669034742969,35.11391214644418,24.22776964143364,24.52538579905033,16.14614409900249,20.84204222104956,26.460937836209936,33.080820736420876,38.967613538827436,32.037502832611956,31.395424283185832,,28.088454804263375
98,27.055841675015113,,,25.06827871279372,24.249381174013656,15.828090121758219,21.016757110709822,27.460534505046308,,38.652216044422666,32.11881214130925,33.19460011032051,30.097118050651403,27.667260704032692
99,27.896543286045464,23.556931180523904,37.236187054490216,25.10555553116717,24.36399906409404,15.719154746661877,21.203979618222824,,,38.48639069824376,32.02861068270491,,31.309918439168708,28.23417965903108
100,26.921816444485163,23.725458576402232,37.06629203555905,25.826052579145948,25.177004888380445,15.38141711511016,20.781847539869162,28.01590683292107,,39.23536793018039,,32.10322398031023,31.506223641014824,29.110517917882447
101,28.082525282035398,23.899381909328028,37.72000077044671,26.25477861791052,24.858268635040684,15.175598590873083,20.882078477028287,28.445643913965725,,40.509223906146104,,31.596694745122885,31.378555962948795,29.514948435187144
102,28.67947961888848,24.121414724100628,,,25.11977647489118,15.106125108464909,,28.69733233750169,37.377206246487134,41.151060024634106,31.98676954731085,32.240541897793385,31.460619844385484,30.304477227057085
103,,23.55453760092264,39.23975388299498,26.995358412408784,25.67634661289073,15.41695287110958,20.61142160105469,28.52177590445198,37.93397576249284,42.340835704061114,,31.975214062536953,,
104,29.52504458850386,23.631931217994364,,27.278498952893074,25.72698543267635,15.418663551516804,20.203393124927736,28.727059951824913,37.14962382093216,,32.46801652545539,31.787805184535422,30.770603616736167,30.430609991411085
105,29.104933673171587,23.96690492033853,,,,15.807181215662224,20.344049550443668,28.338977283447083,37.41371103499975,42.90190026324728,,31.887594908647223,30.51707674923643,
106,29.6336123870842,,,,27.219038966871167,,20.4594045758944,,,42.057978097779035,,33.16779409537264,30.838878982241567,29.06150063917811
107,,23.943026150023712,39.55649611328729,27.803013447079607,27.207844281353324,16.339728454014644,20.74262717227577,,37.60923375450625,42.203898634977385,,34.25501856819074,31.378730345613043,
108,29.758968329738003,23.900762408746257,39.76126501927288,27.819852603987723,27.270198918778952,16.885084864367318,21.26196249232675,27.49708295657939,36.366245496535896,42.20013471137889,33.7719901280318,34.69039241904022,32.203329518494556,29.815075523280772
109,29.24622668205186,23.689108246801936,39.71083467126169,28.287228937799178,27.798165631548986,17.38822886599891,21.892587702446274,27.761016261471127,36.39006828617381,41.921955305224294,33.97326285582644,,32.99367277343588,
110,28.953431104361282,,39.64283015402416,28.13148479382319,,17.668534355838588,22.362109742434207,28.201618684991946,,,33.40501431614065,34.67751042211091,33.795086413983356,
111,31.56062709826174,23.808681852470528,39.18517851055612,27.80226529139295,28.19341926763215,18.091782142069754,,28.91017851837316,37.31730370849863,40.350676565208616,32.87604085843692,33.75992786236827,33.90641010601702,31.06121007243653
112,,24.24462419101302,39.08579271588977,27.811918637060838,28.381407148496546,18.434309177666876,22.781519242309443,,,40.750231326075166,32.37189877919565,,33.43757416356101,
113,30.754677187671135,23.35049937164877,38.542256211749944,27.96472837292261,28.54634640535743,18.475877813089294,23.599017322044045,28.994150825960794,38.78143388613222,41.207956265328946,32.85659088136453,32.98881088350147,32.71319444813519,30.9083153188646
114,29.451126287462294,23.917629634237787,38.97152057866358,28.136025295151047,28.336692993176232,18.41091715811818,23.518377283249407,29.42791972093787,39.23925677537909,41.36156898366093,34.4992487277059,33.80780913929345,32.468713081836796,30.791005294663236
115,30.316761013992284,23.966859875322662,40.02962839838097,27.941234259343467,28.515779467449228,18.257986032035472,23.620032883947097,30.170534098666323,39.35029132076366,42.19395930363636,35.93094272320709,34.3195832389059,33.04204108839906,30.699865611609432
116,30.721142969265745,24.218538673573697,40.33147235520991,28.18491513770975,28.95973075651526,18.237522621892214,23.489315933078423,30.524783296117274,39.20198382646372,44.646579557148975,36.672734356542584,34.67759486186219,33.48047774628434,31.539809645187752
117,30.613746355869168,24.022510461463455,39.23060018013212,28.07985755673791,27.92513049951384,18.178153713095085,23.68403573498857,29.892952307239767,38.680586480268516,45.45620837168165,37.691677857875035,35.05488204685573,34.12668393647759,32.030759425418246
118,30.363963230564828,24.668119018978047,39.68907321396445,28.42628570016464,28.176646409073587,17.98201101498513,23.578509831621627,30.044375769470324,38.65186834376624,44.26129493191529,36.991714995045896,36.45477828185509,34.73090012617649,33.04598348088618
119,30.11110957865866,24.734222210903923,40.087384228205536,28.621222618750576,28.434826778009704,17.819622498356807,23.36498845698211,29.99561515151064,38.21725453610195,44.947163941983646,36.75469741619022,37.34151110042108,35.311334273448985,33.907428220081655
120,30.974432148449008,25.42921763672246,41.25443777979108,28.218053570999604,28.834264961524852,17.94499819361194,23.273826593944108,29.85521492121272,37.8263388737728,45.19559657022285,36.06371296657798,37.479652816971694,35.13198987142161,34.373956127550805
121,30.27311597746858,25.481078617071045,42.21759388295207,28.666315544181327,28.872295796251493,17.961506319001167,24.02395524954119,29.976072828888345,37.860051482131524,45.63186048672116,36.024060713808964,36.80817746477709,35.26781460544899,34.190745522952575
122,31.8062925353201,25.97739970859231,41.662773257116314,29.301726042220057,29.488340401140576,18.349039777267706,23.965156324851055,29.708934484805052,37.81105088311454,44.098913515609055,36.172013940114326,36.2715477099431,35.20593550433007,33.72041309158106
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/cattle.csv | Year,Total Beef Cattle,Total Pigs,Total Sheep,Total Deer
2002,4491281,342015,39571837,1647938
2003,4626617,377249,39552113,1689444
2004,4447400,388640,39271137,1756888
2005,4423626,341465,39879668,1705084
2006,4439136,355501,40081594,1586918
2007,4393617,366671,38460477,1396023
2008,4136872,324594,34087864,1223324
2009,4100718,322788,32383589,1145858
2010,3948520,335114,32562612,1122695
2011,3846414,326788,31132329,1088533
2012,3734412,313703,31262715,1060694
2013,3698522,297724,30786761,1028382
2014,3669862,286971,29803402,958219
2015,3547228,268300,29120827,900100
2016,3533054,254607,27583673,834608
2017,3616091,273860,27526537,836337
2018,3721262,287051,27295749,851424
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/alcohol.csv | "","Total beer","Total spirits","Total wine"
"1994Q3",3.214,1.151,1.573
"1994Q4",4.362,1.184,2.470
"1995Q1",3.368,0.939,1.445
"1995Q2",3.235,0.951,1.734
"1995Q3",3.157,1.189,1.670
"1995Q4",4.335,1.150,2.385
"1996Q1",3.373,0.882,1.511
"1996Q2",3.259,1.155,1.740
"1996Q3",2.890,0.871,1.730
"1996Q4",4.136,1.097,2.512
"1997Q1",2.994,0.733,1.508
"1997Q2",2.886,1.178,1.784
"1997Q3",2.823,0.957,1.720
"1997Q4",4.125,1.816,2.629
"1998Q1",3.115,0.972,1.453
"1998Q2",3.009,1.228,1.754
"1998Q3",2.779,1.132,1.771
"1998Q4",3.952,1.599,2.565
"1999Q1",3.112,0.931,1.559
"1999Q2",2.705,1.143,1.872
"1999Q3",2.881,1.279,1.892
"1999Q4",4.270,1.627,2.801
"2000Q1",2.957,1.097,1.486
"2000Q2",2.823,1.309,1.915
"2000Q3",2.798,1.496,1.844
"2000Q4",3.997,1.810,2.808
"2001Q1",3.037,1.240,1.287
"2001Q2",2.778,1.516,1.861
"2001Q3",2.857,1.272,2.034
"2001Q4",3.967,1.751,2.739
"2002Q1",3.094,1.276,1.656
"2002Q2",2.819,1.378,1.918
"2002Q3",3.052,1.512,2.265
"2002Q4",4.088,2.055,2.902
"2003Q1",3.044,1.257,1.691
"2003Q2",2.849,1.494,2.033
"2003Q3",3.137,1.386,2.141
"2003Q4",4.040,1.974,2.932
"2004Q1",3.337,1.529,1.847
"2004Q2",2.726,1.444,2.157
"2004Q3",3.135,1.746,2.318
"2004Q4",3.904,2.130,2.974
"2005Q1",3.222,1.464,1.977
"2005Q2",3.058,1.771,2.328
"2005Q3",2.992,1.522,2.479
"2005Q4",4.025,2.310,3.099
"2006Q1",3.027,1.443,2.141
"2006Q2",2.796,1.865,2.299
"2006Q3",2.921,1.861,2.606
"2006Q4",4.256,2.272,3.330
"2007Q1",3.169,1.434,2.290
"2007Q2",2.870,1.852,2.499
"2007Q3",2.864,1.722,2.524
"2007Q4",4.267,2.337,2.887
"2008Q1",3.399,1.722,2.007
"2008Q2",3.296,2.222,2.330
"2008Q3",2.780,1.671,2.384
"2008Q4",4.166,2.421,3.696
"2009Q1",3.162,1.547,2.157
"2009Q2",2.940,1.864,2.529
"2009Q3",2.846,1.671,2.395
"2009Q4",4.024,3.101,3.447
"2010Q1",3.301,1.970,2.499
"2010Q2",2.826,2.062,2.499
"2010Q3",2.712,2.243,2.504
"2010Q4",3.934,3.054,3.834
"2011Q1",3.192,2.003,2.246
"2011Q2",2.975,2.645,2.538
"2011Q3",2.865,2.482,2.704
"2011Q4",3.789,2.566,3.182
"2012Q1",2.728,2.061,2.325
"2012Q2",2.840,2.715,2.577
"2012Q3",2.633,2.252,2.540
"2012Q4",3.837,2.764,3.335
"2013Q1",3.090,2.132,2.461
"2013Q2",2.779,2.673,2.666
"2013Q3",2.594,2.070,2.524
"2013Q4",3.960,2.677,3.246
"2014Q1",2.771,1.823,2.550
"2014Q2",2.860,2.421,2.691
"2014Q3",2.676,2.314,2.661
"2014Q4",3.831,2.564,3.616
"2015Q1",2.986,1.986,2.320
"2015Q2",2.558,2.109,2.650
"2015Q3",2.810,2.453,2.602
"2015Q4",3.743,2.576,3.268
"2016Q1",3.054,2.107,2.531
"2016Q2",2.764,2.207,2.678
"2016Q3",2.985,2.526,2.817
"2016Q4",3.807,2.898,3.324
"2017Q1",3.046,2.127,2.517
"2017Q2",2.880,2.338,2.616
"2017Q3",2.689,2.395,2.718
"2017Q4",4.005,3.215,3.644
"2018Q1",3.013,2.174,2.413
"2018Q2",2.800,2.179,2.643
"2018Q3",2.937,2.828,2.717
"2018Q4",4.158,3.358,3.534
"2019Q1",3.197,2.248,2.408
"2019Q2",2.687,2.476,2.664
"2019Q3",2.774,3.053,2.705
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/hourly_earnings_by_industry_missing.csv | ,Forestry and Mining,Manufacturing,"Electricity, Gas, Water and Waste Services",Construction,Wholesale Trade,Retail Trade,Accommodation and Food Services,"Transport, Postal and Warehousing",Information Media and Telecommunications,Financial and Insurance Services,"Rental, Hiring and Real Estate Services","Professional, Scientific, Technical, Administrative and Support Services",Public Administration and Safety,Health Care and Social Assistance
0,13.65,12.11,13.65,11.38,13.44,9.5,9.71,12.35,17.14,13.83,12.61,14.79,15.19,13.68
1,13.77,12.09,,11.54,13.6,9.48,9.74,12.65,17.35,14.31,12.7,14.93,,
2,13.77,,14.32,11.72,13.77,9.56,9.85,12.84,17.56,14.52,,,15.66,13.68
3,14.03,,14.44,11.85,,9.64,9.74,,,14.95,13.46,15.81,16.04,13.82
4,14.14,12.74,14.68,,14.54,10.04,10.15,13.37,18.29,15.25,13.85,,16.17,14.24
5,,,,12.08,14.7,9.99,,13.82,18.65,15.57,13.55,15.95,16.26,14.35
6,14.3,12.82,,12.4,14.94,10.2,10.41,14.33,19.06,15.86,13.73,16.17,16.43,14.27
7,14.67,12.84,15.22,,,10.21,10.38,14.36,19.23,16.09,13.84,16.31,16.85,14.57
8,14.54,13.21,15.22,12.6,15.2,10.48,,14.33,19.29,16.11,13.88,,16.86,
9,14.89,13.41,15.39,12.54,15.28,10.35,10.59,14.48,19.55,16.25,13.92,16.41,17.07,14.7
10,15.34,,15.57,12.7,15.53,10.55,10.66,14.67,,16.48,,16.49,,14.92
11,15.82,13.34,15.54,12.7,15.59,10.56,10.69,14.82,19.91,16.64,14.09,16.65,,14.94
12,15.36,13.58,15.63,12.79,15.63,10.59,10.67,14.92,19.88,,14.11,16.69,17.31,
13,,,15.63,12.71,,10.55,10.64,14.99,20.1,,14.2,,17.46,
14,,13.39,15.62,12.7,15.83,10.62,10.64,,19.95,,14.24,16.83,17.57,15.15
15,,13.46,,,16.05,10.51,,14.91,20.01,17.54,14.49,,17.67,15.3
16,14.91,13.69,15.4,12.66,,10.64,,,19.85,17.58,14.25,16.84,17.47,15.38
17,14.79,13.71,15.54,12.65,16.12,10.65,10.75,14.87,20.01,17.86,14.48,17.11,17.6,15.27
18,14.73,13.47,15.49,,16.13,10.66,10.69,14.82,19.83,,14.48,17.12,17.68,15.23
19,14.94,13.56,,12.72,16.26,10.6,10.73,14.91,19.99,,14.53,,17.59,15.24
20,15.15,13.72,15.97,12.91,16.14,10.75,10.75,14.96,19.95,18.15,14.63,17.28,17.62,15.53
21,14.76,13.91,16.15,,16.4,10.79,10.8,15.13,20.26,18.64,14.73,17.41,17.72,15.38
22,14.79,13.83,,13.06,16.57,10.88,10.88,14.92,20.2,,14.9,17.59,17.94,15.01
23,15.34,13.8,16.63,13.11,16.35,10.82,,15.01,20.26,18.9,14.9,17.63,17.93,15.76
24,,14.06,16.62,,,,10.91,15.06,20.23,18.76,14.84,17.52,17.95,15.83
25,15.86,,,13.35,16.75,10.96,11.0,15.01,,19.65,,17.86,18.07,15.75
26,,14.18,17.01,13.5,16.74,,11.06,15.13,,19.93,15.15,17.91,,15.95
27,,14.31,17.09,13.54,16.84,11.11,10.96,,,20.24,15.28,18.07,18.36,16.25
28,16.18,14.61,,13.76,17.03,11.23,,15.39,20.69,,15.37,18.14,18.4,16.32
29,16.65,14.71,17.48,13.89,17.23,,11.23,15.32,20.96,20.62,15.78,18.63,,16.37
30,16.62,14.69,17.72,14.06,17.31,11.44,11.24,15.42,20.97,,15.9,18.78,18.71,16.49
31,,14.82,,14.13,17.61,,11.55,,21.55,20.98,16.12,19.04,,16.69
32,16.84,15.1,18.28,14.28,17.88,11.54,11.54,16.05,21.64,21.0,16.18,19.08,19.22,
33,16.98,15.26,18.57,14.27,17.96,11.64,11.58,,21.9,,16.41,,19.17,17.01
34,17.34,15.21,19.17,14.44,18.11,11.77,11.66,,21.94,21.88,16.62,19.62,19.11,17.26
35,,,19.21,14.57,,11.82,11.68,16.23,22.1,21.77,16.59,19.6,19.2,17.3
36,18.32,15.52,19.2,14.65,18.25,11.96,11.88,16.19,,21.73,16.51,19.46,19.36,17.41
37,18.26,15.72,19.18,14.9,18.46,12.1,11.8,16.3,22.4,22.04,16.71,19.76,19.6,17.43
38,18.26,,,15.02,18.6,12.1,11.79,16.43,22.58,22.64,,19.88,19.6,17.47
39,,15.88,19.89,15.19,,11.97,11.86,16.46,22.68,22.96,17.15,20.25,20.12,17.72
40,,16.07,19.8,15.11,19.01,12.0,11.76,16.31,22.88,,17.21,20.33,20.39,17.79
41,18.84,16.16,,15.33,18.89,,11.83,16.19,22.84,23.16,17.5,20.65,20.34,17.55
42,18.22,16.16,19.69,15.67,,,,16.73,23.1,23.14,,20.92,20.76,17.88
43,17.55,,21.19,15.75,19.02,12.1,11.66,16.62,23.38,23.08,17.28,20.37,20.93,17.87
44,17.03,16.56,21.24,15.67,19.25,12.25,11.59,16.51,23.26,,17.31,20.38,20.51,17.98
45,,,21.72,16.07,19.13,,11.95,16.11,23.21,23.84,17.55,20.66,20.78,17.72
46,18.5,16.6,21.85,16.24,19.31,12.31,12.02,16.68,23.56,24.16,17.82,20.94,21.29,
47,17.89,16.78,21.74,16.35,19.06,,12.07,16.8,,,,20.88,21.53,17.94
48,17.75,17.05,,16.48,19.35,12.54,12.26,16.91,23.91,,17.99,21.19,,18.25
49,18.19,17.04,,16.26,19.88,12.7,12.27,16.78,23.96,24.85,18.39,21.59,,18.24
50,,17.23,23.05,16.51,20.1,12.78,,17.06,24.19,24.71,,21.79,22.09,18.72
51,18.82,17.38,23.28,16.62,20.21,,12.31,16.95,,25.13,18.62,21.91,22.1,18.56
52,19.49,17.65,23.62,16.42,20.52,12.95,12.35,17.64,24.65,25.33,18.73,22.06,21.95,
53,19.51,17.82,23.12,16.53,20.07,13.01,12.39,17.16,24.72,25.32,18.46,21.7,,18.78
54,,,,,20.59,13.31,,,25.57,25.34,18.77,22.13,22.93,19.02
55,19.66,18.04,23.98,17.26,20.34,13.32,12.44,17.68,25.69,,19.35,22.77,22.73,19.28
56,20.06,18.02,24.05,16.93,20.49,13.36,12.53,17.26,25.12,26.88,19.28,22.67,22.61,19.38
57,20.09,18.29,24.14,,20.9,13.53,,17.37,25.44,27.18,19.76,23.2,22.64,19.37
58,20.19,18.46,24.1,17.12,21.0,13.44,12.98,17.8,25.63,27.42,19.87,23.39,23.22,19.64
59,20.38,,24.34,17.46,,13.73,13.03,17.85,25.93,27.83,19.8,,23.9,20.28
60,21.1,18.66,23.59,,21.19,13.77,12.97,17.9,25.85,,,22.98,23.55,20.03
61,20.83,18.91,24.39,17.94,,,13.08,18.17,26.66,29.22,20.13,,24.2,
62,21.21,18.76,24.64,17.79,21.63,14.15,13.08,18.63,26.99,28.11,20.23,23.92,24.39,20.67
63,21.63,18.98,24.69,,21.64,14.12,12.96,,27.1,28.51,,,24.34,20.78
64,22.03,,25.53,18.13,22.0,14.11,13.23,,27.11,29.18,20.06,,24.83,21.09
65,22.03,19.34,25.04,18.23,22.3,14.16,13.58,,27.68,29.55,,24.47,24.71,
66,22.27,19.64,,18.77,22.77,14.69,,,28.05,29.55,21.25,25.16,,21.81
67,22.86,,26.41,18.91,23.19,14.74,13.36,19.61,28.3,29.48,,25.25,25.61,
68,23.7,20.08,26.96,18.88,23.14,14.75,13.44,,28.35,30.09,21.36,,25.68,22.57
69,25.53,20.39,26.46,19.14,,14.91,13.83,19.81,,30.76,21.74,,25.96,22.4
70,24.61,20.57,26.58,19.66,23.92,15.11,13.92,20.26,29.26,31.46,22.25,26.48,,23.18
71,24.7,20.64,,20.0,,,14.12,20.77,29.44,31.87,22.11,26.33,26.67,23.26
72,24.73,21.04,26.97,20.44,23.97,15.4,14.19,,,32.11,22.05,,26.67,23.76
73,24.24,21.22,26.94,20.55,24.28,15.42,14.56,,29.76,32.73,,26.68,27.18,23.95
74,24.13,21.26,27.44,20.99,24.78,15.73,14.46,21.29,30.01,32.0,22.55,26.88,27.72,
75,24.59,21.69,28.04,21.1,24.76,15.97,,21.82,30.58,32.53,22.44,26.78,28.55,
76,25.07,22.01,28.63,21.44,,15.92,14.81,22.35,,32.07,22.59,26.99,28.82,25.22
77,25.43,22.4,28.38,22.05,25.39,16.37,15.24,22.47,31.47,33.44,23.18,27.68,,25.47
78,26.49,22.21,28.61,,25.81,16.64,15.15,23.3,31.98,34.06,,28.25,29.14,25.96
79,26.64,22.37,29.03,22.37,26.41,,15.49,,32.09,,23.69,28.39,29.21,26.37
80,27.85,,30.06,22.68,26.49,,15.49,23.51,32.47,35.22,24.08,28.92,29.91,26.53
81,,23.02,29.98,22.42,26.84,17.02,15.54,,32.68,35.66,23.97,28.85,30.04,26.78
82,26.82,23.2,30.2,22.73,,16.87,15.76,24.26,32.4,,24.83,29.14,30.46,26.55
83,27.56,23.05,30.07,22.93,26.29,16.84,16.39,23.86,33.1,35.92,24.48,29.14,30.81,27.05
84,26.18,23.49,30.43,22.96,26.03,16.72,16.03,23.72,,35.66,,28.98,30.57,27.11
85,25.74,23.68,30.23,23.08,26.44,17.05,16.34,,32.34,35.62,24.55,,31.13,26.79
86,,23.79,30.21,23.31,26.45,17.18,16.2,24.1,,35.37,25.16,,,
87,25.9,24.04,30.74,,26.68,17.37,16.33,24.09,33.07,35.61,,30.01,31.44,
88,26.22,24.3,31.39,,26.44,,16.65,23.72,33.52,,,29.88,31.41,27.75
89,26.67,24.51,31.63,23.73,26.9,17.66,16.4,23.78,32.88,36.63,25.7,29.4,32.0,27.92
90,27.79,24.81,31.13,23.93,27.01,17.82,16.32,24.3,33.19,37.57,26.88,30.47,32.1,27.62
91,27.27,24.88,30.89,24.06,26.83,,,24.46,,37.64,26.38,,32.43,28.06
92,,25.29,34.01,,26.76,18.04,,25.1,34.64,38.36,26.57,31.42,32.48,28.29
93,28.69,25.27,34.58,24.42,27.01,,16.95,24.92,33.61,38.41,27.76,30.34,32.57,28.59
94,27.82,25.74,32.81,,27.67,18.35,16.97,25.06,34.78,39.48,29.21,31.7,32.66,28.11
95,29.96,25.61,33.1,24.34,27.82,18.38,16.99,25.41,34.92,39.01,30.26,31.26,,28.29
96,30.13,26.12,,24.6,27.78,18.43,17.24,25.99,35.74,39.24,27.26,31.68,32.79,
97,29.34,25.97,33.54,25.06,28.02,,17.16,,,39.41,28.05,30.94,33.33,
98,,,36.22,,27.94,18.7,17.2,26.76,36.64,39.63,28.32,32.19,33.89,
99,30.16,26.29,35.35,25.54,28.17,18.97,17.41,26.5,37.01,39.86,28.69,31.51,34.46,28.94
100,29.29,26.6,35.07,26.02,29.01,19.0,,26.57,38.91,40.75,28.84,31.03,34.1,29.29
101,30.55,26.93,35.65,,28.63,19.11,17.55,26.85,,,29.5,31.03,33.78,28.96
102,31.24,27.32,36.23,26.01,28.75,19.27,17.92,27.15,38.44,42.11,,32.31,34.14,29.2
103,31.39,26.93,37.14,26.48,29.1,,,27.21,,42.77,29.81,,34.88,29.67
104,32.25,,37.48,26.58,28.9,19.7,17.9,27.79,,42.07,29.68,32.53,34.79,29.7
105,31.9,27.71,37.41,26.65,29.31,,18.24,27.84,38.41,42.51,28.83,32.39,,29.7
106,32.49,28.03,37.7,26.98,29.86,19.95,,27.73,38.73,41.62,29.74,33.12,35.34,
107,32.89,28.05,37.84,26.87,29.62,20.0,18.59,28.14,39.52,42.01,29.77,33.57,35.46,30.12
108,32.71,,,26.93,29.51,20.23,18.86,27.85,38.76,42.47,29.52,33.54,,30.22
109,,28.13,38.34,27.5,,20.43,,28.05,39.13,42.74,,33.74,35.88,29.91
110,,28.51,38.46,27.5,30.27,20.46,19.18,28.24,39.31,42.3,30.04,33.72,36.5,30.1
111,34.58,28.53,38.19,27.37,30.4,20.72,18.99,28.56,40.07,41.9,30.04,33.38,,29.83
112,30.8,29.08,38.27,27.61,,21.01,18.83,27.71,39.64,42.27,29.86,33.43,37.07,
113,33.77,28.28,37.89,28.01,31.15,21.12,19.41,27.77,40.75,42.42,30.34,33.64,37.05,30.6
114,32.45,28.92,38.46,28.43,31.23,21.24,19.24,27.89,40.74,42.08,31.64,34.49,37.26,31.18
115,33.29,29.02,39.63,28.47,31.73,21.37,19.41,28.49,40.49,42.38,32.5,34.64,37.84,31.45
116,33.66,29.3,40.01,28.92,32.5,21.7,19.49,28.9,40.18,44.42,32.63,34.39,37.85,32.12
117,33.51,29.11,38.95,28.98,31.77,22.02,20.0,28.51,39.74,45.06,33.2,34.16,37.85,32.0
118,33.21,29.74,39.41,29.44,32.28,22.19,20.26,29.04,40.02,43.99,32.36,35.2,37.91,32.29
119,32.9,29.77,39.77,29.69,32.73,22.34,20.4,29.43,40.05,45.06,32.33,36.12,38.32,32.7
120,33.7,30.41,40.86,29.28,33.24,22.69,20.59,29.7,40.17,45.84,32.11,36.67,38.44,33.23
121,32.93,30.39,41.71,29.66,33.3,22.82,21.5,30.12,40.64,46.8,32.63,36.62,39.22,33.59
122,34.39,30.8,41.01,30.17,33.85,23.2,21.45,29.98,40.85,45.63,33.22,36.66,39.86,33.85
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/population_estimate_missing.csv | ,Male,Female
0,238.1,183.2
1,,194.2
2,252.5,201.6
3,264.6,211.5
4,281.8,225.5
5,292.3,236.2
6,299.4,
7,307.7,
8,,265.9
9,331.0,277.4
10,,282.8
11,,290.9
12,347.4,297.9
13,347.8,301.6
14,351.4,306.6
15,,312.1
16,359.0,317.0
17,368.0,
18,380.5,333.8
19,386.6,341.5
20,392.6,348.1
21,398.7,355.4
22,,362.5
23,413.8,369.5
24,420.4,376.0
25,425.3,382.8
26,437.3,
27,449.0,402.0
28,462.8,
29,477.1,423.6
30,490.5,435.1
31,507.2,
32,518.2,459.0
33,535.9,472.5
34,545.9,484.7
35,555.5,
36,,509.0
37,,
38,595.6,538.9
39,,551.2
40,,562.2
41,575.8,574.5
42,563.3,584.1
43,,590.2
44,627.8,599.4
45,643.7,613.9
46,,631.8
47,673.8,
48,686.0,657.1
49,700.0,
50,,
51,730.6,699.0
52,740.8,709.3
53,749.1,717.8
54,758.5,727.1
55,,738.9
56,775.6,747.1
57,,
58,,760.8
59,792.0,766.4
60,796.7,773.0
61,804.3,780.3
62,813.1,788.7
63,821.7,
64,,
65,813.0,820.6
66,799.2,832.0
67,793.7,842.7
68,790.8,851.2
69,813.6,862.7
70,855.9,872.6
71,,891.1
72,913.6,909.5
73,934.3,927.6
74,949.4,942.6
75,967.3,960.3
76,989.5,981.0
77,1017.9,
78,1043.1,1031.6
79,,
80,1089.1,
81,,1098.0
82,1137.8,1125.0
83,1165.6,1150.3
84,1186.1,1173.7
85,1207.9,1195.6
86,1238.0,1223.3
87,1264.1,
88,1288.4,
89,,1304.0
90,1336.7,1327.1
91,1360.3,1351.0
92,1373.6,1371.4
93,,1387.6
94,,1404.2
95,1425.4,1426.7
96,,1451.1
97,1477.8,1481.9
98,1510.0,1514.9
99,1543.9,1548.0
100,,1576.1
101,1578.1,
102,1578.4,1588.0
103,1575.9,
104,1573.8,1590.1
105,1581.5,
106,,
107,1601.9,1624.9
108,1620.7,1644.1
109,1632.2,1660.8
110,,1666.3
111,,1674.3
112,1652.9,1689.2
113,1649.7,
114,1659.7,1710.1
115,1681.9,1728.5
116,,
117,1749.1,1803.1
118,1772.5,1825.4
119,1797.8,1850.4
120,1828.0,1878.7
121,1855.4,1906.9
122,1872.9,1929.7
123,1883.3,1945.9
124,1891.7,1959.5
125,1900.4,1972.6
126,1920.5,1995.7
127,1956.7,2032.9
128,1991.8,2069.8
129,2016.2,2098.1
130,2037.7,2123.3
131,2061.8,2149.6
132,2083.4,2169.2
133,2104.1,2187.4
134,2134.0,2213.2
135,2158.2,2234.9
136,2174.3,2248.4
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/endog_deaths_by_region_exog.csv | ,Northland Region,Auckland Region,Waikato Region,Bay of Plenty Region,Gisborne Region,Hawke's Bay Region,Taranaki Region,Manawatu-Wanganui Region,Wellington Region,Tasman Region,Nelson Region,Marlborough Region,West Coast Region,Canterbury Region,Otago Region,Southland Region
0,996.0,6768.0,2256.0,1647.0,369.0,1206.0,888.0,1950.0,2940.0,237.0,360.0,270.0,318.0,3756.0,1581.0,840.0
1,1038.9504083744005,6952.438043167127,2485.113767748108,1779.3883525138863,431.89560211209425,1304.2642215271565,1036.5471247093506,2036.1456081473514,2992.806902426471,322.62946335095785,406.0179425670709,379.3377726268739,390.8048040628316,4075.498902651059,1675.393568860146,943.6548564339623
2,1093.1087848206537,7152.236292341489,2381.0794761244383,1822.0386957877217,517.0666181419041,1342.2298786056208,1015.5803915429468,2024.0105974570256,3040.2493505739303,352.8670950715875,405.17438599581175,362.5140453524556,312.0184771435616,3876.6108087872935,1540.0350027692327,766.4226405747478
3,1154.7146732241865,7094.990758465727,2481.7197091484513,1808.0868946208034,488.6788274055916,1303.8482950750497,963.9808649133096,1977.176739577192,2770.0914126430303,255.75161262011798,309.1900575815597,237.16924633706643,180.62191595059778,3846.4952002054506,1404.692164784402,745.8836579333731
4,1214.0695102695297,7294.747998600694,2429.6450943731224,1985.7129372964323,519.2717371454779,1215.86862622914,892.4086481348347,1921.9996244089268,2903.9589662519315,193.87470199030548,275.6396749978092,296.5823507686031,199.91266046871,3956.056066142721,1537.3997547090307,916.3648366687227
5,1180.5644749552614,7507.17961116537,2571.789410268783,2039.0762076777257,421.26877466894024,1163.1339412479751,806.377328566198,1894.3115741565052,2885.4065922620916,226.98960416703216,342.128620031849,401.4217692345546,329.9978151918066,4081.547017630741,1676.116867442315,933.2905882059263
6,1224.7047603660887,7407.700898093777,2489.5779191585725,1945.7867927267828,375.336084306059,1129.8971395789656,670.2526328328049,1739.6010543520192,2940.017273854194,286.2065004202234,450.2509902070052,468.74605904838813,345.0188771927413,3834.7082724680963,1548.184573213674,697.9427835729385
7,1160.1293000631065,6933.523094171837,2486.7023496362913,1753.0422678053,275.8619061667761,1014.6519083612786,684.553945548555,1805.3718103406559,2807.3172874916468,326.3779148013999,427.1581286012663,380.78351483101744,230.17090452270594,3523.276251937407,1393.1356869768356,713.6049164973049
8,1271.6251626633054,7276.634498063058,2559.5380537951855,1976.886139696935,341.8343772971416,1164.6129586398672,911.1470454402347,2040.8701238617393,3007.2142562277872,309.5615380476903,361.0795936404886,297.0729857726715,128.1608755967526,3834.337940125184,1565.6406509581166,859.8231845953732
9,1166.1360407418688,6887.712190532319,2378.2953573257014,1868.2591761146844,313.6057441977176,1131.2634652789216,944.6704269594989,1988.3052357459355,2798.1500840353056,222.82495672579628,287.23268272648573,337.6392283744881,161.70078429798355,3699.1230785710427,1561.4916546588859,836.0694358115003
10,1260.7644956315119,7112.972225815915,2426.0390856416366,2047.5655590683461,354.3719979060145,1220.123187905753,993.0509808563199,2021.3410203702056,2812.1959283678743,171.08825279307234,329.4830479034397,462.8362445342101,297.6590462093425,3870.522823922246,1493.5717538934468,725.7440046991989
11,1324.767868625797,7177.971678096597,2353.7310613007207,2019.3497526310869,434.89903884983556,1263.0838042249616,933.418391102556,1858.1884996531007,2774.258822747422,201.63125433949682,494.1617251445538,553.4786913641619,215.16997737841118,3879.064909660275,1412.9138443241363,832.4722266944501
12,1274.5480215938476,7178.380225682429,2347.446741323331,2056.408034021699,495.5430436560038,1313.5693244494769,889.3055093104801,1795.6613832085868,2825.26035554813,308.31207960849355,511.33418529910756,421.5588142517996,85.36126940967603,3764.8941232106736,1599.5659955539336,903.8397946788524
13,1191.6353160157732,7236.74167659451,2455.889670152143,2217.3073813038395,468.4619465811397,1143.8617226789715,819.7670561436108,1876.3695650079355,2907.594154976519,362.29065191004685,441.3578068444726,384.853001833573,93.8945215911836,4210.296238850455,1553.5502346356634,800.4852205108929
14,1145.6674691441117,6854.246737772542,2347.280405454169,2153.939841020403,466.49885762509797,1127.1464263222308,794.4434457564427,1822.7431453065556,2783.297990809023,296.43401850197,309.639772226726,409.26407495153603,202.83399690261112,3868.1572197227533,1352.3513514717915,646.65741086763
15,1119.3641302294022,7127.537325748383,2608.6374529387517,2386.486977479464,485.6821701130363,1040.667216053644,786.5154424909291,2027.572200424207,2825.9258432009287,218.45237593530175,348.11178487805716,525.2832982492791,207.5630298133887,3931.52360861375,1328.9852921392676,823.2314662329486
16,1155.4981903755518,7196.559899132302,2673.4058567609363,2389.0686031177943,367.4689982391534,1052.561471939067,943.1352324205246,1889.5724526085758,2766.7665314643914,173.2551877252159,476.35603640756267,570.5907887701976,91.40172871162537,3980.4326276497923,1534.0233963234643,894.7413123845197
17,1251.8649698105212,7394.480914284106,2787.3342049069906,2481.427288371271,324.3455623746878,999.5079864338826,977.7119470154184,1939.4936494513122,2845.0769178886235,222.7484622804227,575.8178675101883,488.82955990522026,44.237283513367856,4014.645703243325,1661.3428516169931,752.738647610733
18,1172.2505109786134,7396.671956262273,2698.4615896952882,2437.236224286564,269.67076289998363,1107.526455183087,972.1730588040948,1872.3903106830073,2801.7777463450925,318.5244659573674,466.77766063176375,404.983617720718,131.39776640428724,4261.126864672212,1463.6728239058586,718.9546802137188
19,1097.4002415087211,7264.765873259101,2720.0587040307646,2310.955062369673,297.2628881127559,1117.963579484021,935.0586656539452,1784.0389259992926,2848.5239959342366,331.3815724207062,372.11716983219355,450.7210360106143,201.4012676035091,4206.863111803736,1258.2910579759787,849.6119213422157
20,1210.9892563670944,7761.778900199965,2879.374495007197,2336.513861980383,320.9698036517707,1204.9097916372352,925.4399807214569,1812.1596190148923,3127.4906039863854,277.28941596724496,360.722843206304,593.8949267510523,163.28720011049774,4329.1849174037825,1545.866839244076,889.6757763863114
21,1204.595403951423,7769.287862569286,2852.0704946709297,2321.392195725619,408.42075494651857,1239.266191985429,834.2851246492112,1900.113529657344,3115.521446712909,179.16453745913148,503.8155206298622,622.3538258477845,-0.7990084773221895,4320.134929372896,1662.9370051285464,723.2892159796717
22,1183.6762490987244,7704.646631373182,2805.274256668521,2345.8085497211814,467.6971564677179,1119.8457973490933,766.9688938349474,2002.2515518119765,2927.6305558759445,214.86642891651843,590.3372567969327,517.6089936849814,62.94477896310872,4148.478073805102,1464.6782433104709,783.2770984017143
23,1187.5508314192239,8204.222493961657,2797.2434623066124,2481.708151560875,491.2907267139968,1151.1692727595096,892.2082970389683,2132.8123676960804,3001.5534503331487,275.3487292844441,552.1604948596505,441.7354408921744,171.59306018970088,4375.985039595521,1551.7553454328274,948.7926638195188
24,1249.3869461938807,8371.631326408744,2899.6937262915326,2690.0418758831406,457.91829219710473,1182.4621639231473,1013.8078677475456,2025.635061595966,2955.2774584956846,386.63796572481215,488.35788518630227,557.1416440141217,190.01257098829717,4151.807810431371,1643.9586786383747,918.7903350641574
25,1369.1944549060454,8222.950560202296,2815.8962268421365,2632.51277660153,453.8126533304794,1066.4987195378694,1042.582415557717,2003.605478369024,3115.259923632251,366.6421867858665,443.5476868494218,688.9358944834481,30.67750965002341,4081.9552198026895,1546.4165299478184,792.6007344059269
26,1420.8248920952383,8803.890919563275,3091.687624539751,2963.60746867278,453.9905893173028,1254.4204906474442,1078.9789295120765,2132.444290987613,3263.4107476356694,249.2329147767512,545.7396291994676,655.242920834494,11.423585216247943,4531.864042385398,1511.5003594921118,909.2964867254242
27,1508.9773842065492,8814.911588611316,3110.5485819589103,2918.415006298744,393.4043802201025,1299.788494472141,1032.0359708892447,2083.9709054764917,3308.4522993738406,208.78692998453175,627.2548657301826,586.8865382911815,92.08714842685964,4403.649025432484,1486.2865672704493,938.2290317900179
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/endog_guest_nights_by_region_missing_exog.csv | ,Northland,Auckland,Waikato,Bay of Plenty,"Hawke's Bay, Gisborne","Taranaki, Manawatu, Wanganui",Wellington,"Nelson, Marlborough, Tasman",Canterbury,West Coast,Otago,Southland
0,64.17299227679365,256.12957833548916,122.90063742259679,153.34482929479051,46.46812303813159,92.4040361649103,110.08256257512701,56.66954966645217,210.85669076131327,27.326230187754817,181.95421948782865,28.68977607448827
1,60.77705041928116,278.06747055665863,119.22263536632272,144.2406540652008,64.26100497982999,105.20464266565688,,82.85207011198217,226.33968447296206,46.95904341058122,245.33139736074503,53.20952051806148
2,,286.20150559049614,155.30774275799837,184.4523636262992,97.46749769480458,138.90042530860737,153.20324761889702,122.73479246507993,281.71789223943426,78.30778562298264,272.93376361830417,
3,120.90289466573269,309.86477242189716,164.1905893440761,198.39837519061413,115.25490956553529,145.85539965859192,174.371983200797,145.15648613560853,299.63612447645215,102.8307302306984,270.03897824474285,116.42293046019745
4,,371.2532515091942,181.25369073799166,235.0857238307511,133.7524546453504,165.7603696753859,208.93896611624365,,,130.0216247572543,300.0633662954886,149.90321222941935
5,228.23618116065984,385.44661104346545,,311.4773236923793,166.86200467142518,176.774274476457,218.39681709535606,260.508204364607,434.43537375102846,146.64284052269358,367.8470975961556,177.30376402709825
6,325.21336380889073,441.76290051229415,376.55129321034417,,247.53173885280978,227.3353634610168,255.65673062494136,378.01108559779163,520.4623297563426,,479.56740405219574,192.4104156000344
7,204.61183771478898,423.87418945004424,245.17689691980723,301.63074835262637,195.48581951083403,206.2278197417678,263.96313312914015,268.46102036047887,449.1025659543392,169.110591265166,403.8067149093254,199.05287386010514
8,204.30811291922188,,274.8917568575331,317.0811977835216,199.03009334690532,216.639497028831,271.93460598197373,258.87921633215194,433.7038963437551,162.2669520553419,396.5938977400008,180.90230611398374
9,161.87620905862946,373.59152437080127,201.9007925767581,273.9140875882694,182.82618442819847,192.8776036681976,247.1140389232352,212.37629648619443,368.0056295337654,127.01037125154744,315.3105160221565,143.50893733609004
10,126.60232102174227,336.21910792442134,173.13405275843178,225.68457106122156,165.09092219924545,179.78572429471768,235.16480582693606,177.2491930292626,282.57705299769896,95.98643834926476,239.69498629776925,103.89914563993854
11,111.06879773889949,308.21572617159416,167.89809977612998,201.6239570075229,158.04917651712594,,217.30342787559943,157.8674643416101,242.88513101196529,81.2658524947087,,80.04913176988083
12,,352.80875130485225,210.60663848803125,252.07100351202922,162.05775637808352,195.79981806951434,226.1844818570277,151.83110134708465,,62.81503689498883,243.21795070013076,
13,131.06657856157304,363.9099810242821,191.72030784758442,232.36619806938404,157.78352257318485,200.9458649977767,206.4563316660434,142.28504238229226,250.97360787994955,36.07834296563914,257.88666292010265,26.966873597779824
14,143.87771315469763,350.8848098087916,,247.87286439150782,156.9976139987921,199.15193184220135,198.61159835765162,129.14177682451577,240.23969846284785,25.421446120095705,,12.523742692497471
15,180.40585538255988,,255.79859476493854,297.1626091037754,179.8581829103668,204.45167186150525,203.54859712432255,,298.8972828348238,28.408911704079404,194.03251726764415,11.259828139577472
16,181.3579985668115,419.9449491304847,234.03596348581533,275.42931928955215,,,190.87932475074587,131.97718404821072,290.8815240142501,10.860336039343636,183.23851122378505,3.239379471185906
17,297.3384495575763,428.2665411205504,331.66440891143736,337.3786006821596,,152.29879715318924,163.7935918519268,198.43252391547782,,0.5844039412331199,195.82891900236592,
18,465.7102413733304,472.1781997374792,457.9824047286671,506.2440883761818,236.1687534259462,191.5738866968739,164.74804471502333,,399.36368160634885,-2.1679120586377536,308.147980587583,8.193393577783723
19,250.40352240123477,482.52650635174604,300.9388226712018,321.9354203546028,149.36199283047924,140.14301118710026,,,302.5650288743075,-23.341912590934967,193.51227729156452,-5.109028783545412
20,225.80916149188334,471.1898725051267,268.5413313115603,,132.8450775501953,122.59481550059678,149.5033587335011,123.95983653483941,244.249728340452,,178.37638139598306,-12.852109339523224
21,225.6124568166897,430.10771447384957,265.81012870178836,270.407689193587,126.83142011547359,103.58210059799168,126.85194635310965,89.40201739805157,210.30058061273255,-53.95532619358271,173.0994080907351,-9.355869325287884
22,162.80261799104255,389.47500473238216,202.43455558967486,,86.6006391184157,61.34834112876805,97.17986374245346,33.17384847213427,90.45021298406557,-81.66265007966079,62.44601757166957,
23,139.4667759854316,358.04714760162653,180.95405616836277,140.84343766479742,,35.92270010806437,65.16580308472683,-0.08583659490479789,31.25439851386527,-96.79193901485021,,-14.056016201175382
24,,393.4851814366358,198.84964530287294,190.12961018927265,67.03536052454243,,94.86265447563581,-0.9630516003743423,90.04771739076827,-77.39470419912175,145.1038190063706,7.099633305599935
25,144.63613875307706,370.68886656493646,171.05245871580846,148.21861970203636,51.57627963666792,21.44254561022032,69.68619806023389,-15.122879587808328,87.1500962880283,-66.64145006471051,196.9710961578623,39.980577235020796
26,146.9746187046207,373.0090949628321,192.14279977074358,154.5645506630272,42.82657124947198,25.715268946743393,65.46600007539678,-4.583333683781973,104.68939934178191,,206.73826943981575,79.37141111686914
27,177.6427030076839,417.8901057744501,213.03346560633312,197.08791053898153,54.93534159061152,38.90548627400287,87.72212970730871,14.785417938360865,203.99983553965808,3.7713218212170148,,117.81856779552692
28,184.04599761782114,436.77628949499683,177.05340008297966,155.20086427497034,25.47367600198011,,,31.735622126847062,229.75971837874548,28.14753166727553,265.62610568888584,148.98675367590488
29,253.52553800779958,407.6244380860303,274.2087691466604,217.4710595827851,38.63634256995729,2.410631779727524,78.45674183491622,120.24512974632302,281.4525579868648,65.039922090278,334.75122862248406,
30,338.5517949131872,457.53409421110484,392.5609497850607,388.7919358654618,94.44522098570393,31.216667881782314,102.1254188010646,274.3656915848173,412.48032321197707,115.73440957348835,512.384294686282,204.57881443826506
31,198.90228373463884,453.6532979817648,202.96933521887738,148.10767647105018,27.697703261821893,3.7202906388561843,93.06936404299063,149.1633026029337,334.5922031161586,122.71163571275935,,206.23130715493096
32,180.06980231728102,465.474800429703,196.4274047978714,108.83206366448316,17.484008787162495,-5.331468053860135,112.98730583358682,142.47411473295034,323.8079293251147,121.93993934894277,379.7632690535629,194.90025526395942
33,161.8055178199014,389.45431380733885,166.665616941418,145.78322500974141,17.223826307162632,-3.7297270335227495,116.11248552018034,139.53723660212967,322.59599735575637,127.34764541984913,354.8790702616999,171.8841852179261
34,99.04106313823357,331.0905422210189,82.13429248039212,,-22.952909476563704,-39.8372008589518,89.8787132517622,100.71318017900143,219.48547507762603,113.85250236446008,260.2813871891552,130.57731001000872
35,94.59120886882394,300.9231147745463,95.39691540939094,32.133461042671996,-14.546892343941508,-32.89152468285715,104.73427285052331,122.27716632763568,226.8481626392898,111.55127832491553,,106.26740078818844
36,89.46904996036085,340.58369300231544,88.57443858835406,,-15.08039491651411,-17.09967839366601,132.48439649205466,132.59666340470758,266.9921522622426,116.4546028534119,345.54116570122505,76.49843559223382
37,78.7761874004413,305.6489706843157,57.74386076779937,2.37880589133556,-23.924914783836357,,121.94738685694885,143.41018749327014,,,358.9002421045252,44.29209051187635
38,87.7982752569694,347.2320250567426,86.14167928134773,52.96354412914644,,48.17775070751357,168.21185609027472,188.42803621340437,348.21272741486223,128.96548331498562,352.2251275667646,28.925069187085917
39,,358.27961605455937,101.20873923350788,,-2.2026134676020206,26.14050147725409,172.92642864925762,202.28149772042076,379.40636600939825,123.70296011548663,297.4406185453771,16.02492797351267
40,102.25240184051947,412.59385615640014,85.6210783160023,54.72906960373308,-0.6576275091539117,35.17318329329908,188.86845114653366,252.60993032658718,409.96438840170606,127.86437920537657,308.5014558877492,14.110870312943561
41,,,156.07483945495125,126.44854253324439,33.01366924528242,55.07318987863034,208.2614790589131,327.25797900640066,413.5303060549219,119.01425719152637,313.0385241145576,0.44387221343299643
42,262.1510168954733,441.97605359597435,298.0524197758136,309.58888797406325,,126.5422880205808,247.57234111533467,468.17381461806923,492.8620749714096,132.4970529223614,409.43199855011545,-1.6273728912755985
43,122.39255586970621,411.42796955447216,138.682285042299,90.95946322127693,,94.32047533888087,251.51278452735085,334.9346200792453,,102.22517309370332,321.5298195507169,0.4112710612371586
44,103.98607847715762,,118.56089236625172,78.81410564347104,52.89899600386705,107.81505136021094,279.35519854796553,311.5240593781127,399.0738070965356,,258.2547688386521,-9.604071735813761
45,87.88420083231307,311.9827551049291,120.31149648347217,103.51048325211252,69.21869209153824,112.06290381951389,271.0526252149733,288.78453846365375,366.8124744760913,43.1504369326666,260.06047619150803,-12.481381543059399
46,,242.28334463868515,,17.081967143523883,40.99961210668816,96.29237747050709,225.54287694346525,,210.4537077053119,-38.09329020678881,119.6677135815307,-35.94100199887923
47,-3.920275964665123,224.82552282348942,20.920563782153522,28.738041103151588,58.08726813819797,112.3542736410262,222.04095898337664,193.79633268352313,164.0701172051015,-63.242003607510824,85.76113536567962,-28.8388782286788
48,3.2262218419940254,251.47893988054017,43.5245781530427,82.46583063655368,82.27605739836471,152.3532365890202,245.01553535339644,182.4316336381263,196.24537619542448,-67.85547092184234,206.18603821858463,-3.493710509846693
49,2.124373804649437,221.06395889196614,,68.52426428263004,,156.89665974224914,241.13397594671483,169.39634810641724,152.12681808757554,-73.9022374238744,253.07373203938386,21.61470641647526
50,21.150219915249366,223.85606776942586,58.83056506574587,99.70629752159178,103.85142521794893,171.9604493374434,239.91742066005645,157.42014252752364,160.95227493796276,-72.41920819058053,249.54946479002967,52.051622620459014
51,40.77944762343958,,86.7682180368102,,129.91252219316252,171.63088990924427,234.41224564065453,162.03865001512992,204.1492208816939,-64.29233498745128,251.5964090017958,79.41573965169167
52,48.22966436572857,327.8127138786595,81.56164504281172,145.24408896134523,135.15126523798426,173.29315887061156,,165.0719729080512,,,321.6645771857913,123.03078173350481
53,120.61690689892592,316.40086160399284,201.45930740784368,251.50838450790243,,187.9703498658829,229.52967133198757,228.35744591580436,277.66756811938205,-21.144367882725575,398.5758699247986,143.6148915081157
54,208.6038434374749,397.1726054278074,318.2844046117546,468.3004480017964,238.3224949342239,216.53840059845356,,340.6926173884726,376.4089139560837,35.73653247717317,560.3627436885515,190.75814326261448
55,56.10852328545971,319.388731305277,,232.7785906068631,182.86969283087126,179.63099055103515,212.36379541602582,196.59164472754242,283.94543867959044,29.655042014651002,461.4234455952509,201.33630668670585
56,39.17941596818724,341.7560761337597,170.092497525716,250.66853478872656,,174.05528918150287,210.16777427220038,159.15751346050024,286.602685462135,50.995230051506184,453.72703428427917,205.59902559470834
57,12.282230085696611,264.2427924305057,137.98653323105276,252.6611661046444,184.9843412672884,168.96832728141095,163.789740654862,114.70388591830701,245.86419654152985,45.45707662526305,430.5679815039892,171.6368753903642
58,-51.65103378004602,230.4241397309065,,169.13425616449598,144.07257509612322,126.60231761869018,111.06247363362122,46.631419555408186,121.72271377668756,38.86852482710118,342.0654518990314,155.08088085623658
59,,209.30488882104117,91.36764999993558,181.38682281294587,144.99926658571047,,98.94615938652106,37.0942972726742,110.07774385532502,,,124.40597977703126
60,,261.25551263107224,120.14419216502733,225.70522836159455,150.74283330139426,134.67997612677306,108.35528895996381,53.38677590524128,179.6045871451727,85.25573970276476,458.32061348470376,115.03135976571356
61,-83.39126700276157,270.9285566449587,109.23344842333364,196.11501939956105,144.98467533455698,131.8370568437007,89.5464971638898,45.51502192765558,204.53308712836065,97.4837107129506,493.9077986234868,90.48806193190055
62,-71.55543633339929,272.5846349825465,,232.49278076127797,,128.93905766242975,89.7806396089191,61.98029139968253,,125.22965524503343,458.3755298303417,73.5162445328661
63,-45.26341677965712,327.69662816912614,171.52013127228355,,151.43354392864615,103.23511675853766,83.87066381043131,76.5149047676897,299.03825612646375,157.18714211146926,431.61599139186296,60.467701659489
64,-30.24793284860661,360.53899268340615,179.52226387757722,270.47713803737224,134.48574982341407,86.61790270214428,99.87967845022585,116.1804611216806,350.2877514932055,178.5381613085191,429.2956278324089,54.49557570572105
65,60.798881390774426,,271.48007590588054,376.26558604342824,166.13976467410794,78.54816088891153,96.12659289815589,208.68262867653092,430.47865174226945,204.49755251620644,506.66425079380133,30.672858434408653
66,161.28255499146846,489.31848490419185,397.26039621146117,557.9128761265697,226.25822715668812,109.63153877736298,112.84348205185651,343.42989086396193,534.2472493473085,239.8898356261254,627.1515572945323,27.446554102585637
67,17.413203400153606,431.22517719455675,261.38720274965294,343.2064375595364,136.63642055943123,57.65859658096001,112.17581755897557,257.56707932914605,482.9102925043161,215.79835849197167,477.4015674644734,9.216715152184577
68,31.20841653455159,,291.21613031821806,378.6036626837638,137.71950053683344,,132.52159020126146,,563.336925874782,213.3459481038922,469.77195560554054,
69,-22.068169584909924,397.17201788578467,240.06550854215874,304.8637707975173,81.27392668279106,-0.6679762781022589,91.07527261697777,208.17538447051663,406.66778789417526,173.03405062903983,363.9570788085363,
70,-72.53505416175429,370.87678573709894,188.18095296702302,248.5015479826154,,-20.44089554850177,65.70810997131532,163.47573332151947,304.22869669604984,123.16129824507749,226.82318024525665,-51.93223987709534
71,,353.3883648152892,193.44413057420883,244.76999630963616,23.005310521724546,-25.20320752936415,73.31204713551242,169.2955571972228,283.77735731410615,96.61055667502713,192.44878410338316,-61.59461685873174
72,-92.35689638405748,394.2777610953915,225.1619483038391,273.97203970858135,22.985090596362497,-3.6961080945401363,,186.01880184840218,359.2606877550411,89.23583467911112,303.64136437711363,
73,-89.22032735164044,407.36791197237454,204.03360776948176,240.5015214419725,-1.4377790821071201,-10.512626812274846,104.32735287757889,202.3002314197903,328.7365435106419,55.28682342887023,307.7362895495219,
74,-80.30824148787326,,224.66449732245363,250.04886302449057,3.864705925743621,-1.1789304630115396,129.66395960324002,,356.24952523240944,44.87400457754824,249.23592549184139,-6.566481003467317
75,-54.90233259843677,498.5192570664144,,271.40967356114015,3.62538195563441,-2.360258287755414,161.15656025131034,259.47301920404044,399.4479136246562,52.91891397171732,237.28913196143964,34.446371480383775
76,-37.75745045615432,581.2979234672455,278.0268618742776,270.1637668324553,-12.659251486643399,-7.778658922129225,180.2365870908391,292.7172444216353,431.0725830665922,48.556483893941724,303.13189890423166,78.728589576275
77,54.30723841989186,556.6211711972882,356.11972845705213,340.45258912472485,17.815001609139458,11.183028594304702,186.06401030265957,379.78370405992837,449.95503720471055,55.350028639155155,384.57398159700483,109.4022242110454
78,173.5632861529246,630.1842987526522,459.68461038295857,479.30892401669337,69.73807347173253,65.57016132567679,,488.35792659793555,,88.20927527003671,522.0697819180757,146.72277863281224
79,27.468637047592466,603.5899568127384,348.0316353328024,287.37045759259814,0.28611710877876817,48.926520051312735,241.88992611255293,365.1302230850633,,68.92039473585601,446.0182137030126,160.56535443294007
80,10.69251790189594,,340.25364517128435,252.80576008637152,-7.148672253332236,69.76450104356879,258.3834885369376,,389.73439125619586,74.0762772018507,,
81,12.468423434946743,537.8815105399826,323.97810430104107,264.9371112698018,-6.556414572569523,,249.42583753511752,282.8862050648454,280.0633342941316,,432.1569948258721,160.47386483989945
82,,493.0053490193818,218.10903282116294,155.06531313778055,-57.857980257276466,57.73197833726465,239.13775429832316,191.32596404651704,140.84762775784537,7.813584088317519,,127.62877319264045
83,-56.45408672790876,448.95726570159366,202.92870118442423,135.46436878019696,-66.0359688061256,55.26680371887384,226.91406132322493,,94.3931767801587,7.56308464881036,321.5433813565723,109.55778795502643
84,,511.6561102451385,233.03726897670165,181.4696995283003,-44.83416750338,126.1984813304295,250.24826251567958,142.5470467079012,160.6815478446955,33.70520769647068,500.85326855433067,102.56541864271635
85,-47.15857078857546,502.56432387802454,205.2699348195347,135.26836782353175,-48.54744703914662,129.16967311969574,245.9575117542364,127.68795644228732,142.69163883579364,43.275002338496435,504.6965957585388,78.28448998803161
86,-17.006046716538776,514.603311678906,,163.90176448342643,-28.06086095184473,157.08766413660587,276.6285858072224,121.61664086797548,176.46365548392697,80.15736626622531,,72.59043161297313
87,11.02281670485516,558.2670744438167,225.43283167392303,177.8323068361647,-11.021543585456115,164.27597671334487,276.4353508509626,,238.10710314268383,118.69104574301736,477.71255563967566,66.17695181526872
88,33.268593290266494,630.4525126970733,,173.1538987969542,-6.212160964121296,148.41377320134953,289.7998528248751,123.65759279910272,273.08614394478866,165.65118029402612,525.7342383117553,70.403892867653
89,130.88020894105412,654.710120653681,,241.02052588837458,40.473724289383426,176.52322092512065,271.59666873712314,199.4069950530113,348.07149994676973,205.96632406410694,614.1908913376715,62.495211990603295
90,250.42485437744426,715.5145982437406,421.4813291659026,423.19243962494534,128.9386131897532,237.81614851088057,289.3335918751541,282.8817734741126,459.43842003631283,259.91851276192347,719.9470576630292,45.22627204510957
91,112.94266215533284,649.5387560341304,279.6328266573239,196.22544248278695,57.737581519416636,203.48942326917975,283.5092810562507,157.31113901083964,403.1523696569012,263.26132239168317,594.1492496106712,26.604443386828436
92,92.18408660538739,675.3620826883458,,177.2333972411624,68.83125769969068,,,124.4274721679651,403.57208393497837,265.0195052484836,539.383563174275,9.51390951529288
93,,607.2563687125105,241.70120673395454,187.92450089624728,73.23959701136667,,218.11591119028702,82.77575775095235,,249.11438499405145,490.74544019874554,-14.115618428654244
94,35.09043881201346,535.5648159710345,132.3720205676046,96.97917325627738,41.63768816169751,167.54711358348945,173.21862823439992,14.326472309606686,221.08820616734496,,327.6479415832477,-63.25401137592188
95,40.120069586868624,,138.91718989652992,,43.7414959748841,175.9414338821632,143.90503210842868,7.787426159777112,202.9129352528335,,310.4960737016622,-74.31181722921008
96,54.65039421758523,,148.83391199042117,,66.09948501799383,,159.66724243262325,,282.97325190260057,,407.92690861079865,-67.37292838391392
97,48.90485330286329,530.1693326095208,112.23976998603989,,72.63681724054659,,115.89177411547672,20.777696583033986,269.09107644086777,173.8923315882547,394.0460823324714,-47.40816981929186
98,88.01876231592871,535.8216726200199,131.1228906993824,146.12289111665675,93.20332531508896,214.29076341932407,124.55433417240903,45.09754966815989,347.5488558184219,185.5845825516624,377.2024700339626,-12.10549791245873
99,110.76461201572462,578.4677744112118,132.7368227330573,152.53943315592937,110.72982650651966,197.05987529973314,110.50708354055823,68.46114015468824,380.8742438051952,180.44195586456362,328.27278834627907,24.62078936045507
100,130.76820413433867,604.3850115753203,,171.3369391125921,131.102199011868,177.15375647344473,123.36374315295578,126.08235207589486,457.6327980324498,191.3740788249341,364.89725890824076,79.63147589968389
101,224.84718084994287,617.4574438267839,201.34706063768724,,187.80156756946684,180.95851133380387,108.68546477183409,212.50713373242837,,173.73382512925474,424.0824276113444,113.88232605430957
102,339.0497811619692,703.8584961340576,318.3441386103,490.1747496478347,282.787749872064,213.19086832657408,130.60529603676594,358.38567426036667,647.8609038467291,188.2006317006596,572.7274724125084,161.1150096580065
103,189.25452313212475,620.0001839814402,163.48590714196388,254.67566507777292,196.9577531944496,167.3889652706843,127.17917039573273,236.2706870872554,520.4479755207942,149.81916312142437,479.9199746684373,171.54726360489258
104,216.36352845689697,644.6931261769353,,283.3989830144124,210.1516723734479,156.20151633033598,143.14342821663618,249.02699287954331,551.5508587796952,,491.495844476385,187.93056099470607
105,172.91814290810106,555.33420869933,127.87050997277873,245.528145599672,180.84725166564237,111.33621761051612,109.25013863498724,186.713764502497,402.9163043504809,60.984724179550916,416.18256795231946,157.4899968405436
106,118.744328014503,464.37744634791994,21.045430414354882,186.75842307299584,144.83474706954223,59.23873101120479,76.14445206369737,146.19705954205423,235.13987657201977,4.721116502519614,335.4839609546828,130.4676369112633
107,117.34434638582786,441.11289960203953,32.717859498596255,205.1380382307245,,59.472689736603925,97.9266450044191,147.21355487177388,245.371483515816,-13.81818502657957,358.8136870850168,121.29670867455525
108,140.10095753916517,496.83648297815637,56.714491625285916,254.25647559973095,,83.27346224631941,130.3300805205995,166.8217427664206,244.01238211384796,-13.05236606413041,504.5113970361652,118.96502645271107
109,128.45621549610064,441.45483911096875,25.062550789922682,,145.7520367802772,75.4382301672851,114.25795375035051,174.3600929845716,196.80012955780802,-14.278058914264193,527.7832821815738,
110,152.05116767408202,449.78652908906,47.090835263287744,,139.0659852192204,65.37851785463329,150.06203807367302,194.60423507586327,218.31668848076052,6.049329523328346,543.7523030435369,106.69533526337835
111,179.25099514896436,465.1343421645922,67.93345640381594,298.1074373045494,154.8239288251321,54.02352454400358,178.56200869734064,227.58782208566004,262.95536143659115,42.95472675492295,520.6141311913881,113.19015833329746
112,206.3849473580332,513.9964558128,,315.91781387634734,136.02254149448171,25.755261856217373,211.7642402277362,,282.42921560387106,75.47564117820758,,112.42127755045867
113,274.9951546985061,,120.59067831320783,,172.64344936541477,37.30450781942643,212.92923896347318,316.7686360994534,304.68088450338394,96.13755286425447,666.1626069150196,98.19047138114571
114,384.9151070314955,572.3414080256281,,580.047350248934,250.6571611941414,86.60871567208663,264.96976280031595,,427.833643602181,160.19386187163553,823.139646496576,96.79595735690776
115,259.45204429656576,513.4161375695749,,,147.8603017001807,42.39281301015278,274.34485447422713,328.46744037122363,318.4411259206576,167.16948267757422,709.9920156850153,66.95597140610926
116,239.90697430578362,517.2804139852909,132.1898718273705,379.89097727989224,123.20506164271144,39.34365606751875,293.05764185163076,280.3305755618509,288.1416838334973,178.46843092375548,674.6316268526189,26.88563580508054
117,214.25010416466574,440.80175595404444,100.70107913907839,,91.8761338462129,16.13831305698716,271.411934556475,,222.5953309212182,178.73908795326204,634.2287868681672,-9.843437131307951
118,147.85334659301634,365.86419604734493,8.894004657595758,283.31513677514704,29.65404422145903,6.971685987286179,243.68106895491172,127.91596338985974,71.87925254517754,137.05604900831867,475.6706019778999,-55.86764479303626
119,,337.87154710546105,22.543859924884515,286.1968477048378,10.14007676811822,15.86711889711583,240.59172465804173,101.05514665841272,24.47355716228992,145.75207670466895,424.71899681884,-76.58043669723227
120,135.38068816054638,345.2440784455153,60.88746541298258,311.1997614200036,0.508008909339452,71.24185075433792,282.13262388539306,90.22846364080661,81.7028946215386,155.61311981334057,529.6477657661802,-77.58247912137212
121,124.31095334749172,349.68960257227275,40.05937566828962,297.3644231067133,-11.792499068001803,81.23594969074725,272.31229282115555,72.29696017506063,70.44634741645837,168.38519239613035,520.1110747998816,
122,143.79844611737553,364.9787566070719,77.76118046592137,305.6260691040102,-22.546378292535934,108.28647394476998,,,139.94827207774614,187.75547056796023,456.4458852629072,-41.04245159643297
123,176.07073215057915,,104.16273528612747,333.7183828660599,-7.360920256103668,92.04881907111783,,92.57111670449201,208.4422152516856,224.42890124382978,418.90610360720643,-3.9353421176689523
124,189.22257468757988,,149.34830351224835,338.85807559433124,-23.18159514334498,95.61555248331926,332.7660530004864,111.30809086210735,281.73790044394445,234.31276887278096,451.2888011407616,45.37014318135237
125,256.4227686246002,416.96023681273704,208.65391264488528,430.364436742182,19.12027609823781,118.33558122298503,310.4807632278242,172.7474843547144,358.8182722613646,239.50146874680112,497.6328270151405,
126,402.9182561859288,493.41592452267287,344.0792194173592,,92.83057926228724,180.31296349544252,337.4010370281948,293.967651430065,491.2593209891394,265.9034170748911,631.6372225765886,129.5049957028051
127,238.71853794205953,452.20305502361487,251.22640691197887,,-2.1903647850540153,189.56685307285127,336.6844093831895,168.54190423998506,450.4989801111377,243.84083344593554,,166.9299080905383
128,,500.0343737635326,263.2842393108515,367.75122748129115,-2.2608939159304953,195.33898901371026,,131.8974799009186,448.3786806514318,203.93750575202688,,192.35263098985834
129,173.6026759149479,388.87824709046345,224.30083730074742,,,192.46814968953345,263.9109978055944,79.00422671150758,401.1797925796765,156.2763618822195,456.828387685105,181.96498172069124
130,111.9321049442778,315.715483262377,147.3013427956925,247.77297669990014,-59.265297482291004,170.47602163590656,212.82764012417232,35.89190855995727,257.2571664378398,,327.78474866562374,154.78479509785942
131,91.10486195674906,306.84408831852835,156.11847629425603,217.9881570711172,-51.75403107476842,,183.81354589019344,32.46829127370716,250.38680915392843,52.743962773684274,319.4116233351782,154.85609967485917
132,96.84751708257801,344.97862158416615,191.86380956384565,239.86322170333924,-54.18294347864685,247.49149999126428,181.36358355155596,41.7808220626176,,40.30498731891328,494.4865731013461,157.90135467402493
133,,356.0902564116026,184.347977571061,193.68725211872015,-50.43480553488686,259.8208297800775,153.2356516593724,52.51558260243048,311.98427074858,,511.86273787855964,155.39522385594685
134,101.671116701806,365.91896140163107,217.92149308165975,236.48303958396235,-30.955156736657344,269.42462609418965,160.61765771604104,77.14787167130734,342.43948406358317,8.515327040108872,484.58046718148944,156.11076385383285
135,121.97249425472617,395.56212656211625,228.21316885838388,236.67095868138605,-11.458254083532083,245.04871302256228,,118.7847807143352,,,489.4647075585867,147.88122869900104
136,121.83137225090792,478.9986568416986,246.24217666918543,235.13353853017955,-12.709383348606835,,,,449.54602374672976,29.073719490691445,562.501284048653,159.47268962134808
137,,428.3025922548959,307.76453726847024,316.05424197462423,52.175123751934336,249.8619489842725,139.5998847063649,259.14172440463454,468.96930590091455,37.117304761508294,,144.5081945766862
138,327.73954606241756,536.5295562233187,453.9084969649398,482.8382434091801,,301.4624498650025,,422.1908771805479,591.4866947375813,79.86260818083575,825.6929238451794,135.76408635345499
139,159.35319205932,506.6847216461493,337.3067851541464,278.46198525740994,81.15954120477889,,,331.3779492333391,486.2388112359546,86.1059175091339,750.4063932076712,120.51149420064465
140,145.25550286661613,516.4473336762787,336.2741529596762,252.15199962456094,,265.5866336703525,187.02425239551857,327.41437071432716,498.2362004322176,88.6085919098655,760.3577546060992,88.89709103946942
141,82.71300684182648,429.45516968742584,273.4867645737766,178.4812153998133,70.09361249421207,207.42251357357196,,257.2709430741493,315.2948445260673,47.33643696071505,,34.58665278911217
142,38.19719722956492,376.27785551439956,,108.19994703894459,,169.77162985968104,92.88527170712764,210.56653446033226,180.6914886440016,14.832041822352295,527.0018037528178,-11.083742095358161
143,10.350422471564144,341.78425867902774,194.35842363654808,69.44959965925673,39.390094074735075,150.411274292431,65.62530431608965,202.78942469108742,,15.974243216579445,478.9468948220405,-51.377844161463386
144,0.48779770456539495,370.03770597590676,211.2897290965965,,60.849187016724386,183.2057926653991,104.4395195366694,211.65184494425523,173.5552199313575,50.05134375846466,638.3795080629525,-54.71396076458686
145,-16.785354702958628,,203.182348028734,63.41644415482277,61.08422569672095,165.87380732093243,84.08285781496348,211.0693216258378,127.29670948314421,75.87707309717521,614.4902796210419,-57.746494514620835
146,8.461871028353869,418.0895068983458,,71.2817714415103,96.35294366839454,149.15496343444204,126.82026122049965,,152.90636011458872,107.26861084367673,536.0522127303456,-39.436825404853195
147,29.80023344853447,490.9293636884285,252.30166661979675,119.38073341869762,135.04375427264125,140.9580457599755,168.99293383991295,250.82577824347345,,157.7966211535268,,-17.094755456553983
148,31.822596493980143,534.086660539441,248.17192430891114,101.17109137298247,142.27903337571252,107.72100575086607,178.05887505687224,272.2033892017145,,189.570419206716,511.80379273544713,35.85173818363917
149,95.24217057252011,525.0110047947285,308.8763034288177,207.80510401792705,200.6383027650494,93.45910147008252,167.8391868229007,349.5863756440716,279.266063744438,228.84494166304097,561.8960714912993,60.88567142745102
150,201.78584261343036,617.0385808102199,420.535568521029,399.2561160229123,,148.4466114238359,210.38384071754615,464.73580242470155,402.6138478734034,286.4292330936836,707.5530487990254,114.89143901745999
151,67.5151463933762,588.4442775069433,287.3993788508073,192.32393250120953,213.24823854568243,91.47973758207857,230.46265860501921,332.6745092117807,,280.4791289437456,549.3900827961357,136.02860734055744
152,43.225881190221756,615.2505960035492,,166.68277619835192,214.76292727285403,84.46037326781128,260.1615407826965,265.87718691509684,338.864940265943,277.0647728294761,525.2998503201261,157.2880341736188
153,20.478918741475667,533.4266887723467,268.0572475120205,182.125688871964,207.18366916503956,68.37413590494549,,214.32823725501314,279.96817238795194,250.196600386216,468.73258039314794,159.59571753370523
154,-39.07367782142816,483.2148622375343,161.53468284229803,94.45882319016246,162.50458153016854,33.64246555183111,224.78921531456314,137.39001416571574,154.90682795853184,,339.90114429376496,143.72879056301713
155,-70.18168625869751,441.3667456604586,133.16420383897858,55.432961853291914,,17.058066180834487,199.3342795448896,,,160.8448126593467,307.4189559355427,143.97701515506594
156,-58.05224633178068,548.5149051341668,154.1685768438051,112.77851080834014,151.5339270510824,68.7234348923962,229.972680738902,,218.56478304932293,166.66673915546093,496.514037650261,150.40798923553433
157,-69.21349021774239,513.1952570834022,132.26164798283764,,133.4154963269262,55.287414848602054,218.53389240295533,65.24074337481315,215.25316889089322,144.1256895240143,,
158,-51.333511372337114,557.5453801044067,148.45268279844967,131.78496963889572,133.36879263690878,69.95512426966978,263.4393286059226,60.5737370730863,,141.9546072750852,428.4969661638907,159.19639800364223
159,-19.184340027559983,630.8280522404352,166.93203275709706,188.91998025731505,,50.08952591373985,299.0061921845742,63.65726941614419,360.2584549652436,,423.31625013539394,170.97574745437177
160,-16.034666788981383,646.915047195977,150.5520329097861,189.0004606454392,129.83809067326092,27.02309254222901,301.31605921027204,80.98680720355483,416.3489113168532,141.87694220955777,484.22604740359617,173.8140959328774
161,,656.0474003979617,213.99555187878917,324.314127603453,,65.41891973982291,287.0691889168907,157.45234654929305,486.7171679168127,139.0232711816065,,158.1746736861506
162,,730.4772714344095,358.0186340734516,,,122.6881738419509,323.66221076641096,303.1687889364138,621.8904653320246,155.29487666648708,763.0924421815209,
163,38.075317270479076,682.7949977152563,192.66501665477887,305.27120958488496,127.23288032362676,89.05893585411367,,158.85640534269209,521.5983112356321,119.21896543784254,647.6094131263258,129.7750529372726
164,24.42430569719187,697.0145656353158,186.38535711270436,306.22865692194017,114.09536045240097,,314.06422584704853,131.1393398923121,535.3874231054618,,668.9226512777157,104.62139313175203
165,,,141.44871194265085,299.20293698749737,94.73418563664572,92.85301431400617,257.9566568767393,95.78369666217654,465.3390660204748,58.711228854741165,643.7579667466852,59.844719431590775
166,-64.45900580276927,569.7280162020363,48.467815976444726,212.9905486177923,35.3062911079832,73.34674606714036,206.90331604548547,,285.2724323019034,6.042798083035969,502.4061353531746,-4.022042261088515
167,-88.16667634952927,523.6327454030863,39.39826625796903,223.32819111596464,15.12480902178676,94.57606531327505,179.02607962479905,,258.49392880246444,-6.5381489074854855,524.2969425249819,-39.54354021122282
168,-77.5091645642384,580.4366544232914,61.74721456062238,,5.312905073125023,154.13739866445147,189.4536013962785,37.763710008055405,,,698.0294718712228,-40.6032110277622
169,-81.08219314191354,589.2325797918318,,237.82486837769684,,151.45908536690195,133.38963488209006,49.65067039901335,,1.7454271239287777,668.2492671449623,-53.24952250591657
170,-66.28037712981197,604.1663600253494,39.09335014745774,288.99871428019424,,178.60813281399726,161.35179447619333,77.39051304932865,308.97366901684677,23.308286938335655,611.633170416235,
171,-20.02829381836341,655.6557917771652,78.49820750978611,326.69896557703066,2.5566623745202577,195.20751909773375,164.76116259513856,123.87891664864043,,62.76230960599985,,-33.00749482763918
172,-20.54080365433765,708.5944846012464,75.38908397529889,333.5684419890168,-7.522237733336667,,152.7182964353252,178.9810467789993,388.9972840579092,,591.2062244483504,3.9663729454665173
173,70.50328290528341,670.4130970208096,127.6769202601019,412.85623452844214,21.4990148460562,220.27866926689353,140.6853391163141,289.20750625784024,405.02689808645823,116.39364151492552,663.7761192317403,24.61561440900698
174,193.96215115164415,767.8040394744382,270.23160672841874,584.6744318829882,95.84252253174753,284.1013514880319,150.3916074573105,456.5557047861678,533.2269772229808,171.70392350488322,779.7635846284219,60.05041260234385
175,45.839656652705344,710.9168169351458,121.21995033139672,396.0106756954011,19.84251627207408,257.92410886985897,155.90106147947907,331.8053753028686,323.1881900636123,188.5255301209413,659.6898245145499,79.45617628795819
176,,728.9385865719739,123.405793665249,377.3949633104489,3.1358637761883017,276.95865419321206,153.50408837516233,306.4147242330662,220.79177388573885,184.84592122808112,598.9957470235815,
177,42.563684859529886,650.0782576413668,108.81069923199749,375.436920951974,-10.038405325704161,262.32792627400744,101.90188854733725,269.7816125444902,165.29832871220464,170.64707215131781,522.7534853890984,103.23909030582588
178,-20.436906084493444,599.7164877152597,6.84413730502331,,-45.38983350161223,217.88306123223293,56.09321418632962,197.74212397089667,,139.48595118374837,346.9992381696571,104.82270401077517
179,-27.905698975802522,539.8017916607814,15.007922423955392,278.5829785560912,-46.87669539696995,212.2415945436051,42.112476491359274,191.78790867634342,36.784813255945835,152.6537706385697,322.4096415801582,109.95204992760797
180,-8.324960632634202,617.4115538331889,46.74245435766642,322.6189846631929,-38.04829572509517,255.44560472458733,76.01129477739718,214.08885211109367,93.46363403122228,167.42203167498695,495.4009691164164,136.33180314918565
181,-11.46523846718489,,30.931833236337326,297.30057253344853,,247.79630769626644,,211.55268398970793,83.458244891829,163.49032300937017,479.5382197933595,140.99109183933928
182,,616.3938361252594,,282.88549628752526,-8.634230060112657,239.77440438873816,76.83531225163745,205.2274999354465,78.3080265811584,170.32306750386607,409.12250646517424,156.4341797685134
183,43.20722009344465,683.5399536203116,,294.60680101380376,12.069610158850793,242.76780912035608,,240.38888530117052,120.63624923295475,187.70247401763305,376.3034290036447,
184,69.21056771765166,658.0001502147215,115.52666370545927,312.2552904679648,21.27588466969287,229.6631444906571,129.5322928661083,246.40270886314033,191.63050401615044,206.26900714205664,456.52324245761884,167.91871081688828
185,,700.5302114927148,182.3850410394743,368.8435172156584,65.59154425903046,226.54662626186322,117.51502943807392,303.09728001294354,261.13670396260886,208.7244115276318,593.302453091257,174.01468057898504
186,281.4868297080209,727.2831073677885,306.2779700591272,480.0887504662886,139.14716689082928,271.8451349465785,162.6555480924237,420.3339838329082,374.7471965232653,234.1387471039363,746.8991586923562,176.83371377115589
187,147.43245106158437,686.7624837584486,218.77806376791537,313.7147824238973,99.9295140023306,232.384726399945,186.45296971832448,281.45631499710146,274.38068737819725,196.17465827221957,603.9226474681072,145.34814662709948
188,115.13214271734128,704.712119510795,222.1307360542234,276.7976734283241,102.14580260114623,216.48808616762472,,229.86576432829582,,158.31304305809493,601.9855505282937,111.28220088103038
189,113.48380914734295,618.0357060953826,194.7298581423762,263.25164406847017,86.06056883729336,,155.75322741239586,,263.4685776373349,116.26486836623317,,71.34662521046965
190,72.30719211698835,531.5080641574549,117.28323506030887,161.57384561765366,73.42528528855775,,145.39155056135232,98.30290161358428,181.18713102538402,43.685883350159706,477.4567253074507,15.428096160491634
191,71.43073059706629,,129.8488895696933,161.09180814160845,84.67280808845535,102.7704518267892,149.750462254498,69.22446505242189,176.98524987241345,15.75877144525537,496.45247343338144,-19.332599272393978
192,75.73553621551063,,143.51211671407634,171.93669324474976,93.92103622252188,132.85792653162363,184.08779514240865,50.42717297547743,234.19582943542943,8.211794291543157,678.1223321023622,-33.481992184455464
193,74.71693558258161,536.8382936503541,133.19240272468892,138.88188539824858,97.89752277122935,103.03978931849778,180.3930977730261,27.857076637167467,232.01454107914813,,668.5081818227643,-46.916220092708826
194,94.7179484548552,528.5879931877043,167.33593047751842,151.98648079624488,115.98030537041419,107.5054558811344,239.22142414051623,37.35401867304103,260.97146858293587,-11.936227074229762,,-51.787807936253074
195,,,217.8085901014224,182.48023904543865,159.93215121974032,99.15290864514365,282.2694970567174,61.033522330183246,331.7998744204043,7.836058270171776,614.7248448636142,-39.94716574048603
196,144.07998888099007,638.6030067193034,221.8756065050274,158.9504014306619,162.3768725494087,79.3027624634326,297.9295058431828,72.12569235043196,358.76401854132376,16.390313683196197,624.9011939922614,-18.32589276421993
197,226.784699667261,631.9260760619696,298.0801487815389,242.37902834076687,207.34296285055248,103.36921007559394,304.43280419457943,153.67263356495513,434.00042144461486,,781.0752305940262,0.7581759621300819
198,352.5873606375453,654.8088785473565,437.271047558539,370.1879030259056,260.2672047636163,133.16969204342791,,275.22733178267447,523.2483452198198,72.0169509593992,856.8862716841029,40.82760569105899
199,198.25586359498914,618.0421325755888,294.62414085345324,232.26620445527828,221.7397327361993,,318.4966037160262,172.203889248805,415.4563571526851,100.21161387007456,724.6754297505875,
200,208.2785555284259,668.4261224611423,335.12036319081204,232.28174232099644,235.2897827893277,128.13285690175996,,165.51196058795483,420.7110111608114,96.67758826121916,,69.42075541055145
201,158.85557383121665,557.4620783606135,247.30324178306785,165.361868946947,185.74419986673212,74.16588840165866,272.2783424537407,98.92217913834597,322.9798442844814,91.22376637969278,600.7785175024142,67.85229348545433
202,129.48927218633358,510.0404662574122,185.31954664777322,107.96113949931684,162.21461200756437,55.16396561416326,260.8778388581564,63.39381270219613,232.48620744510245,63.25978260442335,429.0884347617864,69.7105354182253
203,112.99988106056955,459.6995668995887,178.1974609069834,95.25087105002443,142.22399213644042,60.469754755579324,224.27804646427694,61.905684383701015,181.46920192128064,72.99834144132508,416.5979200389903,77.42157145327738
204,119.24992859692381,,216.149243189814,125.96824066365994,,,222.8649084644274,78.4499038111159,208.6524549733934,111.00462222095355,567.1114377613943,104.15409314221795
205,121.01917863381973,505.47016732599985,190.13147657533696,102.59552429531948,124.64788835468833,111.39388959223052,186.95492453161225,104.92645293464585,176.24011007061065,124.87864258621661,535.7225338185191,
206,135.12775160629695,529.290057342677,214.8468814098119,,,130.83676062531737,181.88944893971419,130.88309025806652,188.31569877744218,143.55901049404585,431.39447293184617,
207,161.9232225213641,585.5825240802948,255.54658701949288,166.13692334312967,129.93254735397068,,211.07180965394704,174.28832362809246,246.49569064366102,,,160.02741728942246
208,166.336423544004,635.2475280386765,,186.7583836775905,123.19037807516837,152.0558985234784,200.40548414614432,218.14216326600956,260.6511073323346,203.28894147098762,450.7238240197819,176.71731627456805
209,259.49475385718466,621.713860193631,323.1661544549112,,140.91102271420954,,175.44766380908345,,292.0833685868904,,576.9652848230996,
210,398.20612993712587,694.7623835288412,446.07274769865955,434.8129787233305,177.60521323053857,247.15104497556092,173.40704349315595,450.8141435907,403.4687875488457,268.289283215176,702.6051495943348,187.963174571891
211,236.17502680701648,675.3511414815946,306.24034214276196,267.62503961506343,103.567718769359,208.6561822263807,165.55027755823187,353.239037135776,300.8637689471881,263.0248018230219,627.1200551119507,156.48811180751233
212,203.66551691897558,658.8862998621668,287.9375970254629,,87.33161282071714,224.40245390842196,151.95706150396285,320.58948657921974,,238.72390339009615,602.7003626435766,138.716883769525
213,216.8689539534285,586.0968653081873,258.1452882593784,,,199.33256028391963,121.1571035797696,275.20809871629694,224.17964973477837,203.76991000941945,616.6510624200093,79.30074349067309
214,159.27178754734493,562.6691107345274,163.54359615640962,188.65509197250253,12.840909730196167,181.76355915761417,83.82566255979265,214.87466329287722,119.9699829062184,125.85289654218228,443.4728850129042,22.801829751269796
215,132.51055763658894,499.7931761228439,,153.76211420745477,-13.230428628224189,172.07647577657252,46.61571090314868,188.6272430015881,63.50036525099796,90.06074648662565,465.68868316621445,-26.761819158055438
216,138.6402721623516,536.2003653416672,162.9773124384937,215.12278122049253,-1.6116780476119175,227.5920040345899,61.4469011731951,196.57433930486428,,81.18943547476519,685.0436500384668,-36.688810162149
217,120.29402653217507,559.3633125129068,126.1229230660785,193.57597527361008,,218.79372591044603,51.33503120398842,184.6900769561672,136.76913383679494,55.3566173116987,657.9627021371592,-58.242981363641846
218,126.95906334804911,591.0292040687536,144.01645234659222,250.94003477556564,-25.14057005797588,236.1626767581144,74.47064879591395,,160.50692550483888,42.15914542035173,,-60.374428350239214
219,156.48546038612676,,187.13852408629967,301.07022908000874,-11.011601702014246,241.10597476697666,,206.69098187204497,271.10091483112336,40.29363456496672,641.6501497366728,-58.32152781609621
220,169.00465602945243,678.5766560960458,179.61732663625435,301.63521745337215,-9.941344906731501,235.7018035193555,111.1607214873195,221.59383161842175,327.83293101861045,47.03434324810968,700.7962062793703,-26.09477231259271
221,269.9865230743759,677.2942358958311,256.06982883309183,400.2820080496053,13.677443330890497,267.6252603225005,124.04168390188559,277.4647474562394,429.3005480081048,67.22034642747812,834.0332047193995,1.7715165582131505
222,398.41986343814796,746.3446912495265,390.6421649768834,545.272516877374,71.70412143966483,301.7891927870413,156.62332741366248,402.0251525048482,542.6916082301107,86.05285382402052,928.8299443096511,18.18920296590838
223,231.19864523562418,720.3030879582548,231.54059205198013,393.4259778454049,16.716128664783383,263.08169501378586,167.48089948189042,277.13219981954387,488.9582124895877,70.86903751640897,855.8240844232978,38.043686783994204
224,188.0453062337002,747.9033424173972,210.1094715934331,383.18168049595033,24.371778715111304,246.1284161102543,185.77955971178062,,504.6888581916684,47.43725989744149,,64.57995773791774
225,171.42077742475638,,178.50096307055054,386.86213381972095,11.764757505115455,204.62042084805432,190.56113501558804,147.0412831420663,443.72113871920396,,757.6182167457355,46.57766708647655
226,,624.7568898186083,80.29850251131239,,-26.96525144613547,167.1626901503684,180.07270112007063,74.18654871748052,346.6913497958282,-30.516962887728155,593.1142219524954,25.653610597835858
227,80.34993567857327,561.5198607819198,43.93430996579039,271.4294609260751,-44.58660629450824,150.9613716205173,149.927055305916,38.132499330312314,,-43.444207282371096,535.9060969645138,33.45787820722922
228,89.23309167208296,619.3021835249735,78.3516371639335,327.57977297616924,-23.54419118042118,173.9971749842396,197.1806564937132,35.7894413958395,360.3884611972513,-26.500465419354626,,61.66090031745341
229,83.4481904240412,,,282.366699665663,-18.2079251235798,,196.15084847470843,34.473330112963524,321.85787776190426,-15.087612374004934,647.4698840094068,74.96308990689029
230,,681.0884855838533,73.72993025356345,320.53437551559546,0.9068849608391787,138.41749139769746,,44.80756306697187,349.03810682188276,27.106420840059123,570.8891421272807,102.38570894798472
231,128.82689669750675,,107.22121400971875,357.8814258613734,45.82664335449167,124.25882585350078,,73.97732768126221,405.5232543989042,65.07036438794324,559.5152074680386,
232,133.50678996402948,,97.97120633091902,355.4146936555148,60.093984392858125,,296.5300490162443,113.93653727623942,432.6413906181781,117.27959222469119,612.8993373615048,182.8803742373
233,228.66911567307386,794.3906354241228,199.59239360442842,439.28062308625374,125.65566584996003,113.1778461761991,,206.2193095931756,525.5253794599726,159.3042694023382,717.3167641418001,192.7234996644282
234,,872.344523268957,310.28924173407904,563.7382496183664,196.18220385843264,,340.83517635426404,345.76386748373375,611.2889436741219,227.99444476591464,826.0997386124474,212.82054362675638
235,175.104754048908,,,396.2994296231538,152.5669349583241,104.98694471739574,343.0692239796379,253.80006550060799,489.6370840912301,236.1229333996475,704.1953320283629,200.16739435702965
236,175.94263268201018,858.1891153425942,207.1818675895649,398.7347475307079,159.34754842444656,95.24890470191542,342.9701169868406,228.81582470227687,474.40459294887535,245.29846685144997,719.9863916441941,201.61279349467952
237,104.34695927401977,806.4057563130343,129.41296068594,348.4997416938926,148.63232389314848,37.01140105001451,315.0321290068329,151.14767260105788,329.33159693411227,203.66627252074508,629.5301704953179,
238,51.77697992560924,740.0871100914271,32.76461795294378,246.06203050749144,126.9053695853805,-15.618805890899054,259.29768024500567,100.53479499327781,183.38789857859555,159.1375007833245,479.89115786502464,66.93863006976153
239,44.826002876138666,698.4126060042809,43.53770625566693,236.81880287567688,130.5013485979574,-26.585713493776808,239.9090714293081,89.85397924594106,122.29601125330828,141.13609572669307,474.6040787960297,27.90926126189658
240,42.90263236531844,758.9734027166019,66.4883997994798,248.66230175550962,151.63801676095542,7.735852206174684,255.37523891970218,,191.21434900789583,,,10.624050524789517
241,29.339698679588096,757.1244328341069,53.085101874705686,208.61662920811125,143.73698610112564,,227.64275109655017,143.20329889004148,,129.56604431345943,630.7829983383556,-19.863026501136495
242,41.3169122064054,758.5478393604371,85.35525196389577,238.2487378828995,163.0370106452741,18.088037786367863,226.887334440405,165.31154701092646,177.59161400691963,140.45063203617667,608.1087318947748,-22.537260941874393
243,54.756399429744576,826.5235364181804,,255.29465805502872,188.1674269002716,21.00308911085716,248.16271933246318,212.51005426712595,251.55740862957782,155.81376332498846,,-21.248380430518722
244,59.106511591270134,824.5559914363018,158.96821684845705,255.0046418964152,212.70249983630083,25.58218322755232,222.41444043775618,264.16426663196296,273.80589198266523,,758.244700568796,3.7352117944926704
245,147.86420184574672,849.0258139730076,271.43168489907265,351.90243257160796,263.44198352704996,77.71785915055071,217.57695411528215,366.19145277304017,341.6618561290439,188.39968628279695,885.8436255675064,
246,264.09909338761145,875.7562759034819,389.6176755352584,474.20614349159246,326.61872445946557,122.75166231615002,224.46933832968358,524.120195863784,438.7934681331468,,1015.2230608529892,
247,,842.3800188405089,258.54024722812346,288.33623553351254,270.66331637192025,100.77051023375944,200.05308318553918,396.18183776131,382.4582298552771,153.57153918100803,,47.58657373596397
248,98.21246091861448,872.3974762791375,,271.3964667561346,269.7662770888255,120.12453563596151,199.26149307665878,371.3804229141068,370.00305714553303,132.61155343971757,920.6035425252899,52.728232974692276
249,82.88156313737832,819.5708078326323,240.68605917577025,257.757394541067,256.0611951518079,105.04226373087958,,313.24399723314957,327.73708390194037,65.55655320171854,,55.76465425015378
250,18.464833290154047,754.7581719416304,136.47724941033766,149.34880538624805,202.25166229469127,82.955806985224,101.91374842290526,251.59595064260532,232.16302426640797,-2.035886397750488,698.0163086344905,25.42237304923833
251,-0.5012540046056984,,150.484858751344,140.0187200538756,,105.1561580497487,99.69106889099214,233.67031438892036,212.19393458982705,-42.79927831788493,675.4679571840143,31.55196669302253
252,-0.2465339809004803,726.5455724227736,172.98840801754085,155.5192850283011,185.28935590782584,155.08846394485803,102.89627412642443,226.9038171862859,301.0790798283164,,811.6015500625308,
253,-13.06840068583628,701.2692805581726,155.6455787187735,103.69446004764343,165.27635371150944,154.23908705475813,68.44277387265893,212.29753307551545,287.8160574001511,-55.49870121576359,759.6981080206264,83.53818531524715
254,3.5170493878822526,707.909609156758,193.93133507471123,142.0653294065988,174.5723850697088,195.09093845138796,91.47024582894727,204.47815322659514,354.1004971046975,-37.620206838964265,715.0268910155435,115.009835208731
255,52.3437783926762,743.6420945580057,266.8259489629464,189.23301258770041,186.13125287198088,218.85342817008785,123.90831647915832,230.12174981638773,455.8525484287541,-7.411208582165699,713.4788899019862,161.79365767391067
256,49.24898841450583,797.8946638310397,287.10637186982564,197.80622238022647,177.61129715589303,218.7299854233213,132.3668991999308,234.83547907837664,492.0607421966711,30.078254475941222,746.3214954457011,209.93397521574894
257,146.62592184663393,801.6445407544139,381.97669027099647,306.6991719629799,197.32794200220135,250.98728059336378,144.47440891527052,297.2735781373156,606.3384474098152,77.41460905844332,851.9906215193898,255.0713522503628
258,258.31496915542306,847.680885506564,494.13798515346787,437.82567903042303,225.58448271930365,291.13743704178773,171.8077967760134,389.0922271244239,701.9686412729575,111.55011041113067,921.8924153674625,265.5547451543009
259,104.65975510591943,801.4300265623181,359.4754394967165,251.21400967952513,143.02761118909848,254.68764756935644,170.07973246180057,273.410204719897,628.0219007654039,125.16652706762966,803.2652830588313,247.25335822626192
260,102.39178199575983,837.6735911543468,385.98947879920763,295.1453343361537,141.10089341500742,275.0397452519934,204.08027646639235,256.16282223898327,628.7207827589283,148.97272800533875,791.0055024462525,233.08900580898447
261,48.10939276502397,724.0126523124021,306.12688034074864,226.77314943018087,98.91388214221428,228.49051624511637,173.08087271462222,158.10935517333138,544.8843395610036,129.01952294398268,703.7655936354104,185.2491120977313
262,2.2898391621129406,674.7638573263065,222.36967159528749,148.9937832196856,48.80696493648682,187.7201562355801,147.9706967911697,76.96143989988317,372.00932451217244,88.40377767715174,516.1043597988997,112.71155008412548
263,-18.364350564207,592.3029794869409,225.75585720469672,139.03628376595233,32.916748195928974,177.995858324255,149.2024859699763,41.046981193778024,309.6769776916011,98.40214877944425,481.50769494334025,62.31312504439335
264,-2.3349717275965816,636.4615913202055,261.10980468808714,161.72594583117825,26.843172164716286,221.594743940654,184.73334451174523,43.51071599446903,366.28128812852617,118.6326705685197,635.0799131887931,52.301165800912145
265,0.27170324299328286,645.1248756198934,247.38226792820694,141.05940192562423,2.721027750382774,197.47194234570728,175.83872050628239,33.02568435317659,318.8655041082335,135.48718891242896,620.7465384455902,13.727127397470746
266,11.231374769154613,653.0854294537426,274.8707982811009,194.81089055509773,-0.598099394823393,195.29177692337447,230.72484558573973,45.37065161615169,325.7943754464442,166.03357389480527,622.0085978806994,9.319028456576056
267,65.32161621308136,701.338141232516,336.39059626312434,262.8311676823619,5.8517867463280595,199.3168678857049,260.91251320488686,100.77789736930444,408.7661059654421,213.95648471578085,670.5429773885444,22.546718285896574
268,76.9559512355084,744.5728654340115,339.08759890397505,277.30053198589616,5.630906790604257,185.5333435402326,286.12081756670375,125.86410468218642,430.0350719033027,249.89813384185433,764.6480870706611,44.82812531217722
269,181.5350090707335,711.4159848850728,451.0670109481933,373.9911691725731,40.25438011164394,198.76367764242354,304.76656368896784,226.9773942644058,509.7311219907351,269.66031338515154,888.9001223643752,49.88505663974843
270,301.9680528630329,764.8238495541746,575.9763066663673,481.45923586352797,92.40934647150421,211.44998623332543,323.702490320841,351.332702546194,567.5614176809777,274.6063425683556,967.3264359006101,52.40459184607026
271,141.94851605084534,706.2834625541216,401.4366893649723,330.1787014098727,22.894062494319613,162.60357719065394,320.39183161565023,273.0824752385042,472.7379108514644,259.90449248935766,892.5185295034155,57.319760375426725
272,126.83922513491898,723.4427128581231,407.1983365999291,365.8791737260165,41.960408668364394,154.20628484055226,344.83711082736914,248.32224712184168,439.66502082340725,226.0154292633386,905.8658479486849,57.11174105529659
273,111.55113242441139,629.973747491731,351.8849032950423,370.9258412182389,26.760960727851597,105.85417055700928,316.7106100185783,225.58522244026145,378.36488376591194,174.7982628294066,886.3926680591599,39.76825530310171
274,39.38351275730332,574.2708010596591,235.5336098891857,264.90279119197123,-9.33501928137602,32.61968907311834,260.6031251161901,163.33337701918117,196.87475258635544,109.35076058535299,694.0486287032234,0.045935275628238514
275,20.024545187634587,506.79773611687824,225.28962587544322,265.77365659982996,-11.709725276565536,12.653103426467254,239.35694271651846,161.5581617776043,149.14022292297037,74.75393743038724,668.5164930176677,7.366259119969385
276,27.674119530329342,557.9892414475883,247.8740188081457,298.3159512560481,0.41573422184526976,40.766669338482245,255.33546106381448,188.42654612797563,204.95046813905466,52.75508873831161,817.9062761630694,30.59637710576454
277,33.17682892018344,579.593084091524,225.45001717297941,263.01353189040134,17.382474333979985,25.575866292761418,234.08021976821084,206.15002634661658,214.53179963498266,31.8528972956241,806.0773019778435,57.53104892666769
278,45.07309347723905,606.4284620504341,252.64522113169173,296.4040451615164,34.84978319155934,30.94681124428908,232.32078523427978,232.66710567090166,253.19306313479098,19.194399178823552,769.9628687316983,95.80971571939457
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/passenger_movements.csv | Date,Arrivals,Departures
1975Q1,196.744,195.304
1975Q2,135.272,161.93
1975Q3,152.362,144.968
1975Q4,193.622,155.884
1976Q1,197.408,210.69
1976Q2,127.584,164.434
1976Q3,154.792,148.92
1976Q4,191.448,159.216
1977Q1,193.4,210.924
1977Q2,134.976,175.568
1977Q3,171.152,166.392
1977Q4,201.816,174.244
1978Q1,207.836,221.732
1978Q2,143.872,188.256
1978Q3,191.948,191.66
1978Q4,234.76,202.8
1979Q1,235.296,249.704
1979Q2,173.068,228.052
1979Q3,236.777,231.252
1979Q4,257.007,219.461
1980Q1,259.087,268.488
1980Q2,198.315,252.729
1980Q3,259.841,245.906
1980Q4,264.851,226.613
1981Q1,247.42,261.388
1981Q2,202.117,252.452
1981Q3,248.423,233.75
1981Q4,260.145,217.206
1982Q1,235.602,247.622
1982Q2,186.802,223.28
1982Q3,233.951,226.676
1982Q4,257.902,205.902
1983Q1,236.808,244.163
1983Q2,179.249,211.279
1983Q3,234.134,222.935
1983Q4,264.684,221.886
1984Q1,244.801,256.211
1984Q2,213.35,251.227
1984Q3,248.26,236.603
1984Q4,280.526,239.011
1985Q1,275.076,290.154
1985Q2,222.224,264.264
1985Q3,256.055,251.8
1985Q4,325.357,285.39
1986Q1,308.29,328.99
1986Q2,251.667,299.056
1986Q3,312.424,299.851
1986Q4,383.735,339.674
1987Q1,373.903,378.791
1987Q2,310.344,356.676
1987Q3,389.75,372.566
1987Q4,451.864,413.215
1988Q1,403.034,413.492
1988Q2,337.009,398.689
1988Q3,453.43,441.387
1988Q4,464.288,427.238
1989Q1,414.91,420.621
1989Q2,332.611,386.565
1989Q3,441.251,429.702
1989Q4,482.287,436.855
1990Q1,444.907,449.567
1990Q2,373.243,423.519
1990Q3,455.969,435.946
1990Q4,485.777,440.002
1991Q1,457.535,458.481
1991Q2,358.725,404.644
1991Q3,472.772,464.154
1991Q4,507.566,462.807
1992Q1,470.822,475.342
1992Q2,377.508,425.279
1992Q3,464.497,452.627
1992Q4,530.483,486.276
1993Q1,526.281,526.507
1993Q2,408.667,459.489
1993Q3,494.146,481.795
1993Q4,567.277,520.848
1994Q1,586.915,579.08
1994Q2,436.852,492.413
1994Q3,548.797,528.647
1994Q4,622.036,572.455
1995Q1,627.561,621.33
1995Q2,485.231,542.951
1995Q3,602.089,574.903
1995Q4,676.667,616.186
1996Q1,700.745,702.066
1996Q2,553.303,621.261
1996Q3,685.45,647.955
1996Q4,750.158,675.242
1997Q1,737.986,744.66
1997Q2,546.913,625.77
1997Q3,665.011,644.474
1997Q4,730.07,670.896
1998Q1,709.105,710.242
1998Q2,565.965,644.431
1998Q3,677.565,664.116
1998Q4,767.059,701.612
1999Q1,745.645,760.167
1999Q2,586.839,668.398
1999Q3,719.67,688.768
1999Q4,824.749,736.678
2000Q1,791.821,827.374
2000Q2,666.408,747.458
2000Q3,747.812,738.197
2000Q4,915.569,823.775
2001Q1,888.718,904.637
2001Q2,710.354,785.293
2001Q3,823.237,789.839
2001Q4,871.405,780.594
2002Q1,931.559,913.432
2002Q2,703.669,784.404
2002Q3,837.034,791.924
2002Q4,972.107,886.45
2003Q1,968.688,954.762
2003Q2,685.999,752.242
2003Q3,865.744,847.918
2003Q4,1060.358,980.034
2004Q1,1090.653,1088.154
2004Q2,867.947,967.087
2004Q3,1049.971,1026.347
2004Q4,1158.307,1076.68
2005Q1,1184.781,1176.285
2005Q2,921.536,1005.351
2005Q3,1060.707,1082.096
2005Q4,1174.648,1082.313
2006Q1,1185.193,1164.938
2006Q2,920.53,1029.805
2006Q3,1065.822,1045.806
2006Q4,1210.04,1110.306
2007Q1,1216.846,1205.54
2007Q2,967.023,1087.703
2007Q3,1112.652,1091.917
2007Q4,1235.268,1138.986
2008Q1,1277.952,1249.191
2008Q2,949.958,1063.107
2008Q3,1085.272,1075.884
2008Q4,1217.582,1118.327
2009Q1,1190.492,1182.427
2009Q2,942.113,1038.67
2009Q3,1098.051,1083.755
2009Q4,1249.783,1132.101
2010Q1,1236.185,1250.707
2010Q2,964.651,1080.987
2010Q3,1142.267,1133.715
2010Q4,1287.21,1173.277
2011Q1,1243.009,1270.192
2011Q2,991.408,1097.473
2011Q3,1211.646,1144.107
2011Q4,1330.1,1282.785
2012Q1,1297.665,1319.13
2012Q2,1014.604,1143.443
2012Q3,1186.977,1152.537
2012Q4,1330.336,1225.784
2013Q1,1333.665,1332.172
2013Q2,1057.12,1166.422
2013Q3,1247.505,1234.975
2013Q4,1398.899,1275.913
2014Q1,1400.489,1403.283
2014Q2,1120.314,1224.833
2014Q3,1282.762,1253.679
2014Q4,1486.046,1355.66
2015Q1,1524.452,1518.789
2015Q2,1204.256,1316.284
2015Q3,1378.786,1348.544
2015Q4,1615.269,1466.08
2016Q1,1684.054,1656.328
2016Q2,1294.381,1424.459
2016Q3,1511.466,1486.529
2016Q4,1778.54,1630.69
2017Q1,1795.367,1781.618
2017Q2,1443.852,1558.173
2017Q3,1596.815,1577.548
2017Q4,1879.008,1747.314
2018Q1,1905.442,1873.545
2018Q2,1455.391,1623.081
2018Q3,1696.885,1646.056
2018Q4,1947.516,1808.172
2019Q1,1933.528,1925.093
2019Q2,1497.057,1646.473
2019Q3,1702.691,1658.586
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/guest_nights_by_region_missing.csv | ,Northland,Auckland,Waikato,Bay of Plenty,"Hawke's Bay, Gisborne","Taranaki, Manawatu, Wanganui",Wellington,"Nelson, Marlborough, Tasman",Canterbury,West Coast,Otago,Southland
0,66.0,257.0,124.0,159.0,49.0,93.0,111.0,52.0,209.0,27.0,175.0,21.0
1,58.0,266.0,111.0,,45.0,89.0,105.0,54.0,210.0,28.0,211.0,23.0
2,79.0,264.0,140.0,174.0,62.0,115.0,113.0,79.0,256.0,45.0,214.0,31.0
3,,280.0,147.0,177.0,61.0,111.0,,79.0,256.0,57.0,194.0,41.0
4,104.0,334.0,,199.0,66.0,112.0,,100.0,324.0,64.0,212.0,54.0
5,185.0,337.0,219.0,257.0,87.0,111.0,,171.0,364.0,,262.0,66.0
6,279.0,386.0,343.0,419.0,145.0,151.0,142.0,,440.0,96.0,360.0,76.0
7,158.0,362.0,205.0,238.0,91.0,122.0,144.0,165.0,,,286.0,78.0
8,,377.0,225.0,251.0,90.0,129.0,145.0,150.0,348.0,81.0,,73.0
9,108.0,302.0,151.0,198.0,73.0,104.0,119.0,105.0,278.0,57.0,213.0,45.0
10,73.0,,,,50.0,87.0,104.0,68.0,192.0,32.0,138.0,29.0
11,55.0,224.0,102.0,119.0,45.0,,92.0,56.0,159.0,23.0,,19.0
12,70.0,259.0,137.0,164.0,50.0,101.0,102.0,63.0,213.0,27.0,177.0,24.0
13,62.0,266.0,113.0,134.0,48.0,100.0,95.0,63.0,202.0,26.0,215.0,22.0
14,70.0,239.0,,149.0,52.0,106.0,,67.0,211.0,,182.0,27.0
15,102.0,292.0,172.0,210.0,,132.0,127.0,,288.0,55.0,199.0,46.0
16,105.0,303.0,152.0,188.0,64.0,,125.0,101.0,300.0,,208.0,54.0
17,218.0,301.0,249.0,258.0,,104.0,,179.0,358.0,71.0,244.0,
18,380.0,340.0,381.0,,155.0,147.0,137.0,303.0,447.0,92.0,360.0,85.0
19,158.0,349.0,,252.0,92.0,115.0,142.0,178.0,367.0,83.0,259.0,77.0
20,,339.0,193.0,205.0,82.0,,147.0,147.0,327.0,73.0,246.0,62.0
21,130.0,291.0,193.0,216.0,83.0,113.0,135.0,123.0,301.0,64.0,246.0,54.0
22,75.0,255.0,129.0,148.0,54.0,85.0,119.0,74.0,189.0,36.0,136.0,34.0
23,,224.0,108.0,118.0,39.0,69.0,,53.0,145.0,23.0,104.0,19.0
24,60.0,267.0,133.0,170.0,51.0,83.0,137.0,66.0,206.0,30.0,192.0,22.0
25,58.0,251.0,115.0,140.0,,81.0,,57.0,,,223.0,
26,66.0,253.0,139.0,160.0,56.0,,123.0,72.0,211.0,42.0,208.0,31.0
27,99.0,,170.0,218.0,77.0,113.0,146.0,95.0,292.0,62.0,206.0,44.0
28,107.0,330.0,154.0,191.0,65.0,97.0,136.0,109.0,303.0,70.0,227.0,57.0
29,187.0,312.0,250.0,270.0,90.0,99.0,134.0,189.0,344.0,86.0,271.0,60.0
30,280.0,367.0,381.0,452.0,149.0,138.0,163.0,328.0,454.0,116.0,432.0,87.0
31,145.0,360.0,,234.0,88.0,111.0,146.0,182.0,,97.0,294.0,82.0
32,129.0,379.0,211.0,208.0,84.0,115.0,157.0,,333.0,85.0,273.0,79.0
33,,309.0,187.0,255.0,81.0,115.0,153.0,129.0,317.0,71.0,244.0,58.0
34,66.0,266.0,117.0,150.0,46.0,76.0,120.0,69.0,195.0,35.0,141.0,32.0
35,,253.0,136.0,156.0,51.0,84.0,127.0,67.0,190.0,34.0,126.0,25.0
36,61.0,302.0,140.0,176.0,54.0,88.0,134.0,62.0,216.0,33.0,225.0,25.0
37,59.0,281.0,,146.0,,98.0,114.0,62.0,219.0,29.0,249.0,
38,81.0,,158.0,195.0,62.0,130.0,,85.0,279.0,50.0,256.0,
39,99.0,335.0,178.0,205.0,70.0,103.0,138.0,94.0,302.0,58.0,216.0,44.0
40,110.0,396.0,170.0,209.0,72.0,96.0,144.0,120.0,338.0,76.0,254.0,60.0
41,176.0,368.0,251.0,282.0,94.0,102.0,144.0,181.0,340.0,84.0,282.0,
42,,459.0,405.0,469.0,157.0,,,316.0,430.0,118.0,,82.0
43,149.0,436.0,241.0,251.0,95.0,114.0,159.0,192.0,383.0,109.0,,86.0
44,138.0,,220.0,230.0,89.0,116.0,180.0,166.0,362.0,98.0,292.0,74.0
45,136.0,355.0,223.0,246.0,92.0,112.0,162.0,145.0,357.0,89.0,302.0,65.0
46,67.0,291.0,124.0,146.0,48.0,81.0,111.0,,216.0,39.0,163.0,35.0
47,56.0,281.0,134.0,145.0,49.0,80.0,110.0,67.0,,,141.0,26.0
48,60.0,317.0,157.0,183.0,58.0,106.0,131.0,77.0,248.0,43.0,257.0,28.0
49,58.0,288.0,,156.0,50.0,108.0,138.0,72.0,227.0,38.0,,28.0
50,74.0,284.0,167.0,174.0,63.0,119.0,137.0,86.0,254.0,49.0,261.0,37.0
51,102.0,348.0,192.0,196.0,77.0,111.0,146.0,,314.0,67.0,240.0,47.0
52,119.0,,188.0,201.0,75.0,106.0,170.0,124.0,361.0,89.0,283.0,
53,190.0,401.0,,296.0,107.0,113.0,163.0,204.0,408.0,101.0,348.0,74.0
54,289.0,483.0,,499.0,170.0,,185.0,342.0,501.0,138.0,,93.0
55,152.0,409.0,235.0,259.0,108.0,118.0,177.0,210.0,412.0,117.0,360.0,89.0
56,144.0,433.0,240.0,259.0,104.0,116.0,187.0,180.0,413.0,115.0,338.0,87.0
57,124.0,354.0,203.0,249.0,95.0,114.0,,143.0,358.0,87.0,296.0,63.0
58,,317.0,126.0,153.0,53.0,84.0,127.0,,231.0,51.0,188.0,42.0
59,59.0,298.0,140.0,162.0,54.0,86.0,130.0,71.0,,40.0,155.0,27.0
60,65.0,340.0,160.0,197.0,65.0,112.0,146.0,82.0,266.0,46.0,291.0,32.0
61,63.0,339.0,142.0,159.0,53.0,116.0,132.0,80.0,278.0,43.0,322.0,
62,76.0,335.0,163.0,184.0,70.0,126.0,152.0,,290.0,57.0,276.0,39.0
63,101.0,380.0,196.0,214.0,80.0,118.0,153.0,113.0,332.0,,257.0,54.0
64,121.0,,195.0,203.0,81.0,109.0,174.0,135.0,367.0,91.0,272.0,69.0
65,210.0,437.0,280.0,306.0,118.0,120.0,174.0,216.0,427.0,,361.0,78.0
66,314.0,521.0,400.0,490.0,,159.0,192.0,329.0,516.0,133.0,501.0,98.0
67,167.0,,251.0,270.0,115.0,,,,,115.0,370.0,95.0
68,179.0,507.0,274.0,301.0,125.0,153.0,206.0,208.0,511.0,121.0,385.0,98.0
69,124.0,402.0,209.0,227.0,87.0,104.0,161.0,,350.0,87.0,302.0,75.0
70,81.0,364.0,149.0,174.0,,91.0,138.0,83.0,,51.0,194.0,45.0
71,,333.0,150.0,,59.0,91.0,134.0,74.0,222.0,41.0,172.0,32.0
72,,370.0,175.0,201.0,68.0,119.0,158.0,77.0,297.0,51.0,,36.0
73,68.0,369.0,154.0,168.0,,124.0,,77.0,277.0,43.0,317.0,34.0
74,80.0,367.0,168.0,187.0,75.0,133.0,147.0,99.0,310.0,58.0,264.0,42.0
75,104.0,437.0,199.0,213.0,82.0,135.0,170.0,112.0,367.0,79.0,251.0,57.0
76,120.0,,210.0,222.0,81.0,115.0,181.0,140.0,408.0,101.0,299.0,80.0
77,204.0,479.0,287.0,,120.0,120.0,168.0,230.0,447.0,113.0,363.0,85.0
78,318.0,,393.0,,,165.0,191.0,340.0,547.0,147.0,486.0,104.0
79,,503.0,279.0,269.0,,136.0,204.0,227.0,482.0,131.0,,
80,,510.0,267.0,245.0,111.0,,200.0,201.0,462.0,131.0,369.0,99.0
81,148.0,411.0,252.0,,,136.0,176.0,169.0,379.0,109.0,322.0,
82,85.0,356.0,158.0,167.0,67.0,101.0,156.0,93.0,248.0,57.0,190.0,
83,64.0,307.0,142.0,,59.0,83.0,142.0,,214.0,42.0,159.0,31.0
84,75.0,370.0,181.0,,75.0,131.0,154.0,81.0,286.0,53.0,320.0,39.0
85,70.0,366.0,157.0,169.0,65.0,126.0,142.0,82.0,269.0,48.0,301.0,36.0
86,90.0,367.0,177.0,205.0,76.0,140.0,165.0,93.0,315.0,63.0,287.0,47.0
87,110.0,406.0,195.0,227.0,89.0,133.0,172.0,114.0,375.0,82.0,260.0,60.0
88,123.0,472.0,214.0,227.0,89.0,113.0,189.0,139.0,413.0,,309.0,83.0
89,214.0,486.0,,309.0,142.0,131.0,182.0,,482.0,130.0,397.0,98.0
90,322.0,556.0,406.0,502.0,223.0,,208.0,331.0,,165.0,514.0,111.0
91,177.0,492.0,273.0,281.0,135.0,144.0,214.0,219.0,513.0,149.0,399.0,112.0
92,159.0,515.0,266.0,,126.0,146.0,212.0,197.0,504.0,,368.0,103.0
93,142.0,,242.0,268.0,110.0,132.0,182.0,156.0,433.0,117.0,341.0,84.0
94,84.0,382.0,148.0,176.0,69.0,98.0,150.0,88.0,290.0,60.0,204.0,43.0
95,77.0,350.0,165.0,,65.0,102.0,145.0,77.0,256.0,46.0,203.0,32.0
96,85.0,387.0,187.0,,73.0,158.0,171.0,84.0,319.0,53.0,321.0,37.0
97,71.0,371.0,,185.0,65.0,136.0,145.0,78.0,293.0,,324.0,37.0
98,90.0,380.0,186.0,216.0,78.0,154.0,167.0,98.0,355.0,77.0,317.0,47.0
99,109.0,434.0,,230.0,86.0,141.0,169.0,107.0,383.0,89.0,,59.0
100,130.0,467.0,219.0,240.0,92.0,133.0,193.0,150.0,459.0,118.0,326.0,87.0
101,218.0,479.0,297.0,319.0,,138.0,194.0,228.0,498.0,130.0,386.0,99.0
102,329.0,569.0,415.0,535.0,228.0,186.0,218.0,358.0,635.0,167.0,518.0,121.0
103,172.0,,269.0,288.0,128.0,,220.0,223.0,522.0,152.0,414.0,117.0
104,187.0,531.0,301.0,312.0,134.0,166.0,236.0,217.0,557.0,156.0,421.0,117.0
105,137.0,449.0,248.0,256.0,103.0,134.0,203.0,145.0,425.0,108.0,,80.0
106,83.0,369.0,,181.0,67.0,,158.0,,274.0,63.0,218.0,42.0
107,73.0,351.0,163.0,183.0,,115.0,166.0,80.0,302.0,55.0,221.0,35.0
108,84.0,415.0,193.0,222.0,73.0,147.0,191.0,82.0,310.0,57.0,337.0,36.0
109,73.0,361.0,157.0,173.0,66.0,147.0,158.0,,282.0,50.0,336.0,38.0
110,87.0,383.0,185.0,197.0,79.0,151.0,,91.0,325.0,66.0,327.0,44.0
111,111.0,412.0,209.0,230.0,102.0,145.0,187.0,116.0,387.0,94.0,283.0,58.0
112,127.0,463.0,212.0,237.0,,129.0,205.0,148.0,,117.0,338.0,
113,195.0,457.0,,,141.0,142.0,195.0,217.0,469.0,126.0,400.0,88.0
114,312.0,526.0,395.0,488.0,225.0,194.0,228.0,,,169.0,550.0,119.0
115,184.0,483.0,274.0,301.0,,150.0,234.0,241.0,524.0,152.0,434.0,
116,170.0,,,274.0,133.0,,242.0,203.0,508.0,144.0,404.0,
117,149.0,430.0,244.0,265.0,113.0,127.0,209.0,157.0,,118.0,372.0,83.0
118,82.0,365.0,148.0,177.0,65.0,,170.0,86.0,289.0,63.0,229.0,44.0
119,72.0,346.0,162.0,,67.0,103.0,158.0,75.0,249.0,51.0,,31.0
120,84.0,356.0,189.0,204.0,,154.0,188.0,81.0,306.0,58.0,332.0,35.0
121,73.0,,158.0,,74.0,150.0,168.0,78.0,281.0,54.0,,
122,,393.0,188.0,208.0,,169.0,190.0,92.0,341.0,69.0,312.0,43.0
123,126.0,,203.0,,102.0,,206.0,121.0,395.0,96.0,294.0,62.0
124,135.0,488.0,240.0,242.0,98.0,123.0,,153.0,445.0,118.0,343.0,85.0
125,,477.0,282.0,340.0,153.0,139.0,208.0,,,132.0,412.0,91.0
126,357.0,557.0,402.0,506.0,231.0,189.0,,364.0,605.0,173.0,555.0,115.0
127,199.0,515.0,295.0,313.0,134.0,172.0,260.0,246.0,544.0,169.0,,118.0
128,179.0,,294.0,304.0,138.0,163.0,264.0,213.0,517.0,147.0,443.0,115.0
129,141.0,,246.0,281.0,117.0,,215.0,167.0,447.0,115.0,,81.0
130,89.0,377.0,162.0,193.0,75.0,102.0,180.0,99.0,293.0,68.0,238.0,48.0
131,,373.0,161.0,178.0,72.0,104.0,171.0,85.0,272.0,55.0,213.0,
132,78.0,413.0,185.0,210.0,77.0,146.0,184.0,86.0,336.0,64.0,373.0,36.0
133,75.0,,171.0,174.0,74.0,150.0,177.0,89.0,321.0,,371.0,37.0
134,92.0,426.0,195.0,,81.0,156.0,202.0,94.0,352.0,74.0,322.0,45.0
135,114.0,451.0,198.0,239.0,103.0,131.0,204.0,117.0,406.0,91.0,297.0,59.0
136,126.0,537.0,210.0,252.0,96.0,133.0,228.0,152.0,472.0,116.0,346.0,84.0
137,210.0,480.0,262.0,349.0,151.0,143.0,219.0,232.0,510.0,132.0,407.0,
138,,588.0,400.0,529.0,216.0,196.0,253.0,,649.0,173.0,,112.0
139,,557.0,281.0,336.0,145.0,163.0,277.0,265.0,562.0,168.0,466.0,122.0
140,190.0,564.0,289.0,319.0,152.0,177.0,295.0,244.0,593.0,163.0,471.0,116.0
141,132.0,472.0,221.0,256.0,104.0,130.0,233.0,157.0,,114.0,352.0,80.0
142,89.0,416.0,165.0,193.0,75.0,107.0,202.0,100.0,315.0,68.0,236.0,50.0
143,,372.0,140.0,164.0,58.0,,174.0,84.0,270.0,48.0,194.0,31.0
144,69.0,396.0,165.0,203.0,71.0,149.0,194.0,83.0,347.0,59.0,366.0,37.0
145,58.0,415.0,161.0,,61.0,149.0,172.0,80.0,316.0,,353.0,35.0
146,83.0,413.0,176.0,189.0,78.0,151.0,204.0,93.0,349.0,,298.0,45.0
147,112.0,477.0,215.0,238.0,105.0,152.0,237.0,117.0,416.0,97.0,288.0,59.0
148,122.0,506.0,210.0,220.0,100.0,,235.0,,458.0,114.0,319.0,83.0
149,192.0,486.0,278.0,325.0,148.0,137.0,212.0,226.0,491.0,126.0,391.0,92.0
150,313.0,555.0,394.0,510.0,212.0,204.0,240.0,362.0,616.0,170.0,561.0,115.0
151,182.0,515.0,270.0,300.0,141.0,155.0,250.0,250.0,,150.0,420.0,
152,165.0,534.0,269.0,270.0,141.0,155.0,270.0,,531.0,138.0,414.0,
153,151.0,443.0,,277.0,124.0,143.0,238.0,165.0,453.0,113.0,357.0,80.0
154,91.0,382.0,161.0,191.0,,,206.0,103.0,318.0,75.0,239.0,49.0
155,,333.0,141.0,145.0,56.0,98.0,169.0,73.0,252.0,50.0,211.0,29.0
156,,421.0,175.0,195.0,72.0,159.0,193.0,82.0,,61.0,396.0,36.0
157,65.0,383.0,156.0,162.0,65.0,146.0,174.0,81.0,327.0,56.0,374.0,
158,85.0,413.0,181.0,199.0,79.0,156.0,211.0,91.0,360.0,73.0,317.0,
159,116.0,482.0,211.0,233.0,99.0,139.0,236.0,121.0,426.0,97.0,300.0,65.0
160,126.0,488.0,211.0,223.0,98.0,120.0,232.0,146.0,460.0,116.0,342.0,89.0
161,,491.0,,348.0,136.0,148.0,,235.0,516.0,,430.0,96.0
162,332.0,568.0,435.0,522.0,219.0,193.0,263.0,393.0,647.0,170.0,579.0,118.0
163,183.0,524.0,273.0,,135.0,146.0,267.0,244.0,536.0,153.0,441.0,
164,165.0,539.0,270.0,290.0,135.0,159.0,267.0,,542.0,,441.0,110.0
165,146.0,468.0,237.0,276.0,,139.0,222.0,163.0,,112.0,386.0,83.0
166,87.0,,148.0,183.0,66.0,,184.0,86.0,291.0,62.0,227.0,44.0
167,66.0,356.0,147.0,180.0,61.0,101.0,170.0,70.0,265.0,47.0,234.0,33.0
168,72.0,,172.0,218.0,70.0,144.0,190.0,77.0,350.0,59.0,408.0,39.0
169,64.0,427.0,143.0,172.0,62.0,,,78.0,324.0,50.0,,33.0
170,78.0,438.0,165.0,206.0,73.0,137.0,205.0,90.0,361.0,64.0,324.0,42.0
171,120.0,491.0,215.0,238.0,96.0,138.0,,114.0,408.0,88.0,292.0,56.0
172,118.0,545.0,219.0,242.0,95.0,130.0,224.0,145.0,465.0,109.0,330.0,80.0
173,202.0,515.0,277.0,,132.0,137.0,226.0,228.0,498.0,120.0,420.0,89.0
174,327.0,611.0,418.0,485.0,204.0,187.0,249.0,381.0,641.0,163.0,558.0,
175,172.0,,273.0,,,152.0,265.0,238.0,461.0,152.0,465.0,109.0
176,160.0,582.0,269.0,280.0,123.0,165.0,279.0,204.0,377.0,,,
177,150.0,508.0,253.0,272.0,104.0,141.0,232.0,161.0,337.0,,,74.0
178,,459.0,146.0,188.0,67.0,98.0,197.0,93.0,244.0,56.0,,44.0
179,67.0,401.0,154.0,181.0,62.0,,187.0,80.0,219.0,49.0,210.0,32.0
180,82.0,485.0,181.0,231.0,63.0,138.0,220.0,90.0,280.0,54.0,399.0,36.0
181,70.0,492.0,163.0,214.0,,134.0,217.0,93.0,269.0,,393.0,33.0
182,85.0,,183.0,207.0,86.0,134.0,221.0,91.0,265.0,59.0,323.0,48.0
183,108.0,557.0,221.0,230.0,99.0,142.0,238.0,128.0,311.0,78.0,,48.0
184,123.0,536.0,227.0,246.0,98.0,127.0,243.0,149.0,,,358.0,77.0
185,196.0,578.0,290.0,321.0,133.0,143.0,220.0,220.0,427.0,123.0,,87.0
186,321.0,612.0,,437.0,186.0,197.0,,354.0,519.0,161.0,608.0,109.0
187,180.0,582.0,,280.0,136.0,166.0,271.0,,402.0,140.0,436.0,97.0
188,142.0,620.0,274.0,261.0,123.0,175.0,266.0,195.0,392.0,123.0,414.0,86.0
189,126.0,537.0,240.0,263.0,90.0,137.0,209.0,151.0,343.0,103.0,,69.0
190,80.0,458.0,,172.0,66.0,,188.0,92.0,242.0,57.0,237.0,
191,67.0,453.0,,176.0,61.0,98.0,176.0,80.0,225.0,44.0,230.0,32.0
192,71.0,448.0,169.0,191.0,60.0,136.0,189.0,84.0,271.0,57.0,398.0,35.0
193,60.0,,146.0,169.0,54.0,118.0,172.0,76.0,262.0,46.0,378.0,
194,79.0,470.0,173.0,193.0,68.0,,213.0,91.0,,59.0,311.0,39.0
195,112.0,574.0,213.0,235.0,97.0,133.0,236.0,123.0,348.0,86.0,317.0,53.0
196,117.0,595.0,219.0,224.0,98.0,125.0,247.0,138.0,,100.0,330.0,70.0
197,,,290.0,311.0,133.0,141.0,247.0,221.0,435.0,130.0,492.0,82.0
198,315.0,636.0,429.0,,180.0,183.0,,347.0,528.0,135.0,577.0,105.0
199,168.0,604.0,,310.0,128.0,148.0,,234.0,420.0,142.0,462.0,104.0
200,170.0,655.0,315.0,312.0,138.0,179.0,,,,130.0,476.0,96.0
201,119.0,546.0,,247.0,95.0,122.0,223.0,142.0,353.0,97.0,388.0,69.0
202,83.0,497.0,158.0,192.0,70.0,101.0,216.0,103.0,,56.0,,42.0
203,69.0,442.0,151.0,,58.0,,195.0,82.0,246.0,43.0,262.0,30.0
204,70.0,489.0,186.0,212.0,68.0,,,85.0,,53.0,427.0,34.0
205,,490.0,,188.0,57.0,,189.0,,273.0,48.0,420.0,33.0
206,77.0,522.0,185.0,215.0,72.0,139.0,,92.0,298.0,57.0,323.0,40.0
207,,571.0,223.0,242.0,96.0,138.0,239.0,116.0,370.0,76.0,330.0,55.0
208,116.0,616.0,227.0,,96.0,142.0,243.0,151.0,405.0,107.0,366.0,81.0
209,206.0,601.0,,341.0,133.0,157.0,241.0,224.0,446.0,136.0,491.0,97.0
210,345.0,,436.0,495.0,185.0,210.0,256.0,364.0,,159.0,603.0,115.0
211,177.0,652.0,305.0,,126.0,160.0,264.0,259.0,478.0,153.0,,103.0
212,149.0,639.0,289.0,283.0,125.0,166.0,265.0,215.0,459.0,133.0,476.0,105.0
213,155.0,566.0,269.0,297.0,109.0,135.0,,167.0,,115.0,,82.0
214,97.0,538.0,182.0,210.0,71.0,109.0,,107.0,303.0,59.0,279.0,48.0
215,69.0,473.0,160.0,168.0,57.0,90.0,182.0,78.0,,,,31.0
216,75.0,505.0,188.0,221.0,69.0,138.0,208.0,88.0,315.0,57.0,469.0,40.0
217,70.0,525.0,167.0,188.0,60.0,124.0,200.0,77.0,290.0,51.0,420.0,38.0
218,85.0,557.0,191.0,232.0,75.0,141.0,224.0,91.0,300.0,70.0,339.0,48.0
219,120.0,595.0,,272.0,,143.0,254.0,121.0,385.0,93.0,369.0,67.0
220,133.0,641.0,239.0,265.0,102.0,136.0,,157.0,416.0,118.0,420.0,91.0
221,230.0,,326.0,,137.0,169.0,248.0,233.0,498.0,151.0,538.0,112.0
222,363.0,689.0,467.0,488.0,185.0,207.0,276.0,384.0,583.0,178.0,628.0,118.0
223,200.0,660.0,323.0,325.0,132.0,169.0,274.0,271.0,510.0,170.0,554.0,116.0
224,164.0,676.0,311.0,309.0,141.0,170.0,274.0,221.0,505.0,151.0,512.0,119.0
225,148.0,599.0,280.0,303.0,,141.0,265.0,163.0,428.0,117.0,467.0,90.0
226,91.0,536.0,190.0,229.0,79.0,118.0,231.0,,317.0,69.0,321.0,52.0
227,66.0,470.0,160.0,179.0,64.0,107.0,187.0,80.0,258.0,44.0,286.0,34.0
228,,518.0,207.0,238.0,70.0,148.0,227.0,,329.0,58.0,460.0,43.0
229,66.0,537.0,177.0,195.0,60.0,141.0,204.0,91.0,301.0,52.0,434.0,42.0
230,95.0,562.0,,,72.0,144.0,222.0,99.0,,74.0,378.0,
231,124.0,588.0,251.0,281.0,98.0,147.0,263.0,130.0,398.0,95.0,391.0,69.0
232,132.0,633.0,256.0,290.0,97.0,143.0,271.0,,436.0,,466.0,97.0
233,231.0,650.0,352.0,381.0,145.0,171.0,,252.0,546.0,149.0,,113.0
234,,715.0,469.0,514.0,200.0,216.0,300.0,390.0,,188.0,710.0,141.0
235,201.0,679.0,345.0,354.0,147.0,,303.0,,544.0,183.0,595.0,136.0
236,213.0,,351.0,363.0,150.0,193.0,297.0,,549.0,172.0,606.0,148.0
237,144.0,634.0,273.0,313.0,127.0,149.0,265.0,173.0,428.0,,503.0,106.0
238,89.0,560.0,177.0,226.0,83.0,105.0,216.0,112.0,300.0,67.0,337.0,58.0
239,78.0,509.0,192.0,225.0,74.0,108.0,209.0,90.0,266.0,,319.0,40.0
240,92.0,561.0,217.0,256.0,86.0,145.0,236.0,107.0,345.0,60.0,485.0,45.0
241,83.0,567.0,198.0,,73.0,143.0,218.0,101.0,302.0,54.0,443.0,
242,100.0,575.0,229.0,264.0,86.0,,239.0,113.0,345.0,76.0,402.0,
243,131.0,641.0,275.0,294.0,108.0,146.0,270.0,138.0,419.0,102.0,409.0,71.0
244,148.0,642.0,288.0,307.0,,146.0,259.0,178.0,442.0,133.0,,110.0
245,241.0,662.0,383.0,410.0,161.0,,275.0,265.0,509.0,164.0,603.0,122.0
246,364.0,695.0,497.0,541.0,,222.0,297.0,410.0,598.0,,715.0,146.0
247,222.0,663.0,357.0,360.0,,190.0,286.0,279.0,525.0,173.0,592.0,
248,188.0,694.0,341.0,356.0,154.0,191.0,304.0,,499.0,167.0,583.0,138.0
249,168.0,645.0,309.0,346.0,142.0,162.0,265.0,192.0,,131.0,545.0,114.0
250,107.0,578.0,,246.0,96.0,120.0,229.0,120.0,326.0,,353.0,
251,83.0,519.0,202.0,243.0,,122.0,230.0,103.0,289.0,50.0,340.0,44.0
252,91.0,561.0,215.0,270.0,89.0,153.0,238.0,106.0,354.0,,489.0,46.0
253,83.0,537.0,187.0,226.0,79.0,138.0,204.0,114.0,316.0,62.0,449.0,45.0
254,,,220.0,268.0,,160.0,237.0,121.0,361.0,80.0,423.0,57.0
255,146.0,601.0,283.0,321.0,,168.0,271.0,154.0,,,439.0,85.0
256,,658.0,300.0,324.0,,158.0,274.0,187.0,475.0,137.0,501.0,117.0
257,253.0,661.0,396.0,424.0,168.0,188.0,279.0,264.0,572.0,165.0,,138.0
258,369.0,706.0,501.0,,211.0,223.0,302.0,379.0,661.0,,723.0,155.0
259,213.0,663.0,356.0,365.0,141.0,181.0,290.0,274.0,590.0,174.0,630.0,153.0
260,213.0,705.0,371.0,400.0,148.0,205.0,315.0,265.0,596.0,175.0,630.0,154.0
261,154.0,598.0,287.0,332.0,126.0,159.0,272.0,184.0,507.0,133.0,558.0,121.0
262,109.0,558.0,198.0,252.0,95.0,127.0,231.0,117.0,345.0,72.0,369.0,67.0
263,82.0,489.0,194.0,238.0,90.0,123.0,220.0,91.0,301.0,56.0,338.0,47.0
264,91.0,548.0,223.0,251.0,89.0,169.0,235.0,101.0,375.0,61.0,483.0,52.0
265,87.0,569.0,203.0,223.0,85.0,149.0,217.0,93.0,345.0,56.0,451.0,47.0
266,97.0,585.0,228.0,264.0,98.0,160.0,253.0,114.0,371.0,74.0,439.0,64.0
267,143.0,644.0,287.0,320.0,121.0,177.0,276.0,166.0,467.0,106.0,473.0,87.0
268,155.0,693.0,292.0,322.0,122.0,170.0,278.0,180.0,508.0,140.0,552.0,123.0
269,254.0,669.0,403.0,409.0,156.0,194.0,287.0,271.0,600.0,163.0,650.0,144.0
270,366.0,726.0,530.0,519.0,213.0,226.0,306.0,376.0,669.0,176.0,708.0,155.0
271,207.0,664.0,352.0,355.0,145.0,191.0,296.0,278.0,592.0,171.0,610.0,153.0
272,192.0,699.0,367.0,375.0,159.0,202.0,321.0,234.0,570.0,150.0,594.0,145.0
273,180.0,616.0,319.0,372.0,141.0,175.0,287.0,198.0,525.0,117.0,556.0,119.0
274,109.0,570.0,206.0,256.0,102.0,125.0,243.0,116.0,345.0,70.0,356.0,66.0
275,82.0,508.0,200.0,243.0,91.0,122.0,228.0,96.0,301.0,52.0,330.0,46.0
276,91.0,559.0,222.0,270.0,97.0,163.0,256.0,107.0,354.0,55.0,472.0,53.0
277,91.0,589.0,204.0,231.0,91.0,151.0,242.0,102.0,348.0,53.0,465.0,51.0
278,97.0,619.0,233.0,260.0,105.0,172.0,239.0,116.0,369.0,65.0,430.0,64.0
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/population_estimate.csv | "Year","Male","Female"
1875,238.1,183.2
1876,250.4,194.2
1877,252.5,201.6
1878,264.6,211.5
1879,281.8,225.5
1880,292.3,236.2
1881,299.4,245.7
1882,307.7,254.1
1883,319.0,265.9
1884,331.0,277.4
1885,336.5,282.8
1886,340.5,290.9
1887,347.4,297.9
1888,347.8,301.6
1889,351.4,306.6
1890,355.4,312.1
1891,359.0,317.0
1892,368.0,324.4
1893,380.5,333.8
1894,386.6,341.5
1895,392.6,348.1
1896,398.7,355.4
1897,406.4,362.5
1898,413.8,369.5
1899,420.4,376.0
1900,425.3,382.8
1901,437.3,393.5
1902,449.0,402.0
1903,462.8,412.9
1904,477.1,423.6
1905,490.5,435.1
1906,507.2,449.3
1907,518.2,459.0
1908,535.9,472.5
1909,545.9,484.7
1910,555.5,494.9
1911,566.2,509.0
1912,579.7,522.8
1913,595.6,538.9
1914,594.6,551.2
1915,590.4,562.2
1916,575.8,574.5
1917,563.3,584.1
1918,568.0,590.2
1919,627.8,599.4
1920,643.7,613.9
1921,660.9,631.8
1922,673.8,645.1
1923,686.0,657.1
1924,700.0,670.4
1925,716.4,684.9
1926,730.6,699.0
1927,740.8,709.3
1928,749.1,717.8
1929,758.5,727.1
1930,767.9,738.9
1931,775.6,747.1
1932,780.9,753.8
1933,786.4,760.8
1934,792.0,766.4
1935,796.7,773.0
1936,804.3,780.3
1937,813.1,788.7
1938,821.7,796.6
1939,832.8,808.8
1940,813.0,820.6
1941,799.2,832.0
1942,793.7,842.7
1943,790.8,851.2
1944,813.6,862.7
1945,855.9,872.6
1946,893.3,891.1
1947,913.6,909.5
1948,934.3,927.6
1949,949.4,942.6
1950,967.3,960.3
1951,989.5,981.0
1952,1017.9,1006.7
1953,1043.1,1031.6
1954,1065.5,1052.9
1955,1089.1,1075.7
1956,1111.2,1098.0
1957,1137.8,1125.0
1958,1165.6,1150.3
1959,1186.1,1173.7
1960,1207.9,1195.6
1961,1238.0,1223.3
1962,1264.1,1251.7
1963,1288.4,1278.5
1964,1313.0,1304.0
1965,1336.7,1327.1
1966,1360.3,1351.0
1967,1373.6,1371.4
1968,1385.4,1387.6
1969,1399.8,1404.2
1970,1425.4,1426.7
1971,1447.4,1451.1
1972,1477.8,1481.9
1973,1510.0,1514.9
1974,1543.9,1548.0
1975,1567.6,1576.1
1976,1578.1,1585.3
1977,1578.4,1588.0
1978,1575.9,1589.3
1979,1573.8,1590.1
1980,1581.5,1594.9
1981,1586.9,1607.6
1982,1601.9,1624.9
1983,1620.7,1644.1
1984,1632.2,1660.8
1985,1636.8,1666.3
1986,1639.2,1674.3
1987,1652.9,1689.2
1988,1649.7,1695.5
1989,1659.7,1710.1
1990,1681.9,1728.5
1991,1730.0,1786.0
1992,1749.1,1803.1
1993,1772.5,1825.4
1994,1797.8,1850.4
1995,1828.0,1878.7
1996,1855.4,1906.9
1997,1872.9,1929.7
1998,1883.3,1945.9
1999,1891.7,1959.5
2000,1900.4,1972.6
2001,1920.5,1995.7
2002,1956.7,2032.9
2003,1991.8,2069.8
2004,2016.2,2098.1
2005,2037.7,2123.3
2006,2061.8,2149.6
2007,2083.4,2169.2
2008,2104.1,2187.4
2009,2134.0,2213.2
2010,2158.2,2234.9
2011,2174.3,2248.4 | 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/exog_deaths_by_region_exog.csv | ,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
0,0.0,-0.0,0.0,0.0,0.0,-0.0,0.0,0.0,0.0,0.0,0.0,-0.0,0.0,-0.0,0.0,-0.0,0.0,-0.0,0.0,-0.0,0.0,0.0,0.0,0.0,0.0,-0.0,0.0,-0.0,0.0,-0.0,0.0,0.0
1,0.19875209056106533,-0.03703668679562561,0.295948341698736,0.037036756290946694,0.38958056246995176,-0.03703686180403085,0.47942543957854117,0.037036891815146945,0.5654604994420314,0.03703385723138967,0.650319402879635,-0.03703686888160362,0.7176549202255535,-0.03703588287852368,0.7937041483849424,-0.03703588151587406,0.8414708110015084,-0.03703672098440032,0.8912071759819662,-0.037036644135165414,0.9322228269534192,0.03703667738402852,0.9649541407882584,0.037036840568423934,0.9907454886548319,-0.03703685981409009,0.9975729368463327,-0.037036427545358824,0.9999899958342958,-0.03703650200042182,0.9918603059772687,0.037036115422779625
2,0.38958056246995176,-0.07407337359125123,0.5654604994420314,0.07407351258189339,0.7176549202255535,-0.0740737236080617,0.8414708110015084,0.07407378363029389,0.9333893780282453,0.07406771446277934,0.9947834290640049,-0.07407373776320723,0.9999899958342958,-0.07407176575704735,0.9867488209877175,-0.07407176303174812,0.9092972390096973,-0.07407344196880064,0.8084962368241235,-0.07407328827033083,0.6755963404934754,0.07407335476805704,0.5162482046746085,0.07407368113684787,0.3367883600959299,-0.07407371962818018,0.14113103602388338,-0.07407285509071765,-0.05839846036885846,-0.07407300400084364,-0.2555914790708911,0.07407223084555925
3,0.5648776867457179,-0.11111006038687685,0.7844617548535878,0.11111026887284009,0.9324273458780673,-0.11111058541209257,0.9974947805708058,0.11111067544544083,0.9752584930881769,0.11110157169416901,0.8713852646397194,-0.11111060664481084,0.6757445585300422,-0.11110764863557103,0.4330416582256388,-0.11110764454762218,0.14111997891143627,-0.11111016295320096,-0.15774566156077083,-0.11110993240549623,-0.44260768120567157,0.11111003215208558,-0.6887625607282502,0.1111105217052718,-0.8762595779997068,-0.11111057944227028,-0.977606507732746,-0.11110928263607647,-0.996579581542493,-0.11110950600126546,-0.9259971962430006,0.11110834626833888
4,0.7176549202255535,-0.14814674718250245,0.9333893780282453,0.14814702516378678,0.9999899958342958,-0.1481474472161234,0.9092972390096973,0.14814756726058778,0.676441758149388,0.14813542892555867,0.338160994485027,-0.14814747552641447,-0.05839846036885846,-0.1481435315140947,-0.44838279812868687,-0.14814352606349623,-0.7568023389898726,-0.14814688393760128,-0.9516018773354787,-0.14814657654066166,-0.9963609914451044,0.14814670953611409,-0.8847345610651648,0.14814736227369574,-0.634659033919092,-0.14814743925636037,-0.2794373334021834,-0.1481457101814353,0.11659775580137664,-0.14814600800168728,0.49421076000889724,0.1481444616911185
5,0.8418215167270572,-0.1851834339781281,0.9989401079316864,0.1851837814547335,0.9096762132340875,-0.1851843090201543,0.5984720204891397,0.18518445907573472,0.1413244557374412,0.18516928615694836,-0.354105675287268,-0.1851843444080181,-0.7571177568390984,-0.18517941439261837,-0.9904800920614084,-0.1851794075793703,-0.9589240765966958,-0.18518360492200162,-0.7055401798405714,-0.18518322067582707,-0.27947058181072887,0.1851833869201426,0.21543164327100997,0.18518420284211964,0.6605172125775963,-0.18518429907045045,0.9380732777200355,-0.18518213772679412,0.989770384000888,-0.18518251000210909,0.7986445253454938,0.18518057711389813
6,0.9324273458780673,-0.2222201207737537,0.9752584930881769,0.22222053774568018,0.6757445585300422,-0.22222117082418513,0.14111997891143627,0.22222135089088166,-0.44316154499371474,0.22220314338833802,-0.8798309129181567,-0.2222212132896217,-0.996579581542493,-0.22221529727114206,-0.783001799069347,-0.22221528909524435,-0.27941544048547,-0.22222032590640192,0.31154129916430334,-0.22221986481099246,0.793824326510654,0.22222006430417116,0.9999899855124953,0.2222210434105436,0.8591914808355817,-0.22222115888454055,0.41215069065251386,-0.22221856527215295,-0.17439940060295403,-0.2222190120025309,-0.7000126599889558,0.22221669253667775
7,0.9858602391935614,-0.2592568075693793,0.8644599416218897,0.2592572940366269,0.3351276963093557,-0.259258032628216,-0.3507831552351123,0.2592582427060286,-0.8728384682208625,0.2592370006197277,-0.9917579244663942,-0.2592580821712253,-0.6315296048789927,-0.25925118014966575,0.01703664511066828,-0.2592511706111184,0.6569864630177729,-0.25925704689080226,0.9881680297701986,-0.25925650894615787,0.8547673826174453,0.2592567416881997,0.3195606562508116,0.25925788397896754,-0.3684485701738951,-0.2592580186986307,-0.8797645046756954,-0.25925499281751174,-0.9795856256264135,-0.25925551400295277,-0.6182589709920092,0.2592528079594574
8,0.9999899958342958,-0.2962934943650049,0.676441758149388,0.29629405032757355,-0.05839846036885846,-0.2962948944322468,-0.7568023389898726,0.29629513452117556,-0.9976078027780596,0.29627085785111734,-0.6372456874930467,-0.29629495105282894,0.11659775580137664,-0.2962870630281894,0.8041820959227095,-0.29628705212699247,0.9893580422707821,-0.29629376787520256,0.5849170720767289,-0.2962931530813233,-0.17436114778033118,0.29629341907222817,-0.8290257830693025,0.2962947245473915,-0.9844397823918536,-0.29629487851272074,-0.5366148490271767,-0.2962914203628706,0.2316062652459557,-0.29629201600337457,0.85933118891123,0.296288923382237
9,0.9742533069920303,-0.33333018116063057,0.4279990470348874,0.3333308066185202,-0.44270478422051296,-0.3333317562362777,-0.9775299157556051,0.3333320263363225,-0.7738840304681833,0.33330471508250703,0.016973153546815833,-0.33333181993443256,0.793998482402275,-0.3333229459067131,0.9827385691500352,-0.3333229336428665,0.41211840011841105,-0.3333304888596029,-0.4575357992709801,-0.33332979721648875,-0.9811296105727584,0.3333300964562567,-0.763087507992051,0.3333315651158154,0.033803735832937575,-0.33333173832681084,0.803847239088657,-0.33332784790822945,0.9660600410120782,-0.33332851800379637,0.39681878605673,0.3333250388050166
10,0.9096762132340875,-0.3703668679562562,0.1413244557374412,0.370367562909467,-0.7571177568390984,-0.3703686180403086,-0.9589240765966958,0.37036891815146944,-0.27982030153241716,0.3703385723138967,0.6632092552607707,-0.3703686888160362,0.989770384000888,-0.37035882878523674,0.41757808565271376,-0.3703588151587406,-0.5440209985214498,-0.37036720984400323,-0.9999900000020655,-0.37036644135165414,-0.5366786972951043,0.3703667738402852,0.4207757540174443,0.3703684056842393,0.9959308311896113,-0.3703685981409009,0.6503386575500577,-0.37036427545358824,-0.28802324866978724,-0.37036502000421817,-0.9615870205441172,0.37036115422779625
11,0.8088331995037931,-0.4074035547518818,-0.157974228291299,0.4074043192004137,-0.9519984831623965,-0.4074054798443394,-0.7055401798405714,0.40740580996661635,0.31199270920924493,0.40737242954528635,0.9975276812951132,-0.40740555769763975,0.5851608520908025,-0.40739471166376046,-0.46359716800381917,-0.40739469667461464,-0.9999900000020655,-0.4074039308284035,-0.4496473716596205,-0.4074030854868196,0.5921902353181169,0.40740345122431376,0.9882015506186382,0.4074052462526633,0.30474729997418254,-0.40740545795499106,-0.7118409655458136,-0.40740070299894704,-0.9492397584664318,-0.40740152200464,-0.14902839929043718,0.4073972696505759
12,0.6757445585300422,-0.4444402415475074,-0.44316154499371474,0.44444107549136036,-0.996579581542493,-0.44444234164837026,-0.27941544048547,0.4444427017817633,0.7948176905375149,0.44440628677667604,0.8626932520163858,-0.4444424265792434,-0.17439940060295403,-0.4444305945422841,-0.9939313274391613,-0.4444305781904887,-0.5365728071709441,-0.44444065181280384,0.5920733924140472,-0.4444397296219849,0.965848145080876,0.4444401286083423,0.10790976045352942,0.4444420868210872,-0.8923367754232626,-0.4444423177690811,-0.751045933420351,-0.4444371305443059,0.3434579436630076,-0.4444380240050618,0.9999899980286128,0.4444333850733555
13,0.515716114442965,-0.48147692834313305,-0.6887625607282502,0.481477831782307,-0.8838226767946192,-0.48147920345240114,0.2151199436546398,0.4814795935969103,0.9999899855124953,0.48144014400806573,0.3221207063604493,-0.48147929546084706,-0.8281713171032081,-0.4814664774208078,-0.7720780738215345,-0.48146645970636276,0.4201669500408618,-0.4814773727972042,0.986771760456212,-0.48147637375715036,0.10777489471581637,0.4814768059923708,-0.9304700815816802,0.48147892738951115,-0.6080831644211951,-0.4814791775831712,0.6055871902376866,-0.48147355808966474,0.9291821426953464,-0.4814745260054837,-0.10865801190272363,0.4814695004961351
14,0.3351276963093557,-0.5185136151387586,-0.8728384682208625,0.5185145880732538,-0.6315296048789927,-0.518516065256432,0.6569864630177729,0.5185164854120572,0.855837008655436,0.5184740012394554,-0.3699502407716862,-0.5185161643424506,-0.9795856256264135,-0.5185023602993315,0.03406847349780193,-0.5185023412222368,0.9906071510842663,-0.5185140937816045,0.30311829413640534,-0.5185130178923157,-0.8877420074045105,0.5185134833763994,-0.6057090742401774,0.5185157679579351,0.6856284592184476,-0.5185160373972614,0.8367210198267926,-0.5185099856350235,-0.39772129305819093,-0.5185110280059055,-0.971990024839086,0.5185056159189148
15,0.14117879448049286,-0.5555503019343843,-0.9789463148795308,0.5555513443642005,-0.2795318944295686,-0.5555529270604628,0.9379997830302238,0.5555533772272041,0.412715542089693,0.555507858470845,-0.888027809036927,-0.5555530332240542,-0.5367964384047176,-0.5555382431778552,0.8144326792815318,-0.5555382227381108,0.6502877058397336,-0.5555508147660048,-0.7117851953493901,-0.5555496620274812,-0.7511352954312417,0.5555501607604278,0.6064171458886196,0.555552608526359,0.841151785030102,-0.5555528972113514,-0.4872125831994987,-0.5555464131803823,-0.9059555991626467,-0.5555475300063273,0.35912913832409527,0.5555417313416944
16,-0.05839846036885846,-0.5925869887300098,-0.9976078027780596,0.5925881006551471,0.11659775580137664,-0.5925897888644936,0.9893580422707821,0.5925902690423511,-0.1745793372286799,0.5925417157022347,-0.9884520228948253,-0.5925899021056579,0.2316062652459557,-0.5925741260563788,0.9784504703557679,-0.5925741042539849,-0.287903257198445,-0.5925875357504051,-0.9488443019336659,-0.5925863061626466,0.3433826094832344,0.5925868381444563,0.930140826606319,0.592589449094783,-0.39969212792443704,-0.5925897570254415,-0.9056491293324435,-0.5925828407257412,0.45062823450164763,-0.5925840320067491,0.8794464004107753,0.592577846764474
17,-0.2556475528903014,-0.6296236755256354,-0.9271559567810723,0.6296248569460938,0.49431918414341114,-0.6296266506685245,0.7984869476954495,0.629627160857498,-0.7008886313740063,0.6295755729336244,-0.6239918053963832,-0.6296267709872615,0.8595197162521094,-0.6296100089349025,0.40199645238299897,-0.6296099857698589,-0.9613972933022695,-0.6296242567348055,-0.14899899503835154,-0.629622950297812,0.9999899980286128,0.629623515528485,-0.10879398274242924,0.6296262896632069,-0.9770208430761707,-0.6296266168395317,0.3590864129959616,-0.6296192682711,0.8796393407968044,-0.629620534007171,-0.5857527917659857,0.6296139621872536
18,-0.44270478422051296,-0.6666603623212611,-0.7738840304681833,0.6666616132370404,0.793998482402275,-0.6666635124725554,0.41211840011841105,0.666664052672645,-0.9823573618878574,0.6666094301650141,0.03394150832089122,-0.6666636398688651,0.9660600410120782,-0.6666458918134262,-0.4786804663343352,-0.666645867285733,-0.750987091654764,-0.6666609777192058,0.8136735694422572,-0.6666595944329775,0.38132565088632636,0.6666601929125134,-0.988345352496325,0.6666631302316308,0.06756924542115483,-0.6666634766536217,0.9564506653568983,-0.6666556958164589,-0.5019983315990214,-0.6666570360075927,-0.7285043559362132,0.6666500776100333
19,-0.6121127728395851,-0.7036970491168867,-0.5514833485346234,0.7036983695279873,0.9683228787330317,-0.7037003742765862,-0.07515110493929571,0.703700944487792,-0.9206604032955211,0.7036432873964037,0.6759116003241548,-0.7037005087504686,0.4866013081585165,-0.7036817746919498,-0.9971015513627467,-0.703681748801607,0.14987717870571562,-0.7036976987036061,0.8871573454493772,-0.7036962385681428,-0.723637384869562,0.7036968702965419,-0.4199684653962168,0.7036999708000546,0.9999899462604889,-0.7037003364677118,-0.22377312579484251,-0.7036921233618176,-0.8503231178393341,-0.7036935380080146,0.773480342431362,0.7036861930328129
20,-0.7571177568390984,-0.7407337359125123,-0.27982030153241716,0.740735125818934,0.989770384000888,-0.7407372360806171,-0.5440209985214498,0.7407378363029389,-0.5373502782659456,0.7406771446277934,0.9999899052848786,-0.7407373776320724,-0.28802324866978724,-0.7407176575704735,-0.7609360610761183,-0.7407176303174812,0.9129450621581827,-0.7407344196880065,-0.008851307462160092,-0.7407328827033083,-0.9057568865597412,0.7407335476805704,0.7636632073908185,0.7407368113684786,0.27236162276856596,-0.7407371962818018,-0.9881088348112222,-0.7407285509071765,0.5516563892843228,-0.7407300400084363,0.5291869888000177,0.7407223084555925
21,-0.87193884509668,-0.7777704227081379,0.016838259630527815,0.7777718821098806,0.8549549087119184,-0.777774097884648,-0.8796955782699294,0.7777747281180858,0.033671758626498904,0.7777110018591831,0.8537573325168359,-0.777774246513676,-0.8879367677509318,-0.7777535404489972,0.051090669799684324,-0.7777535118333552,0.836655465724281,-0.7777711406724068,-0.8951871829181393,-0.7777695268384737,0.06722132183841885,0.777770225064599,0.8285264922776944,0.7777736519369026,-0.9074048925451954,-0.7777740560958919,0.08398101795948178,-0.7777649784525352,0.8181069117562207,-0.7777665420088582,-0.9098460023507845,0.7777584238783721
22,-0.9519984831623965,-0.8148071095037636,0.31199270920924493,0.8148086384008274,0.5851608520908025,-0.8148109596886788,-0.9999900000020655,0.8148116199332327,0.5929312814881291,0.8147448590905727,0.3059893459398148,-0.8148111153952795,-0.9492397584664318,-0.8147894233275209,0.8244530003423381,-0.8147893933492293,-0.008851307462160092,-0.814807861656807,-0.8032555607809535,-0.8148061709736392,0.9544732210283877,0.8148069024486275,-0.3204034750533767,0.8148104925053266,-0.5808196568477796,-0.8148109159099821,0.9999899992185399,-0.8148014059978941,-0.5994330513136535,-0.81480304400928,-0.2947296926806361,0.8147945393011518
23,-0.9941049459093658,-0.8518437962993892,0.5792777792669651,0.851845394691774,0.2229827634447428,-0.8518478214927097,-0.8754519938632033,0.8518485117483797,0.9450628489858791,0.8517787163219624,-0.38569021124812053,-0.8518479842768831,-0.4347466492543621,-0.8518253062060446,0.9738857369672237,-0.8518252748651033,-0.846220229387787,-0.8518445826412073,0.1664799691506045,-0.8518428151088044,0.6245002243215082,0.851843579832656,-0.999941600805745,0.8518473330737505,0.7099643773157669,-0.8518477757240721,0.05749197052133409,-0.8518378335432529,-0.7831005942562982,-0.8518395460097018,0.9857945987654071,0.8518306547239314
24,-0.996579581542493,-0.8888804830950148,0.7948176905375149,0.8888821509827207,-0.17439940060295403,-0.8888846832967405,-0.5365728071709441,0.8888854035635266,0.9670567737039758,0.8888125735533521,-0.8959736355102713,-0.8888848531584868,0.3434579436630076,-0.8888611890845682,0.3863011637685579,-0.8888611563809774,-0.9055781749588148,-0.8888813036256077,0.95428489738448,-0.8888794592439698,-0.5018882231177466,0.8888802572166846,-0.21456293876368854,0.8888841736421744,0.8221608903943978,-0.8888846355381622,-0.9918563569924574,-0.8888742610886118,0.6451653778459284,-0.8888760480101237,0.040701279566260545,0.888866770146711
25,-0.9593237341482497,-0.9259171698906403,0.9393589046790989,0.9259189072736673,-0.5442477339904711,-0.9259215451007714,-0.06632188365236895,0.9259222953786735,0.6512299449692022,0.9258464307847417,-0.984866659017702,-0.9259217220400904,0.9133255567115579,-0.9258970719630919,-0.4936284286610337,-0.9258970378968514,-0.13235172276043156,-0.925918024610008,0.699239887226607,-0.9259161033791353,-0.9882264033759778,0.9259169346007131,0.885150931234674,0.9259210142105984,-0.43048370222645677,-0.9259214953522524,-0.1978142568212713,-0.9259106886339706,0.7454235525794333,-0.9259125500105455,-0.9962828703311972,0.9259028855694906
26,-0.8838226767946192,-0.9629538566862661,0.9999899855124953,0.962955663564614,-0.8281713171032081,-0.9629584069048023,0.4201669500408618,0.9629591871938206,0.10790976045352942,0.9628802880161315,-0.610561503823061,-0.9629585909216941,0.9291821426953464,-0.9629329548416156,-0.9999898675235251,-0.9629329194127255,0.7625582929726507,-0.9629547455944084,-0.3199398958003875,-0.9629527475143007,-0.2142947777660121,0.9629536119847416,0.6881166132861658,0.9629578547790223,-0.9684970602594969,-0.9629583551663424,0.9638707030374314,-0.9629471161793295,-0.6886974011408288,-0.9629490520109674,0.2160298456399789,0.9629390009922703
27,-0.7730863983809267,-0.9999905434818916,0.9712949391614468,0.9999924198555608,-0.9813448590810474,-0.999995268708833,0.8037842605294165,0.9999960790089675,-0.4731064079718626,0.9999141452475211,0.05090026690622852,-0.9999954598032976,0.3814093093312873,-0.9999688377201392,-0.749578910983535,-0.9999688009285995,0.956375730864423,-0.9999914665788087,-0.989486878875324,-0.9999893916494662,0.8329236544412376,0.9999902893687701,-0.5170101552123412,0.9999946953474461,0.10125834574727577,-0.9999952149804325,0.33417728942596997,-0.9999835437246882,-0.7052042823325289,-0.9999855540113892,0.9406143579027518,0.9999751164150499
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/README.md | # Time Series datasets
This folder contains various datasets to test our time series analysis. Using datasets from the real world allows more generic testing than using a data generator.
**Disclaimer:** the data has been filtered and organized in a way that makes it suitable to test time series models. If you wish to use this data for other purposes, please take the data from its source.
## From Statistics New Zealand
**Source:** [Stats NZ](http://archive.stats.govt.nz/infoshare/) and licensed by Stats NZ for re-use under the Creative Commons Attribution 4.0 International licence.
- `alcohol.csv`: Alcohol available for consumption (millions of litres), quarterly 1994-2019.
- `cattle.csv`: Agricultural survey: counts of different types of cattle (units) per year, 2002-2018.
- `deaths_by_region.csv`: Deaths (units) in 16 regions per year, 1991-2018.
- `guest_nights_by_region.csv`: Guest nights (thousands) in 12 regions, monthly 1996-2019.
- `hourly_earnings_by_industry.csv`: Hourly earnings ($) in 14 industries, quarterly 1989-2019.
- `long_term_arrivals_by_citizenship.csv`: Long-term arrivals (units) from 8 countries per year, 2004-2018.
- `net_migrations_auckland_by_age.csv`: Net migrations in Auckland by age range (from 5 to 49) per year, 1991-2010.
- `passenger_movements.csv`: Passenger movements (thousands), quarterly 1975-2019.
- `police_recorded_crime.csv`: Recorded crimes (units) per year, 1878-2014.
- `population_estimate.csv`: Population estimates (thousands) per year, 1875-2011.
The following files are derived from the Stats NZ dataset by removing observations (to test support for missing observations) and/or adding procedural exogenous variables:
- `guest_nights_by_region_missing.csv`
- `hourly_earnings_by_industry_missing.csv`
- `population_estimate_missing.csv`
- `endog_deaths_by_region_exog.csv`
- `endog_guest_nights_by_region_missing_exog.csv`
- `endog_hourly_earnings_by_industry_missing_exog.csv`
The following files represent procedural exogenous variables linked to the series above (normalized):
- `exog_deaths_by_region_exog.csv`
- `exog_guest_nights_by_region_missing_exog.csv`
- `exog_hourly_earnings_by_industry_missing_exog.csv`
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/long_term_arrivals_by_citizenship.csv | Year,Australia,Japan,"Korea, Republic of",Philippines,Germany,United Kingdom,United States of America,South Africa
2004,5437,3493,2293,623,1469,12584,2038,1385
2005,5120,3054,1726,861,2060,13952,2304,1530
2006,4791,2839,1910,2648,2280,14817,2268,1817
2007,4863,2304,2061,3579,2438,12595,2351,2071
2008,4282,2188,1842,4139,2611,11617,2290,3087
2009,3886,1935,2053,2751,2588,10082,2319,1742
2010,4143,1853,1913,1972,2400,8877,2264,1225
2011,3697,1832,1725,2425,2722,9536,2506,1231
2012,3580,1773,1565,2888,2606,9334,2533,1135
2013,4417,1850,1758,3178,3338,9763,2810,1215
2014,4894,2014,1689,4657,3685,10188,2854,1593
2015,5546,2181,1867,6304,4009,10264,3191,2273
2016,6033,2370,2489,6010,4610,10843,3267,4494
2017,6499,2449,2889,6610,4496,11363,3922,5200
2018,5654,2270,2880,6440,4054,10053,3616,5493
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/net_migrations_auckland_by_age.csv | Year,5-9 years,10-14 years,15-19 years,30-34 years,35-39 years,40-44 years,45-49 years
1991,357,471,498,148,318,293,170
1992,361,483,511,251,477,396,174
1993,574,793,659,714,673,605,343
1994,974,1037,918,1234,1176,1037,568
1995,1384,1309,1132,2103,1586,1204,690
1996,941,873,811,1581,1213,971,603
1997,574,663,580,798,595,568,345
1998,404,379,277,188,343,343,241
1999,437,386,564,373,447,362,161
2000,210,287,859,50,-8,130,25
2001,537,713,1944,868,531,456,279
2002,989,1118,2514,1893,1351,1098,611
2003,852,784,1860,1615,1158,868,534
2004,439,524,1057,918,624,552,284
2005,436,436,880,734,476,359,238
2006,401,437,931,944,642,428,168
2007,356,335,987,807,411,346,126
2008,162,255,1102,632,295,125,112
2009,259,306,1301,661,343,241,142
2010,151,221,1234,483,141,118,68
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/police_recorded_crime.csv | "Year","Recorded offences"
1878,14157
1879,16374
1880,17837
1881,16635
1882,18613
1883,18775
1884,18263
1885,18955
1886,18135
1887,17752
1888,12897
1889,12945
1890,13115
1891,12674
1892,13153
1893,13165
1894,13530
1895,14010
1896,14673
1897,15219
1898,16378
1899,16865
1900,18358
1901,19909
1902,19771
1903,20736
1904,21066
1905,20249
1906,21160
1907,23204
1908,23510
1909,23930
1910,25106
1911,24999
1912,25981
1913,25415
1914,27563
1915,28412
1916,24920
1917,21724
1918,19067
1919,24278
1920,26106
1921,26551
1922,24699
1923,26119
1924,27025
1925,30470
1926,31615
1927,32144
1928,33138
1929,34250
1930,37214
1931,36680
1932,35368
1933,33302
1934,32286
1935,33168
1936,35448
1937,38629
1938,44308
1939,46378
1940,45009
1941,38559
1942,34608
1943,33192
1944,31960
1945,34000
1946,33744
1947,34628
1948,37046
1949,34381
1950,35383
1951,38689
1952,42580
1953,45950
1954,51526
1955,63550
1956,75583
1957,81998
1958,85153
1959,88071
1960,102792
1961,96384
1962,115921
1963,113942
1964,118422
1965,132311
1966,135374
1967,139737
1968,149103
1969,153914
1970,165859
1971,177924
1972,189283
1973,192079
1974,206115
1975,223362
1976,232376
1977,243619
1978,245640
1979,257922
1980,286789
1981,294015
1982,309843
1983,336155
1984,347453
1985,370844
1986,376558
1987,368712
1988,378122
1989,384928
1990,409747
1991,446417
1992,464596
1993,462536
1994,447525
1995,465052
1996,477596
1997,473547
1998,461677
1999,438074
2000,427230
2001,426526
2002,440129
2003,442489
2004,406363
2005,407496
2006,424137
2007,426384
2008,431383
2009,451405
2010,426345
2011,406056
2012,376013
2013,360411
2014,350389
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/hourly_earnings_by_industry.csv | Date,Forestry and Mining,Manufacturing,"Electricity, Gas, Water and Waste Services",Construction,Wholesale Trade,Retail Trade,Accommodation and Food Services,"Transport, Postal and Warehousing",Information Media and Telecommunications,Financial and Insurance Services,"Rental, Hiring and Real Estate Services","Professional, Scientific, Technical, Administrative and Support Services",Public Administration and Safety,Health Care and Social Assistance
1989Q1,13.65,12.11,13.65,11.38,13.44,9.5,9.71,12.35,17.14,13.83,12.61,14.79,15.19,13.68
1989Q2,13.77,12.09,14.12,11.54,13.6,9.48,9.74,12.65,17.35,14.31,12.7,14.93,15.36,13.72
1989Q3,13.77,12.13,14.32,11.72,13.77,9.56,9.85,12.84,17.56,14.52,12.92,15.19,15.66,13.68
1989Q4,14.03,12.06,14.44,11.85,14.01,9.64,9.74,13.06,17.83,14.95,13.46,15.81,16.04,13.82
1990Q1,14.14,12.74,14.68,11.95,14.54,10.04,10.15,13.37,18.29,15.25,13.85,16.27,16.17,14.24
1990Q2,14.09,12.92,14.94,12.08,14.7,9.99,10.34,13.82,18.65,15.57,13.55,15.95,16.26,14.35
1990Q3,14.3,12.82,15.11,12.4,14.94,10.2,10.41,14.33,19.06,15.86,13.73,16.17,16.43,14.27
1990Q4,14.67,12.84,15.22,12.47,15.1,10.21,10.38,14.36,19.23,16.09,13.84,16.31,16.85,14.57
1991Q1,14.54,13.21,15.22,12.6,15.2,10.48,10.61,14.33,19.29,16.11,13.88,16.35,16.86,15.05
1991Q2,14.89,13.41,15.39,12.54,15.28,10.35,10.59,14.48,19.55,16.25,13.92,16.41,17.07,14.7
1991Q3,15.34,13.22,15.57,12.7,15.53,10.55,10.66,14.67,19.66,16.48,13.97,16.49,17.24,14.92
1991Q4,15.82,13.34,15.54,12.7,15.59,10.56,10.69,14.82,19.91,16.64,14.09,16.65,17.31,14.94
1992Q1,15.36,13.58,15.63,12.79,15.63,10.59,10.67,14.92,19.88,17.03,14.11,16.69,17.31,15.47
1992Q2,15.52,13.68,15.63,12.71,15.78,10.55,10.64,14.99,20.1,17.2,14.2,16.78,17.46,15.1
1992Q3,15.61,13.39,15.62,12.7,15.83,10.62,10.64,14.95,19.95,17.48,14.24,16.83,17.57,15.15
1992Q4,15.5,13.46,15.51,12.65,16.05,10.51,10.6,14.91,20.01,17.54,14.49,17.11,17.67,15.3
1993Q1,14.91,13.69,15.4,12.66,15.89,10.64,10.64,14.79,19.85,17.58,14.25,16.84,17.47,15.38
1993Q2,14.79,13.71,15.54,12.65,16.12,10.65,10.75,14.87,20.01,17.86,14.48,17.11,17.6,15.27
1993Q3,14.73,13.47,15.49,12.65,16.13,10.66,10.69,14.82,19.83,17.88,14.48,17.12,17.68,15.23
1993Q4,14.94,13.56,15.79,12.72,16.26,10.6,10.73,14.91,19.99,18.04,14.53,17.18,17.59,15.24
1994Q1,15.15,13.72,15.97,12.91,16.14,10.75,10.75,14.96,19.95,18.15,14.63,17.28,17.62,15.53
1994Q2,14.76,13.91,16.15,12.99,16.4,10.79,10.8,15.13,20.26,18.64,14.73,17.41,17.72,15.38
1994Q3,14.79,13.83,16.39,13.06,16.57,10.88,10.88,14.92,20.2,18.56,14.9,17.59,17.94,15.01
1994Q4,15.34,13.8,16.63,13.11,16.35,10.82,10.75,15.01,20.26,18.9,14.9,17.63,17.93,15.76
1995Q1,15.76,14.06,16.62,13.27,16.43,10.9,10.91,15.06,20.23,18.76,14.84,17.52,17.95,15.83
1995Q2,15.86,14.19,16.91,13.35,16.75,10.96,11,15.01,20.42,19.65,15.12,17.86,18.07,15.75
1995Q3,15.82,14.18,17.01,13.5,16.74,11.12,11.06,15.13,20.46,19.93,15.15,17.91,18.24,15.95
1995Q4,15.69,14.31,17.09,13.54,16.84,11.11,10.96,15.26,20.67,20.24,15.28,18.07,18.36,16.25
1996Q1,16.18,14.61,17.41,13.76,17.03,11.23,11.16,15.39,20.69,20.22,15.37,18.14,18.4,16.32
1996Q2,16.65,14.71,17.48,13.89,17.23,11.37,11.23,15.32,20.96,20.62,15.78,18.63,18.48,16.37
1996Q3,16.62,14.69,17.72,14.06,17.31,11.44,11.24,15.42,20.97,20.89,15.9,18.78,18.71,16.49
1996Q4,17.06,14.82,18,14.13,17.61,11.34,11.55,15.95,21.55,20.98,16.12,19.04,18.82,16.69
1997Q1,16.84,15.1,18.28,14.28,17.88,11.54,11.54,16.05,21.64,21,16.18,19.08,19.22,16.95
1997Q2,16.98,15.26,18.57,14.27,17.96,11.64,11.58,16,21.9,21.69,16.41,19.37,19.17,17.01
1997Q3,17.34,15.21,19.17,14.44,18.11,11.77,11.66,16.11,21.94,21.88,16.62,19.62,19.11,17.26
1997Q4,17.63,15.37,19.21,14.57,18.29,11.82,11.68,16.23,22.1,21.77,16.59,19.6,19.2,17.3
1998Q1,18.32,15.52,19.2,14.65,18.25,11.96,11.88,16.19,22.03,21.73,16.51,19.46,19.36,17.41
1998Q2,18.26,15.72,19.18,14.9,18.46,12.1,11.8,16.3,22.4,22.04,16.71,19.76,19.6,17.43
1998Q3,18.26,15.73,19.4,15.02,18.6,12.1,11.79,16.43,22.58,22.64,16.81,19.88,19.6,17.47
1998Q4,18.65,15.88,19.89,15.19,18.82,11.97,11.86,16.46,22.68,22.96,17.15,20.25,20.12,17.72
1999Q1,18.6,16.07,19.8,15.11,19.01,12,11.76,16.31,22.88,22.85,17.21,20.33,20.39,17.79
1999Q2,18.84,16.16,20.33,15.33,18.89,12.07,11.83,16.19,22.84,23.16,17.5,20.65,20.34,17.55
1999Q3,18.22,16.16,19.69,15.67,19.25,12.3,11.95,16.73,23.1,23.14,17.73,20.92,20.76,17.88
1999Q4,17.55,16.21,21.19,15.75,19.02,12.1,11.66,16.62,23.38,23.08,17.28,20.37,20.93,17.87
2000Q1,17.03,16.56,21.24,15.67,19.25,12.25,11.59,16.51,23.26,23.45,17.31,20.38,20.51,17.98
2000Q2,17.37,16.62,21.72,16.07,19.13,12.22,11.95,16.11,23.21,23.84,17.55,20.66,20.78,17.72
2000Q3,18.5,16.6,21.85,16.24,19.31,12.31,12.02,16.68,23.56,24.16,17.82,20.94,21.29,17.83
2000Q4,17.89,16.78,21.74,16.35,19.06,12.39,12.07,16.8,23.67,24.53,17.74,20.88,21.53,17.94
2001Q1,17.75,17.05,21.88,16.48,19.35,12.54,12.26,16.91,23.91,24.75,17.99,21.19,21.51,18.25
2001Q2,18.19,17.04,22.2,16.26,19.88,12.7,12.27,16.78,23.96,24.85,18.39,21.59,21.77,18.24
2001Q3,18.2,17.23,23.05,16.51,20.1,12.78,12.24,17.06,24.19,24.71,18.54,21.79,22.09,18.72
2001Q4,18.82,17.38,23.28,16.62,20.21,12.79,12.31,16.95,24.32,25.13,18.62,21.91,22.1,18.56
2002Q1,19.49,17.65,23.62,16.42,20.52,12.95,12.35,17.64,24.65,25.33,18.73,22.06,21.95,18.85
2002Q2,19.51,17.82,23.12,16.53,20.07,13.01,12.39,17.16,24.72,25.32,18.46,21.7,22.27,18.78
2002Q3,19.69,17.7,24.22,17.05,20.59,13.31,12.47,17.76,25.57,25.34,18.77,22.13,22.93,19.02
2002Q4,19.66,18.04,23.98,17.26,20.34,13.32,12.44,17.68,25.69,26.25,19.35,22.77,22.73,19.28
2003Q1,20.06,18.02,24.05,16.93,20.49,13.36,12.53,17.26,25.12,26.88,19.28,22.67,22.61,19.38
2003Q2,20.09,18.29,24.14,17.21,20.9,13.53,12.83,17.37,25.44,27.18,19.76,23.2,22.64,19.37
2003Q3,20.19,18.46,24.1,17.12,21,13.44,12.98,17.8,25.63,27.42,19.87,23.39,23.22,19.64
2003Q4,20.38,18.46,24.34,17.46,21.44,13.73,13.03,17.85,25.93,27.83,19.8,23.4,23.9,20.28
2004Q1,21.1,18.66,23.59,17.44,21.19,13.77,12.97,17.9,25.85,28.01,19.48,22.98,23.55,20.03
2004Q2,20.83,18.91,24.39,17.94,21.31,13.95,13.08,18.17,26.66,29.22,20.13,23.71,24.2,20.31
2004Q3,21.21,18.76,24.64,17.79,21.63,14.15,13.08,18.63,26.99,28.11,20.23,23.92,24.39,20.67
2004Q4,21.63,18.98,24.69,17.86,21.64,14.12,12.96,18.83,27.1,28.51,19.59,23.21,24.34,20.78
2005Q1,22.03,19.13,25.53,18.13,22,14.11,13.23,18.9,27.11,29.18,20.06,23.74,24.83,21.09
2005Q2,22.03,19.34,25.04,18.23,22.3,14.16,13.58,19.83,27.68,29.55,20.62,24.47,24.71,21.53
2005Q3,22.27,19.64,25.91,18.77,22.77,14.69,13.43,19.09,28.05,29.55,21.25,25.16,25.27,21.81
2005Q4,22.86,19.83,26.41,18.91,23.19,14.74,13.36,19.61,28.3,29.48,21.31,25.25,25.61,22.14
2006Q1,23.7,20.08,26.96,18.88,23.14,14.75,13.44,20.49,28.35,30.09,21.36,25.41,25.68,22.57
2006Q2,25.53,20.39,26.46,19.14,23.58,14.91,13.83,19.81,28.81,30.76,21.74,25.78,25.96,22.4
2006Q3,24.61,20.57,26.58,19.66,23.92,15.11,13.92,20.26,29.26,31.46,22.25,26.48,26.17,23.18
2006Q4,24.7,20.64,26.92,20,24.06,15.23,14.12,20.77,29.44,31.87,22.11,26.33,26.67,23.26
2007Q1,24.73,21.04,26.97,20.44,23.97,15.4,14.19,20.86,29.45,32.11,22.05,26.26,26.67,23.76
2007Q2,24.24,21.22,26.94,20.55,24.28,15.42,14.56,20.63,29.76,32.73,22.42,26.68,27.18,23.95
2007Q3,24.13,21.26,27.44,20.99,24.78,15.73,14.46,21.29,30.01,32,22.55,26.88,27.72,24.22
2007Q4,24.59,21.69,28.04,21.1,24.76,15.97,14.69,21.82,30.58,32.53,22.44,26.78,28.55,24.68
2008Q1,25.07,22.01,28.63,21.44,25.24,15.92,14.81,22.35,30.9,32.07,22.59,26.99,28.82,25.22
2008Q2,25.43,22.4,28.38,22.05,25.39,16.37,15.24,22.47,31.47,33.44,23.18,27.68,28.82,25.47
2008Q3,26.49,22.21,28.61,22.27,25.81,16.64,15.15,23.3,31.98,34.06,23.57,28.25,29.14,25.96
2008Q4,26.64,22.37,29.03,22.37,26.41,16.68,15.49,22.92,32.09,34.92,23.69,28.39,29.21,26.37
2009Q1,27.85,22.54,30.06,22.68,26.49,16.76,15.49,23.51,32.47,35.22,24.08,28.92,29.91,26.53
2009Q2,28.32,23.02,29.98,22.42,26.84,17.02,15.54,23.3,32.68,35.66,23.97,28.85,30.04,26.78
2009Q3,26.82,23.2,30.2,22.73,26.9,16.87,15.76,24.26,32.4,36.07,24.83,29.14,30.46,26.55
2009Q4,27.56,23.05,30.07,22.93,26.29,16.84,16.39,23.86,33.1,35.92,24.48,29.14,30.81,27.05
2010Q1,26.18,23.49,30.43,22.96,26.03,16.72,16.03,23.72,32.98,35.66,23.87,28.98,30.57,27.11
2010Q2,25.74,23.68,30.23,23.08,26.44,17.05,16.34,23.47,32.34,35.62,24.55,29.14,31.13,26.79
2010Q3,25.68,23.79,30.21,23.31,26.45,17.18,16.2,24.1,32.47,35.37,25.16,29.72,31.48,27.04
2010Q4,25.9,24.04,30.74,23.22,26.68,17.37,16.33,24.09,33.07,35.61,24.98,30.01,31.44,27.27
2011Q1,26.22,24.3,31.39,23.46,26.44,17.53,16.65,23.72,33.52,35.46,25.85,29.88,31.41,27.75
2011Q2,26.67,24.51,31.63,23.73,26.9,17.66,16.4,23.78,32.88,36.63,25.7,29.4,32,27.92
2011Q3,27.79,24.81,31.13,23.93,27.01,17.82,16.32,24.3,33.19,37.57,26.88,30.47,32.1,27.62
2011Q4,27.27,24.88,30.89,24.06,26.83,17.9,16.43,24.46,34.28,37.64,26.38,30.68,32.43,28.06
2012Q1,27.1,25.29,34.01,23.75,26.76,18.04,16.77,25.1,34.64,38.36,26.57,31.42,32.48,28.29
2012Q2,28.69,25.27,34.58,24.42,27.01,18.27,16.95,24.92,33.61,38.41,27.76,30.34,32.57,28.59
2012Q3,27.82,25.74,32.81,24.35,27.67,18.35,16.97,25.06,34.78,39.48,29.21,31.7,32.66,28.11
2012Q4,29.96,25.61,33.1,24.34,27.82,18.38,16.99,25.41,34.92,39.01,30.26,31.26,33.29,28.29
2013Q1,30.13,26.12,33.45,24.6,27.78,18.43,17.24,25.99,35.74,39.24,27.26,31.68,32.79,28.21
2013Q2,29.34,25.97,33.54,25.06,28.02,18.67,17.16,26.21,35.71,39.41,28.05,30.94,33.33,28.36
2013Q3,29.21,26.22,36.22,25.72,27.94,18.7,17.2,26.76,36.64,39.63,28.32,32.19,33.89,28.41
2013Q4,30.16,26.29,35.35,25.54,28.17,18.97,17.41,26.5,37.01,39.86,28.69,31.51,34.46,28.94
2014Q1,29.29,26.6,35.07,26.02,29.01,19,17.16,26.57,38.91,40.75,28.84,31.03,34.1,29.29
2014Q2,30.55,26.93,35.65,26.2,28.63,19.11,17.55,26.85,37.1,41.87,29.5,31.03,33.78,28.96
2014Q3,31.24,27.32,36.23,26.01,28.75,19.27,17.92,27.15,38.44,42.11,29.84,32.31,34.14,29.2
2014Q4,31.39,26.93,37.14,26.48,29.1,19.7,18,27.21,38.74,42.77,29.81,32.55,34.88,29.67
2015Q1,32.25,27.19,37.48,26.58,28.9,19.7,17.9,27.79,37.93,42.07,29.68,32.53,34.79,29.7
2015Q2,31.9,27.71,37.41,26.65,29.31,19.97,18.24,27.84,38.41,42.51,28.83,32.39,35,29.7
2015Q3,32.49,28.03,37.7,26.98,29.86,19.95,18.41,27.73,38.73,41.62,29.74,33.12,35.34,29.66
2015Q4,32.89,28.05,37.84,26.87,29.62,20,18.59,28.14,39.52,42.01,29.77,33.57,35.46,30.12
2016Q1,32.71,28.18,38.21,26.93,29.51,20.23,18.86,27.85,38.76,42.47,29.52,33.54,35.64,30.22
2016Q2,32.23,28.13,38.34,27.5,29.94,20.43,19.13,28.05,39.13,42.74,30.07,33.74,35.88,29.91
2016Q3,31.96,28.51,38.46,27.5,30.27,20.46,19.18,28.24,39.31,42.3,30.04,33.72,36.5,30.1
2016Q4,34.58,28.53,38.19,27.37,30.4,20.72,18.99,28.56,40.07,41.9,30.04,33.38,36.9,29.83
2017Q1,30.8,29.08,38.27,27.61,30.75,21.01,18.83,27.71,39.64,42.27,29.86,33.43,37.07,30.52
2017Q2,33.77,28.28,37.89,28.01,31.15,21.12,19.41,27.77,40.75,42.42,30.34,33.64,37.05,30.6
2017Q3,32.45,28.92,38.46,28.43,31.23,21.24,19.24,27.89,40.74,42.08,31.64,34.49,37.26,31.18
2017Q4,33.29,29.02,39.63,28.47,31.73,21.37,19.41,28.49,40.49,42.38,32.5,34.64,37.84,31.45
2018Q1,33.66,29.3,40.01,28.92,32.5,21.7,19.49,28.9,40.18,44.42,32.63,34.39,37.85,32.12
2018Q2,33.51,29.11,38.95,28.98,31.77,22.02,20,28.51,39.74,45.06,33.2,34.16,37.85,32
2018Q3,33.21,29.74,39.41,29.44,32.28,22.19,20.26,29.04,40.02,43.99,32.36,35.2,37.91,32.29
2018Q4,32.9,29.77,39.77,29.69,32.73,22.34,20.4,29.43,40.05,45.06,32.33,36.12,38.32,32.7
2019Q1,33.7,30.41,40.86,29.28,33.24,22.69,20.59,29.7,40.17,45.84,32.11,36.67,38.44,33.23
2019Q2,32.93,30.39,41.71,29.66,33.3,22.82,21.5,30.12,40.64,46.8,32.63,36.62,39.22,33.59
2019Q3,34.39,30.8,41.01,30.17,33.85,23.2,21.45,29.98,40.85,45.63,33.22,36.66,39.86,33.85
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/guest_nights_by_region.csv | ,Northland,Auckland,Waikato,Bay of Plenty,"Hawke's Bay, Gisborne","Taranaki, Manawatu, Wanganui",Wellington,"Nelson, Marlborough, Tasman",Canterbury,West Coast,Otago,Southland
1996M07,66,257,124,159,49,93,111,52,209,27,175,21
1996M08,58,266,111,138,45,89,105,54,210,28,211,23
1996M09,79,264,140,174,62,115,113,79,256,45,214,31
1996M10,96,280,147,177,61,111,119,79,256,57,194,41
1996M11,104,334,159,199,66,112,132,100,324,64,212,54
1996M12,185,337,219,257,87,111,123,171,364,70,262,66
1997M01,279,386,343,419,145,151,142,279,440,96,360,76
1997M02,158,362,205,238,91,122,144,165,363,83,286,78
1997M03,147,377,225,251,90,129,145,150,348,81,282,73
1997M04,108,302,151,198,73,104,119,105,278,57,213,45
1997M05,73,257,112,142,50,87,104,68,192,32,138,29
1997M06,55,224,102,119,45,71,92,56,159,23,112,19
1997M07,70,259,137,164,50,101,102,63,213,27,177,24
1997M08,62,266,113,134,48,100,95,63,202,26,215,22
1997M09,70,239,121,149,52,106,103,67,211,38,182,27
1997M10,102,292,172,210,74,132,127,87,288,55,199,46
1997M11,105,303,152,188,64,108,125,101,300,60,208,54
1997M12,218,301,249,258,91,104,115,179,358,71,244,67
1998M01,380,340,381,429,155,147,137,303,447,92,360,85
1998M02,158,349,219,252,92,115,142,178,367,83,259,77
1998M03,134,339,193,205,82,112,147,147,327,73,246,62
1998M04,130,291,193,216,83,113,135,123,301,64,246,54
1998M05,75,255,129,148,54,85,119,74,189,36,136,34
1998M06,53,224,108,118,39,69,98,53,145,23,104,19
1998M07,60,267,133,170,51,83,137,66,206,30,192,22
1998M08,58,251,115,140,46,81,115,57,201,30,223,24
1998M09,66,253,139,160,56,94,123,72,211,42,208,31
1998M10,99,307,170,218,77,113,146,95,292,62,206,44
1998M11,107,330,154,191,65,97,136,109,303,70,227,57
1998M12,187,312,250,270,90,99,134,189,344,86,271,60
1999M01,280,367,381,452,149,138,163,328,454,116,432,87
1999M02,145,360,205,234,88,111,146,182,358,97,294,82
1999M03,129,379,211,208,84,115,157,158,333,85,273,79
1999M04,119,309,187,255,81,115,153,129,317,71,244,58
1999M05,66,266,117,150,46,76,120,69,195,35,141,32
1999M06,62,253,136,156,51,84,127,67,190,34,126,25
1999M07,61,302,140,176,54,88,134,62,216,33,225,25
1999M08,59,281,119,146,50,98,114,62,219,29,249,24
1999M09,81,326,158,195,62,130,142,85,279,50,256,32
1999M10,99,335,178,205,70,103,138,94,302,58,216,44
1999M11,110,396,170,209,72,96,144,120,338,76,254,60
1999M12,176,368,251,282,94,102,144,181,340,84,282,60
2000M01,283,459,405,469,157,151,173,316,430,118,404,82
2000M02,149,436,241,251,95,114,159,192,383,109,333,86
2000M03,138,393,220,230,89,116,180,166,362,98,292,74
2000M04,136,355,223,246,92,112,162,145,357,89,302,65
2000M05,67,291,124,146,48,81,111,75,216,39,163,35
2000M06,56,281,134,145,49,80,110,67,194,32,141,26
2000M07,60,317,157,183,58,106,131,77,248,43,257,28
2000M08,58,288,135,156,50,108,138,72,227,38,286,28
2000M09,74,284,167,174,63,119,137,86,254,49,261,37
2000M10,102,348,192,196,77,111,146,107,314,67,240,47
2000M11,119,400,188,201,75,106,170,124,361,89,283,69
2000M12,190,401,291,296,107,113,163,204,408,101,348,74
2001M01,289,483,403,499,170,152,185,342,501,138,486,93
2001M02,152,409,235,259,108,118,177,210,412,117,360,89
2001M03,144,433,240,259,104,116,187,180,413,115,338,87
2001M04,124,354,203,249,95,114,158,143,358,87,296,63
2001M05,68,317,126,153,53,84,127,79,231,51,188,42
2001M06,59,298,140,162,54,86,130,71,210,40,155,27
2001M07,65,340,160,197,65,112,146,82,266,46,291,32
2001M08,63,339,142,159,53,116,132,80,278,43,322,31
2001M09,76,335,163,184,70,126,152,96,290,57,276,39
2001M10,101,380,196,214,80,118,153,113,332,78,257,54
2001M11,121,411,195,203,81,109,174,135,367,91,272,69
2001M12,210,437,280,306,118,120,174,216,427,104,361,78
2002M01,314,521,400,490,187,159,192,329,516,133,501,98
2002M02,167,462,251,270,115,127,194,221,443,115,370,95
2002M03,179,507,274,301,125,153,206,208,511,121,385,98
2002M04,124,402,209,227,87,104,161,141,350,87,302,75
2002M05,81,364,149,174,61,91,138,83,251,51,194,45
2002M06,64,333,150,166,59,91,134,74,222,41,172,32
2002M07,69,370,175,201,68,119,158,77,297,51,305,36
2002M08,68,369,154,168,61,124,137,77,277,43,317,34
2002M09,80,367,168,187,75,133,147,99,310,58,264,42
2002M10,104,437,199,213,82,135,170,112,367,79,251,57
2002M11,120,505,210,222,81,115,181,140,408,101,299,80
2002M12,204,479,287,310,120,120,168,230,447,113,363,85
2003M01,318,543,393,462,188,165,191,340,547,147,486,104
2003M02,171,503,279,269,116,136,204,227,482,131,390,105
2003M03,155,510,267,245,111,149,200,201,462,131,369,99
2003M04,148,411,252,260,111,136,176,169,379,109,322,78
2003M05,85,356,158,167,67,101,156,93,248,57,190,45
2003M06,64,307,142,158,59,83,142,76,214,42,159,31
2003M07,75,370,181,210,75,131,154,81,286,53,320,39
2003M08,70,366,157,169,65,126,142,82,269,48,301,36
2003M09,90,367,177,205,76,140,165,93,315,63,287,47
2003M10,110,406,195,227,89,133,172,114,375,82,260,60
2003M11,123,472,214,227,89,113,189,139,413,106,309,83
2003M12,214,486,285,309,142,131,182,229,482,130,397,98
2004M01,322,556,406,502,223,181,208,331,579,165,514,111
2004M02,177,492,273,281,135,144,214,219,513,149,399,112
2004M03,159,515,266,265,126,146,212,197,504,141,368,103
2004M04,142,448,242,268,110,132,182,156,433,117,341,84
2004M05,84,382,148,176,69,98,150,88,290,60,204,43
2004M06,77,350,165,177,65,102,145,77,256,46,203,32
2004M07,85,387,187,215,73,158,171,84,319,53,321,37
2004M08,71,371,159,185,65,136,145,78,293,51,324,37
2004M09,90,380,186,216,78,154,167,98,355,77,317,47
2004M10,109,434,205,230,86,141,169,107,383,89,274,59
2004M11,130,467,219,240,92,133,193,150,459,118,326,87
2004M12,218,479,297,319,141,138,194,228,498,130,386,99
2005M01,329,569,415,535,228,186,218,358,635,167,518,121
2005M02,172,489,269,288,128,157,220,223,522,152,414,117
2005M03,187,531,301,312,134,166,236,217,557,156,421,117
2005M04,137,449,248,256,103,134,203,145,425,108,323,80
2005M05,83,369,150,181,67,100,158,86,274,63,218,42
2005M06,73,351,163,183,67,115,166,80,302,55,221,35
2005M07,84,415,193,222,73,147,191,82,310,57,337,36
2005M08,73,361,157,173,66,147,158,80,282,50,336,38
2005M09,87,383,185,197,79,151,174,91,325,66,327,44
2005M10,111,412,209,230,102,145,187,116,387,94,283,58
2005M11,127,463,212,237,90,129,205,148,429,117,338,83
2005M12,195,457,280,313,141,142,195,217,469,126,400,88
2006M01,312,526,395,488,225,194,228,377,616,169,550,119
2006M02,184,483,274,301,136,150,234,241,524,152,434,115
2006M03,170,497,277,274,133,152,242,203,508,144,404,105
2006M04,149,430,244,265,113,127,209,157,443,118,372,83
2006M05,82,365,148,177,65,102,170,86,289,63,229,44
2006M06,72,346,162,172,67,103,158,75,249,51,202,31
2006M07,84,356,189,204,72,154,188,81,306,58,332,35
2006M08,73,373,158,189,74,150,168,78,281,54,348,36
2006M09,91,393,188,208,77,169,190,92,341,69,312,43
2006M10,126,455,203,240,102,140,206,121,395,96,294,62
2006M11,135,488,240,242,98,123,228,153,445,118,343,85
2006M12,205,477,282,340,153,139,208,227,496,132,412,91
2007M01,357,557,402,506,231,189,246,364,605,173,555,115
2007M02,199,515,295,313,134,172,260,246,544,169,455,118
2007M03,179,564,294,304,138,163,264,213,517,147,443,115
2007M04,141,458,246,281,117,133,215,167,447,115,378,81
2007M05,89,377,162,193,75,102,180,99,293,68,238,48
2007M06,75,373,161,178,72,104,171,85,272,55,213,35
2007M07,78,413,185,210,77,146,184,86,336,64,373,36
2007M08,75,414,171,174,74,150,177,89,321,59,371,37
2007M09,92,426,195,226,81,156,202,94,352,74,322,45
2007M10,114,451,198,239,103,131,204,117,406,91,297,59
2007M11,126,537,210,252,96,133,228,152,472,116,346,84
2007M12,210,480,262,349,151,143,219,232,510,132,407,91
2008M01,349,588,400,529,216,196,253,378,649,173,556,112
2008M02,197,557,281,336,145,163,277,265,562,168,466,122
2008M03,190,564,289,319,152,177,295,244,593,163,471,116
2008M04,132,472,221,256,104,130,233,157,432,114,352,80
2008M05,89,416,165,193,75,107,202,100,315,68,236,50
2008M06,67,372,140,164,58,97,174,84,270,48,194,31
2008M07,69,396,165,203,71,149,194,83,347,59,366,37
2008M08,58,415,161,172,61,149,172,80,316,57,353,35
2008M09,83,413,176,189,78,151,204,93,349,69,298,45
2008M10,112,477,215,238,105,152,237,117,416,97,288,59
2008M11,122,506,210,220,100,133,235,139,458,114,319,83
2008M12,192,486,278,325,148,137,212,226,491,126,391,92
2009M01,313,555,394,510,212,204,240,362,616,170,561,115
2009M02,182,515,270,300,141,155,250,250,530,150,420,113
2009M03,165,534,269,270,141,155,270,207,531,138,414,107
2009M04,151,443,257,277,124,143,238,165,453,113,357,80
2009M05,91,382,161,191,78,111,206,103,318,75,239,49
2009M06,61,333,141,145,56,98,169,73,252,50,211,29
2009M07,71,421,175,195,72,159,193,82,351,61,396,36
2009M08,65,383,156,162,65,146,174,81,327,56,374,37
2009M09,85,413,181,199,79,156,211,91,360,73,317,47
2009M10,116,482,211,233,99,139,236,121,426,97,300,65
2009M11,126,488,211,223,98,120,232,146,460,116,342,89
2009M12,208,491,281,348,136,148,218,235,516,135,430,96
2010M01,332,568,435,522,219,193,263,393,647,170,579,118
2010M02,183,524,273,303,135,146,267,244,536,153,441,120
2010M03,165,539,270,290,135,159,267,215,542,142,441,110
2010M04,146,468,237,276,121,139,222,163,469,112,386,83
2010M05,87,404,148,183,66,98,184,86,291,62,227,44
2010M06,66,356,147,180,61,101,170,70,265,47,234,33
2010M07,72,416,172,218,70,144,190,77,350,59,408,39
2010M08,64,427,143,172,62,129,161,78,324,50,382,33
2010M09,78,438,165,206,73,137,205,90,361,64,324,42
2010M10,120,491,215,238,96,138,226,114,408,88,292,56
2010M11,118,545,219,242,95,130,224,145,465,109,330,80
2010M12,202,515,277,321,132,137,226,228,498,120,420,89
2011M01,327,611,418,485,204,187,249,381,641,163,558,111
2011M02,172,556,273,295,138,152,265,238,461,152,465,109
2011M03,160,582,269,280,123,165,279,204,377,131,427,98
2011M04,150,508,253,272,104,141,232,161,337,104,379,74
2011M05,85,459,146,188,67,98,197,93,244,56,221,44
2011M06,67,401,154,181,62,94,187,80,219,49,210,32
2011M07,82,485,181,231,63,138,220,90,280,54,399,36
2011M08,70,492,163,214,71,134,217,93,269,47,393,33
2011M09,85,488,183,207,86,134,221,91,265,59,323,48
2011M10,108,557,221,230,99,142,238,128,311,78,285,48
2011M11,123,536,227,246,98,127,243,149,372,107,358,77
2011M12,196,578,290,321,133,143,220,220,427,123,470,87
2012M01,321,612,393,437,186,197,254,354,519,161,608,109
2012M02,180,582,287,280,136,166,271,231,402,140,436,97
2012M03,142,620,274,261,123,175,266,195,392,123,414,86
2012M04,126,537,240,263,90,137,209,151,343,103,391,69
2012M05,80,458,160,172,66,99,188,92,242,57,237,42
2012M06,67,453,167,176,61,98,176,80,225,44,230,32
2012M07,71,448,169,191,60,136,189,84,271,57,398,35
2012M08,60,476,146,169,54,118,172,76,262,46,378,35
2012M09,79,470,173,193,68,131,213,91,284,59,311,39
2012M10,112,574,213,235,97,133,236,123,348,86,317,53
2012M11,117,595,219,224,98,125,247,138,366,100,330,70
2012M12,194,602,290,311,133,141,247,221,435,130,492,82
2013M01,315,636,429,442,180,183,268,347,528,135,577,105
2013M02,168,604,281,310,128,148,257,234,420,142,462,104
2013M03,170,655,315,312,138,179,275,216,436,130,476,96
2013M04,119,546,222,247,95,122,223,142,353,97,388,69
2013M05,83,497,158,192,70,101,216,103,275,56,250,42
2013M06,69,442,151,180,58,96,195,82,246,43,262,30
2013M07,70,489,186,212,68,152,212,85,290,53,427,34
2013M08,64,490,159,188,57,131,189,85,273,48,420,33
2013M09,77,522,185,215,72,139,195,92,298,57,323,40
2013M10,108,571,223,242,96,138,239,116,370,76,330,55
2013M11,116,616,227,254,96,142,243,151,405,107,366,81
2013M12,206,601,312,341,133,157,241,224,446,136,491,97
2014M01,345,669,436,495,185,210,256,364,562,159,603,115
2014M02,177,652,305,313,126,160,264,259,478,153,524,103
2014M03,149,639,289,283,125,166,265,215,459,133,476,105
2014M04,155,566,269,297,109,135,242,167,410,115,470,82
2014M05,97,538,182,210,71,109,212,107,303,59,279,48
2014M06,69,473,160,168,57,90,182,78,246,42,277,31
2014M07,75,505,188,221,69,138,208,88,315,57,469,40
2014M08,70,525,167,188,60,124,200,77,290,51,420,38
2014M09,85,557,191,232,75,141,224,91,300,70,339,48
2014M10,120,595,242,272,93,143,254,121,385,93,369,67
2014M11,133,641,239,265,102,136,253,157,416,118,420,91
2014M12,230,632,326,354,137,169,248,233,498,151,538,112
2015M01,363,689,467,488,185,207,276,384,583,178,628,118
2015M02,200,660,323,325,132,169,274,271,510,170,554,116
2015M03,164,676,311,309,141,170,274,221,505,151,512,119
2015M04,148,599,280,303,123,141,265,163,428,117,467,90
2015M05,91,536,190,229,79,118,231,105,317,69,321,52
2015M06,66,470,160,179,64,107,187,80,258,44,286,34
2015M07,75,518,207,238,70,148,227,93,329,58,460,43
2015M08,66,537,177,195,60,141,204,91,301,52,434,42
2015M09,95,562,209,239,72,144,222,99,332,74,378,51
2015M10,124,588,251,281,98,147,263,130,398,95,391,69
2015M11,132,633,256,290,97,143,271,162,436,124,466,97
2015M12,231,650,352,381,145,171,272,252,546,149,588,113
2016M01,352,715,469,514,200,216,300,390,649,188,710,141
2016M02,201,679,345,354,147,190,303,289,544,183,595,136
2016M03,213,689,351,363,150,193,297,257,549,172,606,148
2016M04,144,634,273,313,127,149,265,173,428,126,503,106
2016M05,89,560,177,226,83,105,216,112,300,67,337,58
2016M06,78,509,192,225,74,108,209,90,266,51,319,40
2016M07,92,561,217,256,86,145,236,107,345,60,485,45
2016M08,83,567,198,225,73,143,218,101,302,54,443,42
2016M09,100,575,229,264,86,151,239,113,345,76,402,55
2016M10,131,641,275,294,108,146,270,138,419,102,409,71
2016M11,148,642,288,307,118,146,259,178,442,133,494,110
2016M12,241,662,383,410,161,188,275,265,509,164,603,122
2017M01,364,695,497,541,220,222,297,410,598,195,715,146
2017M02,222,663,357,360,162,190,286,279,525,173,592,139
2017M03,188,694,341,356,154,191,304,251,499,167,583,138
2017M04,168,645,309,346,142,162,265,192,443,131,545,114
2017M05,107,578,193,246,96,120,229,120,326,71,353,64
2017M06,83,519,202,243,85,122,230,103,289,50,340,44
2017M07,91,561,215,270,89,153,238,106,354,61,489,46
2017M08,83,537,187,226,79,138,204,114,316,62,449,45
2017M09,96,552,220,268,94,160,237,121,361,80,423,57
2017M10,146,601,283,321,122,168,271,154,451,107,439,85
2017M11,151,658,300,324,124,158,274,187,475,137,501,117
2017M12,253,661,396,424,168,188,279,264,572,165,632,138
2018M01,369,706,501,550,211,223,302,379,661,189,723,155
2018M02,213,663,356,365,141,181,290,274,590,174,630,153
2018M03,213,705,371,400,148,205,315,265,596,175,630,154
2018M04,154,598,287,332,126,159,272,184,507,133,558,121
2018M05,109,558,198,252,95,127,231,117,345,72,369,67
2018M06,82,489,194,238,90,123,220,91,301,56,338,47
2018M07,91,548,223,251,89,169,235,101,375,61,483,52
2018M08,87,569,203,223,85,149,217,93,345,56,451,47
2018M09,97,585,228,264,98,160,253,114,371,74,439,64
2018M10,143,644,287,320,121,177,276,166,467,106,473,87
2018M11,155,693,292,322,122,170,278,180,508,140,552,123
2018M12,254,669,403,409,156,194,287,271,600,163,650,144
2019M01,366,726,530,519,213,226,306,376,669,176,708,155
2019M02,207,664,352,355,145,191,296,278,592,171,610,153
2019M03,192,699,367,375,159,202,321,234,570,150,594,145
2019M04,180,616,319,372,141,175,287,198,525,117,556,119
2019M05,109,570,206,256,102,125,243,116,345,70,356,66
2019M06,82,508,200,243,91,122,228,96,301,52,330,46
2019M07,91,559,222,270,97,163,256,107,354,55,472,53
2019M08,91,589,204,231,91,151,242,102,348,53,465,51
2019M09,97,619,233,260,105,172,239,116,369,65,430,64
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/exog_hourly_earnings_by_industry_missing_exog.csv | ,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27
0,0.0,-0.0,0.0,-0.0,0.0,0.0,0.0,-0.0,0.0,-0.0,0.0,-0.0,0.0,0.0,0.0,0.0,0.0,-0.0,0.0,-0.0,0.0,0.0,0.0,0.0,0.0,-0.0,0.0,0.0
1,0.09973465873441716,-0.008193039837578154,0.14935054265784684,-0.008194815669663652,0.19847280172665502,0.00819052427043743,0.24715922057524256,-0.008089077522064248,0.2952268271106974,-0.008194360406357605,0.3432034574627649,-0.00819461596484854,0.3890550169554975,0.008194386472961343,0.4358505061494723,0.008185445103196303,0.4789512779113474,-0.00819276885829072,0.5221701726051636,-0.008183693061075874,0.5641280675148789,0.008194542812409741,0.6054198916439166,0.008169544057826118,0.6435763695987253,-0.008194726758847215,0.6809594564678602,0.00816459311277819
2,0.19847280172665502,-0.016386079675156307,0.29534699410826415,-0.016389631339327304,0.3890331191656121,0.01638104854087486,0.4789512779113474,-0.016178155044128496,0.564081921015252,-0.01638872081271521,0.644791925790795,-0.01638923192969708,0.7166868012776797,0.016388772945922687,0.7849206506424798,0.016370890206392607,0.8406385789802162,-0.01638553771658144,0.8903257536678935,-0.016367386122151748,0.9311899709804021,0.016389085624819482,0.9639299341140939,0.016339088115652236,0.984468716416999,-0.01638945351769443,0.996500908903801,0.01632918622555638
3,0.2952278700904258,-0.024579119512734462,0.43471058880134983,-0.02458444700899096,0.5640839138084358,0.02457157281131229,0.6809644645497407,-0.02426723256619274,0.782549256893179,-0.024583081219072815,0.8681964236416051,-0.02458384789454562,0.931169498191638,0.02458315941888403,0.9777085447461252,0.024556335309588912,0.9965082376193338,-0.02457830657487216,0.9958789026408166,-0.02455107918322762,0.9729604270787925,0.024583628437229218,0.9293181153254614,0.024508632173478356,0.862350043156335,-0.024584180276541644,0.777297789267253,0.024493779338334572
4,0.3890331191656121,-0.032772159350312614,0.5643115208503803,-0.03277926267865461,0.716646462877443,0.03276209708174972,0.8406385789802162,-0.03235631008825699,0.9311137982811112,-0.03277744162543042,0.9863281337304184,-0.03277846385939416,0.998641005901436,0.032777545891845374,0.9758290014817222,0.032741780412785214,0.9083979252494543,-0.03277107543316288,0.8076966173381208,-0.032734772244303496,0.6748478137513148,0.032778171249638964,0.5157002565033916,0.03267817623130447,0.3346546700081299,-0.03277890703538886,0.14097937151035483,0.03265837245111276
5,0.4789512779113474,-0.040965199187890766,0.6812392327245661,-0.04097407834831826,0.8406385789802162,0.04095262135218715,0.9480458581364921,-0.04044538761032124,0.9965047171593899,-0.04097180203178803,0.9848630458384661,-0.040973079824242704,0.9084490569035975,0.04097193236480672,0.7796562488057206,0.04092722551598152,0.597880119293297,-0.0409638442914536,0.38128344268970554,-0.04091846530537937,0.14099144358699423,0.0409727140620487,-0.10823687710557638,0.04084772028913059,-0.3504340235683619,-0.04097363379423608,-0.5709917155173455,0.04082296556389095
6,0.5640839138084358,-0.049158239025468925,0.7828677800964029,-0.04916889401798192,0.9311170877274866,0.04914314562262458,0.9965082376193338,-0.04853446513238548,0.972880837495202,-0.04916616243814563,0.8639788085744062,-0.04916769578909124,0.6748329768037858,0.04916631883776806,0.42824941865481525,0.049112670619177824,0.14098040833601205,-0.04915661314974432,-0.15758964784163237,-0.04910215836645524,-0.44211729417156453,0.049167256874458436,-0.6880315050419809,0.04901726434695671,-0.8707081201779896,-0.04916836055308329,-0.9765559364367731,0.048987558676669145
7,0.6435804097001016,-0.05735127886304707,0.8669148048900676,-0.05736370968764557,0.9844748964969458,0.05733366989306202,0.9830125613962186,-0.05662354265444973,0.8623524101011013,-0.057360522844503235,0.6383331886679064,-0.057362311753939785,0.3346756078980445,0.057360705310729405,-0.008424352581826635,0.05729811572237413,-0.35043622344556025,-0.05734938200803504,-0.6499815211051329,-0.05728585142753111,-0.8707817412815769,0.05736179968686818,-0.9872245910049116,0.057186808404782824,-0.9814745826782354,-0.05736308731193051,-0.8580785015295292,0.05715215178944733
8,0.716646462877443,-0.06554431870062523,0.9314927921233287,-0.06555852535730922,0.9985847978332577,0.06552419416349944,0.9083979252494543,-0.06471262017651398,0.6747926102147798,-0.06555488325086084,0.3352867497045722,-0.06555692771878832,-0.05831968064560095,0.06555509178369075,-0.4434207863978218,0.06548356082557043,-0.7560538458370958,-0.06554215086632575,-0.9506607234136952,-0.06546954448860699,-0.9952570736139368,0.06555634249927793,-0.8837954998144751,0.06535635246260894,-0.630638212981515,-0.06555781407077772,-0.27913703994139577,0.06531674490222553
9,0.7825520214895382,-0.07373735853820339,0.9751514594248076,-0.07375334102697287,0.9728842744965123,0.07371471843393687,0.7773035058645293,-0.07280169769857822,0.42695559615988093,-0.07374924365721844,-0.008414741364378654,-0.07375154368363686,-0.44210757394878014,0.07374947825665208,-0.7901295718879009,0.07366900592876674,-0.9765631184680146,-0.07373491972461647,-0.9709416366328761,-0.07365323754968287,-0.7720604764071538,0.07375088531168766,-0.4199259662608264,0.07352589652043506,0.016797162274309777,-0.07375254082962494,0.4495955714985163,0.07348133801500371
10,0.8406385789802162,-0.08193039837578153,0.9969103272494149,-0.08194815669663652,0.9083979252494543,0.0819052427043743,0.597880119293297,-0.08089077522064247,0.14097991028104204,-0.08194360406357606,-0.351095906551303,-0.08194615964848541,-0.756096402389334,0.08194386472961344,-0.9795189805814214,0.08185445103196304,-0.9579756808685278,-0.0819276885829072,-0.7048423860225389,-0.08183693061075874,-0.2791609424719786,0.0819454281240974,0.21520298315387307,0.08169544057826118,0.656332569649626,-0.08194726758847216,0.9370651902622528,0.0816459311277819
11,0.8903257536678935,-0.09012343821335969,0.9962807383351455,-0.09014297236630017,0.8076966173381208,0.09009576697481174,0.38128344268970554,-0.08897985274270673,-0.15758909110961367,-0.09013796446993366,-0.651205087049036,-0.09014077561333396,-0.9507142339446707,0.09013825120257478,-0.973880483640077,0.09003989613515935,-0.7048423860225389,-0.09012045744119791,-0.23084919990941727,-0.09002062367183461,0.31125754017940216,0.09013997093650715,0.7625651828372738,0.0898649846360873,0.9871845140394278,-0.09014199434731937,0.9216847667495226,0.0898105242405601
12,0.9311170877274866,-0.09831647805093785,0.973276831891701,-0.09833778803596384,0.6747949941275839,0.09828629124524917,0.14098040833601205,-0.09706893026477097,-0.4420811283313496,-0.09833232487629126,-0.87235267193123,-0.09833539157818248,-0.9952351922702056,0.09833263767553611,-0.7743367384816401,0.09822534123835565,-0.27913909284063765,-0.09831322629948865,0.3112331783813484,-0.09820431673291048,0.7929448091104607,0.09833451374891687,0.998928591634831,0.09803452869391342,0.8537481562929551,-0.09833672110616658,0.4117077786201266,0.09797511735333829
13,0.9626050076328949,-0.106509517888516,0.9284152260643215,-0.10653260370562749,0.5149914239399227,0.10647681551568658,-0.10808810498050356,-0.10515800778683522,-0.687083374987902,-0.10652668528264887,-0.9877235049343619,-0.10653000754303102,-0.8826303969734252,0.10652702414849748,-0.4206180611819545,0.10641078634155196,0.21490718558485614,-0.10650599515777937,0.7615170332066578,-0.10638800979398635,0.9976336430538367,0.10652905656132662,0.8278965525785565,0.10620407275173954,0.31878070045897905,-0.10653144786501381,-0.31920076905929495,0.10613971046611648
14,0.9844748964969458,-0.11470255772609414,0.8627034158040058,-0.11472741937529114,0.33465677082919276,0.11466733978612403,-0.35043622344556025,-0.11324708530889946,-0.8707105100636796,-0.11472104568900647,-0.9833283448153961,-0.11472462350787957,-0.6306776692768031,0.11472141062145881,0.016848109704656875,0.11459623144474826,0.6563366898290532,-0.11469876401607008,0.9871907111680082,-0.11457170285506223,0.8538203433783806,0.11472359937373636,0.3192214731299795,0.11437361680956565,-0.3661142998867028,-0.11472617462386102,-0.878819077933425,0.11430430357889466
15,0.9965082376193338,-0.12289559756367231,0.7776171467029437,-0.12292223504495478,0.14098040833601205,0.12285786405656146,-0.5709959148470373,-0.12133616283096371,-0.9765596684701005,-0.1229154060953641,-0.8597001248435612,-0.12291923947272812,-0.27915480494560424,0.12291579709442016,0.4509597243093111,0.12278167654794456,0.937072081860799,-0.12289153287436079,0.921691545233522,-0.12275539591613811,0.4117430331953406,0.1229181421861461,-0.3196424667630778,0.12254316086739177,-0.8788200243019274,-0.12292090138270824,-0.966843505096389,0.12246889669167285
16,0.9985847978332577,-0.13108863740125046,0.6750672729300327,-0.13111705071461843,-0.05831639814812038,0.1310483883269989,-0.7560538458370958,-0.12942524035302796,-0.9951756601321037,-0.13110976650172168,-0.631829332203657,-0.13111385543757664,0.11644046502904361,0.1311101835673815,0.7952826443595055,0.13096712165114086,0.9883795467216366,-0.1310843017326515,0.5843385770049826,-0.13093908897721399,-0.17416796440427548,0.13111268499855586,-0.828145851366662,0.1307127049252179,-0.9782029593462851,-0.13111562814155545,-0.5360381833105293,0.13063348980445105
17,0.9906838288354797,-0.1392816772388286,0.557356843565802,-0.1393118663842821,-0.25528831386612294,0.13923891259743632,-0.8941040105802966,-0.13751431787509222,-0.9248955739576221,-0.13930412690807928,-0.32734634285383213,-0.1393084714024252,0.49365234586922163,0.13930457004034283,0.9812601810204711,0.13915256675433718,0.7976972276032396,-0.13927707059094224,0.07463438693068827,-0.13912278203828984,-0.6992370811936799,0.13930722781096558,-0.9989045234541498,0.13888224898304402,-0.6175217577672163,-0.13931035490040267,0.1824171610570709,0.13879808291722923
18,0.9728842744965123,-0.14747471707640677,0.4271293810851432,-0.14750668205394574,-0.44208269011914386,0.14742943686787374,-0.9765631184680146,-0.14560339539715644,-0.7719973207328699,-0.14749848731443688,0.016828887949111148,-0.14750308736727372,0.7929273757272797,0.14749895651330416,0.9718631289483876,0.14733801185753348,0.41171080650416075,-0.14746983944923295,-0.45708328691040556,-0.14730647509936573,-0.9800425683449941,0.14750177062337533,-0.762277563471767,0.14705179304087013,0.033589575533404636,-0.1475050816592499,0.802983395784573,0.14696267603000743
19,0.9453639820795602,-0.1556677569139849,0.2873095135416216,-0.15570149772360942,-0.6112526245897155,0.15561996113831117,-0.9983042615559901,-0.1536924729192207,-0.5501388460490069,-0.1556928477207945,0.3589635391077552,-0.15569770333212227,0.9670166078497127,0.15569334298626555,0.7689624956694915,0.15552345696072978,-0.07507677894349374,-0.15566260830752368,-0.85398380836005,-0.1554901681604416,-0.9184909903716716,0.15569631343578508,-0.21476911310049615,0.15522133709869623,0.6689032066090711,-0.1556998084180971,0.9926508641150769,0.15512726914278563
20,0.9083979252494543,-0.16386079675156306,0.1410372937265151,-0.16389631339327304,-0.7560538458370958,0.1638104854087486,-0.9579756808685278,-0.16178155044128495,-0.2791381066992712,-0.16388720812715213,0.6575722191407843,-0.16389231929697082,0.9884351803594296,0.1638877294592269,0.4129569731385663,0.16370890206392608,-0.5434829505115776,-0.1638553771658144,-0.9990009892270116,-0.16367386122151747,-0.536084084309812,0.1638908562481948,0.42032914073565747,0.16339088115652237,0.9896212077155743,-0.16389453517694433,0.6496397801173898,0.1632918622555638
21,0.8623554566266871,-0.17205383658914122,-0.008402319647324475,-0.17209112906293672,-0.8707135861168288,0.17200100967918602,-0.8580848122244682,-0.1698706279633492,0.016797208378505686,-0.1720815685335097,0.8764472596669446,-0.17208693526181934,0.8538015716088414,0.17208211593218822,-0.02527067595158275,0.1718943471671224,-0.878825541163884,-0.17204814602410512,-0.8493618733954124,-0.17185755428259336,0.03359241563741021,0.17208539906060455,0.8840035510959018,0.17156042521434847,0.8449048915755235,-0.17208926193579152,-0.0419824723360154,0.171456455368342
22,0.8076966173381208,-0.18024687642671938,-0.15765323503621154,-0.18028594473260035,-0.9506607234136952,0.18019153394962348,-0.7048423860225389,-0.17795970548541346,0.3112320788581369,-0.18027592893986732,0.9890490608211937,-0.18028155122666792,0.5843714680951001,0.18027650240514956,-0.45846678700876015,0.1800797922703187,-0.9990009892270116,-0.18024091488239583,-0.44920266111697,-0.18004124734366922,0.591534118342619,0.1802799418730143,0.9871526690389019,0.1797299692721746,0.3028166029243987,-0.18028398869463874,-0.711075995509521,0.1796210484811202
23,0.744967540438117,-0.1884399162642975,-0.3033635986463942,-0.18848076040226402,-0.9927080176514501,0.1883820582200609,-0.5077762741063465,-0.18604878300747768,0.5778655146606875,-0.18847028934622492,0.981724139138569,-0.18847616719151644,0.2226819589323564,0.1884708888781109,-0.8003795038223758,0.188265237373515,-0.8745861537498308,-0.18843368374068656,0.08344930544220765,-0.1882249404047451,0.9428359349673102,0.18847448468542405,0.6877089419367549,0.1878995133300007,-0.38169106572201283,-0.18847871545348596,-0.9985903093393598,0.18778564159389838
24,0.6747949941275839,-0.1966329561018757,-0.44226106984409214,-0.19667557607192768,-0.995179175896744,0.19657258249049833,-0.27913909284063765,-0.19413786052954193,0.7928799450672677,-0.19666464975258252,0.855360674879517,-0.19667078315636496,-0.17416413521360993,0.19666527535107223,-0.9829320229899038,0.1964506824767113,-0.5360421255762363,-0.1966264525989773,0.5914878195936001,-0.19640863346582096,0.964778033947753,0.19666902749783374,0.10779522454742542,0.19606905738782685,-0.8866834620719409,-0.19667344221233315,-0.750238832308229,0.19595023470667658
25,0.597880119293297,-0.20482599593945383,-0.5712263108713863,-0.20487039174159133,-0.9579756808685278,0.20476310676093573,-0.03314639477032304,-0.2022269380516062,0.9370687713766463,-0.20485901015894015,0.6252808161105047,-0.20486539912121352,-0.5435135419313573,0.2048596618240336,-0.9697770799994512,0.2046361275799076,-0.0662562899388779,-0.204819221457268,0.9250664359638998,-0.20459232652689685,0.6496954088319284,0.2048635703102435,-0.5160808782904293,0.20423860144565295,-0.9746547713981599,-0.2048681689711804,-0.09929249585428884,0.20411482781945475
26,0.5149914239399227,-0.213019035777032,-0.687363040447367,-0.21306520741125498,-0.8825807185111588,0.21295363103137316,0.21490718558485614,-0.21031601557367044,0.9975520351651532,-0.21305337056529774,0.31938279816277887,-0.21306001508606204,-0.8270541111570393,0.21305404829699495,-0.7635339002371887,0.21282157268310392,0.41975139624441277,-0.21301199031555873,0.9857958227932273,-0.2127760195879727,0.10765548555683631,0.21305811312265324,-0.9294824764433802,0.21240814550347908,-0.6042307123349234,-0.21306289573002762,0.6049364043496628,0.21227942093223295
27,0.4269571045120725,-0.22121207561461012,-0.7880630780016905,-0.22126002308091863,-0.7720000480512634,0.22114415530181059,0.4495988780264069,-0.2184050930957347,0.9689269466129149,-0.22124773097165537,-0.025241845016591723,-0.22125463105091062,-0.980021021501593,0.2212484347699563,-0.4052666960337766,0.22100701778630022,0.8029893012853434,-0.22120475917384944,0.7557637893862074,-0.2209597126490486,-0.4719915960910254,0.22125265593506296,-0.9638110027732304,0.22057768956130516,0.050372492104744704,-0.22125762248887482,0.984542962732691,0.22044401404501116
28,0.33465677082919276,-0.22940511545218828,-0.8710649177872871,-0.22945483875058229,-0.6306421718625934,0.22933467957224807,0.6563366898290532,-0.22649417061779892,0.8537504996275977,-0.22944209137801294,-0.3668057990237982,-0.22944924701575914,-0.9782641612589208,0.22944282124291762,0.03369145598987282,0.22919246288949652,0.9896274201406913,-0.22939752803214017,0.30281850387948656,-0.22914340571012445,-0.8867584339408113,0.2294471987474727,-0.6050661718987956,0.2287472336191313,0.6812847264156532,-0.22945234924772204,0.8358218491694634,0.22860860715778933
29,0.23901265731500806,-0.23759815528976647,-0.9345045174239208,-0.2376496544202459,-0.46414258238821615,0.2375252038426855,0.8222666651732259,-0.23458324813986317,0.6623110631940086,-0.23763645178437057,-0.6638928720176782,-0.23764386298060766,-0.8220609000314877,0.23763720771587896,0.4659414438740256,0.23737790799269282,0.9339702320827127,-0.23759029689043087,-0.23944338880493815,-0.23732709877120034,-0.9917550386147392,0.23764174155988246,0.00044424977577437575,0.23691677767695746,0.9917781085257773,-0.23764707600656926,0.238580124064969,0.23677320027056753
30,0.14098040833601205,-0.24579119512734462,-0.9769571602714157,-0.24584447008990956,-0.27913909284063765,0.24571572811312292,0.937072081860799,-0.24267232566192742,0.4117093520140241,-0.2458308121907282,-0.88047989743077,-0.24583847894545624,-0.5360722981688957,0.24583159418884032,0.8054197900149034,0.24556335309588911,0.6496445578602947,-0.24578306574872158,-0.7110812250819646,-0.24551079183227623,-0.7503030753289238,0.2458362843722922,0.605773491996799,0.24508632173478354,0.8358227492358042,-0.24584180276541648,-0.48668900694371736,0.2449377933833457
31,0.04153952971884974,-0.2539842349649228,-0.9974694514938842,-0.25403928575957324,-0.08300720853584859,0.25390625238356035,0.9936148951331387,-0.25076140318399165,0.12433087059250325,-0.25402517259708574,-0.990304707696571,-0.2540330949103048,-0.16544966758622034,0.25402598066180165,0.9845343883188172,0.2537487981990854,0.2062632387276392,-0.25397583460701234,-0.9729849743120409,-0.2536944848933521,-0.24674866147569965,0.25403082718470194,0.9640486753625022,0.2532558657926096,0.28676689088951746,-0.25403652952426364,-0.9507899820329124,0.25310238649612393
32,-0.05831639814812038,-0.2621772748025009,-0.9955807292523456,-0.26223410142923687,0.11643391122887078,0.2620967766539978,0.9883795467216366,-0.25885048070605593,-0.17415371721047815,-0.26221953300344336,-0.9800505421981471,-0.2622277108751533,0.2312938276000737,0.262220367134763,0.9676224842415853,0.2619342433022817,-0.28761851492748053,-0.262168603465303,-0.9479058753109407,-0.26187817795442797,0.3430021588345477,0.2622253699971117,0.929153570940734,0.2614254098504358,-0.3971599170983825,-0.2622312562831109,-0.904675886036727,0.2612669796089021
33,-0.15758964784163237,-0.27037031464007905,-0.9713334101765015,-0.27042891709890055,0.3112331783813484,0.2702873009244352,0.921691545233522,-0.26693955822812016,-0.45708167212856854,-0.270413893409801,-0.8509607654078644,-0.27042232684000184,0.5915211130983306,0.2704147536077243,0.7580513358944515,0.27011968840547806,-0.7110812250819646,-0.27036137232359375,-0.6432410323016756,-0.27006187101550383,0.8129324568298196,0.2704199128095214,0.5153195330173368,0.26959495390826194,-0.8942962102800252,-0.27042598304195814,-0.3730925694703565,0.2694315727216803
34,-0.25528831386612294,-0.2785633544776572,-0.9252720367786772,-0.2786237327685642,0.4936245608648517,0.27847782519487263,0.7976972276032396,-0.27502863575018444,-0.6991798825794565,-0.27860825381615856,-0.6186881032575886,-0.2786169428048504,0.8583602212080104,0.27860914008068566,0.39754777343988285,0.27830513350867436,-0.9604464515116701,-0.2785541411818845,-0.14885163195315587,-0.2782455640765797,0.9988820594408003,0.27861445562193116,-0.10867850831880253,0.27776449796608804,-0.9708310220032218,-0.27862070980080533,0.3587005257547418,0.27759616583445845
35,-0.35043622344556025,-0.28675639431523536,-0.8584310482029581,-0.28681854843822785,0.6563366898290532,0.28666834946531006,0.6241059599443225,-0.28311771327224866,-0.8788224364528556,-0.2868026142225162,-0.31139667851909636,-0.28681155876969894,0.9896831240184422,0.28680352655364705,-0.04210985461304808,0.28649057861187066,-0.9746608898702935,-0.2867469100401752,0.38944169952439245,-0.2864292571376556,0.8358934206718184,0.2868089984343409,-0.6883539324634889,0.2859340420239141,-0.5907688344251345,-0.2868154365596526,0.8980069333782202,0.2857607589472367
36,-0.44208269011914386,-0.29494943415281355,-0.772311548952134,-0.2950133641078915,0.7928827461599658,0.2948588737357475,0.41171080650416075,-0.2912067907943129,-0.9799623994319073,-0.29499697462887375,0.03365301791329512,-0.29500617473454743,0.964756822704603,0.2949979130266083,-0.4733831665735062,0.29467602371506696,-0.750244349901775,-0.2949396788984659,0.8128688294673042,-0.29461295019873146,0.38090316125745755,0.29500354124675066,-0.9872963182846276,0.29410358608174025,0.0671411670007659,-0.2950101633184998,0.9554228288939755,0.29392535206001486
37,-0.5293120126861656,-0.3031424739903917,-0.6688475973167545,-0.30320817977755515,0.8978190694876738,0.3030493980061849,0.1737174692044557,-0.29929586831637717,-0.9935652398439103,-0.3031913350352314,0.3746221319845234,-0.303200790699396,0.7875166321642914,0.3031922994995697,-0.8104031466742486,0.30286146881826326,-0.34214182739086046,-0.30313244775675663,0.9965395211529737,-0.3027966432598073,-0.20714753103688738,0.3031980840591604,-0.8835872742435591,0.3022731301395664,0.6934736284765324,-0.30320489007734697,0.5001375645610977,0.30208994517279303
38,-0.6112526245897155,-0.3113355138279698,-0.5503627705955039,-0.31140299544721883,0.9669621797468185,0.31123992227662234,-0.07507677894349374,-0.3073849458384414,-0.9184158564675367,-0.311385695441589,0.6701665989166474,-0.31139540666424453,0.48594488132549146,0.3113866859725311,-0.9860671637471322,0.31104691392145956,0.14972894707872506,-0.31132521661504736,0.886279928501392,-0.3109803363208832,-0.7228356310681627,0.31139262687157016,-0.41952270897421107,0.31044267419739247,0.9936546066554385,-0.3113996168361942,-0.2235326511039411,0.31025453828557126
39,-0.6870858023216195,-0.319528553665548,-0.4195179825584126,-0.31959781111688246,0.9975555593250707,0.31943044654705977,-0.3192031166052062,-0.3154740233605057,-0.7612271199060563,-0.31958005584794663,0.8844503001835399,-0.3195900226290931,0.10765311868424986,0.3195810724454924,-0.9653994939682055,0.31923235902465585,0.6049408533238516,-0.3195179854733381,0.5146112237601901,-0.3191640293819591,-0.9860164490556457,0.3195871696839799,0.21563681076800184,0.31861221825521857,0.8265042970427732,-0.3195943435950414,-0.827250269846316,0.31841913139834943
40,-0.7560538458370958,-0.32772159350312613,-0.2792517250601661,-0.3277926267865461,0.9883795467216366,0.3276209708174972,-0.5434829505115776,-0.3235631008825699,-0.5360402318489593,-0.32777441625430426,0.9914903568075338,-0.32778463859394164,-0.28763470431984006,0.3277754589184538,-0.7525151901656907,0.32741780412785215,0.9120421406254627,-0.3277107543316288,-0.00884255333616546,-0.32734772244303495,-0.904753353516636,0.3277817124963896,0.7628526518205978,0.32678176231304473,0.2706361020450086,-0.32778907035388866,-0.987046977334887,0.3265837245111276
41,-0.8174676492431415,-0.33591463334070426,-0.13271407584806688,-0.33598744245620976,0.9397999606024338,0.3358114950879346,-0.7339716468720321,-0.3316521784046341,-0.26297046634326493,-0.3359687766606618,0.9783076722890778,-0.3359792545587902,-0.6375113320253639,0.33596984539141506,-0.38980075095393485,0.33560324923104845,0.9958437033202947,-0.33590352318991956,-0.5296882108735904,-0.3355314155041108,-0.5074338816768452,0.3359762553087993,0.9989524628211233,0.3349513063708709,-0.4125164805502681,-0.33598379711273585,-0.6171723028966587,0.33474831762390583
42,-0.8707135861168288,-0.34410767317828245,0.016804045393007727,-0.34418225812587344,0.8537535157642845,0.34400201935837205,-0.878825541163884,-0.3397412559266984,0.03358966772876154,-0.3441631370670194,0.8465007074276576,-0.3441738705236387,-0.8867389380069325,0.34416423186437645,0.05052527678295439,0.3437886943342448,0.8358279961789783,-0.34409629204821024,-0.8943018242949589,-0.3437151085651867,0.06714684399710508,0.3441707981212091,0.8276470905245503,0.34312085042869694,-0.9016561165949415,-0.34417852387158304,0.08389076892147215,0.342912910736684
43,-0.9152596406576812,-0.3523007130158606,0.1659447840019286,-0.35237707379553707,0.7336706124276289,0.3521925436288095,-0.9690383198285016,-0.34783033344876263,0.3271493368210481,-0.352357497473377,0.6120516596380092,-0.35236848648848723,-0.9959699632980901,0.35235861833733784,0.4807914291034945,0.3519741394374411,0.4711724450525941,-0.352289060906501,-0.9951402597944171,-0.3518988016262626,0.6182712452360382,0.3523653409336188,0.3188004165445946,0.35129039448652305,-0.9667327922395572,-0.3523732506304303,0.7399361865388727,0.3510775038494622
44,-0.9506607234136952,-0.36049375285343876,0.31135876051789274,-0.3605718894652007,0.5843385770049826,0.36038306789924696,-0.9990009892270116,-0.3559194109708269,0.5914857299880178,-0.36055185787973465,0.30338854840613816,-0.36056310245333584,-0.9479592307777791,0.3605530048102991,0.8153292215615305,0.3601595845406374,-0.00884255333616546,-0.36048182976479165,-0.8024611244318575,-0.36008249468733844,0.953415712738609,0.3605598837460286,-0.32006339736086886,0.3594599385443492,-0.5771399300773457,-0.3605679773892775,0.9989153738134007,0.3592420969622404
45,-0.9765631184680146,-0.3686867926910169,0.4497802905222859,-0.36876670513486437,0.41171080650416075,0.3685735921696844,-0.9668506156980455,-0.36400848849289114,0.8029864644882515,-0.3687462182860923,-0.042061812111804486,-0.36875771841818433,-0.7502865794506939,0.3687473912832605,0.9875302409335939,0.3683450296438337,-0.4866925862734028,-0.3686745986230824,-0.3730953133607881,-0.3682661877484143,0.9555046418393013,0.3687544265584383,-0.8283949868397046,0.3676294826021753,0.08389085926040919,-0.36876270414812473,0.7218543333935213,0.3674066900750186
46,-0.9927080176514501,-0.376879832528595,0.5781007248703209,-0.37696152080452805,0.22266942537659834,0.3767641164401218,-0.8745861537498308,-0.37209756601495536,0.9427588096111604,-0.37694057869244985,-0.38241198550762484,-0.3769523343830329,-0.43416017453398315,0.3769417777562218,0.963108266307061,0.37653047474703,-0.8453833000935627,-0.3768673674813731,0.16631531702076296,-0.3764498808094902,0.6238083095043726,0.3769489693708481,-0.9988802582838266,0.3757990266600014,0.7054664666571613,-0.3769574309069719,0.05743018757134257,0.37557128318779676
47,-0.998934106468155,-0.3850728723661732,0.6934382632489828,-0.3851563364741917,0.024750916933274518,0.3849546407105592,-0.7279441607506484,-0.38018664353701964,0.9983173180440714,-0.38513493909880747,-0.6763929563914927,-0.3851469503478814,-0.0494894243747762,0.3851361642291832,0.7469258543626135,0.3847159198502263,-0.9970946982774902,-0.3850601363396638,0.6566710856693903,-0.38463357387056607,0.07419778758181797,0.38514351218325776,-0.7619897937807965,0.38396857071782753,0.9952501715674335,-0.3851521576658192,-0.6378122754269473,0.38373587630057493
48,-0.995179175896744,-0.3932659122037514,0.7932026731992672,-0.39335115214385535,-0.17415433246214224,0.39314516498099666,-0.5360421255762363,-0.38827572105908387,0.964699113695868,-0.39332929950516504,-0.8883581872850486,-0.39334156631272993,0.34299461771945594,0.39333055070214445,0.38202617615916196,0.3929013649534226,-0.9046825394297735,-0.3932529051979546,0.9533410899004228,-0.39281726693164193,-0.5013321562268207,0.3933380549956675,-0.21433520069343104,0.3921381147756537,0.8169521695766363,-0.3933468844246663,-0.9907904722933045,0.39190046941335316
49,-0.9814807439622717,-0.40145895204132953,0.8751534611529963,-0.401545967813519,-0.36611659819807135,0.4013356892514341,-0.31081158731133707,-0.39636479858114815,0.8449072106375181,-0.40152365991152267,-0.9926059243487779,-0.40153618227757853,0.6813273514434225,0.40152493717510584,-0.05893712767182949,0.4010868100566189,-0.5907725430230472,-0.40144567405624526,0.9688222283847042,-0.40100095999271784,-0.9017323544487316,0.40153259780807726,0.42073223231919765,0.4003076588334798,0.25442879700417614,-0.40154161118351356,-0.8120884444995462,0.40006506252613133
50,-0.9579756808685278,-0.40965199187890766,0.9374501890881459,-0.40974078348318266,-0.5434829505115776,0.40952621352187146,-0.0662562899388779,-0.4044538761032124,0.6496422627988417,-0.4097180203178803,-0.9764956526027355,-0.40973079824242703,0.912093477404205,0.4097193236480672,-0.4881657078253393,0.4092722551598152,-0.13222082417153896,-0.409638442914536,0.6985483245281671,-0.4091846530537937,-0.9871314982589727,0.409727140620487,0.8842114280468097,0.4084772028913059,-0.42775641435897105,-0.4097363379423608,-0.19760167846944277,0.4082296556389095
51,-0.9248988414362043,-0.4178450317164858,0.978693806799069,-0.4179355991528463,-0.6991823526478141,0.41771673779230895,0.18241850263485546,-0.4125429536252766,0.3963467064222014,-0.41791238072423786,-0.8419808161894278,-0.4179254142072756,0.9988600983995086,0.4179137101210285,-0.8201976664867113,0.41745770026301154,0.35870316379961587,-0.41783121177282667,0.22223692462298353,-0.4173683461148695,-0.7276972097758293,0.41792168343289676,0.9870805524007827,0.416646746949132,-0.9087611001707453,-0.417931064701208,0.5229225472857961,0.41639424875168773
52,-0.8825807185111588,-0.426038071554064,0.9979580715479425,-0.42613041482250996,-0.8270075607814866,0.4259072620627463,0.41975139624441277,-0.4206320311473409,0.10764667918092603,-0.4261067411305955,-0.6053719543358926,-0.4261200301721241,0.9279286727991368,0.4261080965939899,-0.9889235164634342,0.42564314536620784,0.7618041070624366,-0.42602398063111746,-0.3196234686318003,-0.4255520391759454,-0.21405734993781209,0.4261162262453065,0.6873862432114228,0.42481629100695817,-0.9623612407883293,-0.42612579146005525,0.9628348927337654,0.4245588418644659
53,-0.8314441407891479,-0.4342311113916421,0.9948103494797972,-0.4343252304921736,-0.921862587240073,0.4340977863331838,0.6309861810683414,-0.4287211086694051,-0.19066910531273035,-0.4343011015369531,-0.29535897386302384,-0.4343146461369726,0.710497713064794,0.4343024830669512,-0.9607489632091267,0.43382859046940414,0.978388836069306,-0.43421674948940814,-0.7672106142916351,-0.43373573223702133,0.3743589007021114,0.43431076905771626,0.10735355073144076,0.4329858350647843,-0.5633478525539742,-0.4343205182189024,0.8860685998674347,0.4327234349772441
54,-0.7720000480512634,-0.44242415122922024,0.9693213316461975,-0.44252004616183727,-0.9799658614510169,0.44228831060362117,0.8029893012853434,-0.4368101861914694,-0.4719529864892929,-0.44249546194331074,0.050467633252835764,-0.44250926210182123,0.3808947868653324,0.4424968695399126,-0.7412837235565591,0.44201403557260044,0.9554298555028464,-0.44240951834769887,-0.9885082559041144,-0.4419194252980972,0.8320008169535862,0.44250531187012593,-0.5164613983033893,0.4411553791226103,0.10061683328952202,-0.44251524497774963,0.33381817042946166,0.4408880280900223
55,-0.7048423860225389,-0.45061719106679843,0.9220634464366241,-0.4507148618315009,-0.9990009892270116,0.45047883487405865,0.9250664359638998,-0.4448992637135336,-0.7110787129773225,-0.45068982234966826,0.39017480898243306,-0.4507038780666697,-0.008843051063390898,0.4506912560128739,-0.37422459858628676,0.45019948067579674,0.6985483245281671,-0.45060228720598955,-0.9182444425414257,-0.450103118359173,0.999000911029376,0.4506998546825357,-0.9296466542620796,0.4493249231804365,0.7172598502555733,-0.4507099717365969,-0.39756652080531235,0.4490526212028005
56,-0.6306421718625934,-0.45881023090437656,0.8540980040709283,-0.45890967750116457,-0.9782091000924481,0.45866935914449614,0.9896274201406913,-0.45298834123559784,-0.8866858958061263,-0.4588841827560259,0.6825715043442359,-0.4588984940315183,-0.3971847656702626,0.45888564248583524,0.06734481270432775,0.45838492577899304,0.2706378009844662,-0.45879505606428034,-0.5771435531187373,-0.4582868114202489,0.81702124544593,0.4588943974949454,-0.9636918813633668,0.4574944672382626,0.9965643521521099,-0.4589046984954441,-0.9156081662497405,0.45721721431557866
57,-0.5501407895839824,-0.4670032707419547,0.7669513618598439,-0.46710449317082825,-0.9184191010545203,0.4668598834149335,0.9926581645156499,-0.4610774187576621,-0.9830880683359124,-0.4670785431623835,0.8922032825138873,-0.46709310999636683,-0.7228197390787221,0.4670800289587966,0.49550548150247814,0.46657037088218933,-0.22353429506351763,-0.466987824922571,-0.06581362102312688,-0.4664705044813248,0.3496325529793983,0.4670889403073552,-0.6047123328311921,0.46566401129608875,0.8071690674839651,-0.4670994252542913,-0.9423140861844995,0.4653818074283569
58,-0.46414258238821615,-0.47519631057953293,0.6625806455104198,-0.4752993088404918,-0.822014630696619,0.475050407685371,0.9339702320827127,-0.46916649627972634,-0.9916739116044826,-0.47527290356874113,0.9936513314685794,-0.4752877259612153,-0.9343373690512425,0.4752744154317579,0.8250081373332234,0.47475581598538563,-0.6629773996488669,-0.47518059378086175,0.4649281015032408,-0.4746541975424007,-0.23989284923453888,0.4752834831197649,0.000888499463940174,0.4738335553539149,0.2381495580135211,-0.4752941520131385,-0.4633532894386687,0.47354640054113506
59,-0.3735068159343654,-0.48338935041711106,0.543329796301948,-0.48349412451015555,-0.6928390310300134,0.48324093195580836,0.8172125542301782,-0.4772555738017906,-0.9116764778034536,-0.48346726397509876,0.9746146112182116,-0.48348234192606393,-0.9983436726660347,0.4834688019047193,0.9902468918556799,0.48294126108858193,-0.9401005146550224,-0.4833733626391525,0.8585388360752993,-0.4828378906034765,-0.7456167774502503,0.4834780259321747,0.606126972887711,0.48200309941174097,-0.4428754097801683,-0.4834888787719857,0.26425319770781214,0.48171099365391323
60,-0.27913909284063765,-0.49158239025468925,0.41187693121807495,-0.4916889401798191,-0.5360421255762363,0.49143145622624584,0.6496445578602947,-0.48534465132385485,-0.7502416994418167,-0.4916616243814564,0.8374014111729101,-0.4916769578909125,-0.9047334619532951,0.49166318837768064,0.9583217514371605,0.49112670619177823,-0.9870542365219581,-0.49156613149744316,0.9989227202859935,-0.49102158366455245,-0.9908753137732103,0.4916725687445844,0.96416722649504,0.49017264346956707,-0.9156091522350825,-0.49168360553083296,0.8500555360929072,0.4898755867666914
61,-0.18198230420236258,-0.4997754300922674,0.27117419821302213,-0.4998837558494828,-0.3578749121544809,0.4996219804966832,0.4416848093849402,-0.4934337288459191,-0.5217900644772929,-0.4998559847878139,0.5986494594932273,-0.499871573855761,-0.6682857308827129,0.499857574850642,0.7355891965505875,0.4993121512949746,-0.7923426565683493,-0.4997589003557339,0.8446733932970993,-0.49920527672562837,-0.8899925953334138,0.4998671115569942,0.9289888433216492,0.49834218752739323,-0.9577176036061816,-0.49987833228968015,0.9796991496596853,0.49804017987946964
62,-0.08300720853584859,-0.5079684699298456,0.1243814773329888,-0.5080785715191465,-0.16544035532471615,0.5078125047671207,0.2062632387276392,-0.5015228063679833,-0.24672847707490414,-0.5080503451941715,0.2873085224446329,-0.5080661898206096,-0.3263303771762547,0.5080519613236033,0.36639656967466055,0.5074975963981708,-0.40363796037059346,-0.5079516692140247,0.4412868415479837,-0.5073889697867042,-0.4782098580959155,0.5080616543694039,0.5149387079073461,0.5065117315852192,-0.5493965012509412,-0.5080730590485273,0.5836143892093713,0.5062047729922479
63,0.01679726771981963,-0.5161615097674237,-0.02520458337738688,-0.5162733871888101,0.03358978639461202,0.516003029037558,-0.04198278109398478,-0.5096118838900476,0.05037263036519647,-0.5162447056005292,-0.058869887187256775,-0.5162608057854581,0.06714536773211494,0.5162463477965646,-0.07574773759957326,0.5156830415013671,0.08389138589188909,-0.5161444380723154,-0.09225768593340686,-0.51557266284778,0.10062534075848271,0.5162561971818137,-0.10912011810000993,0.5146812756430454,0.11731436019975566,-0.5162677858073746,-0.1256508450615037,0.514369366105026
64,0.11643391122887078,-0.5243545496050018,-0.17422460348296592,-0.5244682028584737,0.2312808093290893,0.5241935533079956,-0.28761851492748053,-0.5177009614121119,0.3429741007571249,-0.5244390660068867,-0.3979100537088547,-0.5244554217503066,0.45002033546831993,0.524440734269526,-0.5028102313372707,0.5238684866045634,0.5508811950736693,-0.524337206930606,-0.598590720761377,-0.5237563559088559,0.644309213076718,0.5244507399942234,-0.6886762241376951,0.5228508197008715,0.7288504449610587,-0.5244625125662218,-0.7674890386015542,0.5225339592178042
65,0.21490718558485614,-0.53254758944258,-0.3193319146002766,-0.5326630185281375,0.41975139624441277,0.532384077578433,-0.515371522560424,-0.5257900389341761,0.6049387161914506,-0.5326334264132444,-0.6887018060562125,-0.5326500377151552,0.7618469872838072,0.5326351207424874,-0.829760294082276,0.5320539317077598,0.8829960750480726,-0.5325299757888968,-0.9283688503192838,-0.5319400489699319,0.9629173403748043,0.5326452828066331,-0.9873678508639058,0.5310203637586977,0.9975967768548264,-0.532657239325069,-0.9974755279933509,0.5306985523305824
66,0.3112331783813484,-0.5407406292801581,-0.4572677193544254,-0.5408578341978011,0.5914878195936001,0.5405746018488704,-0.7110812250819646,-0.5338791164562403,0.8128659577678583,-0.540827786819602,-0.8959853140869721,-0.5408446536800037,0.9533947513031344,0.5408295072154486,-0.9915002735701108,0.5402393768109561,0.9989227202859935,-0.5407227446471875,-0.9843237000653864,-0.5401237420310077,0.9451507373741426,0.5408398256190428,-0.8833788744242181,0.5391899078165239,0.7971577567141399,-0.5408519660839163,-0.6921944430119904,0.5388631454433606
67,0.4044494321394655,-0.5489336691177362,-0.5849342769425852,-0.5490526498674647,0.7396434901233433,0.5487651261193078,-0.862579341093853,-0.5419681939783045,0.948182304255887,-0.5490221472259597,-0.994626504274366,-0.5490392696448522,0.9944224473409904,0.5490238936884101,-0.9558268025539101,0.5484248219141524,0.8702782449500921,-0.5489155135054783,-0.74995133358089,-0.5483074350920835,0.5972157896512418,0.5490343684314526,-0.4191193689553377,0.5473594518743499,0.22180298765725523,-0.5490466928427634,-0.015466410103024505,0.5470277385561387
68,0.4936245608648517,-0.5571267089553144,-0.699464471714092,-0.5572474655371285,0.8583119087818731,0.5569556503897453,-0.9604464515116701,-0.5500572715003689,0.9988003494298336,-0.5572165076323171,-0.9726646810932612,-0.5572338856097008,0.8784527043102142,0.5572182801613713,-0.7298426758512726,0.5566102670173487,0.5285593032355248,-0.557108282363769,-0.2943801043924914,-0.5564911281531594,0.04065618459701715,0.5572289112438623,0.21607059585732746,0.5555289959321761,-0.45786919226212597,-0.5572414196016107,0.6695612427843491,0.5551923316689169
69,0.5778675561486627,-0.5653197487928925,-0.7982862024068306,-0.5654422812067921,0.9427621401971263,0.5651461746601827,-0.9985976534212823,-0.5581463490224331,0.9601985340675653,-0.5654108680386748,-0.8327628160644499,-0.5654285015745493,0.6237945946919845,0.5654126666343328,-0.35854264273329634,0.564795712120545,0.05743060993875231,-0.5653010512220596,0.2480188179788237,-0.5646748212142353,-0.5301057954227266,0.5654234540562721,0.7631399703650499,0.5636985399900022,-0.9221983366571437,-0.5654361463604578,0.995287426852285,0.5633569247816952
70,0.6563366898290532,-0.5735127886304707,-0.8791801459964508,-0.5736370968764557,0.9896274201406913,0.5733366989306201,-0.9746608898702935,-0.5662354265444973,0.8358250433694887,-0.5736052284450324,-0.5918846502765073,-0.5736231175393979,0.27065303457104034,0.5736070531072941,0.0841453084131369,0.5729811572237413,-0.42775909963357084,-0.5734938200803504,0.7172643529108057,-0.5728585142753112,-0.9156865698618166,0.5736179968686818,0.9989761370083196,0.5718680840478282,-0.9528031935758104,-0.5736308731193052,0.7869202203314191,0.5715215178944734
71,0.7282479242607367,-0.581705828468049,-0.9403295989069247,-0.5818319125461193,0.9970393777931617,0.5815272232010575,-0.8901244328804739,-0.5743245040665615,0.6367897908441639,-0.58179958885139,-0.2792377631814792,-0.5818177335042464,-0.12521868858818497,0.5818014395802554,0.5100794410076592,0.5811666023269376,-0.8082184629954489,-0.581686588938641,0.9749520813323892,-0.581042207336387,-0.981391680980145,0.5818125396810916,0.8273974652538418,0.5800376281056544,-0.5352898205951903,-0.5818255998781523,0.15627410496418287,0.5796861110072515
72,0.7928827461599658,-0.5898988683056271,-0.9803612762562817,-0.590026728215783,0.9647025217927181,0.589717747471495,-0.750244349901775,-0.5824135815886258,0.38087200282269595,-0.5899939492577475,0.06726798001806399,-0.5900123494690949,-0.5013211341285863,0.5899958260532167,0.8344538008369035,0.5893520474301339,-0.9907977590117201,-0.5898793577969318,0.9450767614268256,-0.5892259003974629,-0.7042684431161633,0.5900070824935013,0.3183792970899596,0.5882071721634805,0.1339787191455119,-0.5900203266369995,-0.5582321741403956,0.5878507041200297
73,0.8495953457517539,-0.5980919081432052,-0.9983761528744305,-0.5982215438854467,0.893906020543249,0.5979082717419324,-0.5637177069956406,-0.5905026591106901,0.09093205312158407,-0.5981883096641052,0.40561717293614663,-0.5982069654339435,-0.7982759956420706,0.5981902125261781,0.9926835730138766,0.5975374925333302,-0.9307952083420419,-0.5980721266552226,0.6364501473575123,-0.5974095934585388,-0.1811239761413819,0.598201625305911,-0.3204842648403412,0.5963767162213066,0.7402349737968508,-0.5982150533958468,-0.9731786410957026,0.5960152972328079
74,0.8978190694876738,-0.6062849479807834,-0.9939696534702979,-0.6064163595551103,0.7874723071382904,0.6060987960123698,-0.34214182739086046,-0.5985917366327543,-0.20713058606638482,-0.6063826700704628,0.694783428218958,-0.606401581398792,-0.9692006299408723,0.6063845989991394,0.9532642929099907,0.6057229376365265,-0.6429015280524647,-0.6062648955135133,0.1401019539544995,-0.6055932865196146,0.40529230666910204,0.6063961681183208,-0.8286439589485542,0.6045462602791328,0.9983471537810038,-0.6064097801546939,-0.8658957840905578,0.6041798903455861
75,0.937072081860799,-0.6144779878183615,-0.967240738520651,-0.6146111752247739,0.6496445578602947,0.6142893202828072,-0.0992932260959275,-0.6066808141548186,-0.4866908668879625,-0.6145770304768204,0.8997040146787455,-0.6145961973636406,-0.9871097955610528,0.6145789854721008,0.7240445676402713,0.6139083827397228,-0.19760313172099261,-0.6144576643718039,-0.39756944468818123,-0.6137769795806904,0.8501283264271917,0.6145907109307305,-0.9988557961286462,0.6127158043369588,0.7869210677373633,-0.6146045069135412,-0.29395397255195843,0.6123444834583643
76,0.9669621797468185,-0.6226710276559396,-0.9187896818311375,-0.6228059908944377,0.48591753013236094,0.6224798445532447,0.14972894707872506,-0.6147698916768828,-0.7227765020581024,-0.622771390883178,0.9955313738379434,-0.6227908133284891,-0.8491760290380246,0.6227733719450622,0.3506633729017655,0.6220938278429191,0.2960754029059257,-0.6226504332300947,-0.8179773555910103,-0.6219606726417664,0.9979900634187405,0.6227852537431403,-0.7617018738211153,0.6208853483947849,0.2053937075559733,-0.6227992336723884,0.4357300847355405,0.6205090765711425
77,0.9871907111680082,-0.6308640674935178,-0.8497045896807232,-0.6310008065641012,0.30281850387948656,0.6306703688236821,0.38944169952439245,-0.622858969198947,-0.894298664909385,-0.6309657512895356,0.9706460000549085,-0.6309854292933377,-0.5771760392173167,0.6309677584180234,-0.09253693157904311,0.6302792729461154,0.7172643529108057,-0.6308432020883854,-0.9971220635732705,-0.6301443657028423,0.7972251589037169,0.63097979655555,-0.21390124601824936,0.6290548924526111,-0.4727335226541953,-0.6309939604312357,0.931591678220842,0.6286736696839207
78,0.9975555593250707,-0.639057107331096,-0.7615369643004859,-0.6391956222337649,0.10764705947598698,0.6388608930941195,0.6049408533238516,-0.6309480467210113,-0.985935791470394,-0.6391601116958933,0.8280653587341291,-0.6391800452581862,-0.21405264375427785,0.6391621448909848,-0.5173125967036866,0.6384647180493117,0.962841973854285,-0.6390359709466762,-0.8821646657745696,-0.6383280587639182,0.3179665700717952,0.6391743393679598,0.4211352409319533,0.6372244365104371,-0.9285267904950506,-0.6391886871900828,0.9275404378438044,0.6368382627966989
79,0.9979531620813837,-0.6472501471686741,-0.6562668604783414,-0.6473904379034287,-0.09181593348888199,0.647051417364557,0.7828277148470415,-0.6390371242430756,-0.98950221014382,-0.6473544721022508,0.5850780048431273,-0.6473746612230347,0.18286495756679882,0.6473565313639461,-0.8390883258456986,0.646650163152508,0.9726822993104476,-0.6472287398049669,-0.5070119565612233,-0.646511751824994,-0.2723668896418689,0.6473688821803695,0.8844191306262055,0.6453939805682634,-0.9476194001347644,-0.64738341394893,0.42575034938048145,0.645002855909477
80,0.9883795467216366,-0.6554431870062523,-0.5362584177972577,-0.6555852535730922,-0.28761851492748053,0.6552419416349944,0.9120421406254627,-0.6471262017651398,-0.9046793433712547,-0.6555488325086085,0.2711472665395091,-0.6555692771878833,0.5509122029238168,0.6555509178369076,-0.9937967065477539,0.6548356082557043,0.7443760744142769,-0.6554215086632575,0.017684413882941338,-0.6546954448860699,-0.7675547587590115,0.6555634249927792,0.9870082411047749,0.6535635246260895,-0.5210317989295246,-0.6555781407077773,-0.30450685472212724,0.6531674490222552
81,0.9689303696458874,-0.6636362268438304,-0.40420676715700077,-0.6637800692427559,-0.4719546538086299,0.6634324659054318,0.9845502035041135,-0.655215279287204,-0.7390441652176856,-0.6637431929149661,-0.0756613181423876,-0.6637638931527318,0.8319825248998615,0.6637453043098689,-0.9506344036314159,0.6630210533589006,0.333820625478312,-0.6636142775215483,0.5371647495481374,-0.6628791379471458,-0.9946136679525669,0.6637579678051889,0.6870634089296191,0.6617330686839156,0.15060519865870756,-0.6637728674666244,-0.8713589015723978,0.6613320421350334
82,0.9397999606024338,-0.6718292666814085,-0.26307750394448315,-0.6719748849124195,-0.6374754499814631,0.6716229901758692,0.9958437033202947,-0.6633043568092682,-0.5073923728444012,-0.6719375533213237,-0.4132956219015522,-0.6719585091175804,0.9817010998307771,0.6719396907828301,-0.7181952817455959,0.6712064984620969,-0.15846575497606952,-0.6718070463798391,0.8982078288685432,-0.6710628310082216,-0.8742254077153917,0.6719525106175986,0.10691185574473533,0.6699026127417418,0.7514102180466009,-0.6719675942254717,-0.9706203634271584,0.6694966352478117
83,0.901279381009204,-0.6800223065189867,-0.11604008715482107,-0.6801697005820831,-0.777582111633649,0.6798135144463067,0.9452204649546138,-0.6713934343313326,-0.23041673094687917,-0.6801319137276812,-0.700815940964827,-0.6801531250824289,0.9764306567478603,0.6801340772557916,-0.34275931711095253,0.6793919435652932,-0.6119541919258943,-0.6799998152381297,0.9943236504843816,-0.6792465240692975,-0.44844506094033215,0.6801470534300085,-0.5168418164672298,0.6780721567995678,0.99881527077865,-0.680162320984319,-0.5490253300714311,0.6776612283605898
84,0.8537535157642845,-0.6882153463565649,0.033603339824755434,-0.6883645162517469,-0.8866890282972477,0.6880040387167441,0.8358279961789783,-0.6794825118533968,0.06714135128722254,-0.6883262741340388,-0.9033591214400818,-0.6883477410472774,0.8170032827273709,0.6883284637287529,0.10092201395171065,0.6875773886684896,-0.9156149000436888,-0.6881925840964205,0.7971627609347764,-0.6874302171303734,0.13399004746660154,0.6883415962424182,-0.9298106487491817,0.6862417008573939,0.7764618947443824,-0.6883570477431661,0.16718891794107527,0.685825821473368
85,0.7976972276032396,-0.696408386194143,0.18249210823637022,-0.6965593319214104,-0.9604464515116701,0.6961945629871815,0.674467790868045,-0.687571589375461,0.3587018965746475,-0.6965206345403965,-0.9963658762003635,-0.696542357012126,0.5285890546371254,0.6965228502017142,0.5245091871637875,0.6957628337716859,-0.9951011474447816,-0.6963853529547112,0.3648779530547339,-0.6956139101914494,0.669618577375581,0.6965361390548279,-0.9635725699079928,0.6944112449152201,0.18892635706005056,-0.6965517745020133,0.7936858705845179,0.6939904145861462
86,0.7336706124276289,-0.7046014260317212,0.3272824973266379,-0.7047541475910741,-0.9959139055683656,0.704385087257619,0.4711724450525941,-0.6956606668975253,0.6182206697454172,-0.704714994946754,-0.9685587107896996,-0.7047369729769745,0.15672223743886163,0.7047172366746757,0.8436635415262688,0.7039482788748822,-0.8309519285855932,-0.704578121813002,-0.17502795585869962,-0.7037976032525252,0.9713300731598357,0.7047306818672376,-0.6043583745108809,0.7025807889730461,-0.4874641984053844,-0.7047465012608606,0.994273315837164,0.7021550076989244
87,0.6623134030119022,-0.7127944658692994,0.46472282710614904,-0.7129489632607379,-0.9916774149981173,0.7125756115280564,0.23858187869042435,-0.7037497444195895,0.8225156317042877,-0.7129093553531117,-0.8233093712125975,-0.7129315889418231,-0.23988757504153696,0.7129116231476369,0.9948395954920617,0.7121337239780784,-0.46335669714700006,-0.7127708906712926,-0.6633092018857201,-0.711981296313601,0.933728029047689,0.7129252246796474,0.0013327489768817263,0.7107503330308722,-0.9345927245225544,-0.7129412280197078,0.6613115650481177,0.7103196008117025
88,0.5843385770049826,-0.7209875057068775,0.5917264840718246,-0.7211437789304014,-0.9479058753109407,0.7207661357984939,-0.00884255333616546,-0.7118388219416538,0.9533377219411896,-0.7211037157594693,-0.5782300043075936,-0.7211262049066717,-0.5986244140722671,0.7211060096205982,0.9479373206068049,0.7203191690812748,0.017684413882941338,-0.7209636595295833,-0.9559467647718869,-0.7201649893746769,0.5699479208651215,0.7211197674920572,0.6064803342469434,0.7189198770886984,-0.9421676888826179,-0.721135954778555,-0.026524693830696975,0.7184841939244808
89,0.5005252330932832,-0.7291805455444557,0.7054412398918571,-0.7293385946000651,-0.8663443196650842,0.7289566600689312,-0.2557171982245236,-0.719927899463718,0.9990009927563126,-0.729298076165827,-0.2630376043797571,-0.7293208208715201,-0.8628516206780508,0.7293003960935596,0.7122952316126568,0.7285046141844711,0.4943957636288428,-0.7291564283878741,-0.9666269156172481,-0.7283486824357528,0.00706860641972282,0.7293143103044668,0.9642855874883276,0.7270894211465245,-0.5066264673849542,-0.7293306815374021,-0.700127211500532,0.726648787037259
90,0.41171080650416075,-0.7373735853820338,0.803313306304995,-0.7375334102697287,-0.750244349901775,0.7371471843393688,-0.4866925862734028,-0.7280169769857823,0.9554264801644313,-0.7374924365721846,0.08404930829342079,-0.7375154368363687,-0.9908535287651854,0.737494782566521,0.3348310340436826,0.7366900592876674,0.8500617877833526,-0.7373491972461648,-0.6921995337210113,-0.7365323754968286,-0.5582799756131686,0.7375088531168766,0.9288239325006898,0.7352589652043506,0.1671890979807784,-0.7375254082962495,-0.9980258810705163,0.7348133801500372
91,0.3187827016299472,-0.7455666252196119,0.8831446876992547,-0.7457282259393924,-0.6042345054408356,0.7453377086098061,-0.6874077865650357,-0.7361060545078465,0.8265065655995335,-0.745686796978542,0.4209448578688298,-0.7457100528012172,-0.9624214515534063,0.7456891690394822,-0.10929996284788868,0.7448755043908636,0.9976030393472072,-0.7455419661044554,-0.21360723769338963,-0.7447160685579045,-0.9286053003486313,0.7457033959292864,0.5145577812485159,0.7434285092621767,0.7623730181644319,-0.7457201350550966,-0.7603616445540371,0.7429779732628153
92,0.22266942537659834,-0.75375966505719,0.943142543354768,-0.7539230416090561,-0.43413573802021466,0.7535282328802436,-0.8453833000935627,-0.7441951320299107,0.6237572810738097,-0.7538811573848997,0.7067989178973727,-0.7539046687660658,-0.7820441888697754,0.7538835555124436,-0.5316687037109495,0.75306094949406,0.9008962742565361,-0.7537347349627462,0.32798871727500084,-0.7528997616189804,-0.9745420775300782,0.7538979387416962,-0.10956170636210355,0.7515980533200028,0.9990009954983411,-0.7539148618139438,-0.114670422207047,0.7511425663755935
93,0.12433130982961088,-0.7619527048947682,0.9819594507812137,-0.7621178572787197,-0.24672934871926827,0.7617187571506809,-0.9507969745697455,-0.7522842095519751,0.365289616335644,-0.7620755177912573,0.9069503760168603,-0.7620992847309144,-0.47819934435573913,0.762077941985405,-0.8481791244883738,0.7612463945972564,0.5836186813718782,-0.761927503821037,0.7728440865649445,-0.7610834546800563,-0.6800432692786572,0.7620924815541058,-0.6889983800010406,0.7597675973778291,0.7657831948282388,-0.7621095885727911,0.5925555015181216,0.7593071594883717
94,0.024750916933274518,-0.7701457447323464,0.998723665922082,-0.7703126729483833,-0.04948663888437182,0.7699092814211184,-0.9970946982774902,-0.7603732870740393,0.07419171809445486,-0.7702698781976149,0.9971299523764483,-0.7702939006957628,-0.0988573380178795,0.7702723284583664,-0.9958121661322209,0.7694318397004526,0.12345088087429217,-0.7701202726793276,0.9897483537755918,-0.7692671477411321,-0.14798578210049618,0.7702870243665155,-0.987439188728639,0.7679371414356551,0.1724055919379234,-0.7703043153316383,0.9818029515085552,0.7674717526011499
95,-0.07507677894349374,-0.7783387845699246,0.9930587006468118,-0.778507488618047,0.14972894707872506,0.7780998056915559,-0.98139790299603,-0.7684623645961035,-0.22353350536253763,-0.7784642386039725,0.9664029608336226,-0.7784885166606114,0.2960920683172844,0.7784667149313276,-0.9451732344742293,0.7776172848036488,-0.36694200076130923,-0.7783130415376184,0.9147253979584918,-0.7774508408022079,0.43576739634305206,0.7784815671789254,-0.883170300397547,0.7761066854934812,-0.5020570547524827,-0.7784990420904855,0.8441930805744254,0.7756363457139281
96,-0.17415433246214224,-0.7865318244075028,0.9650917778625451,-0.7867023042877107,0.34297531241880874,0.7862903299619933,-0.9046825394297735,-0.7765514421181677,-0.5012911465244279,-0.7866585990103301,0.8184951896675896,-0.7866831326254599,0.644295047539256,0.7866611014042889,-0.7063448342750308,0.7858027299068452,-0.7674946830608699,-0.7865058103959092,0.5699033116449198,-0.7856345338632839,0.8672944861372603,0.786676109991335,-0.4187159462837424,0.7842762295513074,-0.9403944237349158,-0.7866937688493326,0.2535704089646602,0.7838009388267063
97,-0.27149179345859753,-0.7947248642450809,0.9154509743619085,-0.7948971199573743,0.5225483343739402,0.7944808542324306,-0.7717183973204453,-0.784640519640232,-0.7342699425382554,-0.7948529594166878,0.5713411327075237,-0.7948777485903085,0.890778005518003,0.7948554878772504,-0.3268790840952422,0.7939881750100416,-0.9801378996342864,-0.7946985792541998,0.05698769880193234,-0.7938182269243598,0.9958506597046849,0.7948706528037448,0.21650433833629812,0.7924457736091335,-0.9364496011666118,-0.79488849560818,-0.4731237891439766,0.7919655319394845
98,-0.36611659819807135,-0.8029179040826591,0.8452511155723047,-0.803091935627038,0.6812890032342108,0.8026713785028682,-0.5907725430230472,-0.7927295971622963,-0.9016585914254851,-0.8030473198230453,0.25490934991793845,-0.8030723645551571,0.9966267028573839,0.8030498743502117,0.11767018608852099,0.8021736201132378,-0.9528091748729423,-0.8028913481124905,-0.47273649027613684,-0.8020019199854357,0.7765275470338924,0.8030651956161545,0.7634271384139677,0.8006153176669596,-0.4920778987410095,-0.8030832223670271,-0.9459292291967651,0.8001301250522627
99,-0.45708328691040556,-0.8111109439202372,0.7560687389802307,-0.8112867512967016,0.8128688294673042,0.8108619027733056,-0.3730953133607881,-0.8008186746843605,-0.988504763706471,-0.811241680229403,-0.09243135758237851,-0.8112669805200055,0.945129957649275,0.811244260823173,0.5387906402886539,0.8103590652164341,-0.6921995337210113,-0.8110841169707812,-0.8630265996674409,-0.8101856130465115,0.2859410213463566,0.8112597384285644,0.9989996141917504,0.8087848617247857,0.18372572839176105,-0.8112779491258744,-0.9111279863473559,0.8082947181650408
100,-0.5434829505115776,-0.8193039837578153,0.6499066884981489,-0.8194815669663653,0.9120421406254627,0.8190524270437429,-0.13222082417153896,-0.8089077522064247,-0.9870507494610681,-0.8194360406357606,-0.42856434016658845,-0.8194615964848541,0.7444179736513089,0.8194386472961344,0.8526347555568085,0.8185445103196304,-0.2621153054114707,-0.819276885829072,-0.9987661885361102,-0.8183693061075874,-0.30453292967229934,0.819454281240974,0.8271476768156544,0.8169544057826118,0.7731202746681896,-0.8194726758847216,-0.3873951822627929,0.816459311277819
101,-0.6244523121228162,-0.8274970235953935,0.5291491349080861,-0.8276763826360289,0.9748552097522915,0.8272429513141804,0.11687451546354176,-0.816996829728489,-0.8974264314513984,-0.8276304010421182,-0.7127319361214913,-0.8276562124497026,0.42617875988050197,0.8276330337690957,0.9967143497239663,0.8267299554228268,0.23214389125365936,-0.8274696546873628,-0.8399187353944454,-0.8265529991686633,-0.7886247668890223,0.8276488240533837,0.31795811484910985,0.8251239498404379,0.9989042754306413,-0.8276674026435688,0.3442225009132947,0.8246239043905973
102,-0.6991823526478141,-0.8356900634329716,0.3965080325258361,-0.8358711983056926,0.9988038779998057,0.8354334755846179,0.35870316379961587,-0.8250859072505532,-0.7276376830815321,-0.8358247614484757,-0.9104775245682311,-0.8358508284145512,0.04065529074559569,0.835827420242057,0.9423423406077478,0.8349154005260231,0.6695661670386436,-0.8356624235456533,-0.4333364483858338,-0.834736692229739,-0.9972272841544051,0.8358433668657935,-0.32090506911849054,0.833293493898264,0.7548879871482043,-0.835862129402416,0.8911227269311257,0.8327884975033755
103,-0.7669263942241143,-0.8438831032705498,0.2549622145536561,-0.8440660139753562,0.9829333875393695,0.8436239998550553,0.5782293867611701,-0.8331749847726174,-0.492851227569807,-0.8440191218548333,-0.9978235483589577,-0.8440454443793998,-0.35128675486927924,0.8440218067150185,0.7003445103250038,0.8431008456292193,0.9430552931961209,-0.8438551924039441,0.10105883829224695,-0.8429203852908149,-0.8574696206555485,0.8440379096782032,-0.8288927676441116,0.8414630379560901,0.15583608305980468,-0.8440568561612632,0.9598266592786855,0.8409530906161536
104,-0.8270075607814866,-0.852076143108128,0.10769049490852028,-0.8522608296450199,0.927876444743412,0.8518145241254926,0.7618041070624366,-0.8412640622946818,-0.21403983973403418,-0.852213482261191,-0.9641789025616748,-0.8522400603442482,-0.6877683459854814,0.8522161931879798,0.3189040293337652,0.8512862907324157,0.985651593376013,-0.8520479612622349,0.6056467240158131,-0.8511040783518908,-0.4181731491058256,0.852232452490613,-0.9988311369934331,0.8496325820139163,-0.5165079658975976,-0.8522515829201105,0.5134662383539936,0.8491176837289318
105,-0.878825541163884,-0.8602691829457061,-0.041999721085329136,-0.8604556453146835,0.8358279961789783,0.8600050483959302,0.8980135377247918,-0.8493531398167461,0.08389108952076789,-0.8604078426675486,-0.8136231543801795,-0.8604346763090968,-0.9156664379249367,0.860410579660941,-0.1260320920406456,0.859471735835612,0.786926007696378,-0.8602407301205256,0.9315985295647208,-0.8592877714129667,0.16720323434360296,0.8604269953030227,-0.7614138036494987,0.8578021260717424,-0.945930247833764,-0.8604463096789576,-0.20843159698642683,0.85728227684171
106,-0.921862587240073,-0.8684622227832842,-0.1907467138896283,-0.8686504609843472,0.7104577230146554,0.8681955726663676,0.978388836069306,-0.8574422173388102,0.37432827759741877,-0.8686022030739062,-0.5644118769694264,-0.8686292922739453,-0.9990009329949663,0.8686049661339024,-0.5458744934966553,0.8676571809388083,0.39553349032868806,-0.8684334989788163,0.9827744583211934,-0.8674714644740427,0.6941707175695545,0.8686215381154325,-0.21346724916051735,0.8659716701295685,-0.9304667536458673,-0.8686410364378048,-0.8184803972271192,0.8654468699544882
107,-0.9556886870718162,-0.8766552626208624,-0.3352099467254821,-0.8768452766540109,0.5567637424127302,0.876386096936805,0.9979326553363491,-0.8655312948608744,0.6313278354799385,-0.8767965634802638,-0.2467630776839223,-0.8768239082387939,-0.9246151467833709,0.8767993526068638,-0.8570301197939464,0.8758426260420045,-0.09269942018419589,-0.8766262678371071,0.7440801211804074,-0.8756551575351185,0.9786443977315177,0.8768160809278422,0.4215381664944424,0.8741412141873947,-0.47739020627423745,-0.8768357631966521,-0.989314395098586,0.8736114630672663
108,-0.9799658614510169,-0.8848483024584405,-0.47214508682764667,-0.8850400923236745,0.3808733483703954,0.8845766212072423,0.9554298555028464,-0.8736203723829388,0.8319327580719243,-0.8849909238866215,0.10080687354039042,-0.8850185242036425,-0.7042529593378598,0.8849937390798251,-0.9975460824982016,0.8840280711452009,-0.5582362796306857,-0.8848190366953977,0.2859186410454222,-0.8838388505961944,0.9212494339897546,0.8850106237402519,0.8846266587931284,0.8823107582452206,0.2002104145358934,-0.8850304899554993,-0.6292602641934263,0.8817760561800446
109,-0.9944515408762445,-0.8930413422960187,-0.5984768661640778,-0.8932349079933382,0.18979873584388987,0.8927671454776799,0.8535230548034795,-0.8817094499050031,0.9582236050900936,-0.893185284292979,0.4361535302265275,-0.893213140168491,-0.3727047147310378,0.8931881255527864,-0.9394448391035939,0.8922135162483973,-0.8870974286526992,-0.8930118055536885,-0.2565748155701097,-0.8920225436572703,0.5420355384427306,0.8932051665526617,0.9869357351651403,0.8904803023030468,0.7836489490157829,-0.8932252167143465,0.06846893322872644,0.8899406492928228
110,-0.9990009892270116,-0.9012343821335969,-0.7113681453260299,-0.9014297236630018,-0.00884255333616546,0.9009576697481173,0.6985483245281671,-0.8897985274270672,0.9989191912961758,-0.9013796446993365,0.7186145762733179,-0.9014077561333395,0.01768540929839667,0.9013825120257478,-0.6942946838838123,0.9003989613515935,-0.9987661885361102,-0.9012045744119791,-0.7233912850782432,-0.900206236718346,-0.026526965142686314,0.9013997093650714,0.686740439155011,0.898649846360873,0.9985251379209477,-0.9014199434731938,0.7294561768076743,0.898105242405601
111,-0.9935687499192448,-0.9094274219711751,-0.8082836295626861,-0.9096245393326654,-0.20713131781899857,0.9091481940185547,0.5001412427975199,-0.8978876049491313,0.9503843011760645,-0.9095740051056942,0.9139403177845447,-0.909602372098188,0.405283396066496,0.9095768984987092,-0.3109064334604978,0.9085844064547897,-0.865902152277306,-0.9093973432702699,-0.9768428035765745,-0.9083899297794219,-0.585822836618184,0.9095942521774812,0.10647013967439092,0.9068193904186991,0.7437793520762195,-0.909614670232041,0.9990009965741318,0.9062698355183791
112,-0.9782091000924481,-0.9176204618087531,-0.8870468060354797,-0.9178193550023291,-0.39716241030105964,0.9173387182889923,0.2706378009844662,-0.9059766824711957,0.8169544119150898,-0.9177683655120518,0.9984466151224077,-0.9177969880630366,0.7288960459693473,0.9177712849716705,0.1343850896591849,0.9167698515579861,-0.5210350697477014,-0.9175901121285607,-0.9421736034145309,-0.9165736228404978,-0.9404739370341241,0.9177887949898909,-0.5172221327069243,0.9149889344765252,0.1392225150770804,-0.9178093969908881,0.7324596415666184,0.9144344286311573
113,-0.9530755082904647,-0.9258135016463314,-0.9458888236041615,-0.9260141706719928,-0.5713598908028816,0.9255292425594296,0.02430741151908543,-0.91406575999326,0.6105484181332109,-0.9259627259184094,0.9618866931770942,-0.9259916040278852,0.9374320371841065,0.9259656714446319,0.5529197626265396,0.9249552966611825,-0.04860043041052775,-0.9257828809868514,-0.6296093983195374,-0.9247573159015737,-0.9665904336389802,0.9259833378023007,-0.9299744598723458,0.9231584785343514,-0.5308128461746043,-0.9260041237497354,0.07286413669307466,0.9225990217439355
114,-0.9184191010545203,-0.9340065414839094,-0.9834882174100049,-0.9342089863416565,-0.7227790554887887,0.933719766829867,-0.22353429506351763,-0.9221548375153242,0.34960395252605325,-0.934157086324767,0.8086936097207147,-0.9341862199927337,0.9979681219885344,0.9341600579175932,0.8613649065220053,0.9331407417643787,0.43573328929041,-0.933975649845142,-0.13134129935853708,-0.9329410089626496,-0.6550490827923521,0.9341778806147104,-0.9634530684306402,0.9313280225921775,-0.9511986316908706,-0.9341988505085826,-0.6258318860497718,0.9307636148567138
115,-0.8745861537498308,-0.9421995813214876,-0.9990005861276121,-0.94240380201132,-0.8453833000935627,0.9419102911003044,-0.4574777218498348,-0.9302439150373883,0.057430407048144685,-0.9423514467311247,0.5574427268742794,-0.9423808359575822,0.9009469836597984,0.9423544443905545,0.9983073056655124,0.941326186867575,0.8133843030433217,-0.9421684187034328,0.4056660413949089,-0.9411247020237254,-0.11468024144592093,0.94237242342712,-0.6040042970076678,0.9394975666500036,-0.9242208378343082,-0.9423935772674298,-0.9886925863109296,0.938928207969492
116,-0.822014630696619,-0.9503926211590659,-0.9920775553982699,-0.9505986176809836,-0.9342847802848783,0.950100815370742,-0.6629773996488669,-0.9383329925594527,-0.2398732256491951,-0.9505458071374823,0.23859936348114957,-0.9505754519224306,0.6616861266386566,0.9505488308635158,0.9364809347660296,0.9495116319707713,0.9918904716419369,-0.9503611875617235,0.8230217954704712,-0.9493083950848014,0.4657497076088322,0.9505669662395299,0.0017769982270118311,0.9476671107078298,-0.4625675425953155,-0.950588304026277,-0.8209988342337814,0.9470928010822701
117,-0.7612298091753971,-0.9585856609966439,-0.9628746015669893,-0.9587934333506474,-0.9859392745923515,0.9582913396411793,-0.8272563538165842,-0.946422070081517,-0.5157496975020239,-0.9587401675438398,-0.10917526416038911,-0.9587700678872793,0.31795957937964214,0.9587432173364773,0.6881957825716858,0.9576970770739677,0.9275472593930406,-0.9585539564200142,0.9976264842611329,-0.9574920881458773,0.8834798840926075,0.9587615090519396,0.6068335760048055,0.9558366547656557,0.21663849574348615,-0.9587830307851243,-0.21273883042355266,0.9552573941950483
118,-0.6928390310300134,-0.9667787008342221,-0.9120475600189895,-0.9669882490203111,-0.9982874813331263,0.9664818639116167,-0.9401005146550224,-0.9545111476035812,-0.7455557849091524,-0.9669345279501975,-0.44371189162149094,-0.9669646838521279,-0.07596579416635837,0.9669376038094386,0.3028868617699809,0.9658825221771639,0.6361081287031403,-0.966746725278305,0.8779802879067796,-0.965675781206953,0.9925851191869678,0.9669560518643494,0.9644037583190236,0.9640061988234819,0.7939560644642447,-0.9669777575439714,0.5096815658374838,0.9634219873078265
119,-0.6175256343087134,-0.9749717406718003,-0.840737896530836,-0.9751830646899747,-0.9708371164714672,0.974672388182054,-0.9944937787950584,-0.9626002251256454,-0.9087635945027708,-0.9751288883565551,-0.7244464225498618,-0.9751592998169764,-0.45789783914980475,0.9751319902823998,-0.14272858852872933,0.9740679672803602,0.18892754306014506,-0.9749394941365956,0.4993729663896582,-0.9738594742680289,0.7549518152961344,0.975150594676759,0.928658838510381,0.972175742881308,0.9978636901617591,-0.9751724843028188,0.9585954872104855,0.9715865804206046
120,-0.5360421255762363,-0.9831647805093785,-0.7505470724099633,-0.9833778803596382,-0.9046825394297735,0.9828629124524917,-0.9870542365219581,-0.9706893026477097,-0.9907942587257301,-0.9833232487629128,-0.9173385109049929,-0.983353915781825,-0.7675378835918268,0.9833263767553613,-0.559925949697151,0.9822534123835565,-0.3045090942023887,-0.9831322629948863,-0.026524888905216166,-0.9820431673291049,0.25359212222226457,0.9833451374891689,0.5141767531159706,0.9803452869391341,0.7324604303259336,-0.9833672110616659,0.8931057296516948,0.9797511735333828
121,-0.44920266111697,-0.9913578203469565,-0.6435005791262448,-0.991572696029302,-0.8024611244318575,0.991053436722929,-0.9182444425414257,-0.978778380169774,-0.9843202226509236,-0.9915176091692702,-0.9989991086265334,-0.9915485317466733,-0.9560005728421136,0.9915207632283226,-0.86563880934501,0.9904388574867529,-0.7233912850782432,-0.9913250318531771,-0.5445992028628601,-0.9902268603901808,-0.3363545950350067,0.9915396803015786,-0.11000327301802088,0.9885148309969604,0.12256958509788746,-0.9915619378205132,0.3483555550166687,0.9879157666461612
122,-0.3578749121544809,-0.9995508601845348,-0.5220024501402831,-0.9997675116989656,-0.6682481167153574,0.9992439609933664,-0.7923426565683493,-0.9868674576918381,-0.8899197926396045,-0.9997119695756278,-0.9595264947002462,-0.999743147711522,-0.9935317921867435,0.999715149701284,-0.9989979654203176,0.9986243025899492,-0.9651620602138787,-0.9995178007114678,-0.9020434613640808,-0.9984105534512567,-0.8088029752641013,0.9997342231139884,-0.6893203999899893,0.9966843750547865,-0.544967651204293,-0.9997566645793603,-0.383329965619578,0.9960803597589393
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/exog_guest_nights_by_region_missing_exog.csv | ,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23
0,0.0,-0.02746317167259738,0.0,-0.007057893525316508,0.0,-0.014159119369999468,0.0,-0.08749370035088656,0.0,-0.04865625779594764,0.0,-0.008568659351486464,0.0,-0.010099001044268438,0.0,0.06541195138945713,0.0,0.01367535330005259,0.0,0.00702884733073994,0.0,0.027210792607960475,0.0,0.2436440609625011
1,0.06661711827247965,-0.05839362741669856,0.0832366491561994,0.030356773267918424,0.0998330429860223,-0.022676823066760606,0.11640181278434966,-0.08353910219117414,0.1329382701053118,0.1146733584393578,0.14943757315012937,0.018127975473701558,0.16589609842745917,-0.025668754114287165,0.18230801406500982,0.14878472563092088,0.19866928975977965,-0.025979754882546776,0.2152732472099785,-0.05469365918347875,0.23122030493460827,0.04385994346356848,0.24740390815307164,0.17328459408252173
2,0.1329382701053118,0.08641101988419007,0.1658956004585934,0.045504541181782765,0.19866858720694477,-0.05871959016947563,0.23122106381895619,-0.19602046063379353,0.2635166923672206,0.17518177469758947,0.2955191005755992,-0.08125760752546007,0.3271946292138714,0.08238020291612126,0.3585055968790339,0.11044374803339405,0.38941826187403455,-0.09740021468319436,0.4204800842020212,-0.18706058979347182,0.4499089604913546,0.05455515643726392,0.47942543957854117,0.14369220782091904
3,0.19866880397813208,0.07569944878855102,0.24740316552292724,0.0415514746130159,0.2955191005755992,-0.15920769477754748,0.3428967073565496,-0.19944875822861768,0.3894173096773208,0.2942784947111525,0.4349639061035205,-0.12424093300579578,0.47942543957854117,0.08178194243293531,0.5226871209692666,0.19454246319638996,0.5646423567677593,-0.09448483074511826,0.6060248170036572,-0.31646084048319356,0.6442135060693922,0.045456356318880335,0.6816386192303977,0.22999701028090566
4,0.2635166923672206,0.08148909254286604,0.32719364707661835,0.036762583842814375,0.3894168847769294,-0.2149331081811167,0.44991043713013074,-0.137780459510568,0.5084052027756657,0.32500192197293243,0.5646403600268997,-0.038875860023334034,0.6183696753450455,0.16624018851145048,0.6693497019958143,0.1577922134993358,0.717355942729148,-0.07889291499163552,0.7632311729318405,-0.2196775313919245,0.8036030480328988,0.030138855532104488,0.8414708110015084,0.3724825361763016
5,0.3271938291649748,0.15808485600062427,0.40471326513915407,0.06466726981444942,0.47942374418967615,-0.29012992984859837,0.5508073287584893,-0.009338350034207608,0.6183681633215211,0.34639528123348473,0.6816362087564646,-0.03435725501354441,0.7401767003120189,0.2353379841232961,0.7935776393349223,0.14218909558628462,0.8414708110015084,-0.10099295151332627,0.8847480125015222,-0.25229027875152843,0.9194390117768044,0.05439873691292068,0.9489844233421858,0.5197881426170552
6,0.3894173096773208,0.1093040070860904,0.4794240004913979,0.0634119947267259,0.5646403600268997,-0.2951048719526334,0.6442156204319115,-0.0628476816067032,0.7173541886666447,0.5918235567629165,0.7833239777578337,-0.028717210622548818,0.8414708110015084,0.3248372017099977,0.8912071759819662,0.13854592261038334,0.9320388934539371,-0.09384739552397232,0.9648930766705357,-0.4549627585367087,0.985443334120646,0.08225754342511524,0.9974947805708058,0.5278870832139627
7,0.44991068751229063,0.02436124338507627,0.5508073287584893,0.055083929897366816,0.6442152760320222,-0.3122591591151877,0.7288653629462123,-0.14320409379407573,0.8036061327442741,0.46362121986640603,0.8674199789650587,-0.03615306301164129,0.919444789341787,0.3084245513757743,0.9589660555042986,0.10596350259458488,0.9854495264431672,-0.09164386217785683,0.9999186954272832,-0.29610089850965643,0.99803872099732,0.07043982387897442,0.9839857436309893,0.7177817042201629
8,0.5084052027756657,0.09721738025908783,0.6183678191902066,0.05733556464541125,0.7173534059482909,-0.2813354916901234,0.8036056855252041,-0.22092658378419633,0.8755928979700968,0.4126099613603387,0.9320355974912465,-0.08000409489538378,0.9719377006088968,0.32738203406528965,0.994583198503197,0.13196944522768964,0.9995733965789168,-0.10498310190874317,0.9881870328998575,-0.3752186596435092,0.9565425291927647,0.07410549162758007,0.9092972390096973,0.5377613391231191
9,0.5646409761170574,-0.03890281288913384,0.681636573161724,0.027793399697221152,0.7833239777578337,-0.3533061173597477,0.8674204426910147,-0.16752851421805906,0.932036614453545,0.3194380099002905,0.9757197058473077,-0.1250112729241256,0.9974947805708058,0.31223221766233356,0.9968648225507883,0.10772011087940857,0.9738474297293235,-0.05435060081226761,0.9302466743354737,-0.4930855459100964,0.8632037641581153,0.06256476784212436,0.7780730361763881,0.655913272795306
10,0.6183681633215211,-0.12378155753976353,0.7401744785300999,0.04217621001610438,0.8414678353141013,-0.2963939906157213,0.9194420294524152,-0.12779305942202387,0.9719353240488453,0.3439375153596155,0.9974912531369146,-0.10012387803902585,0.9954077521495933,0.34811192772181937,0.9657344542843688,0.17756403514764052,0.9092972390096973,-0.002597716402951101,0.8288069736600956,-0.40461895998540864,0.7230811886915083,0.11498446177100384,0.5984720204891397,0.4769077297661318
11,0.6693480653170835,-0.16333808314677709,0.7935752572598723,0.0393910973677675,0.8912040244121957,-0.2990933885011083,0.9589631769842881,-0.20534744714928563,0.9945807665708869,0.2611814223410574,0.9968612973446134,-0.11290087347168024,0.9657344542843688,0.31625472585004094,0.9022354925697829,0.16310994483492183,0.8084962368241235,0.02235773865485984,0.6886113611541567,-0.22622988015017736,0.5437691487400569,0.10814643831108015,0.3816609132200015,0.7250295622631547
12,0.7173541886666447,-0.1548719887252969,0.8414682851659445,0.07834239391218195,0.9320355974912465,-0.25240088438385544,0.985446568427954,-0.16204751289288136,0.9995709524446874,0.23253894232359504,0.9738439859193954,-0.03716586812829837,0.9092972390096973,0.36606272705327847,0.8084962368241235,0.11180584295279966,0.6754630410337846,-0.0018077423554732563,0.5162155355146849,-0.3390232473992746,0.3349859759811476,0.1280249518989127,0.14111997891143627,0.69061499472004
13,0.7621732518327039,-0.10749146273124935,0.883521164203536,0.07749851922729012,0.9635545789701722,-0.227134619825772,0.9985321440790174,-0.02301125778510732,0.9868172986056173,0.2133432880418572,0.9289562380534917,0.11573990086914566,0.8276601811721678,0.31581808850216747,0.6876585526460597,0.1473546008632466,0.5155012653443151,-0.01897728433277572,0.319680912283269,-0.47067635828495935,0.10804727488884226,0.12553141079504324,-0.10819511218233183,0.5001783422199888
14,0.8036061327442741,-0.09745270856158064,0.9194420294524152,0.16168783920818547,0.9854460416048247,-0.16499994847260543,0.9980419966437395,-0.014414062591243804,0.9565462009777684,0.17954378707502774,0.863206135791728,0.09821480307985395,0.7230857323844578,0.256515722817357,0.5437725656731565,0.1087667383838919,0.3349880809638813,-0.03137028783600982,0.10819766332341596,-0.5038099290319056,-0.12474735899242541,0.1296516865263856,-0.3507831552351123,0.6527590458580811
15,0.8414687534561404,-0.08630108281320674,0.9489815747839533,0.16658207045861523,0.9974912531369146,-0.20543791189325872,0.9839827900096002,-0.17383700811973316,0.9092950156166836,0.28688678287081637,0.7780702846820935,-0.07700252789943487,0.5984720204891397,0.1838460879138721,0.3816609132200015,0.17896140060483928,0.14111997891143627,-0.02367782774153826,-0.10834502557034269,-0.33980911606950104,-0.3507809510000156,0.11781909169268824,-0.5715612006859756,0.710230762929154
16,0.8755928979700968,-0.1683787958918241,0.9719347831520129,0.16015407573830234,0.9995698617944301,-0.23081665871083204,0.9565456686452177,-0.12726126841451088,0.8459025198570183,0.5187780000960701,0.6754606523985058,-0.04180270321099697,0.45727253218584873,0.22183060178803446,0.2067571035850584,0.14430496836230883,-0.05837413137036218,-0.024166478393756886,-0.3198213837119108,-0.37062366080128767,-0.5578029605792876,0.12137324462968649,-0.7568023389898726,0.7895574004897215
17,0.9058269595847469,-0.16902460892909563,0.9881423494987066,0.23070555133215268,0.9916610988067627,-0.21253662258257405,0.9161036588614004,-0.18924771160813464,0.7674940227936468,0.3123417814877628,0.5576816300688126,-0.10739618137720164,0.30340040470028906,0.20313360490397186,0.02492340074937531,0.2373013518183809,-0.2555410492446477,-0.020538898905008443,-0.516342547528883,-0.40619563714367873,-0.7345932345595172,0.09895087372797683,-0.8949891733679401,0.753577791336926
18,0.932036614453545,-0.11264313051297044,0.9974917863994587,0.2629510830189659,0.9738439859193954,-0.2627699299344032,0.8632065972649476,-0.14043201977455427,0.6754613894068556,0.261788679779552,0.4273782806180316,0.02639621636541322,0.14111997891143627,0.1501033399592531,-0.15774566156077083,0.24785074084622782,-0.4425203518919624,-0.024926683783861956,-0.688718974534551,-0.5470724378011553,-0.8715701156223173,0.1381452244458354,-0.9775299157556051,0.6636691642532521
19,0.9541054183585924,-0.045204647021192194,0.9999182047737185,0.2719164604831875,0.9462965458347441,-0.1634547351306061,0.7985736553563189,-0.15351085934038766,0.5714383329324836,0.004189561320598864,0.28747693635747484,-0.05182901399217172,-0.025071380616657785,0.16129599131725675,-0.3351275508135408,0.331527427016503,-0.6118577645630672,-0.023931670874707624,-0.828890156288455,-0.5077790532169782,-0.9613097477752932,0.11990477950704977,-0.999292582570792,0.5646195757955813
20,0.9719353240488453,-0.0809405340641556,0.9954047642428613,0.2647380191178602,0.909294023468162,-0.19819335130674062,0.7230835619043475,-0.20089707601163576,0.4572714140734101,0.09835143027300718,0.1411194798698312,-0.05056944898913816,-0.1905679235135467,0.2373306583850281,-0.501276945049239,0.37944749868119915,-0.7568023389898726,-0.05207385767435483,-0.9303015365019949,-0.34785399568795367,-0.9989484335773511,0.1262748821328722,-0.9589240765966958,0.666644711458042
21,0.9854471168443051,-0.04408092769410418,0.9839827900096002,0.3300936140228323,0.863206135791728,-0.1740109647779066,0.6377626560314573,-0.1449486429206976,0.33498726185861305,0.19856648052622228,-0.008407215900141561,-0.12332105335558877,-0.3507831552351123,0.2964439956478614,-0.6506250026781153,0.440761213846951,-0.8715755923890364,-0.026087242791070525,-0.9882110091902089,-0.4152052834986769,-0.9824462362087195,0.09916979794690495,-0.8589343160131047,0.7140841900347635
22,0.9945807665708869,-0.17520621354857277,0.9657315554477149,0.3073280918105685,0.8084933777444749,-0.09550176881645107,0.5437709334358083,-0.16130388679541083,0.20675659802727167,0.22916682322577894,-0.15774510372587924,-0.11325535588457732,-0.501276945049239,0.31160508293487893,-0.7781660137576312,0.5181695734129138,-0.9516018773354787,-0.02496576161388539,-0.9999106646841573,-0.38372656559318274,-0.9126975397134226,0.069321410563614,-0.7055401798405714,0.6571622130888845
23,0.9992956942623932,-0.20237368465181715,0.9407777319110218,0.3240928737356791,0.745702421119178,-0.020819083768218345,0.44238627546146486,-0.33101684243892365,0.07485570355747272,0.2902472387428093,-0.30354037660019967,-0.03915293432881033,-0.6378785777625828,0.34073297855812773,-0.8796251767444788,0.48855952261140023,-0.9936907983859309,-0.10588932533583054,-0.9648534144201797,-0.505032208657691,-0.7934825752839127,0.04385463260925596,-0.508278972513879,0.5630873754943327
24,0.9995709524446874,-0.21888182883006488,0.9092945095801384,0.2883045209049815,0.6754606523985058,-0.021852110054143656,0.33498707543309236,-0.20684100657225868,-0.0583739886352583,0.4203392932138093,-0.44251878701030367,-0.0567156707567736,-0.7568023389898726,0.36923479836852974,-0.9516018773354787,0.39499119090957796,-0.9961644030773823,-0.12032059306790124,-0.8846785733937238,-0.41042857191804605,-0.6312625407602636,0.06350738598296563,-0.27941544048547,0.4131988741508426
25,0.9954053182011033,-0.19397991955498628,0.8715003954487482,0.26384178212475035,0.5984699041166742,-0.04887413849083492,0.22303349846197512,-0.21791232901451,-0.19056745754117643,0.47338528314241196,-0.571559179478056,-0.03452577726577143,-0.854752430689124,0.4420907663062728,-0.99168366710981,0.3788586812915607,-0.9589240765966958,-0.13226479990829104,-0.7631352040144949,-0.4402535455789761,-0.4348294174318781,0.06829607344714897,-0.03317920969436771,0.6114566398864039
26,0.9868172986056173,-0.26617327000070784,0.8276576967918504,0.3019887498111387,0.5154994423807571,0.02051252259408932,0.1080476295090205,-0.25126019611708644,-0.31937807662254736,0.36060542916869276,-0.6877635849832111,0.007072230899562329,-0.9290143093822026,0.3893191043272919,-0.9985271217419511,0.32596222601754904,-0.8834544732420101,-0.1323212106755028,-0.6059068058872905,-0.41620942988372434,-0.21482946332720174,0.07912305668215713,0.2151199436546398,0.8510177964114687
27,0.9738450484996455,-0.28172645738770385,0.7780707006413696,0.26825479102086597,0.4273782806180316,0.003805571022830546,-0.008407220394670538,-0.3105331496366426,-0.44251926985122086,0.4263853202209891,-0.7885223030999569,0.06840982487466114,-0.9775299157556051,0.4345358976629172,-0.9719028686545995,0.2377992247376011,-0.7727643279409716,-0.07898493599868872,-0.4203455492142622,-0.3501661521338785,0.01681379135703285,0.04775432836021079,0.45004398082371677,0.912952339487122
28,0.9565462009777684,-0.27972446603143003,0.7230835619043475,0.2794845026334388,0.33498689634784823,-0.13453057247897232,-0.12474776842367415,-0.3608610723959134,-0.5578051017622284,0.31236710229467113,-0.8715725102422991,0.008289645460680358,-0.9989547107631299,0.4360771302910287,-0.9127032749163861,0.1961983048028075,-0.6312665074837803,-0.07449129381359164,-0.2151284793525788,-0.4388719550421398,0.2475457742649481,0.054276530604369384,0.6569864630177729,0.8329160520844267
29,0.934997611335499,-0.4054742189383086,0.6630779147810708,0.23771498030668,0.23924843374130247,0.0036556374372045397,-0.2393922857642307,-0.442323157252805,-0.6631890809908589,0.28740539118269726,-0.9350490778034435,-0.044342777893656135,-0.9926949375583151,0.4813314717631015,-0.8229125377085427,0.18962050640635372,-0.46460208344987014,-0.11848956077252819,0.00014823122521962477,-0.45191781301930845,0.4648613020352531,0.0675544974471554,0.823080709003606,0.7725790027830468
30,0.9092950156166836,-0.4866980728665873,0.598470224060917,0.24882736619302914,0.1411194798698312,-0.03285598097178348,-0.3507821022923873,-0.4352079345227285,-0.7568004884739346,0.4059779977431539,-0.9775264589233272,-0.12983629940721736,-0.9589240765966958,0.3854707214186822,-0.7055401798405714,0.23701475562867858,-0.27941544048547,-0.10000894457557132,0.21541801035036656,-0.46921270842502427,0.6569823346763795,0.05746263314673422,0.9379997830302238,0.7534117280349918
31,0.8795526052775403,-0.5118802406245294,0.5297088946790524,0.3298756452246315,0.04158050680339868,-0.07970708038017216,-0.4574027977535819,-0.6212081347141821,-0.8369775854368209,0.44960574189980224,-0.9980507031977979,-0.10746362627203652,-0.8985780378788037,0.40648842505730326,-0.5645201793506656,0.33080812329563913,-0.08308938565532534,-0.11120979473953269,0.4206146099763294,-0.35101415793754276,0.8134963203155482,0.062141636981296274,0.994598573676141,0.7848618163058243
32,0.8459025198570183,-0.503875277800938,0.45727115959489356,0.3304051297180357,-0.05837392494232528,-0.11250380088297358,-0.5578047913351813,-0.6712641792122447,-0.9022971116760534,0.4557221897227917,-0.9961608803480958,-0.29783659587158684,-0.8133292235738566,0.41081514979839323,-0.40457911407154984,0.34925274277647944,0.11654918077717834,-0.1535473624878327,0.6061428148409641,-0.5082727519552203,0.9259205333923181,0.05544743878647134,0.9893580422707821,0.537503657279199
33,0.8084942599074373,-0.5718707456920367,0.3816597675917808,0.34289784848716953,-0.15774510372587924,-0.0587276181862525,-0.6506230497027428,-0.6831305202539154,-0.9515995505001217,0.6031215802319095,-0.9718994317212093,-0.30968985455403764,-0.7055401798405714,0.3705955218316874,-0.23107774057012234,0.47130585841099726,0.31154129916430334,-0.18824706178276376,0.7633271251254613,-0.4283163282445873,0.9881618203652709,0.04719771911842727,0.9226040196748309,0.6851307073815349
34,0.7674940227936468,-0.6570170423410816,0.30339949398595134,0.28177756492025946,-0.25554014557660326,-0.11992769323395744,-0.7345956455558522,-0.754918527233537,-0.9840097118389171,0.565922120086364,-0.9258112171487184,-0.3343666831947572,-0.5781981223170799,0.30490096742081396,-0.04983131727135926,0.514049021717953,0.4941132490791689,-0.1835900460139457,0.8848174322229501,-0.20483100072709196,0.9968468356911052,0.07667635942183935,0.7984869476954495,0.5933998351597874
35,0.7230839643116026,-0.5970227670000505,0.22303349846197512,0.20773962247789202,-0.35078191476306736,-0.07115556207879387,-0.8085809143873596,-0.6653991044461588,-0.998952268141697,0.6600930182805015,-0.8589312785696256,-0.4456846749801294,-0.4348321498101907,0.23355873290347698,0.13308530599033094,0.5879046966935789,0.6569864630177729,-0.21249674461609228,0.9649327177784034,-0.4052416724537488,0.9515048699875486,0.09412632624342786,0.6247238247170469,0.5955038742638565
36,0.6754613894068556,-0.5874007583826399,0.14111955531283146,0.1984309501338832,-0.44251878701030367,-0.09239052406983225,-0.8715729761882137,-0.7439469909747064,-0.9961619672787466,0.5868257897774732,-0.7727615952198251,-0.4000373223836527,-0.27941544048547,0.2908924926809972,0.31154129916430334,0.5525185181834922,0.7936676999165313,-0.20899180115099678,0.9999267042604486,-0.3533431748471735,0.8545933614820114,0.13726763976756845,0.41211840011841105,0.3259218243065448
37,0.6248378755884137,-0.641973589033756,0.05822618224837239,0.15265504605667682,-0.529834157814996,-0.10654507746151437,-0.9227154110697628,-0.7944490955728652,-0.9756883411528704,0.45438117266138056,-0.6692373524165389,-0.34910955372547253,-0.11625514104764849,0.2154553173251945,0.4795553394699432,0.4686390936839719,0.8987079101828758,-0.2125481999767755,0.9881630349566065,-0.2863188338367578,0.711364708079664,0.1516748759610729,0.17388944946344542,0.09198360175948733
38,0.5714383329324836,-0.7567854515571073,-0.025071305360114156,0.19249117543385022,-0.6118556008546875,-0.1374556969245217,-0.9613129028741557,-0.7102147799338546,-0.9378948264360029,0.5211912634973803,-0.5506834814681028,-0.3846597488494382,0.0501269995284057,0.233356830350503,0.631496087836649,0.5642277889877553,0.9679194721070383,-0.20313336809244087,0.9301917917856273,-0.30002358958127845,0.52958159855392,0.16929565344098835,-0.07515110493929571,0.14068339267663424
39,0.515500004852297,-0.7454765099093049,-0.10819478741401113,0.27649590443690614,-0.6877635849832111,-0.10322373659505071,-0.9868406924700296,-0.8163235756917707,-0.883452313039142,0.31022470103545724,-0.4197624467290389,-0.5015428042448056,0.2151199436546398,0.1476638485572891,0.7622709349138133,0.44902404966719955,0.9985431391248171,-0.1653392729075555,0.8287237728711316,-0.3674563381664206,0.31909629130413975,0.1938063426842304,-0.31951912762537305,0.12600304941546492
40,0.4572714140734101,-0.8038221537316022,-0.19056735148747966,0.28907656980120305,-0.7567996627149973,-0.1120335910183939,-0.9989517122095232,-0.8412669726308702,-0.8133272348396607,0.16671750102521368,-0.279414452390581,-0.4728183709223925,0.374151153290036,0.08204501595154011,0.867496690436754,0.6424191247461458,0.9893580422707821,-0.19865662476453644,0.6885037326850879,-0.3639223267114612,0.09131664287817737,0.17752479330468524,-0.5440209985214498,0.26972662657026014
41,0.39701125849728375,-0.8749078094902892,-0.27161729684580566,0.2356039762795395,-0.8182740483816434,-0.16868715913709328,-0.9974813045488092,-0.863353526743569,-0.728764414072919,0.22850150327964341,-0.13279141183324866,-0.48377933399166423,0.5228133111422222,0.13187538928241327,0.9436464949404428,0.7269314848359838,0.940730362371255,-0.15130648817056905,0.5160885121893056,-0.35600327945060084,-0.1414121736203554,0.17678174739765315,-0.734698278652349,0.4408443177930563
42,0.33498726185861305,-0.8169439520381667,-0.3507821022923873,0.14639555889422715,-0.8715725102422991,-0.2548831662329098,-0.9824494606793018,-0.9463245050196543,-0.6312649639252141,0.32835981179907586,0.016813837552559784,-0.3758232426917584,0.6569864630177729,0.0976800310457925,0.9881680297701986,0.7474350132141053,0.8545987315703112,-0.16644296499199246,0.3195404338498622,-0.3751700489450812,-0.36647675069115965,0.16465222964561166,-0.8796955782699294,0.13757903845252992
43,0.2714749842826905,-0.8080333618750497,-0.42751233030580743,0.14740883584282755,-0.9161625076841808,-0.13782391428429158,-0.9540605487338251,-0.9999845285483123,-0.522559641589201,0.18175937979285645,0.1660414843954216,-0.5216809293294343,0.772952196324663,0.16751228842831362,0.9995690633356505,0.6020398052628806,0.7343969461839074,-0.15962455021469577,0.10805029870568993,-0.378447686595194,-0.5716790663308193,0.17880963468253838,-0.9699976675669778,0.36154973153002856
44,0.20675659802727167,-0.8220823849154185,-0.5012754403706232,0.13297748318130478,-0.9515985121923437,-0.0808684049336967,-0.912700535262959,-0.9269805064194142,-0.4045781248038636,0.08372887781588584,0.3115401974629625,-0.5656101908220632,0.867496690436754,0.13875940878369386,0.9774674663478088,0.6692735191346982,0.5849170720767289,-0.157752449835368,-0.10849238544324119,-0.23314518141875534,-0.7458975909148143,0.1598202578634686,-0.9999900000020655,0.5194601110964426
45,0.14111963384806014,-0.9353943783477018,-0.5715594850357251,0.11464645577439447,-0.9775264589233272,-0.06356612301941582,-0.8589317377574907,-0.8756233225692146,-0.2794147572651636,0.0991667701525068,0.45004238933630103,-0.6461607434263958,0.9379997830302238,0.1678984280049077,0.9226040196748309,0.721759764022878,0.41211840011841105,-0.23127035608467914,-0.31996184813270867,-0.29943401784026696,-0.8796900504792157,0.18010852347007422,-0.9678077976098799,0.6114857677100735
46,0.07485570355747272,-0.8491012008193805,-0.6378766630480424,0.12220632742832672,-0.9936872844040355,-0.08987481934800527,-0.7934851795599813,-0.766908416393876,-0.14929137097622078,0.15237122391952754,0.5784375993789701,-0.6117991310087655,0.9825075840487404,0.1793407514530167,0.8368175855346323,0.8065341132813423,0.22288986806218858,-0.2050192454234533,-0.5164695482291165,-0.5497977656103833,-0.965805177912614,0.2083546025546788,-0.8754519938632033,0.5260905450315775
47,0.00825920445208658,-0.9131232777065567,-0.6997667049013352,0.1119181648525825,-0.9999195150083239,-0.16855399941805094,-0.7172506511056325,-0.6890468403636137,-0.016517845494420102,0.20637745713656913,0.6938423479770602,-0.5324137632326444,0.9997866249075904,0.13277955860697319,0.7229834742133202,0.7634180682540304,0.024775420335977266,-0.23869510222548215,-0.688826572823913,-0.5699829507821744,-0.9995757162306776,0.17497996963546517,-0.7286648253209377,0.57118989504468
48,-0.0583739886352583,-0.7656640341842935,-0.7568000673029196,0.08237671474894245,-0.9961608803480958,-0.17849496304311105,-0.6312646126166332,-0.5787484152767336,0.11654889579376129,0.24254626136297167,0.7936648932749352,-0.4746627991978376,0.9893580422707821,0.16599498765525916,0.5849170720767289,0.6575433639089356,-0.17432674521566777,-0.25279639610980154,-0.8289733207543877,-0.6048477471997559,-0.979171373990692,0.1843099772803257,-0.5365728071709441,0.7022343562033321
49,-0.12474783784774864,-0.6523912637263265,-0.8085809143873596,0.11289031949066372,-0.9824489354584368,-0.09422067788346132,-0.5366961027210728,-0.5230271726105937,0.24754672449437742,0.22885376178056327,0.8756634361098425,-0.5559903758498261,0.9515108490477658,0.0878741419329197,0.4272459609821542,0.7658523118797833,-0.36647905355542204,-0.2815461084215164,-0.9303563782839893,-0.40926665882610663,-0.905698023199917,0.22555259532572558,-0.3111192907192189,0.7834427676018797
50,-0.19056745754117643,-0.5079685781093141,-0.8547498649862385,0.20540138952201537,-0.9589206855601181,-0.15812036703627605,-0.4348308445783347,-0.47668282983117616,0.3741502384241103,0.06603701772245002,0.9379964659880807,-0.5871835792546191,0.88729392482351,0.15617929072057038,0.255254815273456,0.6429011797594453,-0.5440209985214498,-0.28464213620892975,-0.9882349638271357,-0.48978539468110077,-0.7831377634018479,0.2616275804165923,-0.06632188365236895,0.6870332826712016
51,-0.255540424401462,-0.5361299856209372,-0.8949864868867765,0.18584982467761785,-0.9258112171487184,-0.16292832397479162,-0.32705376559898175,-0.5448814701734853,0.4941120408846437,0.06728394139525266,0.9792641174406228,-0.5362334822119769,0.7984869476954495,0.09426598510640612,0.07470827495848803,0.6663392009191567,-0.699874543033963,-0.2936110117135072,-0.9999026120312465,-0.6774045581131184,-0.6181331003395811,0.2872425238055302,0.18259909691516205,0.4485153982970888
52,-0.31937807662254736,-0.5837206065186612,-0.9290115207680862,0.16796268978505094,-0.8834513490880687,-0.23302537835877535,-0.21483016841543884,-0.5302503604278089,0.6053026352261208,-0.00728647100318081,0.9985396079836227,-0.46815864645736405,0.6875510730989038,0.10402492769864827,-0.10834226815830302,0.7271120776213198,-0.8278262980975494,-0.2892939797126546,-0.9648137310282042,-0.6967073712994301,-0.4196269362543102,0.3154831910833612,0.4201669500408618,0.3806559172429574
53,-0.38179679637265995,-0.4690412138964364,-0.9565888183799907,0.08968142782436918,-0.8322643271775211,-0.08131980340344482,-0.09968580909690142,-0.5341209668481497,0.7057482265130745,-0.09469785320628528,0.9953900516549766,-0.3532490025955926,0.557560726485169,0.11859388455322865,-0.28776149542454066,0.7443057254586575,-0.9227752310129332,-0.28029118932997565,-0.8846091149010771,-0.7283693753040252,-0.19837788358002445,0.27551833230971645,0.6316108572585717,0.20448297267031548
54,-0.44251926985122086,-0.5433102151684175,-0.977526981512618,0.09669995568007064,-0.7727615952198251,-0.09581167064225259,0.016813846541300047,-0.5009801804741575,0.7936657592582477,-0.21224328831597916,0.9698861806997854,-0.46656852911409236,0.41211840011841105,0.12429039573954802,-0.4575357992709801,0.6226110276352584,-0.9809360274534649,-0.19516560115450038,-0.7630392183755283,-0.561590188820998,0.03362282899735623,0.27781381119351617,0.8037842605294165,0.550661136721583
55,-0.5012757193382068,-0.6879107881674135,-0.9916806903816313,0.07749476779922461,-0.7055376848437148,-0.07648551692613569,0.1330849065093335,-0.6115855993325966,0.867494569253495,-0.22830337562144812,0.9226007570766193,-0.44038436629766664,0.255254815273456,0.10829849131601803,-0.6119748417431544,0.6694395020494619,-0.9999900000020655,-0.20664153590721213,-0.6057887814944514,-0.5784987804587369,0.2638012556634479,0.29363231381560684,0.9259822515463416,0.6253838386849155
56,-0.5578051017622284,-0.737157203994413,-0.9989517122095232,0.07015202763956879,-0.6312642751403974,-0.08733306588028837,0.24754658673084953,-0.5118915959373198,0.9259240876317014,-0.1431742304848978,0.8545957094587294,-0.3940183651788089,0.09131721669311393,0.1545065494414948,-0.7459022779813261,0.7529089307136472,-0.9791775269015103,-0.20976449155106006,-0.4202110050159997,-0.47490539140551946,0.4796822146294197,0.2651286078509319,0.9906071510842663,0.619054219511175
57,-0.6118562684623196,-0.7595854393989256,-0.9992895830029984,0.08248116894654765,-0.5506834814681028,-0.12808440657889103,0.3586427028767836,-0.49823055072918093,0.967917105372268,-0.13082531556713486,0.7673982846054301,-0.31303080377335313,-0.07515110493929571,0.1464576413375254,-0.8548292521556993,0.8010841156336324,-0.9193283357767612,-0.1488037194499353,-0.21498370678133918,-0.4325140927773877,0.669565416485643,0.26455147785133115,0.9936408958955364,0.2937997370244784
58,-0.6631890809908589,-0.8016739474932879,-0.9926919577946147,0.10292765215750962,-0.46460044048073684,-0.10853460481389234,0.46486282774886273,-0.46959222073148593,0.9927281852713374,-0.15759295293669015,0.6629667486924334,-0.34067671576111597,-0.2395367236538371,0.08824047786416422,-0.9351048507807446,0.8564878506137047,-0.8228284250129188,-0.19882595977122058,0.00029646244719124513,-0.26201860140331085,0.823159595034361,0.2807426517251061,0.9348948624214914,0.6207361817318204
59,-0.7115754780003251,-0.892101863563142,-0.9792046269178578,0.07480715224332651,-0.37387526546857375,-0.1448246642811204,0.5647628265638364,-0.5738305893618623,0.9999168945374642,-0.17281667045546822,0.5436464088753101,-0.2598833493393143,-0.39728393254468647,0.09548814630460115,-0.9840384706537116,0.9035027171857681,-0.6935249415290577,-0.22515978877574455,0.21556276877057062,-0.2922873605552811,0.932140272057291,0.26229757364983214,0.8180215944917583,0.49439333443520733
60,-0.7568004884739346,-0.9223558574338026,-0.9589211982026931,0.1390456376724821,-0.279414452390581,-0.15344837536581027,0.6569844909472605,-0.5723420883706045,0.9893556231149057,-0.25353585439795173,0.41211694274696403,-0.26644788218508997,-0.5440209985214498,0.1844627175706323,-0.9999900000020655,0.9999859918073343,-0.5365728071709441,-0.24113082325426516,0.42074912653423857,-0.059486813387637213,0.9906009263525001,0.2670904167817131,0.6502877058397336,0.5703964095999233
61,-0.7986631867985713,-0.9999849682235472,-0.9319824472972914,0.20374538720638635,-0.18216182246643972,-0.18739900291242914,0.7402740046993728,-0.5710917948522494,0.9612318489479014,-0.07953656249269177,0.27133221855593354,-0.16241566092230295,-0.6756813184953772,0.27645788006987887,-0.9824247902316056,0.8931276414540231,-0.35822920824433374,-0.277264690711573,0.6062607993966257,-0.13053433186011273,0.9953731146371415,0.28317207149668555,0.4421220772559133,0.484002250842023
62,-0.8369775854368209,-0.9598910011075292,-0.898575340624977,0.22251967041904414,-0.0830890918269057,-0.265507180797345,0.8134989902783991,-0.5083513557642414,0.9160448099742079,-0.2393302887972379,0.12445395769369237,-0.13668106563171853,-0.7886161193340099,0.18319536095878391,-0.9319315757831196,0.8289133436196935,-0.16560414124265752,-0.21421819476742096,0.7634230605932549,-0.17251690228979205,0.9461981944430928,0.34337221151748365,0.20646743929180175,0.4394432788193297
63,-0.8715734612321387,-0.8884657669889602,-0.8589317377574907,0.2723682098589931,0.016813837552559784,-0.3369410542934259,0.8756639042427814,-0.42500634209721366,0.8545966419248491,-0.2695508352166868,-0.025219270751534132,-0.17602685563477913,-0.8796955782699294,0.20739027680012664,-0.8502027414763919,0.6798903833052802,0.03362304027627407,-0.26754277458807085,0.884886832556487,-0.19777028461354473,0.8457413419893308,0.35231780791195405,-0.042024344038682965,0.33807427782072297
64,-0.9022971116760534,-0.9172086712685874,-0.8133267822106491,0.25032787754789093,0.11654876862511672,-0.34945206579421517,0.9259235723408239,-0.3885734963687396,0.7779781392470327,-0.4672202141037717,-0.17432612874520476,-0.07116184641755208,-0.9463955613594388,0.2258725917529856,-0.7399775987224757,0.7729473647357156,0.23150977728303157,-0.29361042732436055,0.9649723377429144,-0.19015285267461804,0.6994471052921291,0.3417889450074968,-0.287903257198445,0.452636910748443
65,-0.9290120377774551,-0.8462840309848875,-0.76207699183097,0.308732779697949,0.2151191829274714,-0.3867928470852793,0.9635946802290106,-0.4037150173161997,0.6875493919145926,-0.39617490578450215,-0.31951799771209266,-0.13658811391163736,-0.9868675761616822,0.2291101809632378,-0.604950571819621,0.7449234440554018,0.4201669500408618,-0.2838498939952181,0.9999346911834778,0.013841376496749472,0.5152443212276875,0.36835334100302025,-0.515881740262373,0.1350075527187752
66,-0.9515995505001217,-0.8651875291325607,-0.7055380620267925,0.31520135640988595,0.3115401974629625,-0.4365306973335728,0.9881650635948778,-0.47812892700730086,0.5849156418507524,-0.3696189686974637,-0.45753418129029866,-0.05197396824476862,-0.9999900000020655,0.2294308591472208,-0.4496473716596205,0.8320115228485158,0.5920733924140472,-0.30168937090402514,0.988139015360982,0.17694679792553278,0.3031163894155759,0.3750061086179097,-0.7117851953493901,0.019805396154623078
67,-0.9699592980742013,-0.7905302867925699,-0.6441023970904011,0.272737524050207,0.4048484053269021,-0.387641166255108,0.999300671378899,-0.41345830409903694,0.47189878754687226,-0.49107510232691576,-0.5852751335418936,-0.1554769899161486,-0.9853991641036104,0.18400428823287104,-0.27927330713963194,0.9034502025459354,0.7403757370273192,-0.25136271823692763,0.9301368888536573,0.17049841977321042,0.07456019316573098,0.391071387768644,-0.8634332944651512,0.01774510522033419
68,-0.9840097118389171,-0.7424247203880844,-0.5781963867448399,0.2846552841198958,0.49411150174990476,-0.41465459823078377,0.9968501074256576,-0.34163545768527503,0.3505050427116589,-0.4291470767505771,-0.6998720680724586,-0.3130809124442334,-0.9434994321350132,0.2297506389497506,-0.09953881843717356,0.8922517889161251,0.859161637396055,-0.24732615339055192,0.8286405539233858,0.2067636894133297,-0.15803700717570207,0.3935374949638586,-0.9613972933022695,-0.1495169370454631
69,-0.9936883686356974,-0.7019699690877867,-0.5082774468173386,0.3729945962360282,0.5784375993789701,-0.3448903312591691,0.9808466888174643,-0.31285151635137604,0.22288932305693154,-0.5383785357928811,-0.7987513847887721,-0.35646260268353464,-0.8754519938632033,0.19396347146167583,0.0835319202375313,0.8239925806407276,0.9436954745231406,-0.277690372190899,0.6883960891297026,0.37251767526876245,-0.38206893742455966,0.3919272085482479,-0.999586264893971,0.044861376551148316
70,-0.998952268141697,-0.8062979117986191,-0.4348308445783347,0.40834873588936654,0.6569841397210892,-0.3415318887526639,0.9515079929059833,-0.3194786169314604,0.09131699340632397,-0.48495199343672557,-0.8796924674085422,-0.33746635269403114,-0.7831426844779176,0.06629353420159725,0.2638029133360913,0.757779297371044,0.9906071510842663,-0.33757335207111727,0.5159614775555277,0.4446278617880872,-0.5853935428416657,0.3574890499293087,-0.9756258039519613,0.019972445485279395
71,-0.9997780239115723,-0.778197921004047,-0.3583663307796047,0.4559064603262504,0.7289663117091982,-0.3793337552717201,0.9092328992002714,-0.1880336080351496,-0.04187634582854418,-0.6112507620734513,-0.9408775537147501,-0.3179668752937279,-0.6691297214286311,0.06852414569757703,0.4352320045905824,0.72523702201256,0.998026446573297,-0.28007330649650575,0.3193999484147674,0.510950121467294,-0.7569910617080028,0.37621384765734733,-0.8910056558870083,-0.14238963619029532
72,-0.9961619672787466,-0.9280684245355669,-0.2794146017665894,0.4234250495276733,0.7936648932749352,-0.37614049911379,0.854596166328783,-0.1932051849796462,-0.1743263189558588,-0.530060574042246,-0.9809325585761778,-0.35373004451927864,-0.5365728071709441,0.09038972717837572,0.5920733924140472,0.6977683938457208,0.9656575770920255,-0.25267224577515895,0.10790293172039359,0.5916541590372538,-0.8875612730020958,0.3419752137019912,-0.750987091654764,-0.16262461833939862
73,-0.9881201636545577,-0.8779767350494935,-0.19852361621935746,0.4720849196708154,0.8504334375778019,-0.4509081426629696,0.7883406178052164,-0.09797750493963203,-0.30368174611450327,-0.6162949652890368,-0.9989579329371041,-0.4977091444121057,-0.38914556263605904,0.06871000288420166,0.7290702123591435,0.7339339716780069,0.8947909873207962,-0.27799084295669496,-0.10863974293888247,0.49847557390822966,-0.9700275486242032,0.34331186651522505,-0.5642757874102943,-0.28292226175235763
74,-0.9756883411528704,-0.9430808572785799,-0.11625479208560974,0.5190028559175055,0.8987047320883108,-0.4276770455650107,0.7113670428380355,-0.12513417626404552,-0.42764638013975387,-0.5452099206464335,-0.9945488657497844,-0.49925061928234266,-0.2309337182016461,0.06337581469053871,0.841630731834346,0.7198858553708735,0.7882519045613308,-0.23993438371903913,-0.3201023055425866,0.4059193139990852,-0.9999203916074106,0.33348647306255896,-0.3424805477300144,-0.4536670191773511
75,-0.9589217318572629,-0.947150808252016,-0.03317911010062455,0.5257381824854882,0.9379964659880807,-0.3733158874090307,0.6247219494890134,-0.06285620426985472,-0.544019668293626,-0.46069082187063426,-0.9678043751578611,-0.5834440554866966,-0.06632188365236895,-0.024341078302173667,0.9259822515463416,0.7686964196077406,0.6502877058397336,-0.2399726021719177,-0.516596537612602,0.5495674803109192,-0.9756196733592968,0.3280989487164913,-0.09939152636945847,-0.39967895046742047
76,-0.9378948264360029,-0.9615484538168197,0.050126849062630215,0.5780227339219672,0.9679160492601058,-0.37047700950867873,0.529583336684947,-0.07418153782307127,-0.6507358152798379,-0.5493408059339998,-0.9193250847623279,-0.4434953792855477,0.10012796486507118,-0.11862277238300427,0.9792975553425121,0.7674734166511675,0.486398588387829,-0.18831443220412133,-0.6889341560198833,0.35237052813961556,-0.8984424411902245,0.36771649349579816,0.14987717870571562,-0.5151568473794634
77,-0.9127010431950106,-0.8781926856622723,0.1330849065093335,0.5214848949424539,0.9881645353184317,-0.3824804670525291,0.4272446785216949,-0.18986589953287736,-0.7459004541179705,-0.5303012517473864,-0.8501997349103194,-0.34214432154232277,0.2638029133360913,-0.09154432069929787,0.9997896702247545,0.6976740628030432,0.30311829413640534,-0.20149498884189565,-0.8290564670560703,0.5416791807883498,-0.7725715371467465,0.38671301287958887,0.38982724672728647,-0.4619710991241565
78,-0.883452313039142,-0.8431511050236952,0.2151192979309889,0.5325098391558969,0.9985396079836227,-0.42720581310209205,0.31909733860481265,-0.22589730797969368,-0.8278242739154351,-0.6818218268856214,-0.761980731934101,-0.33401798741842376,0.4201669500408618,-0.13243847890535898,0.986771760456212,0.6959372399978908,0.10775363004285596,-0.2836021594164605,-0.9304111996802535,0.7351743158521509,-0.6048288968577106,0.3777973083944079,0.6055397446449408,-0.5649667779054948
79,-0.8502785821857195,-0.8794082156242914,0.2956606696553515,0.5759029252768927,0.9989376029591515,-0.3974901586357941,0.20661165351579477,-0.03544159996752629,-0.8950530075953472,-0.5036579579440729,-0.6566492844516307,-0.3078047161324786,0.5648866744823758,-0.20473202957686668,0.9406801482856912,0.6172316303541268,-0.09190683124426098,-0.3131735607923452,-0.9882588968101124,0.7887774856055605,-0.4043058151140909,0.37739034679628153,0.7836027141246634,-0.7222382071812826
80,-0.8133272348396607,-0.9466192517053802,0.3741500302037959,0.6612191417014436,0.9893545436107821,-0.33077080534661024,0.09131694258702432,-0.020514579751780816,-0.9463932472544476,-0.4517852488346605,-0.5365709096922384,-0.36775879850469073,0.6939513912409081,-0.12121460550769086,0.8630596897024333,0.5939620878189663,-0.287903257198445,-0.3202147531714932,-0.9998945374687279,0.9248994997675027,-0.18187021700270053,0.39381668176586654,0.9129450621581827,-0.6628229442670751
81,-0.7727624383951397,-0.875678058455289,0.45004262993064087,0.6639079653456622,0.9698861806997854,-0.3221248359771876,-0.025219284233850333,0.11540229742839081,-0.9809336288909083,-0.37402552261520006,-0.4044423090794041,-0.2689933054629357,0.8037842605294165,-0.0765331394426572,0.7565119953831572,0.535603105098267,-0.4724218888193926,-0.38075274327629394,-0.9647740264954793,0.9999784543319578,0.05042236054826496,0.4112977761867202,0.9855249080042565,-0.5094383302468176
82,-0.728764414072919,-0.8275759991429337,0.5228117418180848,0.6869923458663679,0.940727035672983,-0.4374358957254711,-0.14141263774691729,0.03413965001647246,-0.9980610104422466,-0.48142853475520947,-0.2632308061305301,-0.24362985607901788,0.8913414250748817,-0.06600746282446375,0.6246082323137356,0.5024056217947194,-0.6381065505465904,-0.31923092809904063,-0.8845396370251033,0.8434089512810761,0.2799821532628256,0.4070156862890952,0.9968295883829464,-0.5403481224155495
83,-0.6815286362337613,-0.7861775773179335,0.591952317186408,0.6710819677626526,0.9021684570687498,-0.3772177842281423,-0.25568339034789045,0.04692166156020406,-0.9974713559496301,-0.48598219300931245,-0.116107706767913,-0.2318046707151435,0.9541963661690775,-0.11564387066221349,0.4717694287200295,0.5020528382894593,-0.7783519177651622,-0.30766652689408064,-0.7629432160170414,0.8995716313873495,0.494367507998244,0.44256761541005585,0.9461562330216352,-0.5087795714547242
84,-0.6312649639252141,-0.8107015905342001,0.6569844909472605,0.6159079203184137,0.8545957094587294,-0.430462980918681,-0.3664779534984565,0.12559124889303322,-0.9791751326387994,-0.42117755368988674,0.03362292137534937,-0.11737796411506829,0.9906071510842663,-0.030959243145450097,0.30311829413640534,0.43754885410695904,-0.8875668502539472,-0.26929354229952496,-0.6056707438277239,0.8874335313480314,0.6819591939172774,0.44080916992685065,0.836655465724281,-0.6368561265866148
85,-0.5781967085201272,-0.8919667740873336,0.7174569120720905,0.5255859337220001,0.7984841240115808,-0.4067153770764977,-0.4722899998182455,0.20882621818384256,-0.9434971251115717,-0.36892699014668373,0.18259845119123896,-0.21696472406809691,0.9995647081900849,0.044042576619157114,0.12430752116970865,0.46587328974256853,-0.9613972933022695,-0.22220524599131877,-0.42007645161018037,0.8020274006821616,0.8325901401184119,0.47125164235719597,0.6751355138430856,-0.7993619464243936
86,-0.522559641589201,-0.8229889188356848,0.7729498761608247,0.5700994531527581,0.7343943491410676,-0.462469314396966,-0.5716809426292507,0.2486268808342447,-0.8910706699462959,-0.2873694764255082,0.3274732134522769,-0.22515345819534727,0.9808207918973785,0.1491175050987566,-0.058669676218775134,0.4830530485080047,-0.9968998601312286,-0.28612055729888347,-0.21483892949943206,0.8319113610335723,0.9380964692822186,0.4526235157853081,0.47163890567684835,-0.6835371370159699
87,-0.4646009474153787,-0.7894232831721396,0.8230782383694502,0.5672688287631938,0.6629667486924334,-0.46190761877517217,-0.6632994945193492,0.2655283113857519,-0.8228264130514906,-0.3408925469024966,0.4649936333295361,-0.21888157927109173,0.9348948624214914,0.12049007387637291,-0.2396804377108924,0.429506598652466,-0.9926591754361821,-0.27713928784391306,0.00044469366266863564,0.7895762950239942,0.9927599624806462,0.4634215424988555,0.2388180747015414,-0.5609635561002569
88,-0.4045781248038636,-0.7406680408169154,0.8674940864795357,0.5814089080465407,0.5849150036385338,-0.42097209226501914,-0.7459000390129907,0.32094199078119523,-0.7399757893460115,-0.4076867242690937,0.592071298669006,-0.34209972978057657,0.8630596897024333,0.1595461054722756,-0.41265781216651864,0.3631397371547436,-0.9488443019336659,-0.331661605892851,0.21570752246741887,0.8211112334544407,0.9936179744965047,0.459257807974731,-0.008851307462160092,-0.3710523911710543
89,-0.34275784306678664,-0.7342108243119485,0.9058891555574474,0.6334505664108888,0.5010189812154733,-0.4470089680604411,-0.818359565573682,0.21438396075449748,-0.6439895180254736,-0.7134897791537034,0.7058523190703563,-0.3603408225143748,0.7673060804482136,0.14162731624831118,-0.5718041040756225,0.3864498562828788,-0.867202000364398,-0.3477099956124639,0.42088363387280303,0.7311778467875832,0.9406240029010927,0.4817804827371011,-0.25597035819847547,-0.3139185935873256
90,-0.2794147572651636,-0.6558904579331776,0.9379969674444899,0.5328528376141131,0.41211694274696403,-0.33139339334838114,-0.8796929376954162,0.14174968205872077,-0.5365714951552691,-0.7764642960286903,0.8037814181126971,-0.33877436880290074,0.6502877058397336,0.1794806568109022,-0.7117851953493901,0.32303323334507983,-0.750987091654764,-0.3274887059502481,0.606378770668057,0.7404066651013402,0.8366502083870517,0.47846870143564546,-0.48717441183429194,-0.5404137236230665
91,-0.2148302879717708,-0.6399666399105483,0.9635946802290106,0.4960753350103633,0.3190971680142765,-0.32555246402093396,-0.9290662865029509,0.12581955719791166,-0.4196285470346411,-0.6783660054961869,0.8836593193541478,-0.4151859315277886,0.5152475589166583,0.19797059288768226,-0.8279093293524753,0.295602129012532,-0.6048326974776649,-0.36359038068922617,0.763518979333118,0.8190597400629135,0.6873317497597663,0.4946449355570361,-0.6880883201331702,-0.5255359112716173
92,-0.14929137097622078,-0.7799498270681086,0.9825046348643299,0.5036351783871034,0.22288907985829023,-0.22107480496546525,-0.9658083477658534,0.13637375874223193,-0.2952365812536673,-0.5312669428331785,0.9436921373393631,-0.3976089695703728,0.3659280797702288,0.16702785001741455,-0.916284364858694,0.2669391049597387,-0.43456553231208034,-0.4196193616200738,0.8849562135006114,0.7680236500167861,0.5007613703001077,0.4746560525700237,-0.846220229387787,-0.2808499407557628
93,-0.08308918248704147,-0.7613553603103683,0.9945955881983128,0.4848653043361044,0.12445395769369237,-0.1641379805770885,-0.989419588191898,0.2918964311174606,-0.1656037363111622,-0.3881925204227681,0.9825316643994714,-0.371683679907887,0.20646743929180175,0.1702820149039759,-0.9739482292376488,0.3385864327343184,-0.24697361072404783,-0.46502723677901225,0.9650119365631997,0.7701864651636376,0.2870507882689059,0.47361161187332473,-0.9517382633801457,-0.09320757521665134
94,-0.016517845494420102,-0.7103682682417162,0.9997836238568343,0.43450804979377927,0.02477533272282974,-0.23318807844486225,-0.9995789969216438,0.32393187414319224,-0.03303118394141441,-0.46235654813888505,0.9993056484897552,-0.43685309978089204,0.04128485582125012,0.21014115703358302,-0.9989681984631305,0.36734121788926627,-0.049535630646748055,-0.47108081024093806,0.9999426561961957,0.8107737492950738,0.057782663256164456,0.4612057864051722,-0.9980818218248938,-0.2042300013568377
95,0.05012687695900181,-0.6297206702405063,0.998032734659214,0.429728085759554,-0.07515083918296325,-0.23914062441552567,-0.9961484498892272,0.23135799825122866,0.10012772003443934,-0.6009551739092783,0.9936373820901109,-0.3655226032209624,-0.12504187702344943,0.12559085311178117,-0.9905056763678888,0.4179766524517116,0.14987717870571562,-0.5014010759651129,0.9881149741135105,0.9982583044955786,-0.17461715936527153,0.48894107935679826,-0.9823694867193146,-0.2558447481716523
96,0.11654889579376129,-0.6314026488541591,0.9893550725234111,0.4925770360212569,-0.17432612874520476,-0.26703367167051356,-0.9791745877129031,0.20669740843472967,0.23150921120056775,-0.577513611899127,0.9656541622438166,-0.30350736069204554,-0.287903257198445,0.19217011893962613,-0.9488443019336659,0.42390191064619953,0.34331485790796995,-0.5182197396634947,0.9300819655407683,0.9677874842080605,-0.3975531028643983,0.49568907231733483,-0.9055781749588148,-0.43771952908559814
97,0.18245311135771747,-0.6063894714071458,0.9738108641265286,0.5010131014721029,-0.271759609253581,-0.25223212913927967,-0.9488881815841421,0.1999468215579197,0.3587810768062985,-0.5427278489264217,0.9159844317403779,-0.4506542315368754,-0.44278581983888515,0.16699289815192386,-0.8753804425423758,0.42466899173299805,0.5230656571181169,-0.5613610581695603,0.8285573168186818,0.8650850196530041,-0.5989425126558304,0.5084380861008372,-0.7724823983759875,-0.22685582942110713
98,0.24754672449437742,-0.4018892111146257,0.9515079929059833,0.4919528395938047,-0.3664777575780805,-0.23478092924399757,-0.9057009957764558,0.3201551013647192,0.4796840559379955,-0.6296642395371287,0.8457436656453174,-0.34914976938780695,-0.5853972213338666,0.1771608712959575,-0.7725763918268311,0.3411724013681728,0.6819634792080992,-0.5571775305557305,0.6882884304903586,0.8586155870769133,-0.7678705105127815,0.5360209029070137,-0.5913574077198357,0.0009586064712107799
99,0.3115405373905749,-0.4417767598641747,0.922601250302425,0.42332809082026224,-0.45753418129029866,-0.34142732078644444,-0.8502001894302686,0.11695991801382473,0.5920719446895983,-0.6625706415000948,0.7565093201350219,-0.2816807122396853,-0.7117851953493901,0.13964120462995278,-0.643877840792121,0.3620968971678646,0.8136735694422572,-0.6149640392051635,0.5158344316161346,0.8603158607651058,-0.895181557781991,0.5626341497985637,-0.3734646776447228,0.09401528058680114
100,0.3741502384241103,-0.5508682929275641,0.8872912614411065,0.39452838045875227,-0.5440190747037565,-0.35240473320554294,-0.7831403337253802,0.1493184417618101,0.6939496944066839,-0.5821538025409698,0.6502854062318185,-0.30013733960158423,-0.818447084107124,0.13438882311989533,-0.49359838833422687,0.3563992549116834,0.9129450621581827,-0.6824948349357929,0.3192594559810616,0.8939793545311113,-0.9739756661308704,0.5333031892976254,-0.13235172276043156,0.18587987972401324
101,0.4350976653030245,-0.5511055264367002,0.8458230927079283,0.43684924631627825,-0.625068309351572,-0.42689864375331105,-0.7054331529729766,0.31536987620727547,0.7835088267047319,-0.6063018626350686,0.5294574840368029,-0.1435975227232545,-0.9024269094199129,0.05424853684096489,-0.32677496202536344,0.24072570156591916,0.9758203162106026,-0.6485945672032111,0.10775556237075606,0.7104291140121207,-0.9999823622584344,0.540290966348219,0.11699022120995153,0.10086022028810289
102,0.4941120408846437,-0.5916740798357942,0.7984845508841715,0.44605186233877325,-0.6998720680724586,-0.343472628976518,-0.6181351291040311,0.2628361516210052,0.8591595365934789,-0.5982095560664391,0.39673908819293036,-0.17948101503396496,-0.9613972933022695,0.0962640268014636,-0.14899899503835154,0.21412330540525726,0.999792693634785,-0.6416656637688058,-0.10878709805403952,0.6908465751345122,-0.9717921384770268,0.5943899966626933,0.359058279858428,0.13336413167891345
103,0.5509311761838867,-0.56878348962909,0.7456041856446624,0.457645050484816,-0.7676829364361141,-0.37022465786111486,-0.5224331380794807,0.29270666097479603,0.9195589138301378,-0.4419706098881167,0.2551107877469714,-0.2174243634154547,-0.9937239578459853,0.07211879436065304,0.03377098233441966,0.13859128904758664,0.9839064913920381,-0.7361223946826716,-0.320242755938462,0.6420422577677065,-0.890932845034972,0.6065453339129789,0.5788018337358242,-0.10560112703561415
104,0.6053026352261208,-0.4684938070217103,0.6875490092828691,0.3643839131099978,-0.8278233706611389,-0.4304929491341024,-0.4196283135049542,0.2067287482856787,0.9636347815313934,-0.3884245879640937,0.10775324899443954,-0.2958070479950553,-0.9985110168412102,0.07699486759729186,0.21540905518345918,0.14689106973793076,0.9287950422339709,-0.7242343508832662,-0.5167235156765589,0.6287806991468289,-0.7617868838785207,0.5739168508194704,0.7625582929726507,-0.1687271280653043
105,0.6569848565693539,-0.44765407364540066,0.6247219494890134,0.35565751737284235,-0.8796924674085422,-0.4142000746978778,-0.31111835683516864,0.31933201753291346,0.9906047288740973,-0.4076693919652235,-0.042024195428285886,-0.2654338825718293,-0.9756258039519613,0.04197000899966841,0.38982724672728647,0.03825681932024829,0.836655465724281,-0.7788903327667097,-0.6890417241201084,0.4695570334106547,-0.591353691775289,0.5959981919917001,0.8989025709435087,-0.3928923624924224
106,0.7057482265130745,-0.5235670222497188,0.5575590528600193,0.32127343219609317,-0.9227719678092682,-0.47237773971257857,-0.1983785346728031,0.39601254055364643,0.9999900000265173,-0.42594585875572205,-0.19085786702048183,-0.311645104876202,-0.9257025493695171,0.11794595179177927,0.5511795694824891,0.07114888822418904,0.7111610760151618,-0.8100239663772392,-0.8291395951916803,0.5283077060012916,-0.3888703857094099,0.6118566883284332,0.9793574408169485,-0.29998962087949815
107,0.7513760992256747,-0.46288044873963086,0.48652645855011,0.3361829866418602,-0.9566314357355024,-0.4458669508354063,-0.08294162151646221,0.470830729655385,0.9916239927613844,-0.3801085646822633,-0.33540528238454537,-0.31612013787970916,-0.8501248030506279,0.18646331279259695,0.6940579656604998,-0.03070954484195215,0.5573149384038688,-0.8275815377685178,-0.9304660006895888,0.5192438940810173,-0.16531113076213094,0.6039279137180887,0.9989206084610697,-0.43075790611573167
108,0.7936657592582477,-0.3497267381961362,0.41211716306621865,0.3294085569920019,-0.9809325585761778,-0.49188818594234857,0.03362293935028854,0.44703476878686177,0.9656552158879896,-0.19951901584367474,-0.4724202181971357,-0.237008821985927,-0.750987091654764,0.15882992649876923,0.8136735694422572,0.04838946671029884,0.3812504129073991,-0.7668361014167681,-0.9882828081386148,0.6170504753092149,0.06720763632468776,0.629148878543633,0.956375730864423,-0.40151956410917333
109,0.8324293221685019,-0.4176870321875085,0.33484759866192704,0.3808603042520846,-0.995432527544753,-0.4172126908695944,0.14973037350692284,0.4277563361846086,0.9225446530046,-0.2402696881828703,-0.598825614386582,-0.1679183061485999,-0.6310368718023147,0.21312991851762503,0.9060172157496263,0.05264772866764461,0.1899866365535643,-0.7674685793062817,-0.9998864409967778,0.7664279948821757,0.2960838922826761,0.6345646922752508,0.854368042453202,-0.37716053753727813
110,0.867494569253495,-0.326167291288825,0.2552540490773125,0.33456892802165544,-0.999986463744347,-0.4882595300394491,0.26380212148119664,0.40395192413212444,0.863057579368428,-0.5042663212128222,-0.7117826782683283,-0.20765807146700535,-0.49359838833422687,0.2798399732058115,0.9679938157291611,0.09532341885123924,-0.008851307462160092,-0.7792506583327679,-0.9647343008228753,0.7840316153416478,0.508913030209363,0.6489873225140227,0.699239887226607,-0.22903423792373387
111,0.8987057126826491,-0.3249842174937003,0.17388892750100785,0.28984478732900393,-0.9945488657497844,-0.53592796443106,0.37428730033793556,0.47464227955408445,0.7882499771456274,-0.4996786574479457,-0.8087546377085748,-0.145226151925367,-0.3424805477300144,0.2841130330121799,0.9975260950751376,0.16578964940912505,-0.20733637778128405,-0.7609314412466066,-0.8844701397673241,0.8032090989076603,0.6941601629128784,0.6581316235734843,0.5006363825256179,0.16242913181502072
112,0.9259240876317014,-0.19853529810911663,0.09131694258702432,0.3394640032358659,-0.9791740642427964,-0.6316968713707293,0.479683788986502,0.47883030704325574,0.6994497901904098,-0.4597321786869101,-0.8875637115574354,-0.20830632236582106,-0.1818713598376071,0.27466070766438366,0.9936242181867788,0.16655150034342664,-0.3975556010060438,-0.7867415771147342,-0.7628471969411419,0.7466613337973158,0.841785289295784,0.6729088121238037,0.2709057323520994,0.0738462206753816
113,0.9490287683425958,-0.22409332941895777,0.008111179135783107,0.39704927417816027,-0.9540156791578063,-0.824376224021167,0.5785586505378316,0.4286148464643785,0.5982333511082314,-0.5415490265735277,-0.9464400179185432,-0.14451340281357405,-0.016221873307991897,0.21521890495577053,0.9564189645616212,0.05780788727371706,-0.5719255369779418,-0.7890346897796908,-0.6055526928896939,0.6594923515965198,0.9437874410879401,0.6721614703715478,0.024331475851518816,0.2457842375514117
114,0.967917105372268,-0.3589116244927078,-0.07515087935888216,0.43670050943011024,-0.9193250847623279,-0.7054383555044101,0.6695676140552493,0.3881845096181986,0.4863973990570174,-0.44166938880339307,-0.984061321880816,-0.12918158345394865,0.14987717870571562,0.24197471938499032,0.8871573454493772,0.0070298157084687,-0.7234946066059249,-0.8530428415766252,-0.4199418889997574,0.7138069799142281,0.9946383183313986,0.679564258223806,-0.22375559396992106,0.0054345099369842685
115,0.9825051816434492,-0.34270547893646086,-0.15789135869151577,0.37466008838738213,-0.8754488980083742,-0.7213973222245108,0.7514733508313973,0.3823303050522717,0.36592718501117627,-0.47529622774995445,-0.9995827300639781,-0.10996931136717417,0.31182260400076867,0.10086026531024332,0.7881608076832424,0.12118968409057732,-0.846220229387787,-0.8907535717716114,-0.21469414751002797,0.7887695702175717,0.9915819114951113,0.6919210735086204,-0.4579306247009187,-0.0713196610213092
116,0.9927281852713374,-0.4414247627226193,-0.23953600463855151,0.3586761673126902,-0.8228255152504522,-0.8053130780122855,0.8231622967128864,0.36473148280048723,0.23896123066642116,-0.6474580233998021,-0.9926556651024078,-0.19252481418275022,0.46512632206164967,0.05003061668083828,0.6627474255739261,0.15487204704616497,-0.9352097220263128,-0.9305305899781555,0.0005929248683984557,0.7413696417483769,0.9347838713372987,0.6931710402981107,-0.6636337471389501,-0.37231974377718535
117,0.9985406975097498,-0.5201630552816429,-0.3195181685276059,0.34667207917873744,-0.761980731934101,-0.8642143122946055,0.8836597917617156,0.30017471650872035,0.10775336656612035,-0.6130223739442876,-0.9634356939412748,-0.2087336622065179,0.6055397446449408,0.020451982977268766,0.5151206887664823,0.06221129622600369,-0.9869153542725878,-0.8964712163453418,0.21585227143774122,0.8442394908761973,0.8273225309862243,0.7023402628673401,-0.828075337437701,-0.31797858166537857
118,0.9999168945374642,-0.5131639240449992,-0.39728274002098546,0.329148782488845,-0.6935224890216052,-0.8983823601889185,0.9321433314194985,0.2026838236645254,-0.025367276672264887,-0.6305100752584313,-0.9125790341383396,-0.05420536512186793,0.7291715191862504,0.00840890793866164,0.35022861356560064,0.09655980001118751,-0.999275785735511,-0.8631796634834882,0.42101813198907045,0.6897303102958403,0.6750220668235242,0.7010566942024997,-0.9410312139722942,-0.18264343985276774
119,0.9968506621882557,-0.6636846491950765,-0.4722899998182455,0.3170515840831408,-0.6181347986468624,-0.9999871206100143,0.9679537490158951,0.2692212412037075,-0.15803761381669842,-0.7889945625815947,-0.8412278166323203,-0.04327413388166692,0.8325953719430101,-0.007351360424068147,0.17359789990553667,0.12180618127101249,-0.9717982450182521,-0.93796433907982,0.6064967286526745,0.736566892307677,0.4861368404958234,0.681246109712436,-0.9954783274327061,-0.2545056213287627
120,0.9893556231149057,-0.714838181072651,-0.5440193655383829,0.3539091121922582,-0.5365709096922384,-0.9589381083293019,0.9906041775874564,0.12592254523054852,-0.2879025532239214,-0.8206176605529616,-0.7509844359443075,-0.11012580980850259,0.9129450621581827,0.03124132730519289,-0.008851307462160092,0.1416731812766308,-0.9055781749588148,-0.9850492037799514,0.7636148813429522,0.4601567523829087,0.2709040300470878,0.6673640729371368,-0.9880314200142769,-0.4365860134938309
121,0.9774650762664994,-0.698012767584844,-0.6119730047836315,0.30721100399838647,-0.449645781574731,-0.9398847907387718,0.9997866691648573,0.12973485918922673,-0.4126568031449828,-0.8556904938474865,-0.6438755638513672,-0.06292096666143432,0.9679938157291611,0.08270067752331135,-0.1910038449000885,0.187672640541427,-0.8032555607809535,-0.9591877331008812,0.8850255750538026,0.5602967061343761,0.040988796614090335,0.6574058068817075,-0.9191535043517743,-0.2285184827070033
122,0.9612318489479014,-0.651251619670935,-0.6756792903086809,0.32066966835305294,-0.35822794144086306,-0.958431285315941,0.9953763815348183,-0.029579688877438157,-0.5300857940101242,-0.8943358365643495,-0.5223066347076768,-0.12196224070751299,0.9962160369939796,0.07893590485217875,-0.3667544922789231,0.262316300168903,-0.6689096822142588,-0.9881535671943186,0.9650515142383932,0.4822745747599837,-0.1911479425285062,0.639988299581993,-0.7931270756363296,-0.14985742639338848
123,0.94072806211868,-0.6614330905129898,-0.7346960733149603,0.2564168011647922,-0.2632308061305301,-0.9339371876023681,0.9774332755548358,-0.06227658892579102,-0.6381049902629059,-0.8753603900971791,-0.3890078245747481,-0.13012288553995344,0.9968295883829464,0.08344989868650204,-0.5302126020940419,0.3444949073278242,-0.5078964854842457,-0.999992634555207,0.9999505992984282,0.6156067164774683,-0.4129248692154293,0.6503106033810196,-0.6177878468063815,-0.13169660790691542
124,0.9160448099742079,-0.5619201104057032,-0.788613752151911,0.2180812408476596,-0.1656035556178786,-0.9542504760410663,0.9462012999445754,0.0346195706339305,-0.7347968939012778,-0.9167059766476839,-0.24697273735297695,-0.03863436558125311,0.9698174662170184,0.08568815029482849,-0.6758995367355524,0.3627849930839091,-0.32663505863802106,-0.9619166821863349,0.9880909112147178,0.38007368985253936,-0.6123221451105643,0.6633112489088094,-0.40403756186882234,0.024520612348077987
125,0.8872917552325018,-0.5607851055238864,-0.8370581151271822,0.1919004908182071,-0.06632164911875064,-0.8592343049102897,0.9021050746372444,0.0023770316783192076,-0.8184450828588514,-0.9999807825346458,-0.09939117489189143,-0.15420589137512988,0.9159282719398917,0.1198524099147848,-0.7989322963920675,0.359180292874764,-0.13235172276043156,-0.9129214914021617,0.9300270218481637,0.3151577964906548,-0.7785328614126905,0.6396980245787202,-0.16516617825838867,0.138186819032617
126,0.8545966419248491,-0.5943810122258328,-0.8796929376954162,0.19773205705684918,0.03362292137534937,-0.789288755163469,0.8457441177830354,0.05571690284866341,-0.887564679995588,-0.9495871843014311,0.05042249908260653,-0.19739744795766628,0.836655465724281,0.08515082287917616,-0.8951871829181393,0.2729690804317011,0.06720805864360312,-0.8872524267510923,0.8284740615588424,0.21913295345476982,-0.9025487503568688,0.6530254335035731,0.08397443834676752,0.19351256820424437
127,0.8181047278548061,-0.6327198160164098,-0.9162223160000214,0.23373485147381684,0.13323154275332702,-0.7353719184566497,0.7778846948924476,-0.07560725127683154,-0.9409287118289015,-0.8090023298639981,0.19910379243217086,-0.03369586341295423,0.7341959774083611,0.03593834537543288,-0.9614380141313472,0.26175324306509345,0.2640884668368108,-0.8831895425480949,0.6881807567694185,0.1318156396180887,-0.9776484141922787,0.6272454083238493,0.32789393109910414,0.511398372032127
128,0.7779781392470327,-0.5639280690675578,-0.9463927205721706,0.24872251924238278,0.2315089585966079,-0.6937707929203941,0.6994494009359331,-0.09582919725690656,-0.9775898890993684,-0.8167739051820099,0.3433136438460132,-0.028647857300324642,0.6113893217996845,0.0466437487468204,-0.9954642560001775,0.25836606979188537,0.4504405012365868,-0.8371960722463517,0.5157073743739092,0.11716780724804954,-0.9997616106629585,0.6616753554489513,0.5514265673441451,0.703703917253922
129,0.7343951504531283,-0.6138531641622795,-0.9699947559334741,0.2260515826529221,0.3274732134522769,-0.7012390799640944,0.6115046170382894,-0.014985212691185162,-0.9968974225342699,-0.8673838100608994,0.47981341095937835,0.1651526573538476,0.47163890567684835,0.01923176334424164,-0.996125448448122,0.1627302623328458,0.6188348942992564,-0.7932857272939471,0.3191189565518283,0.20271489697942013,-0.967689851492229,0.6870880036478486,0.740674170422903,0.8522261373197957
130,0.6875493919145926,-0.6888013414240189,-0.9868646138899216,0.3032796498053424,0.4201654642073833,-0.7304592510398912,0.5152460123025732,0.050254526650784295,-0.9985085753046877,-0.6613532190988834,0.6055376032789312,0.11390156987597497,0.3188177073502529,0.010411902965584996,-0.9633994302387822,0.4655184583726534,0.7625582929726507,-0.8249200841873588,0.10760819066000465,0.23809106640906208,-0.8831713579015043,0.6968875582599984,0.8838702409818106,0.5829172589078465
131,0.637648989901771,-0.7164175641007546,-0.9968852104306327,0.27190481745416206,0.5086595605323664,-0.7179959845625419,0.4119822878760787,-0.018723611028264375,-0.9823947471032186,-0.4903200967170218,0.7176627264904547,0.23980508392067254,0.1571609427348777,-0.03195081336589785,-0.8983830817556795,0.5225982927238109,0.8758808988970734,-0.8043161378310241,-0.10893445078547817,0.1857740151368449,-0.7507868527123509,0.7101456052605639,0.9721115409124114,0.7174853411447022
132,0.5849156418507524,-0.5959227852152886,-0.9999869983408401,0.2592936312563225,0.592071298669006,-0.6741528894486046,0.30311738426887647,-0.006940709382290968,-0.9488419818410697,-0.6975650448384572,0.8136706920540431,0.28934101760664366,-0.008851307462160092,-0.0192778644186205,-0.8032555607809535,0.5057853338678439,0.95428489738448,-0.8075400651705451,-0.32038319931726406,0.1788071437374569,-0.577711294899767,0.7014093213394843,0.9999116535748116,0.6942051911250943
133,0.5295836314063154,-0.6275320025457827,-0.9961484498892272,0.3381722132817015,0.669567256102097,-0.6904512003352132,0.19013139382847485,0.010428558882996087,-0.8984458899521787,-0.6647317095015265,0.8914053680440264,0.2973326844923104,-0.1746182566234609,-0.06937709860016532,-0.6812052635020888,0.44316490649995754,0.9946445684333027,-0.7990049731299939,-0.5168504824182005,0.3445513559365065,-0.3733250120565157,0.6972515457889525,0.9655421026113206,0.6920134392932413
134,0.47189878754687226,-0.5639755249628372,-0.9853962062395694,0.3118474865920208,0.7403731188417227,-0.6583441338146857,0.07456043787851079,0.046831649127665784,-0.8321010754408373,-0.5524027466095175,0.9491210012236917,0.26616461519499907,-0.33554591629092423,-0.086166521989574,-0.5363229588000349,0.5152238285132361,0.9953508993211305,-0.8035378136057207,-0.6891492771222292,0.0718529432416093,-0.14870530656294534,0.6943388736776431,0.8711398202346337,0.7603171970873271
135,0.4121173924158334,-0.499646542810522,-0.9678048925496896,0.3352328921039035,0.8037814181126971,-0.6460981249653535,-0.0420242178945775,0.028984124039896524,-0.750985255358137,-0.7563903927266687,0.9855214228993429,0.22280156481654015,-0.48717441183429194,-0.13081603552672624,-0.3734646776447228,0.5481581759966825,0.956375730864423,-0.8330460137374684,-0.8292227051593992,0.1368221245649074,0.08397391067204463,0.7202269213209556,0.7225742831329085,0.5267106036672927
136,0.3505050427116589,-0.5895346223204567,-0.9434966000410308,0.2947360997415459,0.859158599148701,-0.6397708801800386,-0.15803752586635586,-0.01644086470424731,-0.6565383536096343,-0.8274151560668076,0.9997891580747996,0.10420808619481577,-0.6253015813511476,-0.1120752783195442,-0.19808895314429317,0.5097932043213641,0.8792728800362922,-0.8130072418284752,-0.9305207813107916,0.1292107451522122,0.31210192032287537,0.7250147876166235,0.5290825768376454,0.7149346241984571
137,0.28733546969521884,-0.5917210928669255,-0.9126400386334936,0.3208317307393739,0.9059513514620855,-0.5773951205602964,-0.2719022062740284,-0.0890451691668222,-0.5504369402095979,-0.8413605476122751,0.9916037841774327,0.11072644105516326,-0.7460994324357614,-0.05272926042822714,-0.01607386648539956,0.40272309124968886,0.7671161941871404,-0.8672246267150759,-0.9883066978121199,0.08211584622575385,0.5233146688475238,0.7519465198590994,0.30269507848443405,0.7362979659941171
138,0.22288932305693154,-0.6546255271043835,-0.8754493660266203,0.2925155956593109,0.9436921373393631,-0.5211138995020674,-0.3820701914067154,-0.12306943969224497,-0.4345644697222834,-0.6918936984034708,0.9611491270583582,0.13439201248376825,-0.846220229387787,-0.16058749269719585,0.1664799691506045,0.38582592515637165,0.6243770064508815,-0.8834926068606455,-0.9998783226155739,0.14461692079547803,0.7061648739262473,0.7789537220568848,0.05748746623084797,0.5708007879167993
139,0.1574529240467105,-0.8025787443433129,-0.832182701353802,0.26679907736629627,0.9720038633234919,-0.5266893231056408,-0.48704367263800635,-0.13666693268851163,-0.3109778472012334,-0.6292296199127334,0.9091091326601242,0.1311608346870219,-0.922889270809022,-0.10096729607339966,0.34345388329287624,0.4487181853429214,0.45674587780301035,-0.8944285775141385,-0.9646945540112608,0.3111606256832997,0.8507424428473884,0.7799539185078793,-0.1912944382369544,0.5589383488530875
140,0.09131699340632397,-0.8098553055113502,-0.7831403337253802,0.24943339013671623,0.9906036480070516,-0.6669795934555631,-0.5853954641521542,-0.12854093835737648,-0.18187091512987733,-0.5510108058575802,0.836652507065346,0.0707542445058575,-0.9739817863929329,-0.11643592681185111,0.5089162281138078,0.4555847713907267,0.2709057323520994,-0.897511696875171,-0.8844006231292648,0.3000514564370045,0.9492115784817415,0.7607981248296823,-0.4281825810547376,0.49792784638333454
141,0.024775359755680612,-0.7781129288730496,-0.728662638094113,0.24586600704958803,0.9993056484897552,-0.6110520233696793,-0.6757884065051065,-0.1537820082308193,-0.049535509523345955,-0.5563998809423977,0.745406469877665,0.041430940945441405,-0.9980818218248938,-0.22446014585611926,0.6573211899099233,0.48382631201221027,0.07426543024478444,-0.9142851836447353,-0.762751161149929,0.20481375561147894,0.9962354628424042,0.7483997022025621,-0.6384484048511662,0.5839867169626435
142,-0.04187634582854418,-0.7007086205805161,-0.6691277129078065,0.22047942031018683,0.9980229172592789,-0.5888743832082391,-0.7569935462158768,-0.14079725724609224,0.08367922349754048,-0.48762009301165044,0.6374202103775293,-0.013955537635429673,-0.9945214798842913,-0.10636387669232894,0.783694668551774,0.4510224995274084,-0.12533560020827272,-0.896926662156866,-0.6054346286829522,0.15710552183188856,0.9892655012834192,0.7515584501580547,-0.8090185991085457,0.6279208561864791
143,-0.10834200324224282,-0.6886860763756487,-0.6049487559447875,0.2455226981015693,0.9867682709420627,-0.570798758459354,-0.8279068442242921,-0.18193576726630362,0.21540852847011638,-0.7715969133669576,0.5151188671487545,0.027306276184705092,-0.9633994302387822,-0.1324763688783936,0.8838009947674983,0.4259794307155154,-0.3199398958003875,-0.9328733078828125,-0.4198073171876728,0.21323524147062425,0.9286794509159497,0.7515730663156197,-0.9292879352912621,0.33429852867932497
144,-0.1743263189558588,-0.7678159886809648,-0.536571196545048,0.2245674176737984,0.9656541622438166,-0.6475248363199618,-0.8875641860523059,-0.2638233932552994,0.34331401844225534,-0.8548371718431959,0.38124906469420167,-0.056350355312008604,-0.9055781749588148,0.010978728750045672,0.95428489738448,0.4653988846090967,-0.5017891973756624,-0.9079078127862242,-0.21454936081630455,0.26881663423859686,0.8177609469752724,0.7457998008641991,-0.9917786485905381,0.23648814708079482
145,-0.23953613794406584,-0.7640904112412368,-0.4644696041406137,0.2880936370403567,0.9348915563592576,-0.6608048908030826,-0.9351544895955247,-0.23312084594307667,0.4651251847453019,-0.8922364781749585,0.23881723017083611,-0.10075981921340288,-0.8226601455347005,-0.06220650117594421,0.9927839622024905,0.44533312084829857,-0.6636337471389501,-0.9010877948610226,0.0007411560611398164,0.40512448952230734,0.6625215367660003,0.7639351134825983,-0.9926053691222754,0.20639258190782514
146,-0.30368174611450327,-0.663951669384535,-0.38914439454123173,0.35681102834570094,0.8947878230776185,-0.6317902491145484,-0.970030732335616,-0.3204887099366053,0.5786797018286458,-0.7593816019439492,0.09102207551723347,-0.15739796959953833,-0.7169432938324188,-0.060382325996264724,0.998007813205949,0.48343143563947705,-0.7990213136211992,-0.8558020198809594,0.2159970156783608,0.3597866206327004,0.4713748665676806,0.7470187993425715,-0.9317166954079665,0.2767567696888857
147,-0.3664781574491409,-0.6847252807163223,-0.31111835683516864,0.36522148616231387,0.8457436656453174,-0.6088476152304406,-0.9917187480511526,-0.3008744597896294,0.681961811686446,-0.7331964173060141,-0.058817238720538026,-0.07419253478249246,-0.5913574077198357,-0.09765396372005014,0.9697813621926953,0.5161685881530024,-0.9025544217869346,-0.8443710246844056,0.4211526208801004,0.4037577148844063,0.2546806798784033,0.737861872717856,-0.8128983902696688,0.1646023677117719
148,-0.42764638013975387,-0.7126963872703047,-0.23093302500992663,0.414998003163392,0.7882491170715743,-0.5235894785947633,-0.9999236734296302,-0.2914236751860201,0.7731381064970291,-0.6732835583391705,-0.2073356445790858,-0.065352244955548,-0.449382916282607,-0.13212512282079192,0.9090506772182689,0.5925227736987173,-0.9701055333312049,-0.8649243786213304,0.6066146733478919,0.32303149919533464,0.024183341322962116,0.7449489927076653,-0.6435380004337727,0.5451433699323006
149,-0.48691465609234674,-0.7225208949546024,-0.14914491947534733,0.4372602010507678,0.7228786438805687,-0.5333591281965481,-0.994533956934582,-0.27448182808167504,0.8505900742810576,-0.6230481031972923,-0.35119773884951894,-0.12107550467826146,-0.2949544348243706,-0.16143392270627194,0.8178512731626093,0.5855597036031585,-0.9989815986065973,-0.8237200849356715,0.7637107666206521,0.5726735437873901,-0.20762468296282718,0.7499309525157579,-0.43416553465655655,0.3897858907369526
150,-0.544019668293626,-0.8539859797665557,-0.06632168457455162,0.5568232117508372,0.6502854062318185,-0.49576913415748225,-0.97562287542453,-0.20393358977226453,0.9129428298455946,-0.46718645905584555,-0.48717268904279,-0.09828586045163659,-0.13235172276043156,-0.1803191943023353,0.699239887226607,0.4596340057292032,-0.9880314200142769,-0.8439546849752012,0.8850949172145435,0.604187732843301,-0.4281798904606801,0.7409808689172951,-0.1977987587809895,0.6232691296364428
151,-0.5987077106754591,-0.8209442934310748,0.016961851109947905,0.5817759127723208,0.5711947317597765,-0.5115733385655228,-0.9434475379042923,-0.20625009304602693,0.9590895209980197,-0.4547038304667201,-0.6122067909221609,-0.03305969520890151,0.03391892365258597,-0.252401260823134,0.5571920260442581,0.3775949670992866,-0.9376915466194324,-0.8735963412715544,0.9650910707676266,0.7347835170895425,-0.6255286574194023,0.7510448615654495,0.050866185892860204,0.568476728421622
152,-0.6507358152798379,-0.8523072073167836,0.10012766431183594,0.5776381837357942,0.4863968683398661,-0.4607826315025696,-0.8984453899533372,-0.20843753253208747,0.9882109767839498,-0.4815595574948016,-0.7234920481169532,0.026021677072948387,0.1992495567242226,-0.32763084594736985,0.396468718331474,0.26938184397465337,-0.8499688703176599,-0.7891201669484059,0.9999585204900007,0.8447466318769923,-0.7889750998707605,0.7442150580445197,0.2963685174942595,0.6543150813884291
153,-0.6998728317165822,-0.9099277309619971,0.182598548809052,0.5851708855204515,0.39673908819293036,-0.3685670535726247,-0.8412282663558485,-0.16633689734850354,0.9997902489643353,-0.32276926177326337,-0.8185292336673461,0.10391374525209426,0.359058279858428,-0.37103308585713907,0.2224569391329161,0.37937742064076124,-0.7283606173881955,-0.737986271219398,0.9880668266651327,0.8300816110729989,-0.9096607678207603,0.7931310188364457,0.5234440901154529,0.8634343544390212
154,-0.7459004541179705,-0.8340145859852982,0.26380212148119664,0.6068029054537983,0.30311722222127896,-0.3835115639889803,-0.7725740727910418,-0.2983440738773591,0.9936217885993456,-0.28553042539082,-0.8951840172738977,0.17485154965341512,0.5089162281138078,-0.3533781642235184,0.040989054179228386,0.4243236755352852,-0.5777149251183075,-0.7757409281874582,0.9299720577770455,0.8115237307478039,-0.9810447544225287,0.7786809119145721,0.7179744444735174,0.7265598664566326
155,-0.7886141910277368,-0.7864634163951715,0.3431747948925702,0.6004343417739685,0.20646670916242466,-0.36683779652890813,-0.693416209676795,-0.31291312288513606,0.9698150948413159,-0.24039107214336644,-0.9517348977547095,0.20462383820190466,0.6446703077154994,-0.37572788662387147,-0.14185266394844032,0.41898874233034844,-0.40403756186882234,-0.7942193858658511,0.8283907881456931,0.6058714768466846,-0.9992581992334705,0.7682694848138871,0.8678646253269284,0.8931932851677002
156,-0.8278242739154351,-0.6955142750858299,0.4201656888294076,0.6932699713877991,0.10775324899443954,-0.4070761538188358,-0.6048308819566539,-0.3363216186183452,0.9287927711653704,-0.2564685356156593,-0.9869118642506486,0.12098920193974173,0.7625582929726507,-0.43242264444822864,-0.3199398958003875,0.4233283517412277,-0.2142524960418839,-0.8176377533638589,0.6880730679692388,0.7962120728583675,-0.9633139722331534,0.770228395348414,0.9637951872115138,0.5712162325967661
157,-0.8633565002797943,-0.719685549407444,0.49424045364355607,0.6549394578901109,0.007963153980990558,-0.3159915881961962,-0.5080224673622445,-0.37976086241207424,0.871283023542456,-0.3596171415358265,-0.9999249179312569,0.133430186582505,0.8593130908588155,-0.4556961839851684,-0.4873036783966224,0.4618658192893034,-0.015925859310604355,-0.8113350091462656,0.5155803058316397,0.7894114605438078,-0.8751601741736009,0.744455563738127,0.9998016324214156,0.5099241033475956
158,-0.8950530075953472,-0.7039076259052606,0.5648849788669922,0.7140157984417536,-0.09190650623476952,-0.30081917458179547,-0.40430714207953605,-0.4143902465179485,0.7983067337231403,-0.48931687902738313,-0.9904818136653462,0.18696448757461714,0.9322532837150231,-0.4489658231950374,-0.6383344674024947,0.46797445196962717,0.18303569117443722,-0.7597932889241052,0.3189784501301393,0.7993634958635755,-0.7395745536566136,0.7256565417211259,0.9736452545878657,0.469934718988808
159,-0.9227729746638389,-0.6449653938271711,0.631608961357793,0.6946383125234756,-0.19085786702048183,-0.3217543258334557,-0.29509498643913845,-0.22542564850046892,0.7111593371002062,-0.5271097806414337,-0.9587946232166036,0.10020400177552667,0.9793574408169485,-0.38449901411348997,-0.7679701526429006,0.2725195901804569,0.3747001862548749,-0.7601991311392019,0.10746081659137031,0.7675893857307187,-0.5639055633082671,0.7031665607537941,0.8869523305981969,0.5475179248721241
160,-0.9463932472544476,-0.7124324144483571,0.6939493082130944,0.7258802304253542,-0.28790223908814844,-0.4077310326452898,-0.18187081391593907,-0.24464077669695092,0.6113878268443625,-0.5630881491894426,-0.90557497256898,-0.03478096543562162,0.9993201393698201,-0.337015646874104,-0.8718657252250657,0.31060922751696723,0.5514265673441451,-0.7276599616534423,-0.10908180112997323,0.7922348880479891,-0.3576740892558896,0.6964619611180283,0.7451130065757331,0.32643488003481413
161,-0.9658088852532292,-0.5355895065144407,0.7514733508313973,0.7289609314458407,-0.3820699871506753,-0.370891739697894,-0.06617398408085352,-0.26407374223063057,0.5007632925231493,-0.4771048857846092,-0.8320180603412612,0.008929159424877934,0.9915881423911858,-0.3312218971596861,-0.9465389095244047,0.23962774458031552,0.7061693113205663,-0.7358062982239041,-0.32052363567590875,0.7763182474547686,-0.13205743932387398,0.692273387932789,0.5569461647005892,0.20531477604562917
162,-0.9809336288909083,-0.5533049927129073,0.803781847817241,0.6657058477545351,-0.4724202181971357,-0.3830223333337613,0.050422526038662596,-0.17372533558085118,0.3812494806825246,-0.5183669412596974,-0.7397758162027859,0.05270954465169002,0.956375730864423,-0.3850032825272123,-0.989486878875324,0.1277196582727222,0.8327593133007577,-0.7983072928587566,-0.5169774378347507,0.795489735797058,0.10071644330797519,0.6809163946217288,0.3341511078292741,0.14051742023295816
163,-0.9917002819776596,-0.6877697856467679,0.850511755886796,0.5979563010481965,-0.5580501825847355,-0.3159282965656539,0.1663335076502882,-0.22220399901750074,0.25496793548660607,-0.6392493992559233,-0.6309198020944293,0.08844014482858606,0.8946587652548067,-0.3816431067564638,-0.9992701429985067,0.2070892797388379,0.9261498293836297,-0.7882255282515086,-0.6892568150238886,0.755160388940742,0.3280317086506715,0.6800762942747486,0.09058015350772429,0.02271867111630931
164,-0.9980610104422466,-0.6128354336313908,0.8913387495431321,0.5585230495695993,-0.6381042940148105,-0.2550631128153829,0.2799830721876314,-0.18209496568010203,0.12416033821991551,-0.6403395187769441,-0.507894689413418,-0.05086508192112607,0.8081476409645214,-0.371520585884713,-0.9755607955035421,0.19184835645864154,0.9826176744037675,-0.7724463428860785,-0.8293057969574021,0.7532272848777432,0.5375683521772873,0.6814827191810051,-0.1586226360410919,0.33216591411579655
165,-0.9999875548491668,-0.652992032006204,0.9259794720335266,0.599978372126126,-0.7117826782683283,-0.31390620147751985,0.38982607658625396,-0.24413634738766404,-0.008851285819143564,-0.4877524045662919,-0.3734633569641671,-0.12653213105909591,0.699239887226607,-0.3739069895571498,-0.9191535043517743,0.34598927634562066,0.9999116535748116,-0.7634439674615098,-0.9305755415426644,0.8540711770303312,0.7179699328986444,0.7276349000603903,-0.39796303835712543,0.5272570294486983
166,-0.9974713559496301,-0.7773172969660384,0.9541935019662018,0.5701017543310525,-0.7783491652846606,-0.2794438176042866,0.4943691305536854,-0.3008659417616971,-0.14170578664804576,-0.3175319605580715,-0.23064484265672702,-0.022841799380026964,0.5709537230901615,-0.3763806900043454,-0.8319388769112056,0.29193722017001117,0.9773423105215168,-0.7620423155621154,-0.9883305658301034,0.9208429578083062,0.8594590683419905,0.7413265882709479,-0.6125600264507652,0.4193072596701785
167,-0.9905235927085793,-0.8284621454150561,0.9757850219373507,0.5680386876944907,-0.837138644729493,-0.30765714875589034,0.5921909002253344,-0.24585821412288233,-0.27204480345000664,-0.358802348403232,-0.08264654242399118,0.026465974885453533,0.42684441189542627,-0.3705058333276816,-0.7168400923288917,0.24729337479086677,0.9158094137297402,-0.722454615320051,-0.9998701823252941,0.9977915487616215,0.9543673479122683,0.7624556364131477,-0.7890709989859699,0.20162453111489656
168,-0.9791751326387994,-0.7755102661531552,0.9906041775874564,0.5301066060312014,-0.8875637115574354,-0.2768564242875551,0.681961432164296,-0.25598134702134595,-0.3975546289120927,-0.4791207489473424,0.06720782097621032,0.04912312833112173,0.2709057323520994,-0.30422332510597727,-0.5777149251183075,0.2596448551514561,0.8177660856162815,-0.7396522377243049,-0.9646547860615091,0.9075078016864845,0.9975509444574153,0.7445118771851542,-0.9165213586075023,0.3817606079142788
169,-0.9634763946627743,-0.7325630759303934,0.9985481176544713,0.5057932286902381,-0.9291205351690502,-0.34477603059914097,0.76246023498214,-0.16123418254746608,-0.5160072808986288,-0.5985615643820933,0.21555284160875476,0.012993528960474785,0.10745929680581393,-0.42221984098086635,-0.4192264438443876,0.1901377298971006,0.6871210042794174,-0.7478729527991358,-0.8843310871124457,0.8630374189117807,0.9866693987736797,0.7339783260816132,-0.9869868592498809,0.39444187563121313
170,-0.9434971251115717,-0.7505428278764106,0.9995617078054543,0.5368687633552037,-0.9613938935196727,-0.3833795223231589,0.8325928727487738,-0.004035575182070193,-0.6253000523778837,-0.580135965220611,0.3590570101231171,0.08198659241639113,-0.058965215924966195,-0.4155643456534302,-0.24668671870801795,0.16892745712820553,0.5290825768376454,-0.7728900001895255,-0.7626551086455092,0.7641879484452553,0.9223124675229432,0.7645753690398748,-0.9960862973772684,0.29270410324524043
171,-0.919326087855937,-0.7229599212925478,0.9936379132923613,0.5294235610869673,-0.984061321880816,-0.49064412205382235,0.8914058445926608,-0.006836909810463561,-0.7234928375332997,-0.4053705625771066,0.4944975322710992,0.11153708202399196,-0.22375559396992106,-0.4278005538798227,-0.06587877649448187,0.2306700461745371,0.3499512966739757,-0.7094825568576197,-0.605316551210083,0.758627745987643,0.8079681596743896,0.737243939177728,-0.9432539139993165,0.1684936792713442
172,-0.8910706699462959,-0.7430733268577483,0.9808178477761973,0.5312131554725424,-0.9968963348011504,-0.5656745209513335,0.9380995481931627,-0.0346841138410854,-0.8088425714663853,-0.41582302043022284,0.6188327059177012,0.09956546676977897,-0.38234491311059304,-0.36377821773531016,0.11713722956492027,0.31192492836146324,0.156868562647098,-0.6753345060443551,-0.4196727361768817,0.5780929638283581,0.6498336938274741,0.7677882394897065,-0.8317745708249755,0.22634676587095387
173,-0.8588564045137166,-0.6856120844440625,0.9611904879806703,0.48078870234092536,-0.9997706890742684,-0.6355416982811356,0.972039150917484,-0.08274120631960244,-0.87983417213645,-0.43272609815361085,0.7292702311135444,0.14883015637781338,-0.5303381024031278,-0.35534220088770707,0.29622713839892695,0.4424457717845697,-0.04246802594515088,-0.6535086528571514,-0.21440456942142735,0.3836084771374969,0.45647962212773363,0.7752438843936666,-0.668579513471402,0.07837327962012283
174,-0.8228264130514906,-0.7629376537907792,0.9348920561557704,0.5133916781327151,-0.9926556651024078,-0.6246820360978406,0.9927632208017394,0.006154408870383383,-0.9352074352726826,-0.28127616162910596,0.8233299191319133,0.21233235039282647,-0.6636337471389501,-0.3549477288213638,0.465388380228658,0.40647372551809535,-0.24011154835856524,-0.616943377546797,0.0008893872376411567,0.18561825333116916,0.2383853245457482,0.774449720200236,-0.463815420182426,-0.1447343566283398
175,-0.7831407695551633,-0.7192112588765311,0.9021050746372444,0.5246746704548766,-0.9756223538531809,-0.698287671266355,0.9999900000320824,0.015647128014011115,-0.973979404834729,-0.3989408397087701,0.8988993921605662,0.2305315302269452,-0.7785377535525064,-0.3439414661752621,0.6189511651448041,0.44700487403085426,-0.4281825810547376,-0.699669696556786,0.21614175518610942,0.32191936250445746,0.007371048742645748,0.7589071798697787,-0.23021353051998736,-0.20665882402579874
176,-0.7399757893460115,-0.5981128645431862,0.8630570990637212,0.49164497027630166,-0.9488409465421256,-0.6531107305936077,0.993621235633665,-0.030439756783913113,-0.9954618219135256,-0.39045450485288996,0.9542815227533997,0.23766740698331987,-0.8718657252250657,-0.4217027376945137,0.7517685178772217,0.38155314749976577,-0.5991833254525278,-0.709217804046541,0.42128710054293955,0.2537108970630501,-0.2240427222449998,0.7606579822167906,0.01770192144906925,0.002538746811184534
177,-0.6935232457375787,-0.5724672682224896,0.8180191390435021,0.488754107641418,-0.9125790341383396,-0.6817197098569582,0.9737435152866017,0.09379191663220925,-0.9992733423289929,-0.27117392757879344,0.9882325476540885,0.32356274531035345,-0.9410312139722942,-0.3962267463785142,0.8593887893474554,0.3199851647595635,-0.7462965214968447,-0.7149784843660417,0.6067326047511222,0.13051828615191055,-0.44331385359019876,0.7359483330706535,0.26451675368028593,0.08831671524336411
178,-0.6439895180254736,-0.6168707878821066,0.7673037772322717,0.51883521790853,-0.867198933683964,-0.6753391178378502,0.9406270901076131,0.054034018575335546,-0.9853463060444431,-0.26625686036552726,0.9999900000374283,0.2858840252794433,-0.9841173994713753,-0.4677782872867103,0.9382048629215747,0.1529921333677516,-0.8636572303039475,-0.6624785104311448,0.7638066351641228,0.15536489186621422,-0.6385583157416571,0.7428753456554406,0.4948852153337387,0.35911367629327323
179,-0.5915946745857159,-0.5373270030805528,0.7112629998761175,0.5487510518512376,-0.8131540681420757,-0.7428364159479898,0.8947222004024376,0.12548076745709308,-0.9539279382307397,-0.2591215501563565,0.9892898328704534,0.27766787936455106,-0.9999302111317135,-0.4941911085167567,0.9855750544063959,0.18533375240341457,-0.9465866507668218,-0.6448925825968654,0.8851642399813108,0.32878270580096003,-0.799194273581802,0.7525561324155817,0.6944841112353274,0.26943015975143925
180,-0.5365714951552691,-0.5511802009332026,0.6502857538768221,0.5463793402705781,-0.7509844359443075,-0.7619295263013105,0.8366529543428967,0.12305096580683318,-0.9055759606595722,-0.2016000424610222,0.9563723488395427,0.31355161834645506,-0.9880314200142769,-0.4973950611625626,0.9999116535748116,0.337564749526306,-0.9917786485905381,-0.6434340662364078,0.9651306061500325,0.3671918977946466,-0.9165155994126397,0.7358229867341827,0.850903348779433,0.48291249400590486
181,-0.47916443663855113,-0.5042960737783164,0.5847952473344464,0.6534195179639094,-0.6813112155050336,-0.823466855520637,0.7672088427921657,0.1017935208034044,-0.8411486913300028,-0.13489346309067973,0.9019768036701457,0.3392919068029137,-0.9487507837054554,-0.5057333897453817,0.9807341395674377,0.28687727650770106,-0.9974315614334571,-0.6319446922457373,0.9999664197707405,0.35834803383338787,-0.9841637265837466,0.7237014084710526,0.9544175373798285,0.39761455531294454
182,-0.4196285470346411,-0.47682937464338393,0.5152460123025732,0.6232998679658788,-0.6048305586119814,-0.8694721897670449,0.6873340056399617,0.11064989177947933,-0.7617898080743309,-0.3546592310658909,0.8273248040369273,0.3312881293636325,-0.8831769075680354,-0.6147544033188956,0.9286852865459446,0.29920031613614756,-0.9633200254993318,-0.6655412303749393,0.9880427204652819,0.2726709270717901,-0.9984722702980908,0.7276735357735584,0.9985906661521659,0.27169456512795154
183,-0.3582283323103875,-0.43546833436486443,0.442120750141946,0.6675619690344573,-0.5223066347076768,-0.9010362867753404,0.5981143955791197,0.07418823427038891,-0.6689080466114544,-0.3851066251806441,0.7340928729114915,0.39335188778787794,-0.7931270756363296,-0.5665231686850866,0.8455096197837403,0.3899587486772397,-0.8908039600806766,-0.7459968224017002,0.9299170733286206,0.3628164891158306,-0.9586657378800326,0.7323699040646934,0.9806762638984534,0.0645892669879056
184,-0.2952365812536673,-0.3647565157581919,0.36592698136711643,0.6925331698714323,-0.434563995560995,-0.876014559036492,0.5007630138407391,0.25030996236315006,-0.5641521934368313,-0.3902826301082068,0.6243747984707934,0.5783539113862357,-0.6810968872480894,-0.49929348943759816,0.7339949442827645,0.3362414400241003,-0.7827743518680612,-0.7519533185137608,0.8283074965810604,0.3566422228172831,-0.8669015588310748,0.7247137783633437,0.90178816138383,0.02344234788813011
185,-0.2309331535277489,-0.3167041274452501,0.2871935232375599,0.76067316821491,-0.34247933661842295,-0.9449208799622723,0.396603418618098,0.1266036335453885,-0.44938181746167843,-0.43181942196325057,0.5006346121286421,0.4814121475553296,-0.5501910951602332,-0.5224963060217537,0.5978789057061601,0.3265222452362543,-0.6435380004337727,-0.7476624908012768,0.6879653640921747,0.36677303595758576,-0.7281531567336006,0.767380552433972,0.7668312387343396,0.32734614745013363
186,-0.1656037363111622,-0.34502064826926737,0.20646681954029744,0.7673675574804435,-0.24697273735297695,-0.7988398333863977,0.287051730393562,0.2225333984370652,-0.32663425995737383,-0.2726827750841868,0.36565125170226676,0.5503810327783901,-0.40403756186882234,-0.5607467507332466,0.44172371543087957,0.3104440069286825,-0.47864581972378417,-0.7099420513637784,0.5154532259921075,0.4667811974014035,-0.549940400257087,0.7586756989770775,0.584196463747095,0.29827639302714604
187,-0.09953857504710924,-0.33992355763827037,0.12430714803691101,0.7486802539297343,-0.14899846813423626,-0.686755266214585,0.17359737881824086,0.2530354198082239,-0.19808846878167158,-0.3125064769312221,0.22245615245998135,0.6346228946150279,-0.24668671870801795,-0.6591311148511917,0.2707632385700804,0.32751134206879495,-0.2946715406356436,-0.7229342603411069,0.3188379367190802,0.5243132837450064,-0.3419220422496351,0.7908452358503261,0.3652391821536459,0.3746404527279033
188,-0.03303118394141441,-0.3542197554752525,0.04128473189685996,0.6534197450757225,-0.04953545547424012,-0.6042455949198662,0.05778285290369017,0.15501419270311875,-0.06602631888182521,-0.2738786299776591,0.07426516762045728,0.4897306343961041,-0.08249931407193935,-0.6172285092417864,0.09072757012725888,0.361313389651743,-0.09894963711217238,-0.7089237159881514,0.1073134401680858,0.5299490028125907,-0.11537223585037017,0.7807022591139973,0.12357309722111383,0.40951418052665767
189,0.033622958061977536,-0.23868199588516725,-0.0420242178945775,0.6911612504545314,0.05042249908260653,-0.6479931577286316,-0.05881727016445322,0.09489244636973049,0.06720789430798035,-0.20486276241326587,-0.07559365277762403,0.5523064259566913,0.08397443834676752,-0.678569466058801,-0.0923490210236156,0.33745021820136295,0.10071707618931759,-0.6599669224783776,-0.10922914908430306,0.5208163634973746,0.11743050066256167,0.8211209818961193,-0.12577616438005726,0.4728620755356516
190,0.10012772003443934,-0.26614671793055633,-0.12504150168634226,0.697438216992314,0.1498766486960855,-0.7431985779043407,-0.17461773247336926,0.10885210410017937,0.1992490695237194,-0.24021206163128891,-0.2237548027045622,0.6271326388697341,0.24812096207186515,-0.7421543902393448,-0.2723303431762871,0.4697779305432528,0.2963685174942595,-0.6662024696192816,-0.32066406501132216,0.40307479031655064,0.3438687534810956,0.8063187949176538,-0.3673052732670287,0.3218676466898526
191,0.16618763471273568,-0.1832084479918421,-0.20719094411383734,0.7379870707935955,0.24783328037842636,-0.7976789812712686,-0.2880441480416971,0.2149961716908297,0.3277532836623877,-0.17492903829289258,-0.36689090214948894,0.5960997100376236,0.4053911763653313,-0.7351986813756235,-0.4431839489854308,0.4698749729807766,0.48020468125164417,-0.7073421712758955,-0.5171043819234213,0.5041184372447447,0.551670050294203,0.8267261809861431,-0.5859971192764365,0.23025271095903577
192,0.23150921120056775,-0.27681687197781213,-0.28790239300175924,0.741944511614534,0.3433136438460132,-0.7704370743645386,-0.39755440766687794,0.3201393249850457,0.45043939982967546,-0.21375631307701168,-0.5017874229019954,0.6762873478691019,0.5514265673441451,-0.6610742144513835,-0.5991833254525278,0.36905309057601093,0.6448965997410208,-0.7460742148106392,-0.6893643378227279,0.4320522057223851,0.729572002218414,0.8106034853059807,-0.7682545026401588,0.26435592603900343
193,0.29580223888950874,-0.2234228062494285,-0.3666156764634084,0.790587479572948,0.4353637308686965,-0.7256780663361395,-0.501659644947771,0.31017183111391133,0.5651295608410086,-0.24243953249762332,-0.6254148799256174,0.6841182947123903,0.6821799778154783,-0.6585428647002151,-0.7350998298874049,0.35534687129454123,0.7838785258876318,-0.798217762208775,-0.8293888705838742,0.45018757556427236,0.8679327013521737,0.7971029523072551,-0.9027455420099297,0.2648273484625302
194,0.3587810768062985,-0.30304443684416343,-0.44278449073256754,0.8341030893721426,0.5230638074046525,-0.7466265580464106,-0.5989444784351692,0.2921183482067915,0.6697878493574049,-0.365106036043419,-0.7349968670605935,0.718968777385184,0.7940277683168202,-0.585411808406148,-0.8463779387137614,0.4341395260708944,0.8916096888788322,-0.8263257064898584,-0.9306302813840046,0.4739627272725633,0.9592532917030901,0.7859599059372201,-0.9811082359545006,0.23202420900535412
195,0.42016592265815217,-0.26984329839739535,-0.5158801917446711,0.8764099849158177,0.6055376032789312,-0.7179667062001966,-0.6880862547044857,0.2520157958099333,0.7625564283823844,-0.2560476245345448,-0.8280724091206144,0.703944559024269,0.8838702409818106,-0.4636230465571613,-0.9292879352912621,0.4337263548121031,0.9637951872115138,-0.8291993015340791,-0.9883544121920417,0.44243522099838856,0.9985843912539538,0.774218315125716,-0.998470371707938,0.2186172528175455
196,0.4796840559379955,-0.31399192251194713,-0.5853954641521542,0.8282337393022485,0.6819610675853659,-0.8412914842644552,-0.767873030727915,0.1816031395870493,0.8417885205735445,-0.38054814486168254,-0.9025512300899773,0.6406500095336508,0.9492175431313883,-0.48425968812193715,-0.981050919105283,0.4514953195498216,0.997557212861661,-0.7880416316341179,-0.9998620201261161,0.34986262454521544,0.9837943379547703,0.7689569612354813,-0.9537524557612311,0.2233534148923774
197,0.537071049566752,-0.31450305994358246,-0.6508478423093741,0.7704059271319672,0.7515706023308427,-0.8639133246168571,-0.8372200510635122,0.2336390650615425,0.9060776409379818,-0.3125728717228705,-0.9567606962165225,0.8345695960986332,0.9882586698978062,-0.4556482864196094,-0.9999319463818837,0.4575895043729863,0.9915497804088362,-0.7376830082214636,-0.9646149969744904,0.43570564039918036,0.9156847212778731,0.7728112811261312,-0.84973483154024,0.11823502383663392
198,0.5920719446895983,-0.32498686911377705,-0.7117830587900028,0.729671151288476,0.8136706920540431,-0.9414360915468778,-0.8951844958426122,0.27394742233758973,0.9542825639886631,-0.2913618475218785,-0.9894833797597202,0.7062154041402812,0.9999116535748116,-0.4643795156987278,-0.9852981803271714,0.37481959488389205,0.9460123872273839,-0.7317783960370636,-0.8842615317183934,0.5455529062867026,0.797946937771229,0.7829255815434659,-0.6928848111178505,0.1620993433020221
199,0.6444423834068088,-0.5139145752238674,-0.7677781971967729,0.7364238712428328,0.8676408531860084,-0.9420020508198257,-0.9409782990466675,0.25318003768341185,0.9855475821583667,-0.09253760764748979,-0.9999843997931115,0.7365921657015352,0.98385354876958,-0.4060655600299774,-0.9376401019651358,0.4478127658429462,0.8627604654818977,-0.6689275396114137,-0.7625590394299897,0.7403526933819582,0.6369621252066716,0.7785734939181045,-0.49295456907319835,0.040121514397034735
200,0.6939496944066839,-0.4677377440415345,-0.8184446273816793,0.7725123662381725,0.9129418337168758,-0.9166797877790558,-0.9739788628003345,0.2735343542274845,0.9993176958548495,-0.050777382206222084,-0.9880279260456021,0.6892153448888411,0.9405293823617579,-0.37156287940366245,-0.8585550706487349,0.4954345493912623,0.7451130065757331,-0.6614191880371216,-0.6051984604736681,0.5841829412978584,0.44145531846722413,0.7731006714167619,-0.2623747995102293,-0.010828352242706781
201,0.7403739266773363,-0.5138164106485335,-0.8634307027049929,0.7930642222275178,0.9491210012236917,-0.8965242083130336,-0.9937375220126885,0.27440073829083084,0.9953484655116557,-0.16893902035723407,-0.9538824751409333,0.6837298282990034,0.8711398202346337,-0.4164887270584241,-0.750693785247781,0.44814412062500525,0.5977602434374559,-0.6613888977847695,-0.4195381459703299,0.7782196087761387,0.22202257118986288,0.745695068337471,-0.015481835705404115,0.012688787451261228
202,0.7835088267047319,-0.478939729308828,-0.902424200612962,0.8374810597556926,0.9758168654239697,-0.9049382049237824,-0.9999856442840467,0.24692475182611517,0.9737103507463053,-0.09909834363385213,-0.8983148802931981,0.6325634444546345,0.7776078925768971,-0.3619703138609784,-0.6176714404780257,0.3104351305261738,0.4265766657348627,-0.6273257450427164,-0.21425977332857601,0.617415204699545,-0.009443327833880081,0.7044405882873123,0.2323737136585305,0.14172827337195282
203,0.8231627548155148,-0.5759667565596028,-0.9351544895955247,0.9017995413845665,0.9927626900671027,-0.9283358649003866,-0.9926382821459993,0.2245683955230295,0.9347874595994989,-0.17785767460652668,-0.8225730698860527,0.6718376000408841,0.6625256999145963,-0.4070096551423921,-0.4639465561512684,0.36842036411392376,0.23838682250992616,-0.6508805650177836,0.0010376183946509156,0.6441017812544829,-0.24039741854819335,0.6989823407024248,0.4657813909909695,0.026723137044714688
204,0.8591595365934789,-0.5511589167815552,-0.9613944074844338,0.8868807913037107,0.9997891580747996,-0.8993684057679644,-0.9717953279799708,0.172477062169891,0.8792707300581611,-0.20503233662739,-0.7283580416915835,0.6386023347041929,0.5290825768376454,-0.4628064285623604,-0.2946715406356436,0.32102664729856617,0.04069324894464566,-0.629133245981949,0.21628648995782263,0.7843893414258646,-0.45832247982047253,0.7275702431734352,0.6702290374070955,0.09920865329739657
205,0.8913392455870175,-0.48274523448140144,-0.9809618380556229,0.920865677253616,0.9968260633013697,-0.8829015393399409,-0.9377401559124097,0.12949358659855809,0.808145664900214,-0.2530342991757413,-0.6177856621274931,0.6063512042991235,0.38097670425803437,-0.4418848644978943,-0.1155199972015296,0.44095626302936397,-0.1586226360410919,-0.5958469764485311,0.4214215709746444,0.7496769408515128,-0.6514074362380903,0.7076918762534439,0.833005088478837,-0.013541135456314274
206,0.9195589138301378,-0.5085007003220058,-0.9937209749934839,0.8648824045651194,0.9839030120103199,-0.8828021774157268,-0.8909357691509632,0.13841613583459597,0.7226748375854438,-0.48386074230377996,-0.4933391484588829,0.5919471196291681,0.22231261973676972,-0.3890369451182721,0.06750343856070244,0.450121682943994,-0.3516147370905449,-0.5488937183735413,0.6068505228597841,0.559280588805679,-0.8091874933071129,0.7407556130204135,0.9439889177435442,0.026065880612794853
207,0.9436931670203406,-0.607980407696213,-0.9995832644446337,0.9287675429854978,0.9611491270583582,-0.818725890537002,-0.8320185051412194,0.11354561501161226,0.6243754797383672,-0.5477970436288564,-0.35781330109198134,0.4540394823885981,0.05748746623084797,-0.3707102243597731,0.24826435655790655,0.46874050183934973,-0.5305890681566583,-0.5188616426106809,0.7639024869712551,0.30582604340893454,-0.9231113070849405,0.7246387957918351,0.9962800882389792,0.17107504546657812
208,0.9636347815313934,-0.6918722367361146,-0.9985080196194357,0.965722278559312,0.928791757742489,-0.9110296902083629,-0.7617893841266835,0.1382771281774754,0.5149925480779199,-0.4671576207923793,-0.2142517383822645,0.45263027106133913,-0.1089308682902638,-0.34896411834123736,0.4207041796729208,0.3512097168817629,-0.6884104874459999,-0.5561487511203201,0.885233543352591,0.16995342920141535,-0.9870044488575602,0.7177134947242643,0.9866273882519041,-0.09332387181634119
209,0.9792951607863104,-0.6679373159397465,-0.9905027031756746,0.9711209619532603,0.8871542082009943,-0.9987915212082173,-0.6812032187342151,0.08218873500850418,0.3964677488951438,-0.6098827383046554,-0.06587854352782034,0.39256780527582746,-0.2723303431762871,-0.4218125425618471,0.5790432295439465,0.27817786969927843,-0.818787153005781,-0.5305911695615797,0.9651701203847454,0.23222596206077753,-0.9974040460603334,0.7266403691912515,0.9156309759154228,0.035345209387149974
210,0.9906047288740973,-0.6892761811924516,-0.97562287542453,0.9999918914087125,0.836652507065346,-0.9478305794014092,-0.5913556326474839,-0.016264646561890268,0.27090506993953856,-0.6627213186516584,0.08397414138854217,0.4134140092667525,-0.4281825810547376,-0.43783327394728944,0.7179744444735174,0.210356607063165,-0.9165213586075023,-0.4925940069969338,0.9999742971404736,0.20317987202954177,-0.9537464626153084,0.7629284121582889,0.7877050642830827,-0.18400092112729874
211,0.9975132396615125,-0.609937804290293,-0.953971808718312,0.9628840937424251,0.7777912506396496,-0.9857765836671346,-0.49346816485842154,0.06145282916917917,0.14053343157324344,-0.7011619387571374,0.2319409481268253,0.3660880018481415,-0.5721683651842594,-0.45388473083479836,0.8328412566097173,0.1534582695996758,-0.9777167498919384,-0.5845551074606459,0.9880185926156925,0.2447508388261329,-0.8583978468093861,0.7393730066734668,0.6108034669410987,-0.24055395053025827
212,0.9999900000265173,-0.6814467695243744,-0.9256997706962817,0.9118643955079324,0.7111585611411689,-0.9296153754783923,-0.38887166201448686,0.006736540328126095,0.007667120189453631,-0.7386252263007321,0.37469886120519863,0.30096262635251503,-0.7002973402270305,-0.47348215697081003,0.9197936664767834,0.19065291607026902,-0.9999336597218027,-0.6187074070434971,0.9298620685040927,0.27721387470505743,-0.7165258910832557,0.7761264505851783,0.39592506840323277,-0.18616441418764873
213,0.9980240062216347,-0.5702110685566197,-0.8910029813631342,0.8854374860182198,0.6374202103775293,-0.9607634858373308,-0.27898818465407005,-0.1412927638522742,-0.12533529374050187,-0.5927638683695321,0.509041845663715,0.19306799814192963,-0.8090185991085457,-0.4396642100309936,0.9759172836935449,0.14871587885769585,-0.9822863700116937,-0.6451517789282568,0.8282241868667674,0.1306114092725207,-0.5358197540781281,0.7834824796811155,0.15642996674395915,-0.5811589046061376
214,0.9916239927613844,-0.5545353813517195,-0.8501222512384518,0.889363335886226,0.557312967574908,-0.9554950147465985,-0.16531167332706573,-0.07447473605004847,-0.25611282372694705,-0.6254850654785763,0.6319528476977322,0.13756750364857515,-0.8953190912117935,-0.42536932395292154,0.9993310088123568,0.11124664718187453,-0.9254784225131113,-0.6664413227526771,0.6878576451405959,-0.039594594780879656,-0.32607332457041566,0.7711454452638586,-0.09279119259122257,-0.5043816263615976
215,0.9808183936165933,-0.5196716881442881,-0.8033413100289714,0.8686536390589332,0.4716372378234493,-0.8771325999378975,-0.049387637457823146,-0.14387067234175982,-0.38234397820947147,-0.6148806651023316,0.7406715511819586,0.1151525661295014,-0.9568071246666158,-0.43705025693633226,0.9892500822690169,0.16392687311258836,-0.8317745708249755,-0.731551889778235,0.5153261348580912,-0.07326763155297292,-0.09865441350383979,0.7769120395815542,-0.33624304499791974,-0.7647765479483626
216,0.9656552158879896,-0.49492883815660776,-0.7509848374233878,0.8619358751974233,0.38124906469420167,-0.8133043280178602,0.06720785690576209,-0.19490975111430436,-0.5017879704120822,-0.39266562573595953,0.8327563684197596,0.09081109434954032,-0.9917786485905381,-0.5214985975728861,0.9460123872273839,0.1957376113714279,-0.7049104918144901,-0.7017615383900627,0.3186974163217264,-0.16452736718149183,0.13411135721659903,0.7928695612567384,-0.558788933433369,-0.659344274893933
217,0.9462018265203238,-0.666304641388301,-0.693416209676795,0.8409027239736516,0.2870515769347184,-0.8961781195914922,0.18288961478125398,-0.19668925966507292,-0.6123244955708953,-0.3086416874767901,0.906139272939061,0.06009150604602519,-0.9992644783657527,-0.5365054359881183,0.8710671247242782,0.2883354833606611,-0.5499438559690574,-0.723555290600769,0.10716606139337687,-0.13671077197989995,0.3596085772502678,0.7904012741313343,-0.7465920324381476,-0.6838632937910131
218,0.9225446530046,-0.7560329160363098,-0.6310349776244631,0.7876110214008742,0.1899859647051359,-0.8498106821019342,0.29608486405473383,-0.1650579036298051,-0.7119913694307172,-0.5561773599074953,0.9591722429085168,-0.010853908540354524,-0.9790571551727493,-0.5682643113836634,0.7669262406906555,0.34897091994224455,-0.3730526940343257,-0.7526585801630095,-0.10937649464522486,-0.3645168710801117,0.5656157762641705,0.7973783134505826,-0.8879756549256671,-0.6202757860075052
219,0.8947887993981406,-0.7965888028485396,-0.564274093628605,0.7033263069663895,0.09102207551723347,-0.8238136630479551,0.4052546383865218,-0.1772311226731414,-0.7990193598723806,-0.46332259380440466,0.9906642721549075,-0.013814541345782042,-0.9317166954079665,-0.547187047862277,0.6370802328732516,0.3079413348827095,-0.181289098424304,-0.705389911598884,-0.320804487320434,-0.4453553947676881,0.7409678016646034,0.7769069533107957,-0.9741492520301002,-0.88419649936033
220,0.863057579368428,-0.7561183593682059,-0.4935969067042674,0.7049325880217174,-0.008851276161342752,-0.7534126622183732,0.5089147005044228,-0.22056744936680878,-0.8718635933587418,-0.47572363456963696,0.9999081175941491,-0.00416068869425817,-0.8585550706487349,-0.6162599997498184,0.4858811595622658,0.22421100816259606,0.01770192144906925,-0.662427934348862,-0.5172313146814341,-0.41613480323670726,0.8761609461511297,0.7558818771860684,-0.9997549668585279,-0.5424091368565301
221,0.8274919683353151,-0.6428022508736627,-0.41949394667951106,0.7074240339689672,-0.10863618881435774,-0.7607406383035582,0.6056557214181084,-0.2209904780310367,-0.9292309786891924,-0.5841958109553774,0.9866961823862243,-0.0006430798479338025,-0.7615998482805871,-0.5261560357765572,0.3183967713303756,0.1768543253797331,0.2159872215759115,-0.6650862494621198,-0.6894718455163934,-0.32163060222234147,0.9638680284648419,0.7811832547376729,-0.9632007600821836,-0.4406755136322458
222,0.7882499771456274,-0.6524594173291111,-0.34247951970915363,0.7426881997576302,-0.2073356445790858,-0.7164056738000313,0.694162441204496,-0.18788453491502732,-0.9701031612511273,-0.31296228826495937,0.9513251781126607,-0.004941384067946012,-0.6435380004337727,-0.6056826307339235,0.14024065486026568,0.05604795725440266,0.4056617927656072,-0.595677249911845,-0.8294719260369872,-0.19638246682072358,0.9993355110484666,0.7860741887340983,-0.8667593952309963,-0.4161663007073992
223,0.7455059500627972,-0.6516597893488985,-0.263088138770635,0.7023011970249347,-0.30396347111923505,-0.7864535089475843,0.7732315508602453,-0.13765608587046835,-0.9937546002969656,-0.30571427896975817,0.894589461274079,0.0664602264114635,-0.5076414384820207,-0.6137413239041724,-0.042615918057195085,0.14559824794374857,0.579163908417558,-0.5815621917160112,-0.9306850008336166,-0.1333939487935336,0.9806411316846843,0.79727897724113,-0.7164271292652535,-0.20004062452708823
224,0.6994497901904098,-0.689956408587904,-0.18187081391593907,0.7305068987120813,-0.3975541951329225,-0.787380992812912,0.8417880521053933,-0.15467421036207343,-0.9997654483487667,-0.31999792050200804,0.8177631937557307,-0.08120892401723825,-0.35767633680604444,-0.5773918304768862,-0.22404413008322513,0.1986158089782926,0.7295765866982961,-0.5396574159615726,-0.9883782368974129,-0.10474759650992872,0.9087980860251031,0.8314484557294043,-0.5215508943602077,-0.0717627762352843
225,0.6502861157709641,-0.6254422537177807,-0.09939122802678423,0.712956764281032,-0.48717268904279,-0.6797946260772141,0.8988998727155338,-0.09326410526727993,-0.9880290041022272,-0.23891306832633485,0.7225717278984704,-0.12418153302949728,-0.1977987587809895,-0.6016771627555849,-0.39796303835712543,0.3339216262154872,0.850903348779433,-0.5109351140903798,-0.99985383601822,0.06425410865318756,0.7877001145381402,0.8289274845704195,-0.29424715093454945,-0.4435025071799735
226,0.5982333511082314,-0.7373724287990674,-0.016221824614936854,0.7328470002641662,-0.5719235144816223,-0.6762830126708941,0.9437905386771288,-0.19727672917108377,-0.9587536065301835,-0.19389724097334332,0.6111528588048277,-0.17185553080851623,-0.0324394775607966,-0.5248905189298503,-0.5585433899099405,0.3507777411481352,0.9383072795255256,-0.4724151916163982,-0.9645751867510747,-0.06879293422794876,0.6239104700465702,0.8206129832187511,-0.0486485448266961,-0.6806242610931467
227,0.5435227561520647,-0.6013083980094209,0.06706016517995395,0.687720624206052,-0.6509598692165303,-0.6564555458405403,0.9758497308794016,-0.07976810109513131,-0.9124589365625174,-0.3332398944880627,0.4860088140698168,-0.06670864114422564,0.13381881631696257,-0.5553985022324505,-0.7004030011692479,0.39465020524220307,0.9883038600367794,-0.4973219894410031,-0.8841919569486304,0.018371214775720186,0.42630620336595004,0.8110360175178235,0.19997479217308345,-0.6507800433380467
228,0.4863973990570174,-0.5171949835281928,0.14987672882070088,0.6998886990897788,-0.7234920481169532,-0.725095324414154,0.9946415828174126,-0.15293072675068903,-0.8499667919930296,-0.16425718396405078,0.3499500591436264,-0.12937105565316537,0.2963685174942595,-0.6544841353948084,-0.818787153005781,0.3455565286177644,0.9988998844215471,-0.504544369122226,-0.7624629535054732,-0.18011432551861453,0.20559704600671688,0.8124500586500436,0.43616466515770647,-0.7906963256499492
229,0.4271110762794862,-0.3797482264289919,0.23165308402618737,0.7280462462183894,-0.7887953336272641,-0.6744983584163717,0.9999106064934012,-0.19532215423323127,-0.7723864993721168,-0.018623076260781978,0.20603218033663998,-0.1472580348745244,0.4507047988336424,-0.58253074529561,-0.909727955007746,0.4825282274031021,0.9696729226252964,-0.5605798382489048,-0.6050803564763061,-0.14356497388284897,-0.026255034524455524,0.8455481230835835,0.6452359317920483,-0.9999683158444924
230,0.36592718501117627,-0.36552940143564244,0.31182166800558914,0.7127955994182923,-0.8462172369050445,-0.6523157217315282,0.9915851659497428,-0.2726759691081855,-0.6810952218454073,-0.05733362147973213,0.057487262938318864,-0.16291873853548708,0.5925504561111989,-0.6298218539902996,-0.97017733755849,0.5999052895687899,0.90178816138383,-0.5387139937810904,-0.41940354657095985,-0.10797067119060126,-0.25668414795437966,0.8551827991579193,0.8141895533370094,-0.9515785817192471
231,0.303117552958418,-0.3830839056139636,0.38982607658625396,0.6842281332811829,-0.8951840172738977,-0.69883633439414,0.9697784512084735,-0.3109255932893988,-0.5777135125028576,0.10758221137653873,-0.09234869445041148,-0.19419068926018188,0.7179744444735174,-0.6628362894215206,-0.9981092142123112,0.6133945187120781,0.7979519519050938,-0.5323151113761191,-0.21411497254091974,-0.18416720623297192,-0.4732015258041208,0.8445318108111722,0.9325208119184917,-0.7236956117534291
232,0.23896123066642116,-0.3365524026297233,0.4651249258960422,0.6903737476839749,-0.9352064148506432,-0.8308211410018617,0.9347869393760198,-0.4341954644629979,-0.4640765472019998,0.18259978538013974,-0.24011069925375803,-0.1918680774591743,0.8235008215942224,-0.6254658140923302,-0.9925873901843565,0.7171535811021404,0.662303916186903,-0.5125540458253387,0.0011858495289246477,-0.14734713233546703,-0.6640723861124043,0.8346375519035834,0.9928724430060355,-0.4247860057294489
233,0.1737432517539016,-0.2962049967379527,0.5371956097983421,0.7352132867863307,-0.9658845390683422,-0.7189119774551187,0.8870863638354546,-0.470769940848585,-0.34220154752457754,0.28587731088907276,-0.38248033539982557,-0.28143107396596456,0.9062050783297869,-0.5832059343079099,-0.9537969408607838,0.6947926755347933,0.5002519133512023,-0.5192625483857103,0.21643121999031506,-0.24365802502267817,-0.8189519336913215,0.8264420249477866,0.9914920744869923,-0.6154877951520632
234,0.10775336656612035,-0.3991013939353041,0.6055379270015925,0.7848348993316667,-0.9869118642506486,-0.7730107150305804,0.8273252463278532,-0.5104734383907741,-0.21425197215653333,0.3383707119395907,-0.5162602877914844,-0.2972123096784259,0.9637951872115138,-0.611423250678349,-0.883038008613297,0.6173086440017623,0.31825644536639547,-0.5121687797050326,0.42155603217227483,-0.045307046227585746,-0.9294460260514841,0.8179607559704594,0.92846553099017,-0.6661919614463637
235,0.041284754872455746,-0.45131003350147136,0.6696775526696496,0.8482197527386605,-0.9980782923150526,-0.7033280444458287,0.7563160798584979,-0.5157009412554323,-0.08249911234648516,0.26552518343086273,-0.6384461471104893,-0.30435254289359076,0.9946751226333268,-0.6538468636950066,-0.782682225830674,0.6033091037170336,0.12357309722111383,-0.49142415725776123,0.6069684276712939,-0.16137571125399267,-0.9895661180944233,0.8144690947851516,0.8077114977260982,-0.5260764971944196
236,-0.025367276672264887,-0.5189064371198691,0.7291693304384873,0.7806298817272771,-0.9992722520034635,-0.5652832937191195,0.6750242823023124,-0.4914954877417831,0.05071822682078113,0.08216835286013115,-0.746293882373619,-0.33243567193862816,0.9979890923662418,-0.5925387634951199,-0.6560932247398279,0.5242588620192876,-0.07603672035291548,-0.49342391427599835,0.763998322039953,-0.0645436945508966,-0.9960538285362807,0.8357543330133976,0.6367378756204817,-0.3187745172970334
237,-0.09190660651575888,-0.4579033211533678,0.783600361991259,0.7625762682542608,-0.9904818136653462,-0.5736285463329884,0.5845550708917716,-0.3551619167810877,0.18303524361971082,0.06397015692108426,-0.8373814659527854,-0.4061753464083979,0.9736452545878657,-0.5210274960826689,-0.5075138982945424,0.40482378122727214,-0.2726151938342136,-0.5259434181265721,0.8853028273268585,-0.2314334844943017,-0.9485575372520637,0.8662515229872183,0.4261749763985662,-0.3428855938767574
238,-0.15803761381669842,-0.3219688785092033,0.8325928727487738,0.7851350745387872,-0.9717948084548418,-0.6060466180946368,0.48613843603752677,-0.4417404099245068,0.3121031183575844,0.24396524200812814,-0.9096632670939825,-0.4263330434970826,0.9223182631441286,-0.5386592820924623,-0.3419241908171777,0.3183672924482967,-0.4583253598243403,-0.5213229119836962,0.9652096134709015,-0.0915675302821888,-0.849651442339514,0.8915688665221083,0.1891145812891417,-0.31598075884033555
239,-0.22346649182858055,-0.1627533830810707,0.8758068337862263,0.8257111622342458,-0.9433979507509122,-0.6970602196414096,0.3811124187729042,-0.4067840914645202,0.4356307083438057,0.2486386847079032,-0.9615159923738889,-0.5525935122991656,0.8454305705050417,-0.5903943293187606,-0.1648741848348143,0.22891349552440327,-0.6257635400475741,-0.5975410196754017,0.9999821525990276,-0.20950524924411068,-0.7046960440502352,0.884593384480506,-0.05970404252325249,-0.19391759053313595
240,-0.2879025532239214,-0.3052508250905807,0.912942321778985,0.8650154178246391,-0.90557497256898,-0.7721679076614789,0.27090491917673515,-0.5326554254581114,0.5514252190083923,0.20169399880829042,-0.9917751413705649,-0.5475973367613293,0.7451130065757331,-0.6069289146036069,0.01770192144906925,0.30175593482794716,-0.7682545026401588,-0.5668459928404246,0.9879944431168955,-0.3068394916105448,-0.5215476170626268,0.875103699386991,-0.3048105581433814,-0.12340621695678702
241,-0.3510595215595087,-0.2789034295324414,0.9437416005984248,0.7763973725072962,-0.8587037886044255,-0.7604532506839113,0.1570142831014063,-0.4963991985749585,0.6574311291981556,0.09596623460155,-0.9997611588323999,-0.4618589746030129,0.6241457258632798,-0.5809037173309524,0.1996847111166777,0.31146969363006377,-0.880117582579756,-0.5262859660692,0.9298070433046642,-0.37243895437508484,-0.3101324170823232,0.8561168652929093,-0.5309654295841015,-0.27775828033812267
242,-0.4126568031449828,-0.26181293074978707,0.9679909101106019,0.7034076268981816,-0.8032527202338713,-0.8155123531172739,0.04098893114274343,-0.4618254379840683,0.751766679669869,0.0357472399928521,-0.9852946960240333,-0.49433990710420195,0.4858811595622658,-0.6681843803720179,0.3749746534873206,0.2075184015933644,-0.9568931518735341,-0.5282427110462286,0.8281408590046396,-0.3931843939017494,-0.08190869886793242,0.8385216309064362,-0.7241074423025479,-0.16242887881936732
243,-0.4724207336640902,-0.43594259063285806,0.9855219497627712,0.705212497702951,-0.7397758162027859,-0.7411786657616208,-0.07559369319027275,-0.48187303552700145,0.8327572770565465,-0.05973790673721506,-0.9487006385122951,-0.43315320796307955,0.3341511078292741,-0.608209359604295,0.5376965425403477,0.2905353637686851,-0.9955204108530742,-0.5000439486364324,0.6877499111168577,-0.3202996316155427,0.15075429683768093,0.8672525634436127,-0.8722279614160239,-0.15923082330691618
244,-0.5300857940101242,-0.5394118172926926,0.9962130466610261,0.67248088698902,-0.6689073167535775,-0.800335611564253,-0.1911485698920146,-0.5087081356649299,0.898965220337801,0.0923541308209729,-0.8908008099367958,-0.45056345706562506,0.1731605470479181,-0.593340188805178,0.6823964166318557,0.2510914025773328,-0.994459412598224,-0.5063606755788238,0.5151990324323873,-0.23600073964033919,0.3752467298813699,0.88711968815898,-0.966117570456129,-0.3058475081250469
245,-0.58539578993384,-0.5200402991308077,0.9999900000320824,0.7056622641215812,-0.5913553165067262,-0.6752994154180928,-0.3041046518843542,-0.4283581552124006,0.9492152221261512,0.14451936759380193,-0.8128955156227154,-0.41684578766946356,0.0073710950607947,-0.6402174749360955,0.8042243593322289,0.29093615667373046,-0.9537524557612311,-0.5300386094340512,0.3185568889411502,-0.15969501631480026,0.5794015872506778,0.8800112465688289,-0.9999386682796811,-0.25613951722963174
246,-0.6381049902629059,-0.5425003954167076,0.9968265962083009,0.6573902254521305,-0.507894689413418,-0.7288790083444402,-0.41292622446932964,-0.3945390611433886,0.9826152717292859,0.16059914194296263,-0.716734340526499,-0.39646307929478114,-0.1586226360410919,-0.6237964166753928,0.8990970544354528,0.33914495419151464,-0.8750223982529546,-0.5281338713299394,0.10701868027047284,-0.2314834694412968,0.7521541093343553,0.8804184602283035,-0.9715884228338368,-0.025832435943087755
247,-0.687979218636011,-0.3261917479854887,0.9867447905617235,0.6544056697919666,-0.4193593464711572,-0.7279902755547275,-0.5161337844141267,-0.3102060168074406,0.9985724696385606,0.16922524506963466,-0.6044768573296145,-0.4138162773105634,-0.320220372869979,-0.5935961768060003,0.9638346467717017,0.29134733841846805,-0.7614079590191913,-0.4890725941482864,-0.10952383780951712,-0.18294920617560467,0.8841414763899657,0.8998783937217595,-0.8828295150682364,-0.09918587126246016
248,-0.7347968939012778,-0.24513302206219612,0.9698145551244235,0.6601667137384574,-0.3266339035607231,-0.6284145324680801,-0.612324154802974,-0.36158208001358433,0.9968035523584958,0.3091255662281644,-0.4786441270918835,-0.3308461290236487,-0.47294367058748643,-0.6323355839838152,0.9962673216493237,0.2907214102774441,-0.6174385872210872,-0.4953487843999407,-0.32094490260015385,-0.050381479910089996,0.9682102534857631,0.9421425439146928,-0.7391805439709614,-0.35973231275237416
249,-0.7783500145566823,-0.10947987032000399,0.9461533929527567,0.6483237542097788,-0.23064484265672702,-0.5827842858161874,-0.7001895621808903,-0.28194283331926545,0.9773399207462355,0.3137624789809425,-0.34206208165518653,-0.3271154817301286,-0.6125600264507652,-0.6106131987976866,0.9953080306950366,0.3041623192482656,-0.4488538873906183,-0.5183617241419209,-0.5173582361060138,-0.2968820183502662,0.9998040914789326,0.96124968779195,-0.5495729068123556,-0.10386204689460063
250,-0.8184450828588514,-0.10056921940528357,0.9159255226060192,0.6905708190159823,-0.13235125472595005,-0.5575175674264125,-0.7785354166225499,-0.29082311511724174,0.9405270826006147,0.2344281836287177,-0.1977980593066073,-0.24822314503410212,-0.7352001774333713,-0.5896481693452128,0.9609889265350202,0.49724934932217446,-0.2623747995102293,-0.49790031430240317,-0.6895793381025297,-0.08991768160259525,0.9772106714607033,0.9676258108599989,-0.32579548812127446,-0.19004382727796326
251,-0.8549039644945556,0.029903752234770854,0.8793407383995913,0.624803998276385,-0.03273525680745955,-0.6213224226340187,-0.8462965529941412,-0.28392207340715236,0.8870185193956938,0.23752903200875597,-0.04909191897331618,-0.17159395808517763,-0.8374653311888003,-0.5125510672288985,0.8944602851329572,0.5774774802460272,-0.0654356563449261,-0.5175145531581934,-0.8295549633149252,-0.21456356122982526,0.9016545088039585,0.959828736069216,-0.08176168394361567,-0.1353503786906766
252,-0.887564679995588,-0.03742657732626939,0.8366529543428967,0.6639315241055529,0.06720782097621032,-0.6276435746837874,-0.9025517125972311,-0.3747998492125412,0.817764086033171,0.27889789471812354,0.10071672002421789,-0.11478165177302871,-0.9165213586075023,-0.47830778409012353,0.7979519519050938,0.5758572662563993,0.1341121999446879,-0.4885657367957037,-0.9307396998902961,-0.29240568258241917,0.7772305870311007,0.9581699467097061,0.1673556657353764,-0.22484968900942529
253,-0.9162821243809185,-0.06674026482173183,0.7881584418678484,0.6929049393022351,0.1664793804286414,-0.6182419084917754,-0.9465360683068494,-0.4278108491055114,0.7339931495349514,0.24746030579661482,0.2482634786223801,-0.12346740607677235,-0.97017733755849,-0.4242362048664603,0.6746986038257905,0.4318397832986428,0.32831342603803426,-0.4494047646417985,-0.9884020399456962,-0.4049559741987855,0.6106824184055323,0.9767617694783794,0.4060676506929371,-0.06554146835331684
254,-0.9409287118289015,0.024203716694722044,0.7341937735787256,0.6688776649083711,0.26408753294259263,-0.6758784077421804,-0.9776516229160086,-0.43581833405345427,0.6371927659886355,0.3238707810400973,0.39023477471497986,-0.05654342897675449,-0.9969462710905873,-0.5045453516012357,0.5288313325190398,0.42858078140254363,0.5094258318881653,-0.42603276926312034,-0.9998456300017843,-0.3829520253336366,0.4110365598913215,0.981823030112944,0.6195323158871154,-0.12494358957707953
255,-0.9613949425153832,0.03733183140617771,0.6751334872947263,0.6091865375478093,0.3590570101231171,-0.6707571678594564,-0.9954753393141288,-0.49847677675884433,0.5290812831369119,0.2156773111875082,0.5234422390637385,-0.02143720611927902,-0.9960862973772684,-0.5226854594774414,0.3652391821536459,0.554693948530491,0.6702290374070955,-0.4579124217734183,-0.9645353553921383,-0.389785108296746,0.1891133929396125,0.9999960871536114,0.7944774623354411,-0.08409307459090438
256,-0.9775898890993684,-0.06000329575234665,0.6113874865977117,0.6385979371100539,0.4504389083469587,-0.7462020729222283,-0.9997648919640457,-0.40561454875858766,0.4115778372033273,0.23932286560177518,0.644894319197637,-0.054056106661375836,-0.96762124933175,-0.49393213968607574,0.1894052829865774,0.40476598278404824,0.8043123262346372,-0.466751971104443,-0.8841223628046778,-0.4014350962497896,-0.0430593182017702,0.97675388660634,0.9200258481648044,0.029509716148383235
257,-0.9894416006948833,-0.11168092375673382,0.5433981959264884,0.6998082964657955,0.5373201698939015,-0.8726498666012791,-0.990461961482367,-0.28242073905495796,0.28676828543018756,0.012511288983343836,0.7518634632320171,-0.17539998874430404,-0.912339994112663,-0.47654654950313363,0.007223072878819541,0.4559844714805124,0.906330220769519,-0.4146347197374258,-0.7623668508740649,-0.24678787983595638,-0.27289830576162316,0.9675711169799527,0.9883714828280601,0.5777347072365625
258,-0.9968974225342699,-0.16527474647399007,0.4716374899624757,0.7663996026916836,0.6188327059177012,-0.8854003239616809,-0.9676930275311245,-0.23833319181188234,0.15686817907564163,-0.02118488768340095,0.841947374803662,-0.23087346864567956,-0.8317745708249755,-0.5175304429510847,-0.17520123349920907,0.3867991760886779,0.9722155894927249,-0.4143279074946784,-0.6049622392205793,-0.367081298355812,-0.487946784829653,0.9691627898331531,0.9952649657885488,0.34942009150093045
259,-0.9999242299027156,-0.12547860699896732,0.396603418618098,0.8008819730930491,0.6941620701028643,-0.8492802070179101,-0.9317676492408153,-0.31885001008882896,0.024183434153157633,-0.0075090062579758485,0.9131229634680543,-0.25340890029315055,-0.7281577322977505,-0.5185183996455054,-0.3517533071184418,0.48448094554867255,0.9993417906665604,-0.4560116198679185,-0.41926893798173404,-0.15005660041100968,-0.6765495846383683,0.9426856151112875,0.9402776936638297,0.007146977163294211
260,-0.9985085753046877,-0.16169825777964353,0.3188167503579329,0.8172796235081048,0.7625555963430352,-0.7890734239210294,-0.8831742565436466,-0.25585072610220155,-0.1089306019349719,0.0767542202041181,0.9637917789492614,-0.3787087463068442,-0.6043610676062894,-0.5557152549257511,-0.5165156390114066,0.5997523193608882,0.9866273882519041,-0.48569198704522903,-0.21397016706162447,-0.10039973660097501,-0.8284848340612185,0.954165291867221,0.8268285087080932,-0.11386848691735907
261,-0.9926567482084496,-0.09958054630580518,0.23881735784341807,0.8281372386762353,0.8233299191319133,-0.8140601293384271,-0.8225735096366839,-0.3553674204940323,-0.2401109612432456,-0.05909290003012409,0.99281590888706,-0.4283359818026377,-0.463815420182426,-0.5783250359551799,-0.66396587785995,0.5674152451898657,0.9345792653504099,-0.4093243289851332,0.0013340806372143506,-0.08863242658300413,-0.935517965314699,0.936414561641438,0.6619711317596895,-0.06172120231117038
262,-0.9823947471032186,-0.1273255444175156,0.15716047098625227,0.819354984791028,0.8758778015255105,-0.8142148964954122,-0.750789316857493,-0.4320719154658526,-0.3670290014043059,-0.1823744687258444,0.9995435338953585,-0.5641075115798541,-0.31041580700853816,-0.5722722715595743,-0.7891619230545719,0.5446055906527385,0.8452724163748147,-0.4236451717870244,0.21657594528042243,-0.11255143342711889,-0.9918480100406779,0.9636941722507111,0.45595559604354075,0.0036747620035962832
263,-0.9677681639931142,-0.05392315403206523,0.07441282341595735,0.7773031286985801,0.9196742024529744,-0.7754926354004026,-0.6687976330903763,-0.4963820079625184,-0.4874317424213096,-0.16027266766492754,0.9838235660203686,-0.623808455402353,-0.1484134739207933,-0.6159562919832948,-0.8879075694477435,0.544046775450042,0.7222672235697097,-0.46807224582075585,0.4216904841328718,0.006279260229656684,-0.9944219994691983,0.950628144220682,0.22159094975044577,-0.21690892090056274
264,-0.9488419818410697,0.023291407879657174,-0.008851280893268,0.7244760535981962,0.9542815227533997,-0.7382299520913037,-0.5777131909964762,-0.48739017609552554,-0.5991818603430317,-0.043017967843146786,0.9460090418503241,-0.6039620033566111,0.01770192144906925,-0.5728131075021685,-0.9568931518735341,0.5351136734962498,0.5704675158069115,-0.484391977127757,0.6070863191830737,-0.06446406498724573,-0.9431004288408069,0.9640884448605256,-0.026551148539808382,0.09366727336975204
265,-0.9257002858626101,0.08781529682131493,-0.09205395353714756,0.6919085312506786,0.9793539775220065,-0.6897363650009739,-0.4787743454656908,-0.5270042123887773,-0.7002956278758357,-0.2353990332670686,0.8869491940748067,-0.5783234028815416,0.18332673396610968,-0.654899739555987,-0.9938064754408799,0.5520117770113765,0.39592506840323277,-0.4841086702635434,0.7640941403681166,0.06859029520308045,-0.8406648182681866,0.9931334946628024,-0.27304242501225534,-0.18911082560742415
266,-0.8984458899521787,0.061272557365904215,-0.17461773247336926,0.6936677567507612,0.9946410510785969,-0.6773745039948267,-0.37332623734037823,-0.4928642265593052,-0.7889781284312809,-0.3785855536738455,0.8079703795493773,-0.6542706935977146,0.34387091428136973,-0.6237288082532072,-0.9974103135414998,0.4358187377913357,0.2055983379361198,-0.48439188401825495,0.8853720919025989,0.07797059288008035,-0.6926669602544497,0.9871162317527288,-0.5025572459569345,-0.1402098835585151
267,-0.8671998799017084,0.13591482984016667,-0.255969589854493,0.6724874969460695,0.9999900000374283,-0.6518101888497072,-0.2628025053114708,-0.47788989635411894,-0.8636551185088495,-0.553126165856611,0.7108462921800559,-0.7011778053651275,0.4948852153337387,-0.7108422450340243,-0.9675838760660482,0.4417666358621182,0.0070750505385742705,-0.43412962208192485,0.9652490854076312,0.24917811902456927,-0.5071280243323874,0.9713880193945247,-0.7008254914444759,0.1783590433038968
268,-0.8321010754408373,0.07765835749459722,-0.3355449090856859,0.6902638128685458,0.9953473794686317,-0.675491955507925,-0.1487057946261785,-0.4614954699990039,-0.9230009728100718,-0.4625403917879892,0.5977581295820428,-0.6361133747992225,0.6321844967224848,-0.6065058618232644,-0.9053268579351827,0.5098535153422181,-0.19173029679733813,-0.43302845289482667,0.9999899861462309,0.2162642092734222,-0.2941038267140396,0.9471383560401281,-0.8555198022671052,0.23383297144148335
269,-0.7933054134888071,0.10320458127004684,-0.4127914035047448,0.6786506371444788,0.9807595769022848,-0.644089455683698,-0.03258732760469798,-0.491219946568623,-0.9659622160175942,-0.3679979756211201,0.47124560808368704,-0.6090595899444559,0.7519637097500518,-0.6321780918087443,-0.8127259322506333,0.5218052366996114,-0.382891962239873,-0.3828534182943108,0.9879702719694192,0.17236465383993282,-0.06513982641222865,0.9602678598915634,-0.9570220354275899,0.050291624765572224
270,-0.750985255358137,0.16635332603607386,-0.4871729494871265,0.7098366289663778,0.9563723488395427,-0.6396070718247229,0.08397418628143136,-0.7107307920766205,-0.9917762235158499,-0.4114991307182967,0.33414992617302336,-0.6896339139361498,0.850903348779433,-0.74179455905897,-0.6928848111178505,0.6250623381295031,-0.558788933433369,-0.3355672889241857,0.9297519977315464,0.12409809870736224,0.16735461411352245,0.9492207934098709,-0.999021273686088,-0.08533367342972202
271,-0.7053286209880268,0.0823976624579209,-0.5581733073726326,0.7954606898351773,0.922429364401863,-0.5513227965402728,0.19939401403745713,-0.6925139971775065,-0.9999847590883144,-0.42484428142401753,0.18954995730501162,-0.6808110816443433,0.9262614464760786,-0.7511123626487388,-0.5498202182967745,0.7013132797247675,-0.7124087532113248,-0.35369749095524844,0.8280575129964988,0.1338696918831837,0.3907787900427832,0.9525448777146422,-0.9789062078276909,0.07003470098115784
272,-0.6565383536096343,0.007414192132074792,-0.6252997043888671,0.705227568713586,0.8792697706696899,-0.6147200126424507,0.3121029446674387,-0.623982535211518,-0.9904421092775101,-0.3458205521933108,0.04069310504139096,-0.7456773451525354,0.9759495634207083,-0.8119145156497758,-0.3883272603731776,0.7446052175093468,-0.837627084134564,-0.34302536175731047,0.6876421620233208,0.15828192412420555,0.5930235856295334,0.9882406312898194,-0.8979274952217354,0.060342763257559794
273,-0.6048312185544522,-0.11973689807004091,-0.6880862547044857,0.67125021817605,0.8273248040369273,-0.6420103736001984,0.4205686236641493,-0.6673015974791038,-0.9633176700110243,-0.34412747542815897,-0.1090776266323372,-0.8373357179030392,0.9985906661521659,-0.7721829582251308,-0.21381870881502654,0.6859405393942053,-0.9294518664984789,-0.3954500708896268,0.5150719187177752,0.1370806310253102,0.7631277625538627,0.99417364902291,-0.7611200000040392,-0.09884597800145195
274,-0.5504369402095979,-0.21905277594349684,-0.7460971928756291,0.6396118409692197,0.7671134814394504,-0.6076241875491211,0.5233163864099641,-0.671909912705966,-0.9190929395771664,-0.3733085197263535,-0.2563987099693063,-0.9595804959186276,0.993557289635034,-0.899919511122672,-0.032143579709033164,0.7080824198534638,-0.9842223360000716,-0.3660846545546305,0.31841635458044437,0.16273823925328307,0.8918720357554514,0.9737542635802916,-0.5769897496109039,-0.26155517018449753
275,-0.4935971813986275,-0.18963496595498672,-0.7989298982439903,0.6380730131438741,0.6992374145094317,-0.5748635518851734,0.6189493072445122,-0.6052659748130983,-0.8585529713293221,-0.32389501376306135,-0.39796163104318627,-0.9999856221823422,0.9609889265350202,-0.9328257362985448,0.15060890814599096,0.7073754571618106,-0.9997549668585279,-0.3821506507471892,0.10687129680259598,0.26030599154570916,0.9722787391005914,0.9441228326950064,-0.35698507119139966,-0.09300016930637704
276,-0.4345644697222834,-0.29867115038919834,-0.8462176892959353,0.6779697116692963,0.6243747984707934,-0.4709180837887278,0.7061671916183289,-0.6544577921568425,-0.7827724378459455,-0.35181057192253085,-0.5305871918382075,-0.9945770950585407,0.90178816138383,-0.9999889921636391,0.32831342603803426,0.6807308785985277,-0.9754305220975703,-0.37936798395463384,-0.1096711785739513,0.18759886470719073,0.999990000064903,0.962194857874149,-0.11478479007430807,-0.3461538827325484
277,-0.3736010756292228,-0.3075947071390139,-0.8876323685886472,0.6434702817899297,0.5432736358367058,-0.42344278500584825,0.7837842531213786,-0.7173360989917558,-0.6930965539276435,-0.08278281573345812,-0.6512969081826581,-0.8668990007318169,0.8175956567694579,-0.9871757506010305,0.4950138384393182,0.7655285698661214,-0.9122187405695967,-0.3111628453481045,-0.32108531084741176,0.23521421966874864,0.9735039271673561,0.953663484404848,0.13455225333850804,-0.219386770103228
278,-0.3109778472012334,-0.3130974630769137,-0.9228865005803804,0.6463958815424886,0.45674426261572276,-0.33524248565246284,0.8507452350551237,-0.7530071770977961,-0.5911171970759168,-0.21212892328805805,-0.7573799000821544,-0.9390859420931016,0.7107446847730562,-0.8558999831884421,0.6451228352433134,0.7305946104115967,-0.812639676527811,-0.2544242090891233,-0.5174851461943671,0.12650155011337869,0.894256009298271,0.9803115724814706,0.3755234893319841,-0.18195072952612906
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/ts_datasets/deaths_by_region.csv | Date,Northland Region,Auckland Region,Waikato Region,Bay of Plenty Region,Gisborne Region,Hawke's Bay Region,Taranaki Region,Manawatu-Wanganui Region,Wellington Region,Tasman Region,Nelson Region,Marlborough Region,West Coast Region,Canterbury Region,Otago Region,Southland Region
1991,996,6768,2256,1647,369,1206,888,1950,2940,237,360,270,318,3756,1581,840
1992,1023,6918,2454,1722,375,1248,966,1959,2913,237,309,276,300,3978,1578,843
1993,1062,7086,2325,1719,423,1260,918,1929,2958,279,330,297,294,3867,1551,789
1994,1110,7002,2412,1680,390,1242,900,1938,2769,282,342,285,291,3951,1512,834
1995,1158,7182,2361,1857,450,1215,903,1971,2997,303,360,357,294,3993,1536,861
1996,1116,7383,2520,1932,405,1239,888,1998,3003,315,351,345,303,3999,1590,846
1997,1155,7281,2469,1875,417,1266,777,1824,2994,276,348,327,306,3807,1581,759
1998,1089,6813,2508,1722,360,1170,756,1812,2772,252,315,300,321,3627,1509,765
1999,1203,7170,2628,1977,438,1293,909,1971,2943,279,348,324,288,3906,1563,762
2000,1104,6801,2493,1881,387,1203,876,1902,2796,300,351,351,228,3639,1488,783
2001,1209,7050,2580,2049,378,1236,906,1992,2910,306,345,351,276,3828,1548,807
2002,1287,7140,2535,1986,399,1254,888,1917,2922,285,393,378,270,3975,1536,831
2003,1254,7164,2541,1971,411,1326,921,1908,2931,291,369,327,267,3867,1596,786
2004,1191,7242,2646,2073,363,1218,918,1968,2922,309,381,387,255,4179,1494,792
2005,1167,6873,2520,1956,375,1278,909,1836,2745,315,345,372,243,3816,1428,723
2006,1164,7152,2754,2151,438,1251,858,1965,2826,342,366,360,240,4014,1458,765
2007,1224,7218,2787,2139,378,1281,939,1812,2865,324,381,366,255,4107,1530,783
2008,1344,7404,2871,2241,387,1200,912,1920,3015,297,411,381,273,4017,1617,786
2009,1287,7386,2760,2226,360,1251,897,1941,2955,300,360,378,264,4206,1560,765
2010,1233,7227,2772,2139,381,1206,909,1905,2916,309,372,360,249,4272,1392,744
2011,1365,7692,2937,2202,366,1269,978,1911,3123,348,375,378,291,4473,1542,807
2012,1374,7665,2931,2211,396,1326,948,1920,3123,342,417,393,252,4359,1635,783
2013,1365,7566,2919,2238,399,1269,888,1947,3024,372,411,396,291,4098,1581,780
2014,1377,8034,2955,2352,387,1377,963,2064,3186,339,402,381,279,4422,1689,816
2015,1443,8175,3105,2517,351,1467,1008,2016,3150,375,447,411,294,4305,1641,879
2016,1563,8007,3066,2403,378,1368,981,2082,3237,384,447,426,267,4158,1536,855
2017,1611,8577,3378,2676,432,1527,1017,2262,3300,372,468,405,309,4494,1647,849
2018,1692,8586,3420,2583,429,1515,1026,2190,3330,402,441,450,291,4431,1626,804
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/stemmer_tests/test_stemmer.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nltk import stem as nltk_stem
from cuml.preprocessing.text import stem as rapids_stem
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
def get_words():
    """
    Return the unique (lower-cased) words of the nltk treebank corpus.
    """
    import nltk

    nltk.download("treebank")
    from nltk.corpus import treebank

    seen = set()
    for fileid in treebank.fileids():
        for word, _tag in treebank.tagged_words(fileid):
            # lower-case before deduplicating
            seen.add(word.lower())
    return list(seen)
def test_same_results():
    """cuML's PorterStemmer must agree with nltk's on the treebank words."""
    words = get_words()
    gpu_words = cudf.Series(words)

    nltk_stemmer = nltk_stem.PorterStemmer()
    expected = [nltk_stemmer.stem(w) for w in words]

    got = rapids_stem.PorterStemmer().stem(gpu_words).to_pandas().values

    assert all(a == b for a, b in zip(expected, got))
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/stemmer_tests/test_len_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer_utils.len_flags_utils import (
len_eq_n,
len_gt_n,
)
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
def test_len_gt_n():
    """len_gt_n flags strings strictly longer than n characters."""
    words = cudf.Series(["a", "abcd", "abc", "abcd"])
    result = len_gt_n(words, 3).values.get()
    np.testing.assert_array_equal(
        result, np.asarray([False, True, False, True])
    )
def test_len_eq_n():
    """len_eq_n flags strings of exactly n characters."""
    words = cudf.Series(["a", "abcd", "abc", "abcd"])
    result = len_eq_n(words, 3).values.get()
    np.testing.assert_array_equal(
        result, np.asarray([False, False, True, False])
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/stemmer_tests/test_steps.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer import PorterStemmer
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
def test_step1a():
    """Exercise PorterStemmer._step1a with and without a selection mask."""
    words = cudf.Series(["caresses", "ponies", "ties", "caress", "cats"])
    stemmer = PorterStemmer()

    result = stemmer._step1a(words)
    assert list(result.to_pandas().values) == [
        "caress", "poni", "tie", "caress", "cat"
    ]

    # Masked-out rows must be returned untouched.
    mask = cudf.Series([True, False, True, True, False])
    result = stemmer._step1a(words, mask)
    assert list(result.to_pandas().values) == [
        "caress", "ponies", "tie", "caress", "cats"
    ]
def test_step1b():
    """Exercise PorterStemmer._step1b with and without a selection mask."""
    cases = [
        ("feed", "feed"),
        ("agreed", "agree"),
        ("plastered", "plaster"),
        ("bled", "bled"),
        ("motoring", "motor"),
        ("sing", "sing"),
        ("conflated", "conflate"),
        ("troubled", "trouble"),
        ("sized", "size"),
        ("hopping", "hop"),
        ("tanned", "tan"),
        ("falling", "fall"),
        ("hissing", "hiss"),
        ("fizzed", "fizz"),
        ("failing", "fail"),
        ("filing", "file"),
    ]
    words = cudf.Series([w for w, _ in cases])
    stemmer = PorterStemmer()

    result = stemmer._step1b(words)
    assert list(result.to_pandas().values) == [s for _, s in cases]

    # With the mask, the last three words must pass through unchanged.
    expect = [s for _, s in cases][:-3] + ["fizzed", "failing", "filing"]
    mask = cudf.Series([True] * (len(expect) - 3) + [False] * 3)
    result = stemmer._step1b(words, mask)
    assert list(result.to_pandas().values) == expect
def test_step1c():
    """Exercise PorterStemmer._step1c with and without a selection mask."""
    words = cudf.Series(["happy", "sky", "enjoy", "boy", "toy", "y"])
    stemmer = PorterStemmer()

    result = stemmer._step1c(words)
    assert list(result.to_pandas().values) == [
        "happi", "ski", "enjoy", "boy", "toy", "y"
    ]

    # Only rows selected by the mask may be rewritten.
    mask = cudf.Series([True, False, False, False, False, True])
    result = stemmer._step1c(words, mask)
    assert list(result.to_pandas().values) == [
        "happi", "sky", "enjoy", "boy", "toy", "y"
    ]
def test_step2():
    """Exercise PorterStemmer._step2 with and without a selection mask."""
    cases = [
        ("relational", "relate"),
        ("conditional", "condition"),
        ("rational", "rational"),
        ("valenci", "valence"),
        ("hesitanci", "hesitance"),
        ("digitizer", "digitize"),
        ("conformabli", "conformable"),
        ("radicalli", "radical"),
        ("differentli", "different"),
        ("vileli", "vile"),
        ("analogousli", "analogous"),
        ("vietnamization", "vietnamize"),
        ("predication", "predicate"),
        ("operator", "operate"),
        ("feudalism", "feudal"),
        ("decisiveness", "decisive"),
        ("hopefulness", "hopeful"),
        ("callousness", "callous"),
        ("formaliti", "formal"),
        ("sensitiviti", "sensitive"),
        ("sensibiliti", "sensible"),
    ]
    words = cudf.Series([w for w, _ in cases])
    stemmer = PorterStemmer()

    result = stemmer._step2(words)
    assert list(result.to_pandas().values) == [s for _, s in cases]

    # With the mask, the last three words must pass through unchanged.
    expect = [s for _, s in cases][:-3] + [
        "formaliti", "sensitiviti", "sensibiliti"
    ]
    mask = cudf.Series([True] * (len(expect) - 3) + [False] * 3)
    result = stemmer._step2(words, mask)
    assert list(result.to_pandas().values) == expect
def test_step3():
    """Exercise PorterStemmer._step3 with and without a selection mask."""
    cases = [
        ("triplicate", "triplic"),
        ("formative", "form"),
        ("formalize", "formal"),
        ("electriciti", "electric"),
        ("electriciti", "electric"),
        ("hopeful", "hope"),
        ("goodness", "good"),
    ]
    words = cudf.Series([w for w, _ in cases])
    stemmer = PorterStemmer()

    result = stemmer._step3(words)
    assert list(result.to_pandas().values) == [s for _, s in cases]

    # With the mask, the last two words must pass through unchanged.
    expect = [s for _, s in cases][:-2] + ["hopeful", "goodness"]
    mask = cudf.Series([True] * (len(expect) - 2) + [False] * 2)
    result = stemmer._step3(words, mask)
    assert list(result.to_pandas().values) == expect
def test_step4():
    """Exercise PorterStemmer._step4 with and without a selection mask."""
    cases = [
        ("revival", "reviv"),
        ("allowance", "allow"),
        ("inference", "infer"),
        ("airliner", "airlin"),
        ("gyroscopic", "gyroscop"),
        ("adjustable", "adjust"),
        ("defensible", "defens"),
        ("irritant", "irrit"),
        ("replacement", "replac"),
        ("adjustment", "adjust"),
        ("dependent", "depend"),
        ("adoption", "adopt"),
        ("homologou", "homolog"),
        ("communism", "commun"),
        ("activate", "activ"),
        ("angulariti", "angular"),
        ("homologous", "homolog"),
        ("effective", "effect"),
        ("bowdlerize", "bowdler"),
    ]
    words = cudf.Series([w for w, _ in cases])
    stemmer = PorterStemmer()

    result = stemmer._step4(words)
    assert list(result.to_pandas().values) == [s for _, s in cases]

    # With the mask, the last two words must pass through unchanged.
    expect = [s for _, s in cases][:-2] + ["effective", "bowdlerize"]
    mask = cudf.Series([True] * (len(expect) - 2) + [False] * 2)
    result = stemmer._step4(words, mask)
    assert list(result.to_pandas().values) == expect
def test_step5a():
    """Exercise PorterStemmer._step5a with and without a selection mask."""
    words = cudf.Series(["probate", "rate", "cease", "ones"])
    stemmer = PorterStemmer()

    result = stemmer._step5a(words)
    assert list(result.to_pandas().values) == ["probat", "rate", "ceas", "ones"]

    # Masked-out rows must be returned untouched.
    mask = cudf.Series([True, True, False, False])
    result = stemmer._step5a(words, mask)
    assert list(result.to_pandas().values) == [
        "probat", "rate", "cease", "ones"
    ]
def test_step5b():
    """Exercise PorterStemmer._step5b with and without a selection mask."""
    words = cudf.Series(["controll", "roll"])
    stemmer = PorterStemmer()

    result = stemmer._step5b(words)
    assert list(result.to_pandas().values) == ["control", "roll"]

    # A False mask entry leaves the word untouched.
    mask = cudf.Series([False, True])
    result = stemmer._step5b(words, mask)
    assert list(result.to_pandas().values) == ["controll", "roll"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/stemmer_tests/test_porter_stemmer_rules.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer_utils import (
porter_stemmer_rules,
)
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
def test_ends_with_suffix():
    """ends_with_suffix flags exactly the strings ending in the suffix."""
    words = cudf.Series(["happy", "apple", "nappy", ""])
    got = porter_stemmer_rules.ends_with_suffix(words, "ppy").values.get()
    np.testing.assert_array_equal(
        got, np.asarray([True, False, True, False])
    )
def test_ends_with_empty_suffix():
    """An empty suffix matches every string."""
    words = cudf.Series(["happy", "sad"])
    got = porter_stemmer_rules.ends_with_suffix(words, "").values.get()
    np.testing.assert_array_equal(got, np.asarray([True, True]))
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/stemmer_tests/test_suffix_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.text.stem.porter_stemmer_utils.suffix_utils import (
get_stem_series,
replace_suffix,
)
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
def test_get_stem_series():
    """get_stem_series strips suffix_len chars only where the mask allows."""
    words = cudf.Series(["ihop", "packit", "mishit", "crow", "girl", "boy"])
    mask = cudf.Series([True, True, True, False, False, False])

    got = get_stem_series(words, suffix_len=2, can_replace_mask=mask)

    expect = ["ih", "pack", "mish", "crow", "girl", "boy"]
    assert sorted(list(got.to_pandas().values)) == sorted(expect)
def test_replace_suffix():
    """replace_suffix rewrites a suffix only where the mask allows it."""
    # 'ing' -> 's'
    words = cudf.Series(["shopping", "parking", "drinking", "sing", "bing"])
    mask = cudf.Series([True, True, True, False, False])
    got = replace_suffix(words, "ing", "s", mask)
    expect = ["shopps", "parks", "drinks", "sing", "bing"]
    assert sorted(list(got.to_pandas().values)) == sorted(expect)

    # 'ies' -> 'i'
    words = cudf.Series(["shops", "ties"])
    mask = cudf.Series([False, True])
    got = replace_suffix(words, "ies", "i", mask)
    assert sorted(list(got.to_pandas().values)) == sorted(["shops", "ti"])
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_base.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from cuml.dask.datasets import make_regression
from cuml.dask.linear_model import LinearRegression
from cuml.internals.safe_imports import cpu_only_import_from
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.dask.datasets import make_blobs
from cuml.testing.dask.utils import load_text_corpus
from cuml.dask.naive_bayes.naive_bayes import MultinomialNB
from cuml.dask.cluster import KMeans
from dask_ml.wrappers import ParallelPostFit
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import("cupy")
np = cpu_only_import("numpy")
assert_equal = cpu_only_import_from("numpy.testing", "assert_equal")
def make_dataset(datatype, nrows, ncols, n_info):
    """Generate a random regression problem, cast it to *datatype*, and
    return an 80/20 (X_train, y_train, X_test) split."""
    features, target = make_regression(
        n_samples=nrows, n_features=ncols, n_informative=n_info, random_state=0
    )
    features = features.astype(datatype)
    target = target.astype(datatype)
    X_train, X_test, y_train, _y_test = train_test_split(
        features, target, train_size=0.8
    )
    return X_train, y_train, X_test
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", [cuml.dask.linear_model.LinearRegression])
@pytest.mark.parametrize("data_size", [[500, 20, 10]])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_get_combined_model(datatype, keys, data_size, fit_intercept, client):
    """The combined (single-GPU) model built from a trained distributed
    LinearRegression must expose coef_/intercept_ and closely reproduce
    the training targets on this regression dataset."""
    nrows, ncols, n_info = data_size
    X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)

    model = LinearRegression(
        fit_intercept=fit_intercept, client=client, verbose=True
    )
    model.fit(X_train, y_train)
    # (removed leftover debug print after fit)

    combined_model = model.get_combined_model()
    assert combined_model.coef_ is not None
    assert combined_model.intercept_ is not None

    y_hat = combined_model.predict(X_train.compute())

    np.testing.assert_allclose(
        y_hat.get(), y_train.compute().get(), atol=1e-3, rtol=1e-3
    )
def test_check_internal_model_failures(client):
    """_set_internal_model must reject values that are not models."""
    model = LinearRegression(client=client)

    # An untrained model has no combined model yet.
    assert model.get_combined_model() is None

    # An int future, a list containing one, and a plain int all fail.
    int_future = client.submit(lambda: 1)
    for bad_value in (int_future, [int_future], 1):
        with pytest.raises(ValueError):
            model._set_internal_model(bad_value)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", [cuml.dask.linear_model.LinearRegression])
@pytest.mark.parametrize("data_size", [[500, 20, 10]])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_regressor_mg_train_sg_predict(
    datatype, keys, data_size, fit_intercept, client
):
    """The combined single-GPU model must predict exactly what the
    distributed model it came from predicts."""
    nrows, ncols, n_info = data_size
    X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
    X_test_local = X_test.compute()

    dist_model = LinearRegression(fit_intercept=fit_intercept, client=client)
    dist_model.fit(X_train, y_train)
    mg_preds = dist_model.predict(X_test).compute()

    sg_model = dist_model.get_combined_model()
    sg_preds = sg_model.predict(X_test_local)

    assert_equal(mg_preds.get(), sg_preds.get())
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("keys", [cuml.linear_model.LinearRegression])
@pytest.mark.parametrize("data_size", [[500, 20, 10]])
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_regressor_sg_train_mg_predict(
    datatype, keys, data_size, fit_intercept, client
):
    """Basic compatibility check: a single-GPU model wrapped in dask-ml's
    ParallelPostFit can predict on distributed input.  See test_pickle.py
    for more extensive single-GPU serialization testing."""
    nrows, ncols, n_info = data_size
    X_train, y_train, _ = make_dataset(datatype, nrows, ncols, n_info)
    X_train_local = X_train.compute()
    y_train_local = y_train.compute()

    sg_model = cuml.linear_model.LinearRegression(fit_intercept=fit_intercept)
    sg_model.fit(X_train_local, y_train_local)

    wrapped = ParallelPostFit(estimator=sg_model)
    predictions = wrapped.predict(X_train).compute()

    assert isinstance(predictions, cupy.ndarray)

    # The dataset should be fairly linear already, so predictions land
    # very close to the training targets.
    np.testing.assert_allclose(
        predictions.get(), y_train.compute().get(), atol=1e-3, rtol=1e-3
    )
def test_getattr(client):
    """Attribute access behaves correctly on dask estimators before and
    after fitting, for local and distributed internal models."""
    kmeans_model = KMeans(client=client)

    # Not-yet-available fitted attributes raise AttributeError ...
    with pytest.raises(AttributeError):
        kmeans_model.cluster_centers_
    # ... while locally-held parameters stay reachable.
    assert kmeans_model.client is not None

    # After fitting, a non-distributed internal model exposes attributes.
    X, y = make_blobs(
        n_samples=5,
        n_features=5,
        centers=2,
        n_parts=2,
        cluster_std=0.01,
        random_state=10,
    )
    kmeans_model.fit(X)
    assert kmeans_model.cluster_centers_ is not None
    assert isinstance(kmeans_model.cluster_centers_, cupy.ndarray)

    # A trained distributed internal model exposes attributes too.
    X, y = load_text_corpus(client)
    nb_model = MultinomialNB(client=client)
    nb_model.fit(X, y)
    assert nb_model.feature_count_ is not None
    assert isinstance(nb_model.feature_count_, cupy.ndarray)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_sql.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from cuml.internals.import_utils import has_dask_sql
from cuml.internals.safe_imports import cpu_only_import
import pytest
import cuml
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
dask_cudf = gpu_only_import("dask_cudf")
np = cpu_only_import("numpy")
if has_dask_sql():
from dask_sql import Context
else:
pytest.skip("Dask-SQL not available", allow_module_level=True)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("nrows", [100000])
@pytest.mark.parametrize("ncols", [20, 50])
@pytest.mark.parametrize("n_parts", [2, 20])
@pytest.mark.parametrize("wrap_predict", [True, False])
def test_dask_sql_sg_logistic_regression(
    datatype, nrows, ncols, n_parts, wrap_predict
):
    """Train a cuML LogisticRegression through dask-sql's CREATE MODEL /
    PREDICT SQL statements and check its accuracy is close to a
    scikit-learn model fit on the same split."""
    # NOTE(review): the global output type is mutated here and never
    # restored, which can leak into later tests in the same session --
    # confirm whether a fixture should reset it.
    if wrap_predict:
        cuml.set_global_output_type("input")
    else:
        cuml.set_global_output_type("cudf")

    # Build a classification problem on the host, keep a quarter for
    # evaluation (train_test_split default), and move the training half
    # into a partitioned dask-cudf table with single-character columns.
    X, y = make_classification(
        n_samples=nrows, n_features=ncols, n_informative=5, random_state=0
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    train_df = cudf.DataFrame(
        X_train, dtype=datatype, columns=[chr(i) for i in range(ncols)]
    )
    train_df["target"] = y_train
    train_ddf = dask_cudf.from_cudf(train_df, npartitions=n_parts)

    c = Context()
    c.create_table("train_df", train_ddf)

    # Fit the cuML model entirely through SQL.
    train_query = f"""
        CREATE MODEL model WITH (
            model_class = 'cuml.linear_model.LogisticRegression',
            wrap_predict = {wrap_predict},
            target_column = 'target'
        ) AS (
            SELECT * FROM train_df
        )
    """
    c.sql(train_query)

    # Reference model trained on the same split.
    skmodel = LogisticRegression().fit(X_train, y_train)

    test_df = cudf.DataFrame(
        X_test, dtype=datatype, columns=[chr(i) for i in range(ncols)]
    )
    test_ddf = dask_cudf.from_cudf(test_df, npartitions=n_parts)
    c.create_table("test_df", test_ddf)

    # Predict through SQL on the held-out rows.
    inference_query = """
        SELECT * FROM PREDICT(
            MODEL model,
            SELECT * FROM test_df
        )
    """
    preds = c.sql(inference_query).compute()
    score = cuml.metrics.accuracy_score(y_test, preds["target"].to_numpy())

    # Allow a small tolerance below the scikit-learn reference accuracy.
    assert score >= skmodel.score(X_test, y_test) - 0.022
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_umap.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
import math
from cuml.metrics import trustworthiness
from cuml.internals import logger
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
def _load_dataset(dataset, n_rows):
    """Load digits or iris, move it to the GPU, tile it up to at least
    *n_rows* rows, and perturb the features with gaussian noise."""
    if dataset == "digits":
        X_cpu, y_cpu = load_digits(return_X_y=True)
    else:  # "iris"
        X_cpu, y_cpu = load_iris(return_X_y=True)

    local_X = cp.asarray(X_cpu)
    local_y = cp.asarray(y_cpu)

    local_X = local_X.repeat(math.ceil(n_rows / len(local_X)), axis=0)
    local_y = local_y.repeat(math.ceil(n_rows / len(local_y)), axis=0)

    # Gaussian noise keeps repeated rows from being exact duplicates.
    local_X += cp.random.standard_normal(local_X.shape, dtype=cp.float32)

    return local_X, local_y
def _local_umap_trustworthiness(local_X, local_y, n_neighbors, supervised):
    """
    Fit a single-GPU UMAP on all the data and return the trustworthiness
    of its embedding.
    """
    from cuml.manifold import UMAP

    model = UMAP(n_neighbors=n_neighbors, random_state=42, init="random")
    model.fit(local_X, y=local_y if supervised else None)
    embedding = model.transform(local_X)

    return trustworthiness(
        local_X, embedding, n_neighbors=n_neighbors, batch_size=5000
    )
def _umap_mnmg_trustworthiness(
    local_X, local_y, n_neighbors, supervised, n_parts, sampling_ratio
):
    """
    Fit UMAP on a random subsample, transform the full data through the
    multi-node multi-GPU wrapper, and return the trustworthiness score.
    """
    import dask.array as da

    from cuml.dask.manifold import UMAP as MNMG_UMAP
    from cuml.manifold import UMAP

    base_model = UMAP(n_neighbors=n_neighbors, random_state=42, init="random")

    n_samples = local_X.shape[0]
    chunk_rows = math.ceil(n_samples / n_parts)

    # Bernoulli row subsample used only for training.
    selection = np.random.RandomState(42).choice(
        [True, False],
        n_samples,
        replace=True,
        p=[sampling_ratio, 1.0 - sampling_ratio],
    )
    X_train = local_X[selection]

    base_model.fit(X_train, y=local_y[selection] if supervised else None)

    distributed_model = MNMG_UMAP(model=base_model)
    X_transform_d = da.from_array(local_X, chunks=(chunk_rows, -1))
    embedding = distributed_model.transform(X_transform_d).compute()

    return trustworthiness(
        local_X, embedding, n_neighbors=n_neighbors, batch_size=5000
    )
def _run_mnmg_test(
    n_parts, n_rows, sampling_ratio, supervised, dataset, n_neighbors, client
):
    """Return True when the MNMG trustworthiness is within 0.15 of the
    purely local run on the same data."""
    local_X, local_y = _load_dataset(dataset, n_rows)

    dist_umap = _umap_mnmg_trustworthiness(
        local_X, local_y, n_neighbors, supervised, n_parts, sampling_ratio
    )
    loc_umap = _local_umap_trustworthiness(
        local_X, local_y, n_neighbors, supervised
    )

    logger.debug(
        "\nLocal UMAP trustworthiness score : {:.2f}".format(loc_umap)
    )
    logger.debug("UMAP MNMG trustworthiness score : {:.2f}".format(dist_umap))

    return (loc_umap - dist_umap) <= 0.15
@pytest.mark.mg
@pytest.mark.parametrize("n_parts", [2, 9])
@pytest.mark.parametrize("n_rows", [100, 500])
@pytest.mark.parametrize("sampling_ratio", [0.55, 0.9])
@pytest.mark.parametrize("supervised", [True, False])
@pytest.mark.parametrize("dataset", ["digits", "iris"])
@pytest.mark.parametrize("n_neighbors", [10])
def test_umap_mnmg(
    n_parts, n_rows, sampling_ratio, supervised, dataset, n_neighbors, client
):
    """MNMG UMAP must score close to local UMAP; the randomized check is
    retried once before the test fails."""
    assert any(
        _run_mnmg_test(
            n_parts,
            n_rows,
            sampling_ratio,
            supervised,
            dataset,
            n_neighbors,
            client,
        )
        for _attempt in range(2)
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_tsvd.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import
from cuml.testing.utils import array_equal, unit_param, stress_param
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize(
    "data_info",
    [unit_param([1000, 20, 30]), stress_param([int(9e6), 5000, 30])],
)
@pytest.mark.parametrize("input_type", ["dataframe", "array"])
def test_pca_fit(data_info, input_type, client):
    """Dask TruncatedSVD attributes must match scikit-learn's arpack
    TruncatedSVD on a single-cluster blob dataset."""
    nrows, ncols, n_parts = data_info
    # Stress-test sizing: shrink the problem on small GPUs, or skip.
    if nrows == int(9e6) and pytest.max_gpu_memory < 48:
        if pytest.adapt_stress_test:
            nrows = nrows * pytest.max_gpu_memory // 256
            ncols = ncols * pytest.max_gpu_memory // 256
        else:
            pytest.skip(
                "Insufficient GPU memory for this test."
                "Re-run with 'CUML_ADAPT_STRESS_TESTS=True'"
            )

    from cuml.dask.decomposition import TruncatedSVD as daskTPCA
    from sklearn.decomposition import TruncatedSVD

    from cuml.dask.datasets import make_blobs

    X, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=0.5,
        random_state=10,
        dtype=np.float32,
    )

    # Keep a host-side copy of the same data for the sklearn reference.
    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
        X_cpu = X_train.compute().to_pandas().values
    elif input_type == "array":
        X_train = X
        X_cpu = cp.asnumpy(X_train.compute())

    cutsvd = daskTPCA(n_components=5)
    cutsvd.fit(X_train)

    sktsvd = TruncatedSVD(n_components=5, algorithm="arpack")
    sktsvd.fit(X_cpu)

    all_attr = [
        "singular_values_",
        "components_",
        "explained_variance_",
        "explained_variance_ratio_",
    ]

    for attr in all_attr:
        # Component signs can legitimately flip between solvers.
        with_sign = False if attr in ["components_"] else True
        cuml_res = getattr(cutsvd, attr)
        # NOTE(review): this branch calls to_numpy(), which np.ndarray
        # does not provide -- it looks like it was meant to catch cudf
        # objects instead; confirm which type these attributes carry.
        if isinstance(cuml_res, np.ndarray):
            cuml_res = cuml_res.to_numpy()
        skl_res = getattr(sktsvd, attr)
        if attr == "singular_values_":
            assert array_equal(cuml_res, skl_res, 1, with_sign=with_sign)
        else:
            assert array_equal(cuml_res, skl_res, 1e-1, with_sign=with_sign)
@pytest.mark.mg
@pytest.mark.parametrize(
    "data_info",
    [unit_param([1000, 20, 46]), stress_param([int(9e6), 5000, 46])],
)
def test_pca_fit_transform_fp32(data_info, client):
    """Smoke test: fit_transform on float32 distributed blobs runs cleanly."""
    nrows, ncols, n_parts = data_info

    from cuml.dask.decomposition import TruncatedSVD as daskTPCA
    from cuml.dask.datasets import make_blobs

    X_cudf, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=1.5,
        random_state=10,
        dtype=np.float32,
    )

    daskTPCA(n_components=20).fit_transform(X_cudf)
@pytest.mark.mg
@pytest.mark.parametrize(
    "data_info",
    [unit_param([1000, 20, 33]), stress_param([int(9e6), 5000, 33])],
)
def test_pca_fit_transform_fp64(data_info, client):
    """Smoke test: fit_transform on float64 distributed blobs runs cleanly."""
    nrows, ncols, n_parts = data_info

    from cuml.dask.decomposition import TruncatedSVD as daskTPCA
    from cuml.dask.datasets import make_blobs

    X_cudf, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=1.5,
        random_state=10,
        dtype=np.float64,
    )

    daskTPCA(n_components=30).fit_transform(X_cudf)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_doctest.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.internals.safe_imports import cpu_only_import
import contextlib
import doctest
import inspect
import io
import cuml
import cuml.dask
from cuml.internals.safe_imports import gpu_only_import
dask_cudf = gpu_only_import("dask_cudf")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
def _name_in_all(parent, name):
    """Return True when *name* appears in *parent*'s ``__all__`` (if any)."""
    public_api = getattr(parent, "__all__", [])
    return name in public_api
def _is_public_name(parent, name):
    """Return True when *name* is public (not underscore-prefixed)."""
    return name[:1] != "_"
def _find_doctests_in_obj(obj, finder=None, criteria=None):
    """Recursively yield every non-empty doctest found in *obj*.

    Parameters
    ----------
    obj : module or class
        The object to search for docstring examples.
    finder : doctest.DocTestFinder, optional
        Finder to reuse; a fresh one is created when omitted.
    criteria : callable, optional
        Predicate ``criteria(parent, name)`` deciding which members to
        recurse into; defaults to membership in ``__all__``.

    Yields
    ------
    doctest.DocTest
        The next doctest found in the object.
    """
    finder = finder or doctest.DocTestFinder()
    criteria = criteria or _name_in_all

    # Doctests the finder reaches directly from this object come first.
    for docstring in finder.find(obj):
        if docstring.examples:
            yield docstring

    for name, member in inspect.getmembers(obj):
        # Skip members that fail the recursion criteria.
        if not criteria(obj, name):
            continue
        # Modules recurse over their declared public API (__all__).
        if inspect.ismodule(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_name_in_all
            )
        # Classes recurse over their non-underscore attributes.
        if inspect.isclass(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_is_public_name
            )
        if inspect.isfunction(member):
            yield from _find_doctests_in_obj(member, finder)
@pytest.mark.parametrize(
    "docstring",
    _find_doctests_in_obj(cuml.dask),
    ids=lambda docstring: docstring.name,
)
def test_docstring(docstring):
    """Run one doctest from the cuml.dask public API and fail with the
    captured report when any example's output does not match."""
    # ELLIPSIS lets examples elide addresses / imprecise floats;
    # NORMALIZE_WHITESPACE ignores formatting-only differences.
    runner = doctest.DocTestRunner(
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    )

    # Names usable inside examples without an explicit import.
    docstring.globs = dict(cudf=cudf, np=np, cuml=cuml, dask_cudf=dask_cudf)

    # Capture stdout so failing outputs can be shown in the message.
    doctest_stdout = io.StringIO()
    with contextlib.redirect_stdout(doctest_stdout):
        runner.run(docstring)
    results = runner.summarize()
    assert not results.failed, (
        f"{results.failed} of {results.attempted} doctests failed for "
        f"{docstring.name}:\n{doctest_stdout.getvalue()}"
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_input_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_dask_cudf
import pytest
from cuml.dask.datasets.blobs import make_blobs
from cuml.dask.common.input_utils import DistributedDataHandler
import dask.array as da
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1e4])
@pytest.mark.parametrize("ncols", [10])
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("input_type", ["dataframe", "array", "series"])
@pytest.mark.parametrize("colocated", [True, False])
def test_extract_partitions_worker_list(
    nrows, ncols, n_parts, input_type, colocated, client
):
    """The handler should expose exactly one GPU future per partition."""
    # A series input starts life as a dataframe and is narrowed below.
    adj_input_type = "dataframe" if input_type == "series" else input_type

    X_arr, y_arr = make_blobs(
        n_samples=int(nrows), n_features=ncols, n_parts=n_parts
    )

    if adj_input_type == "dataframe" or input_type == "dataframe":
        X, y = to_dask_cudf(X_arr), to_dask_cudf(y_arr)
    elif input_type == "array":
        X, y = X_arr, y_arr

    if input_type == "series":
        X = X[X.columns[0]]

    data = (X, y) if colocated else X
    ddh = DistributedDataHandler.create(data, client)

    futures = [future for _, future in ddh.gpu_futures]
    assert len(futures) == n_parts
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [24])
@pytest.mark.parametrize("ncols", [2])
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("input_type", ["dataframe", "array", "series"])
@pytest.mark.parametrize("colocated", [True, False])
def test_extract_partitions_shape(
    nrows, ncols, n_parts, input_type, colocated, client
):
    """Each extracted partition must keep the row count of its source."""
    adj_input_type = "dataframe" if input_type == "series" else input_type

    X_arr, y_arr = make_blobs(
        n_samples=nrows, n_features=ncols, n_parts=n_parts
    )

    if adj_input_type == "dataframe" or input_type == "dataframe":
        X, y = to_dask_cudf(X_arr), to_dask_cudf(y_arr)
    elif input_type == "array":
        X, y = X_arr, y_arr

    if input_type == "series":
        X = X[X.columns[0]]

    # Expected per-partition lengths taken from the collection itself.
    if input_type in ("dataframe", "series"):
        X_len_parts = X.map_partitions(len).compute()
        y_len_parts = y.map_partitions(len).compute()
    elif input_type == "array":
        X_len_parts = X.chunks[0]
        y_len_parts = y.chunks[0]

    if colocated:
        ddh = DistributedDataHandler.create((X, y), client)
        parts = [part.result() for _, part in ddh.gpu_futures]
        for i, part in enumerate(parts):
            assert part[0].shape[0] == X_len_parts[i]
            assert part[1].shape[0] == y_len_parts[i]
    else:
        ddh = DistributedDataHandler.create(X, client)
        parts = [part.result() for _, part in ddh.gpu_futures]
        for i, part in enumerate(parts):
            assert part.shape[0] == X_len_parts[i]
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [24])
@pytest.mark.parametrize("ncols", [2])
@pytest.mark.parametrize("n_parts", [2, 12])
@pytest.mark.parametrize("X_delayed", [True, False])
@pytest.mark.parametrize("y_delayed", [True, False])
@pytest.mark.parametrize("colocated", [True, False])
def test_extract_partitions_futures(
    nrows, ncols, n_parts, X_delayed, y_delayed, colocated, client
):
    """Partition extraction works for delayed and persisted dask arrays."""
    X = da.from_array(
        cp.random.standard_normal((nrows, ncols)),
        chunks=(nrows / n_parts, -1),
    )
    y = da.from_array(
        cp.random.standard_normal((nrows,)), chunks=(nrows / n_parts,)
    )

    # Persisting turns the delayed collections into concrete futures.
    if not X_delayed:
        X = client.persist(X)
    if not y_delayed:
        y = client.persist(y)

    data = (X, y) if colocated else X
    ddh = DistributedDataHandler.create(data, client)

    futures = [future for _, future in ddh.gpu_futures]
    assert len(futures) == n_parts
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_kneighbors_regressor.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.neighbors import KNeighborsRegressor as lKNNReg
from cuml.dask.neighbors import KNeighborsRegressor as dKNNReg
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import dask.array as da
import dask.dataframe as dd
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import_from
DataFrame = gpu_only_import_from("cudf.core.dataframe", "DataFrame")
np = cpu_only_import("numpy")
cudf = gpu_only_import("cudf")
def generate_dask_array(np_array, n_parts):
    """Wrap a 2D NumPy array in a dask array with ``n_parts`` row chunks.

    Rows are divided as evenly as possible; any remainder rows are folded
    into the last chunk so the chunk sizes always sum to the row count.

    Parameters
    ----------
    np_array : numpy.ndarray
        Two-dimensional input array.
    n_parts : int
        Number of row-wise partitions to create.

    Returns
    -------
    dask.array.Array
        ``np_array`` re-chunked into ``n_parts`` partitions along axis 0.
    """
    n_samples = np_array.shape[0]
    n_samples_per_part = n_samples // n_parts
    chunks = [n_samples_per_part] * n_parts
    # The leftover after giving each partition ``n_samples // n_parts``
    # rows is ``n_samples % n_parts``. The previous expression
    # ``n_samples % n_samples_per_part`` under-counted whenever the
    # per-part size did not divide the total (e.g. 9 rows over 6 parts
    # produced chunks summing to 6) and divided by zero when
    # ``n_parts > n_samples``.
    chunks[-1] += n_samples % n_parts
    chunks = tuple(chunks)
    return da.from_array(np_array, chunks=(chunks, -1))
@pytest.fixture(
    scope="module",
    params=[
        unit_param(
            {
                "n_samples": 3000,
                "n_features": 30,
                "n_classes": 5,
                "n_targets": 2,
            }
        ),
        quality_param(
            {
                "n_samples": 8000,
                "n_features": 35,
                "n_classes": 12,
                "n_targets": 3,
            }
        ),
        stress_param(
            {
                "n_samples": 20000,
                "n_features": 40,
                "n_classes": 12,
                "n_targets": 4,
            }
        ),
    ],
)
def dataset(request):
    """Build a multi-output regression-style train/test split.

    Starts from a multilabel classification problem and, for each sample
    with at least ``n_targets`` positive labels, keeps ``n_targets`` of
    those label indices (shuffled) as float regression targets. Gaussian
    noise is added to the features afterwards.
    """
    # Oversample by 20% so that enough rows survive the label filter below.
    X, y = make_multilabel_classification(
        n_samples=int(request.param["n_samples"] * 1.2),
        n_features=request.param["n_features"],
        n_classes=request.param["n_classes"],
        n_labels=request.param["n_classes"],
        length=request.param["n_targets"],
    )
    new_x = []
    new_y = []
    for i in range(y.shape[0]):
        # Indices of the positive labels for sample i.
        a = np.argwhere(y[i] == 1)[:, 0]
        if len(a) >= request.param["n_targets"]:
            new_x.append(i)
            # Pick ``n_targets`` positive label indices at random.
            np.random.shuffle(a)
            a = a[: request.param["n_targets"]]
            new_y.append(a)
        # Stop once the requested number of samples has been collected.
        if len(new_x) >= request.param["n_samples"]:
            break
    X = X[new_x]
    noise = np.random.normal(0, 5.0, X.shape)
    X += noise
    y = np.array(new_y, dtype=np.float32)
    return train_test_split(X, y, test_size=0.3)
def exact_match(l_outputs, d_outputs):
    """Assert distributed predictions agree with the local ones.

    Shapes must be identical and more than 95% of the rows must match
    element-for-element (despite the name, a small fraction of
    mismatching rows is tolerated).
    """
    assert l_outputs.shape == d_outputs.shape
    row_matches = (l_outputs == d_outputs).all(axis=1)
    fraction_correct = np.mean(row_matches)
    assert fraction_correct > 0.95
@pytest.mark.parametrize("datatype", ["dask_array", "dask_cudf"])
@pytest.mark.parametrize("parameters", [(1, 3, 256), (8, 8, 256), (9, 3, 128)])
def test_predict_and_score(dataset, datatype, parameters, client):
    """Distributed KNN regression must match a local model's predictions
    and reproduce its R2 score (within rounding tolerance)."""
    n_neighbors, n_parts, batch_size = parameters
    X_train, X_test, y_train, y_test = dataset
    # Reference: fit a single-GPU model on the raw arrays.
    l_model = lKNNReg(n_neighbors=n_neighbors)
    l_model.fit(X_train, y_train)
    l_outputs = l_model.predict(X_test)
    handmade_local_score = r2_score(y_test, l_outputs)
    handmade_local_score = round(float(handmade_local_score), 3)
    # Re-wrap the same data as distributed collections.
    X_train = generate_dask_array(X_train, n_parts)
    X_test = generate_dask_array(X_test, n_parts)
    y_train = generate_dask_array(y_train, n_parts)
    y_test = generate_dask_array(y_test, n_parts)
    if datatype == "dask_cudf":
        X_train = to_dask_cudf(X_train, client)
        X_test = to_dask_cudf(X_test, client)
        y_train = to_dask_cudf(y_train, client)
        y_test = to_dask_cudf(y_test, client)
    d_model = dKNNReg(
        client=client, n_neighbors=n_neighbors, batch_size=batch_size
    )
    d_model.fit(X_train, y_train)
    d_outputs = d_model.predict(X_test, convert_dtype=True)
    d_outputs = d_outputs.compute()
    # Normalize to a host array for comparison with the local outputs.
    d_outputs = (
        d_outputs.to_numpy() if isinstance(d_outputs, DataFrame) else d_outputs
    )
    exact_match(l_outputs, d_outputs)
    distributed_score = d_model.score(X_test, y_test)
    distributed_score = round(float(distributed_score), 3)
    assert distributed_score == pytest.approx(handmade_local_score, abs=1e-2)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_predict_1D_labels(input_type, client):
    """Fitting and predicting with 1D labels should not crash."""
    X, y = make_regression(n_samples=10000)

    if input_type == "array":
        dX, dy = da.from_array(X), da.from_array(y)
    elif input_type == "dataframe":
        dX = dd.from_pandas(cudf.DataFrame(X), npartitions=1)
        dy = dd.from_pandas(cudf.Series(y), npartitions=1)

    model = dKNNReg()
    model.fit(dX, dy)
    model.predict(dX)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_kmeans.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from sklearn.metrics import adjusted_rand_score as sk_adjusted_rand_score
from cuml.metrics import adjusted_rand_score
import dask.array as da
from cuml.testing.utils import stress_param
from cuml.testing.utils import quality_param
from cuml.testing.utils import unit_param
import pytest
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize(
    "nrows", [unit_param(1e3), quality_param(1e5), stress_param(5e6)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
    "nclusters", [unit_param(5), quality_param(10), stress_param(50)]
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(None), quality_param(7), stress_param(50)]
)
@pytest.mark.parametrize("delayed_predict", [True, False])
@pytest.mark.parametrize("input_type", ["dataframe", "array"])
def test_end_to_end(
    nrows, ncols, nclusters, n_parts, delayed_predict, input_type, client
):
    """Fit distributed KMeans on tightly-packed blobs and require a
    perfect adjusted Rand score against the generating labels."""
    from cuml.dask.cluster import KMeans as cumlKMeans
    from cuml.dask.datasets import make_blobs
    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=nclusters,
        n_parts=n_parts,
        cluster_std=0.01,
        random_state=10,
    )
    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
        y_train = to_dask_cudf(y)
    elif input_type == "array":
        X_train, y_train = X, y
    cumlModel = cumlKMeans(
        init="k-means||", n_clusters=nclusters, random_state=10
    )
    cumlModel.fit(X_train)
    cumlLabels = cumlModel.predict(X_train, delayed=delayed_predict)
    n_workers = len(list(client.has_what().keys()))
    # Verifying we are grouping partitions. This should be changed soon.
    # With n_parts=None the data is partitioned one part per worker.
    if n_parts is not None:
        parts_len = n_parts
    else:
        parts_len = n_workers
    if input_type == "dataframe":
        assert cumlLabels.npartitions == parts_len
        cumlPred = cumlLabels.compute().values
        labels = y_train.compute().values
    elif input_type == "array":
        assert len(cumlLabels.chunks[0]) == parts_len
        cumlPred = cp.array(cumlLabels.compute())
        labels = cp.squeeze(y_train.compute())
    # Predictions must cover every cluster id exactly once over [0, k).
    assert cumlPred.shape[0] == nrows
    assert cp.max(cumlPred) == nclusters - 1
    assert cp.min(cumlPred) == 0
    score = adjusted_rand_score(labels, cumlPred)
    assert 1.0 == score
@pytest.mark.mg
@pytest.mark.parametrize("nrows_per_part", [quality_param(1e7)])
@pytest.mark.parametrize("ncols", [quality_param(256)])
@pytest.mark.parametrize("nclusters", [quality_param(5)])
def test_large_data_no_overflow(nrows_per_part, ncols, nclusters, client):
    """KMeans predictions on a very large dataset should remain exact.

    Guards against index/accumulator overflow with ~1e7 rows per worker.
    """
    from cuml.dask.cluster import KMeans as cumlKMeans
    from cuml.dask.datasets import make_blobs

    n_parts = len(list(client.has_what().keys()))

    X, y = make_blobs(
        n_samples=nrows_per_part * n_parts,
        n_features=ncols,
        centers=nclusters,
        n_parts=n_parts,
        cluster_std=0.01,
        random_state=10,
    )

    X_train, y_train = X, y

    # Materialize chunk sizes so the slice below is well-defined.
    # NOTE(review): the persisted collection is discarded here, so the
    # ``persist()`` call likely has no lasting effect — confirm intent.
    X.compute_chunk_sizes().persist()

    cumlModel = cumlKMeans(
        init="k-means||", n_clusters=nclusters, random_state=10
    )

    cumlModel.fit(X_train)

    # Predict on a quarter of the rows only to keep runtime bounded.
    n_predict = int(X_train.shape[0] / 4)
    cumlLabels = cumlModel.predict(X_train[:n_predict, :], delayed=False)

    cumlPred = cp.array(cumlLabels.compute())
    labels = cp.squeeze(y_train.compute()[:n_predict])

    # Leftover debug ``print`` statements removed; the assertion failure
    # output is sufficient for diagnosis.
    assert 1.0 == adjusted_rand_score(labels, cumlPred)
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [500])
@pytest.mark.parametrize("ncols", [5])
@pytest.mark.parametrize("nclusters", [3, 10])
@pytest.mark.parametrize("n_parts", [1, 5])
def test_weighted_kmeans(nrows, ncols, nclusters, n_parts, client):
    """Samples with a dominant weight should pull the centroid onto
    themselves; all other (tiny-weight) samples should end up far away."""
    cluster_std = 10000.0
    from cuml.dask.cluster import KMeans as cumlKMeans
    from cuml.dask.datasets import make_blobs
    # Using fairly high variance between points in clusters
    # Every sample starts with a negligible weight.
    wt = cp.array([0.00001 for j in range(nrows)])
    bound = nclusters * 100000
    # Open the space really large
    centers = cp.random.uniform(-bound, bound, size=(nclusters, ncols))
    X_cudf, y = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=centers,
        n_parts=n_parts,
        cluster_std=cluster_std,
        shuffle=False,
        verbose=False,
        random_state=10,
    )
    # Choose one sample from each label and increase its weight
    for i in range(nclusters):
        wt[cp.argmax(cp.array(y.compute()) == i).item()] = 5000.0
    cumlModel = cumlKMeans(
        verbose=0, init="k-means||", n_clusters=nclusters, random_state=10
    )
    # One weight chunk per data partition.
    chunk_parts = int(nrows / n_parts)
    sample_weights = da.from_array(wt, chunks=(chunk_parts,))
    cumlModel.fit(X_cudf, sample_weight=sample_weights)
    X = X_cudf.compute()
    labels_ = cumlModel.predict(X_cudf).compute()
    cluster_centers_ = cumlModel.cluster_centers_
    for i in range(nrows):
        label = labels_[i]
        actual_center = cluster_centers_[label]
        # L1 distance between the sample and its assigned centroid.
        diff = sum(abs(X[i] - actual_center))
        # The large weight should be the centroid
        if wt[i] > 1.0:
            assert diff < 1.0
        # Otherwise it should be pretty far away
        else:
            assert diff > 1000.0
@pytest.mark.mg
@pytest.mark.parametrize(
    "nrows", [unit_param(5e3), quality_param(1e5), stress_param(1e6)]
)
@pytest.mark.parametrize(
    "ncols", [unit_param(10), quality_param(30), stress_param(50)]
)
@pytest.mark.parametrize("nclusters", [1, 10, 30])
@pytest.mark.parametrize(
    "n_parts", [unit_param(None), quality_param(7), stress_param(50)]
)
@pytest.mark.parametrize("input_type", ["dataframe", "array"])
def test_transform(nrows, ncols, nclusters, n_parts, input_type, client):
    """transform() must return per-cluster distances whose argmin
    reproduces the cluster assignment of each sample."""
    from cuml.dask.cluster import KMeans as cumlKMeans
    from cuml.dask.datasets import make_blobs
    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=nclusters,
        n_parts=n_parts,
        cluster_std=0.01,
        shuffle=False,
        random_state=10,
    )
    y = y.astype("int64")
    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
        y_train = to_dask_cudf(y)
        labels = y_train.compute().values
    elif input_type == "array":
        X_train, y_train = X, y
        labels = cp.squeeze(y_train.compute())
    cumlModel = cumlKMeans(
        init="k-means||", n_clusters=nclusters, random_state=10
    )
    cumlModel.fit(X_train)
    xformed = cumlModel.transform(X_train).compute()
    # Normalize the dataframe result to a cupy array for the checks below.
    if input_type == "dataframe":
        xformed = cp.array(
            xformed if len(xformed.shape) == 1 else xformed.to_cupy()
        )
    if nclusters == 1:
        # series shape is (nrows,) not (nrows, 1) but both are valid
        # and equivalent for this test
        assert xformed.shape in [(nrows, nclusters), (nrows,)]
    else:
        assert xformed.shape == (nrows, nclusters)
    # The argmin of the transformed values should be equal to the labels
    # reshape is a quick manner of dealing with (nrows,) is not (nrows, 1)
    xformed_labels = cp.argmin(
        xformed.reshape((int(nrows), int(nclusters))), axis=1
    )
    assert sk_adjusted_rand_score(
        cp.asnumpy(labels), cp.asnumpy(xformed_labels)
    )
@pytest.mark.mg
@pytest.mark.parametrize(
    "nrows", [unit_param(1e3), quality_param(1e5), stress_param(5e6)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
    "nclusters", [unit_param(5), quality_param(10), stress_param(50)]
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(None), quality_param(7), stress_param(50)]
)
@pytest.mark.parametrize("input_type", ["dataframe", "array"])
def test_score(nrows, ncols, nclusters, n_parts, input_type, client):
    """Distributed KMeans score should match the combined local model's.

    Labels are not needed for scoring, so the previous dead assignments
    (``y_train`` and ``y = y_train``) have been removed.
    """
    from cuml.dask.cluster import KMeans as cumlKMeans
    from cuml.dask.datasets import make_blobs

    X, y = make_blobs(
        n_samples=int(nrows),
        n_features=ncols,
        centers=nclusters,
        n_parts=n_parts,
        cluster_std=0.01,
        shuffle=False,
        random_state=10,
    )

    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
    elif input_type == "array":
        X_train = X

    cumlModel = cumlKMeans(
        init="k-means||", n_clusters=nclusters, random_state=10
    )

    cumlModel.fit(X_train)

    actual_score = cumlModel.score(X_train)

    # Compare against the equivalent single-GPU model assembled from the
    # distributed one.
    local_model = cumlModel.get_combined_model()
    expected_score = local_model.score(X_train.compute())

    assert abs(actual_score - expected_score) < 9e-3
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_func.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask import delayed
import pytest
from cuml.dask.common.func import reduce
from cuml.dask.common.func import tree_reduce
@pytest.mark.parametrize("n_parts", [1, 2, 10, 15])
def test_tree_reduce_delayed(n_parts, client):
    """tree_reduce over delayed objects should sum all the parts."""
    delayed_sum = delayed(sum)
    parts = [delayed(value) for value in range(n_parts)]
    reduced = tree_reduce(parts, func=delayed_sum)
    result = client.compute(reduced, sync=True)
    assert result == sum(range(n_parts))
# Using custom remote task for storing data on workers.
# `client.scatter` doesn't seem to work reliably
# Ref: https://github.com/dask/dask/issues/6027
def s(x):
    """Identity task; submitted via ``client.submit`` to place ``x`` on a
    worker as a future."""
    return x
@pytest.mark.parametrize("n_parts", [1, 2, 10, 15])
def test_tree_reduce_futures(n_parts, client):
    """tree_reduce over concrete futures should sum all the parts."""
    futures = [client.submit(s, value) for value in range(n_parts)]
    reduced = tree_reduce(futures)
    result = client.compute(reduced, sync=True)
    assert result == sum(range(n_parts))
@pytest.mark.parametrize("n_parts", [1, 2, 10, 15])
def test_reduce_futures(n_parts, client):
    """reduce over futures should produce the sum of all the parts."""

    def identity(value):
        return value

    futures = [client.submit(identity, value) for value in range(n_parts)]
    reduced = reduce(futures, sum)
    result = client.compute(reduced, sync=True)
    # Testing this gets the correct result for now.
    assert result == sum(range(n_parts))
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_datasets.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.part_utils import _extract_partitions
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.dask.common.input_utils import DistributedDataHandler
from cuml.dask.datasets.blobs import make_blobs
from cuml.internals.safe_imports import gpu_only_import
import pytest
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.parametrize(
    "nrows", [unit_param(1e3), quality_param(1e5), stress_param(1e6)]
)
@pytest.mark.parametrize(
    "ncols", [unit_param(10), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize("centers", [10])
@pytest.mark.parametrize("cluster_std", [0.1])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nparts",
    [unit_param(1), unit_param(7), quality_param(100), stress_param(1000)],
)
@pytest.mark.parametrize("order", ["F", "C"])
def test_make_blobs(
    nrows, ncols, centers, cluster_std, dtype, nparts, order, client
):
    """make_blobs must honor shape, partitioning, dtype and memory order."""
    n_rows = int(nrows)

    X, y = make_blobs(
        n_rows,
        ncols,
        centers=centers,
        cluster_std=cluster_std,
        dtype=dtype,
        n_parts=nparts,
        order=order,
        client=client,
    )

    # Partition counts and global shapes.
    assert len(X.chunks[0]) == nparts
    assert len(y.chunks[0]) == nparts
    assert X.shape == (n_rows, ncols)
    assert y.shape == (n_rows,)

    # Every requested center must be represented among the labels.
    assert len(cp.unique(y.compute())) == centers

    # The first partition must carry the requested memory layout.
    handler = DistributedDataHandler.create(data=X, client=client)
    first_part = handler.gpu_futures[0][1].result()
    layout_flag = {"F": "F_CONTIGUOUS", "C": "C_CONTIGUOUS"}[order]
    assert first_part.flags[layout_flag]
@pytest.mark.parametrize(
    "n_samples", [unit_param(int(1e3)), stress_param(int(1e6))]
)
@pytest.mark.parametrize("n_features", [unit_param(100), stress_param(1000)])
@pytest.mark.parametrize("n_informative", [7])
@pytest.mark.parametrize("n_targets", [1, 3])
@pytest.mark.parametrize("bias", [-4.0])
@pytest.mark.parametrize("effective_rank", [None, 6])
@pytest.mark.parametrize("tail_strength", [0.5])
@pytest.mark.parametrize("noise", [1.0])
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("coef", [True, False])
@pytest.mark.parametrize("n_parts", [unit_param(4), stress_param(23)])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("use_full_low_rank", [True, False])
def test_make_regression(
    n_samples,
    n_features,
    n_informative,
    n_targets,
    bias,
    effective_rank,
    tail_strength,
    noise,
    shuffle,
    coef,
    n_parts,
    order,
    use_full_low_rank,
    client,
):
    """Check shapes and chunking of the generated regression problem and,
    when coefficients are returned, verify the targets follow the linear
    model ``X @ coefs + bias`` with the requested noise level."""
    c = client
    from cuml.dask.datasets import make_regression

    result = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_targets=n_targets,
        bias=bias,
        effective_rank=effective_rank,
        noise=noise,
        shuffle=shuffle,
        coef=coef,
        n_parts=n_parts,
        use_full_low_rank=use_full_low_rank,
        order=order,
    )
    if coef:
        out, values, coefs = result
    else:
        out, values = result
    assert out.shape == (n_samples, n_features), "out shape mismatch"
    if n_targets > 1:
        assert values.shape == (n_samples, n_targets), "values shape mismatch"
    else:
        assert values.shape == (n_samples,), "values shape mismatch"
    assert len(out.chunks[0]) == n_parts
    assert len(out.chunks[1]) == 1
    if coef:
        if n_targets > 1:
            assert coefs.shape == (
                n_features,
                n_targets,
            ), "coefs shape mismatch"
            assert len(coefs.chunks[1]) == 1
        else:
            assert coefs.shape == (n_features,), "coefs shape mismatch"
            assert len(coefs.chunks[0]) == 1
        # Only ``n_informative`` coefficients may be non-zero, and the
        # residual std must be within 0.15 of the requested noise scale.
        test1 = da.all(da.sum(coefs != 0.0, axis=0) == n_informative)
        std_test2 = da.std(values - (da.dot(out, coefs) + bias), axis=0)
        test1, std_test2 = da.compute(test1, std_test2)
        diff = cp.abs(1.0 - std_test2)
        test2 = cp.all(diff < 1.5 * 10 ** (-1.0))
        assert test1, "Unexpected number of informative features"
        assert test2, "Unexpectedly incongruent outputs"
    # The first partition of each output must respect the memory order.
    data_ddh = DistributedDataHandler.create(data=(out, values), client=c)
    out_part, value_part = data_ddh.gpu_futures[0][1].result()
    if coef:
        coefs_ddh = DistributedDataHandler.create(data=coefs, client=c)
        coefs_part = coefs_ddh.gpu_futures[0][1].result()
    if order == "F":
        assert out_part.flags["F_CONTIGUOUS"]
        if n_targets > 1:
            assert value_part.flags["F_CONTIGUOUS"]
        if coef:
            assert coefs_part.flags["F_CONTIGUOUS"]
    elif order == "C":
        assert out_part.flags["C_CONTIGUOUS"]
        if n_targets > 1:
            assert value_part.flags["C_CONTIGUOUS"]
        if coef:
            assert coefs_part.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize("n_samples", [unit_param(500), stress_param(1000)])
@pytest.mark.parametrize("n_features", [unit_param(50), stress_param(100)])
@pytest.mark.parametrize("hypercube", [True, False])
@pytest.mark.parametrize("n_classes", [2, 4])
@pytest.mark.parametrize("n_clusters_per_class", [2, 4])
@pytest.mark.parametrize("n_informative", [7, 20])
@pytest.mark.parametrize("random_state", [None, 1234])
@pytest.mark.parametrize("n_parts", [unit_param(4), stress_param(23)])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_make_classification(
    n_samples,
    n_features,
    hypercube,
    n_classes,
    n_clusters_per_class,
    n_informative,
    random_state,
    n_parts,
    order,
    dtype,
    client,
):
    """Check shapes, dtypes, partitioning and memory order of the
    generated classification dataset."""
    from cuml.dask.datasets.classification import make_classification

    X, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_classes=n_classes,
        hypercube=hypercube,
        n_clusters_per_class=n_clusters_per_class,
        n_informative=n_informative,
        random_state=random_state,
        n_parts=n_parts,
        order=order,
        dtype=dtype,
    )
    assert (len(X.chunks[0])) == n_parts
    assert (len(X.chunks[1])) == 1

    assert X.shape == (n_samples, n_features)
    assert y.shape == (n_samples,)

    assert X.dtype == dtype
    assert y.dtype == np.int64

    assert len(X.chunks[0]) == n_parts
    assert len(y.chunks[0]) == n_parts

    # Use the module-level ``cp`` (bound via gpu_only_import) rather than
    # the previous redundant local ``import cupy as cp``, which bypassed
    # the safe_imports wrapper used everywhere else in this module.
    y_local = y.compute()
    assert len(cp.unique(y_local)) == n_classes

    X_parts = client.sync(_extract_partitions, X)
    X_first = X_parts[0][1].result()

    if order == "F":
        assert X_first.flags["F_CONTIGUOUS"]
    elif order == "C":
        assert X_first.flags["C_CONTIGUOUS"]
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_ordinal_encoder.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import dask_cudf
import numpy as np
import pandas as pd
import pytest
from cudf import DataFrame
from cuml.dask.preprocessing import OrdinalEncoder
from distributed import Client
@pytest.mark.mg
def test_ordinal_encoder_df(client: Client) -> None:
    """Fit/transform on a dask-cuDF dataframe, check code consistency for
    a second encoder fitted on the same data, and round-trip through
    inverse_transform."""
    X = DataFrame({"cat": ["M", "F", "F"], "int": [1, 3, 2]})
    X = dask_cudf.from_cudf(X, npartitions=2)

    enc = OrdinalEncoder()
    enc.fit(X)
    Xt = enc.transform(X)

    X_1 = DataFrame({"cat": ["F", "F"], "int": [1, 2]})
    X_1 = dask_cudf.from_cudf(X_1, npartitions=2)
    # Second encoder with an explicit client, fitted on the same data.
    enc = OrdinalEncoder(client=client)
    enc.fit(X)
    Xt_1 = enc.transform(X_1)

    Xt_r = Xt.compute()
    Xt_1_r = Xt_1.compute()
    # The same category must map to the same code across both transforms.
    assert Xt_1_r.iloc[0, 0] == Xt_r.iloc[1, 0]
    assert Xt_1_r.iloc[1, 0] == Xt_r.iloc[1, 0]
    assert Xt_1_r.iloc[0, 1] == Xt_r.iloc[0, 1]
    assert Xt_1_r.iloc[1, 1] == Xt_r.iloc[2, 1]

    # Turn Int64Index to RangeIndex for testing equality
    inv_Xt = enc.inverse_transform(Xt).compute().reset_index(drop=True)
    inv_Xt_1 = enc.inverse_transform(Xt_1).compute().reset_index(drop=True)

    X_r = X.compute()
    X_1_r = X_1.compute()

    assert inv_Xt.equals(X_r)
    assert inv_Xt_1.equals(X_1_r)

    assert enc.n_features_in_ == 2
@pytest.mark.mg
def test_ordinal_encoder_array(client: Client) -> None:
    """Ordinal-encode dask arrays and round-trip via inverse_transform."""

    def as_dask_array(df):
        return dask_cudf.from_cudf(df, npartitions=2).values

    X = as_dask_array(DataFrame({"A": [4, 1, 1], "B": [1, 3, 2]}))
    enc = OrdinalEncoder()
    enc.fit(X)
    Xt = enc.transform(X)

    X_1 = as_dask_array(DataFrame({"A": [1, 1], "B": [1, 2]}))
    enc = OrdinalEncoder(client=client)
    enc.fit(X)
    Xt_1 = enc.transform(X_1)

    full = Xt.compute()
    subset = Xt_1.compute()
    # Codes for the same value must agree across the two transforms.
    assert subset[0, 0] == full[1, 0]
    assert subset[1, 0] == full[1, 0]
    assert subset[0, 1] == full[0, 1]
    assert subset[1, 1] == full[2, 1]

    # inverse_transform must recover the original values.
    inv_full = enc.inverse_transform(Xt)
    inv_subset = enc.inverse_transform(Xt_1)
    cp.testing.assert_allclose(X.compute(), inv_full.compute())
    cp.testing.assert_allclose(X_1.compute(), inv_subset.compute())

    assert enc.n_features_in_ == 2
@pytest.mark.mg
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
def test_handle_unknown(client, as_array: bool) -> None:
    """Unknown categories must raise with ``handle_unknown='error'`` and
    encode as NaN with ``handle_unknown='ignore'``."""
    X = DataFrame({"data": [0, 1]})
    Y = DataFrame({"data": [3, 1]})  # 3 is never seen during fit

    X = dask_cudf.from_cudf(X, npartitions=2)
    Y = dask_cudf.from_cudf(Y, npartitions=2)

    if as_array:
        X = X.values
        Y = Y.values

    enc = OrdinalEncoder(handle_unknown="error")
    enc = enc.fit(X)
    with pytest.raises(KeyError):
        enc.transform(Y).compute()

    enc = OrdinalEncoder(handle_unknown="ignore")
    enc = enc.fit(X)
    encoded = enc.transform(Y).compute()
    # The unseen category must encode to NaN. The previous version called
    # ``np.isnan`` without asserting, so the cupy branch verified nothing.
    if as_array:
        assert np.isnan(encoded[0, 0])
    else:
        assert pd.isna(encoded.iloc[0, 0])
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_random_forest.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.distributed import Client
from sklearn.ensemble import RandomForestClassifier as skrfc
from sklearn.metrics import accuracy_score, r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_regression, make_classification
from dask.array import from_array
from cuml.ensemble import RandomForestRegressor as cuRFR_sg
from cuml.ensemble import RandomForestClassifier as cuRFC_sg
from cuml.dask.common import utils as dask_utils
from cuml.dask.ensemble import RandomForestRegressor as cuRFR_mg
from cuml.dask.ensemble import RandomForestClassifier as cuRFC_mg
from cuml.internals.safe_imports import cpu_only_import
import json
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
dask_cudf = gpu_only_import("dask_cudf")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
def _prep_training_data(c, X_train, y_train, partitions_per_worker):
    """Distribute host training data as persisted dask-cuDF collections.

    Creates ``partitions_per_worker`` partitions per worker and persists
    both collections across all workers of client ``c``.
    """
    workers = c.has_what().keys()
    n_partitions = partitions_per_worker * len(workers)

    X_df = dask_cudf.from_cudf(
        cudf.DataFrame.from_pandas(pd.DataFrame(X_train)),
        npartitions=n_partitions,
    )
    y_df = dask_cudf.from_cudf(cudf.Series(y_train), npartitions=n_partitions)

    X_df, y_df = dask_utils.persist_across_workers(
        c, [X_df, y_df], workers=workers
    )
    return X_df, y_df
@pytest.mark.parametrize("partitions_per_worker", [3])
def test_rf_classification_multi_class(partitions_per_worker, cluster):
    """Distributed RF classification on a 15-class problem should beat a
    modest accuracy floor (see linked issue for the threshold rationale)."""
    # Use CUDA_VISIBLE_DEVICES to control the number of workers
    c = Client(cluster)
    n_workers = len(c.scheduler_info()["workers"])
    # The client is created here (not via fixture), so close it in finally.
    try:
        X, y = make_classification(
            n_samples=n_workers * 5000,
            n_features=20,
            n_clusters_per_class=1,
            n_informative=10,
            random_state=123,
            n_classes=15,
        )
        X = X.astype(np.float32)
        y = y.astype(np.int32)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=n_workers * 300, random_state=123
        )
        cu_rf_params = {
            "n_estimators": n_workers * 8,
            "max_depth": 16,
            "n_bins": 256,
            "random_state": 10,
        }
        X_train_df, y_train_df = _prep_training_data(
            c, X_train, y_train, partitions_per_worker
        )
        cuml_mod = cuRFC_mg(**cu_rf_params, ignore_empty_partitions=True)
        cuml_mod.fit(X_train_df, y_train_df)
        X_test_dask_array = from_array(X_test)
        cuml_preds_gpu = cuml_mod.predict(
            X_test_dask_array, predict_model="GPU"
        ).compute()
        # accuracy_score is symmetric in its arguments, so the (pred, true)
        # ordering here is harmless.
        acc_score_gpu = accuracy_score(cuml_preds_gpu, y_test)

        # the sklearn model when ran with the same parameters gives an
        # accuracy of 0.69. There is a difference of 0.0632 (6.32%) between
        # the two when the code runs on a single GPU (seen in the CI)
        # Refer to issue : https://github.com/rapidsai/cuml/issues/2806 for
        # more information on the threshold value.

        assert acc_score_gpu >= 0.52

    finally:
        c.close()
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("partitions_per_worker", [5])
def test_rf_regression_dask_fil(partitions_per_worker, dtype, client):
    """RF regression trained on dask-cudf frames, predicted through FIL.

    Fix: the float64 xfail used to be declared only *after* the dataset
    was generated and split; ``pytest.xfail`` raises immediately, so it is
    now checked up front and the expected-failure case does not waste time
    building data it never uses.
    """
    if dtype == np.float64:
        pytest.xfail(reason=" Dask RF does not support np.float64 data")

    # Use CUDA_VISIBLE_DEVICES to control the number of workers
    n_workers = len(client.scheduler_info()["workers"])
    X, y = make_regression(
        n_samples=n_workers * 4000,
        n_features=20,
        n_informative=10,
        random_state=123,
    )
    X = X.astype(dtype)
    y = y.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_workers * 100, random_state=123
    )
    cu_rf_params = {
        "n_estimators": 50,
        "max_depth": 16,
        "n_bins": 16,
    }

    # Distribute train features/labels and test features as dask-cudf
    # collections (test frame is needed too, so the helper is not used).
    workers = client.has_what().keys()
    n_partitions = partitions_per_worker * len(workers)
    X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
    X_train_df = dask_cudf.from_cudf(X_cudf, npartitions=n_partitions)
    y_cudf = cudf.Series(y_train)
    y_train_df = dask_cudf.from_cudf(y_cudf, npartitions=n_partitions)
    X_cudf_test = cudf.DataFrame.from_pandas(pd.DataFrame(X_test))
    X_test_df = dask_cudf.from_cudf(X_cudf_test, npartitions=n_partitions)

    cuml_mod = cuRFR_mg(**cu_rf_params, ignore_empty_partitions=True)
    cuml_mod.fit(X_train_df, y_train_df)
    cuml_mod_predict = cuml_mod.predict(X_test_df)
    cuml_mod_predict = cp.asnumpy(cp.array(cuml_mod_predict.compute()))

    acc_score = r2_score(cuml_mod_predict, y_test)
    assert acc_score >= 0.59
@pytest.mark.parametrize("partitions_per_worker", [5])
def test_rf_classification_dask_array(partitions_per_worker, client):
    """Binary RF classification: train on dask-cudf, predict on a dask array."""
    n_workers = len(client.scheduler_info()["workers"])
    X, y = make_classification(
        n_samples=n_workers * 2000,
        n_features=30,
        n_clusters_per_class=1,
        n_informative=20,
        random_state=123,
        n_classes=2,
    )
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    # NOTE(review): this split is unseeded, so the test set varies between
    # runs — presumably intentional slack behind the 0.8 threshold; confirm.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_workers * 400
    )
    cu_rf_params = {
        "n_estimators": 25,
        "max_depth": 13,
        "n_bins": 15,
    }
    X_train_df, y_train_df = _prep_training_data(
        client, X_train, y_train, partitions_per_worker
    )
    # Inference input is a plain dask array, not dask-cudf.
    X_test_dask_array = from_array(X_test)
    cuml_mod = cuRFC_mg(**cu_rf_params)
    cuml_mod.fit(X_train_df, y_train_df)
    cuml_mod_predict = cuml_mod.predict(X_test_dask_array).compute()
    acc_score = accuracy_score(cuml_mod_predict, y_test, normalize=True)
    assert acc_score > 0.8
@pytest.mark.parametrize("partitions_per_worker", [5])
def test_rf_regression_dask_cpu(partitions_per_worker, client):
    """RF regression fit on distributed GPU data, predicted on CPU.

    Fix: the training-data distribution code duplicated the module helper
    ``_prep_training_data`` line for line (including the
    ``persist_across_workers`` step); it now calls the helper so the two
    code paths cannot drift apart.
    """
    n_workers = len(client.scheduler_info()["workers"])
    X, y = make_regression(
        n_samples=n_workers * 2000,
        n_features=20,
        n_informative=10,
        random_state=123,
    )
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_workers * 400, random_state=123
    )
    cu_rf_params = {
        "n_estimators": 50,
        "max_depth": 16,
        "n_bins": 16,
    }
    # Same steps as the removed inline code: per-worker partitions, then
    # persist both collections across the cluster.
    X_train_df, y_train_df = _prep_training_data(
        client, X_train, y_train, partitions_per_worker
    )
    cuml_mod = cuRFR_mg(**cu_rf_params)
    cuml_mod.fit(X_train_df, y_train_df)
    # CPU predict consumes the raw host-side test matrix directly.
    cuml_mod_predict = cuml_mod.predict(X_test, predict_model="CPU")
    acc_score = r2_score(cuml_mod_predict, y_test)
    assert acc_score >= 0.67
@pytest.mark.parametrize("partitions_per_worker", [5])
def test_rf_classification_dask_fil_predict_proba(
    partitions_per_worker, client
):
    """predict_proba must be consistent with predict and close to sklearn.

    Checks that argmax of the class probabilities reproduces the hard
    predictions, then compares probability MSE against an sklearn forest.
    """
    n_workers = len(client.scheduler_info()["workers"])
    X, y = make_classification(
        n_samples=n_workers * 1500,
        n_features=30,
        n_clusters_per_class=1,
        n_informative=20,
        random_state=123,
        n_classes=2,
    )
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_workers * 150, random_state=123
    )
    cu_rf_params = {
        "n_bins": 16,
        "n_streams": 1,
        "n_estimators": 40,
        "max_depth": 16,
    }
    X_train_df, y_train_df = _prep_training_data(
        client, X_train, y_train, partitions_per_worker
    )
    # Reuse the helper to distribute the test features; labels are unused.
    X_test_df, _ = _prep_training_data(
        client, X_test, y_test, partitions_per_worker
    )
    cu_rf_mg = cuRFC_mg(**cu_rf_params)
    cu_rf_mg.fit(X_train_df, y_train_df)
    fil_preds = cu_rf_mg.predict(X_test_df).compute()
    fil_preds = fil_preds.to_numpy()
    fil_preds_proba = cu_rf_mg.predict_proba(X_test_df).compute()
    fil_preds_proba = fil_preds_proba.to_numpy()
    # Hard predictions must agree with the most probable class.
    np.testing.assert_equal(fil_preds, np.argmax(fil_preds_proba, axis=1))
    # One-hot encode the labels so probabilities can be scored with MSE.
    y_proba = np.zeros(np.shape(fil_preds_proba))
    y_proba[:, 1] = y_test
    y_proba[:, 0] = 1.0 - y_test
    fil_mse = mean_squared_error(y_proba, fil_preds_proba)
    sk_model = skrfc(n_estimators=40, max_depth=16, random_state=10)
    sk_model.fit(X_train, y_train)
    sk_preds_proba = sk_model.predict_proba(X_test)
    sk_mse = mean_squared_error(y_proba, sk_preds_proba)
    # The threshold is required as the test would intermittently
    # fail with a max difference of 0.029 between the two mse values
    assert fil_mse <= sk_mse + 0.029
@pytest.mark.parametrize("model_type", ["classification", "regression"])
def test_rf_concatenation_dask(client, model_type):
    """After distributed predict, the concatenated local Treelite model
    should contain one tree per estimator."""
    n_workers = len(client.scheduler_info()["workers"])
    from cuml.fil.fil import TreeliteModel

    X, y = make_classification(
        n_samples=n_workers * 200, n_features=30, random_state=123, n_classes=2
    )
    X = X.astype(np.float32)
    if model_type == "classification":
        y = y.astype(np.int32)
    else:
        y = y.astype(np.float32)
    n_estimators = 40
    cu_rf_params = {"n_estimators": n_estimators}
    X_df, y_df = _prep_training_data(client, X, y, partitions_per_worker=2)
    if model_type == "classification":
        cu_rf_mg = cuRFC_mg(**cu_rf_params)
    else:
        cu_rf_mg = cuRFR_mg(**cu_rf_params)
    cu_rf_mg.fit(X_df, y_df)
    # Trigger inference so the per-worker forests are concatenated.
    res1 = cu_rf_mg.predict(X_df)
    res1.compute()
    # NOTE(review): if internal_model is unset, the assertion below is
    # silently skipped — presumably predict always populates it here;
    # confirm this guard cannot mask a regression.
    if cu_rf_mg.internal_model:
        local_tl = TreeliteModel.from_treelite_model_handle(
            cu_rf_mg.internal_model._obtain_treelite_handle(),
            take_handle_ownership=False,
        )
        assert local_tl.num_trees == n_estimators
@pytest.mark.parametrize("ignore_empty_partitions", [True, False])
def test_single_input_regression(client, ignore_empty_partitions):
    """A single-row dataset leaves most partitions empty.

    Fitting must succeed when ``ignore_empty_partitions`` is set (or when
    there is only one worker) and raise ``ValueError`` otherwise.
    """
    X, y = make_classification(n_samples=1, n_classes=1)
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    X, y = _prep_training_data(client, X, y, partitions_per_worker=2)
    cu_rf_mg = cuRFR_mg(
        n_bins=1, ignore_empty_partitions=ignore_empty_partitions
    )
    if (
        ignore_empty_partitions
        or len(client.scheduler_info()["workers"].keys()) == 1
    ):
        cu_rf_mg.fit(X, y)
        cuml_mod_predict = cu_rf_mg.predict(X)
        cuml_mod_predict = cp.asnumpy(cp.array(cuml_mod_predict.compute()))
        y = cp.asnumpy(cp.array(y.compute()))
        # With exactly one training row the forest must reproduce its label.
        assert y[0] == cuml_mod_predict[0]
    else:
        with pytest.raises(ValueError):
            cu_rf_mg.fit(X, y)
@pytest.mark.parametrize("max_depth", [1, 2, 3, 5, 10, 15, 20])
@pytest.mark.parametrize("n_estimators", [5, 10, 20])
@pytest.mark.parametrize("estimator_type", ["regression", "classification"])
def test_rf_get_json(client, estimator_type, max_depth, n_estimators):
    """The JSON dump of a fitted distributed forest must be complete and
    must reproduce the model's own predictions when traversed by hand."""
    n_workers = len(client.scheduler_info()["workers"])
    if n_estimators < n_workers:
        err_msg = "n_estimators cannot be lower than number of dask workers"
        pytest.xfail(err_msg)
    X, y = make_classification(
        n_samples=350,
        n_features=20,
        n_clusters_per_class=1,
        n_informative=10,
        random_state=123,
        n_classes=2,
    )
    X = X.astype(np.float32)
    if estimator_type == "classification":
        cu_rf_mg = cuRFC_mg(
            max_features=1.0,
            max_samples=1.0,
            n_bins=16,
            split_criterion=0,
            min_samples_leaf=2,
            random_state=23707,
            n_streams=1,
            n_estimators=n_estimators,
            max_leaves=-1,
            max_depth=max_depth,
        )
        y = y.astype(np.int32)
    elif estimator_type == "regression":
        cu_rf_mg = cuRFR_mg(
            max_features=1.0,
            max_samples=1.0,
            n_bins=16,
            min_samples_leaf=2,
            random_state=23707,
            n_streams=1,
            n_estimators=n_estimators,
            max_leaves=-1,
            max_depth=max_depth,
        )
        y = y.astype(np.float32)
    else:
        assert False
    X_dask, y_dask = _prep_training_data(client, X, y, partitions_per_worker=2)
    cu_rf_mg.fit(X_dask, y_dask)
    json_out = cu_rf_mg.get_json()
    json_obj = json.loads(json_out)
    # Test 1: Output is non-zero
    assert "" != json_out
    # Test 2: JSON object contains correct number of trees
    assert isinstance(json_obj, list)
    assert len(json_obj) == n_estimators
    # Test 3: Traverse JSON trees and get the same predictions as cuML RF
    def predict_with_json_tree(tree, x):
        # Recursively walk one JSON tree; a node without "children" is a leaf.
        if "children" not in tree:
            assert "leaf_value" in tree
            return tree["leaf_value"]
        assert "split_feature" in tree
        assert "split_threshold" in tree
        assert "yes" in tree
        assert "no" in tree
        # 1e-5 slack absorbs float rounding in the serialized threshold.
        if x[tree["split_feature"]] <= tree["split_threshold"] + 1e-5:
            return predict_with_json_tree(tree["children"][0], x)
        return predict_with_json_tree(tree["children"][1], x)
    def predict_with_json_rf_classifier(rf, x):
        # Returns the class with the highest vote. If there is a tie, return
        # the list of all classes with the highest vote.
        predictions = []
        for tree in rf:
            predictions.append(np.array(predict_with_json_tree(tree, x)))
        predictions = np.sum(predictions, axis=0)
        return np.argmax(predictions)
    def predict_with_json_rf_regressor(rf, x):
        # Regression forests average the per-tree leaf values.
        pred = 0.0
        for tree in rf:
            pred += predict_with_json_tree(tree, x)[0]
        return pred / len(rf)
    if estimator_type == "classification":
        expected_pred = cu_rf_mg.predict(X_dask).astype(np.int32)
        expected_pred = expected_pred.compute().to_numpy()
        for idx, row in enumerate(X):
            majority_vote = predict_with_json_rf_classifier(json_obj, row)
            assert expected_pred[idx] == majority_vote
    elif estimator_type == "regression":
        expected_pred = cu_rf_mg.predict(X_dask).astype(np.float32)
        expected_pred = expected_pred.compute().to_numpy()
        pred = []
        for idx, row in enumerate(X):
            pred.append(predict_with_json_rf_regressor(json_obj, row))
        pred = np.array(pred, dtype=np.float32)
        np.testing.assert_almost_equal(pred, expected_pred, decimal=6)
@pytest.mark.parametrize("max_depth", [1, 2, 3, 5, 10, 15, 20])
@pytest.mark.parametrize("n_estimators", [5, 10, 20])
def test_rf_instance_count(client, max_depth, n_estimators):
    """Instance counts in the JSON dump must be internally consistent:
    every non-leaf node's count equals the sum of its children's counts."""
    n_workers = len(client.scheduler_info()["workers"])
    if n_estimators < n_workers:
        err_msg = "n_estimators cannot be lower than number of dask workers"
        pytest.xfail(err_msg)
    n_samples_per_worker = 350
    X, y = make_classification(
        n_samples=n_samples_per_worker * n_workers,
        n_features=20,
        n_clusters_per_class=1,
        n_informative=10,
        random_state=123,
        n_classes=2,
    )
    X = X.astype(np.float32)
    cu_rf_mg = cuRFC_mg(
        max_features=1.0,
        max_samples=1.0,
        n_bins=16,
        split_criterion=0,
        min_samples_leaf=2,
        random_state=23707,
        n_streams=1,
        n_estimators=n_estimators,
        max_leaves=-1,
        max_depth=max_depth,
    )
    y = y.astype(np.int32)
    X_dask, y_dask = _prep_training_data(client, X, y, partitions_per_worker=2)
    cu_rf_mg.fit(X_dask, y_dask)
    json_out = cu_rf_mg.get_json()
    json_obj = json.loads(json_out)
    # The instance count of each node must be equal to the sum of
    # the instance counts of its children
    def check_instance_count_for_non_leaf(tree):
        assert "instance_count" in tree
        if "children" not in tree:
            return
        assert "instance_count" in tree["children"][0]
        assert "instance_count" in tree["children"][1]
        assert (
            tree["instance_count"]
            == tree["children"][0]["instance_count"]
            + tree["children"][1]["instance_count"]
        )
        check_instance_count_for_non_leaf(tree["children"][0])
        check_instance_count_for_non_leaf(tree["children"][1])
    for tree in json_obj:
        check_instance_count_for_non_leaf(tree)
    # The root's count should be equal to the number of rows in the data
    # NOTE(review): `tree` is the leftover loop variable, so only the LAST
    # tree's root is checked, and the expected value is the per-worker row
    # count (not the full dataset) — presumably each worker trains on its
    # local 350-row share with max_samples=1.0; confirm both are intended.
    assert tree["instance_count"] == n_samples_per_worker
@pytest.mark.parametrize("estimator_type", ["regression", "classification"])
def test_rf_get_combined_model_right_aftter_fit(client, estimator_type):
    """get_combined_model() called immediately after fit must hand back a
    single-GPU estimator of the matching kind.

    (The typo in the test name is historic; renaming would break test
    selection by name.)
    """
    max_depth = 3
    n_estimators = 5
    n_workers = len(client.scheduler_info()["workers"])
    if n_estimators < n_workers:
        err_msg = "n_estimators cannot be lower than number of dask workers"
        pytest.xfail(err_msg)

    X, y = make_classification()
    X = X.astype(np.float32)

    # Both estimator flavors share the exact same hyper-parameters.
    shared_params = dict(
        max_features=1.0,
        max_samples=1.0,
        n_bins=16,
        n_streams=1,
        n_estimators=n_estimators,
        max_leaves=-1,
        max_depth=max_depth,
    )
    if estimator_type == "classification":
        cu_rf_mg = cuRFC_mg(**shared_params)
        y = y.astype(np.int32)
    elif estimator_type == "regression":
        cu_rf_mg = cuRFR_mg(**shared_params)
        y = y.astype(np.float32)
    else:
        assert False

    X_dask, y_dask = _prep_training_data(client, X, y, partitions_per_worker=2)
    cu_rf_mg.fit(X_dask, y_dask)

    combined = cu_rf_mg.get_combined_model()
    if estimator_type == "classification":
        assert isinstance(combined, cuRFC_sg)
    elif estimator_type == "regression":
        assert isinstance(combined, cuRFR_sg)
    else:
        assert False
@pytest.mark.parametrize("n_estimators", [5, 10, 20])
@pytest.mark.parametrize("detailed_text", [True, False])
def test_rf_get_text(client, n_estimators, detailed_text):
    """Text dumps of a fitted forest must list exactly one section per tree."""
    n_workers = len(client.scheduler_info()["workers"])
    X, y = make_classification(
        n_samples=500,
        n_features=10,
        n_clusters_per_class=1,
        n_informative=5,
        random_state=94929,
        n_classes=2,
    )
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    X, y = _prep_training_data(client, X, y, partitions_per_worker=2)
    if n_estimators >= n_workers:
        cu_rf_mg = cuRFC_mg(
            n_estimators=n_estimators, n_bins=16, ignore_empty_partitions=True
        )
    else:
        # Fewer estimators than workers must already fail at construction.
        with pytest.raises(ValueError):
            cu_rf_mg = cuRFC_mg(
                n_estimators=n_estimators,
                n_bins=16,
                ignore_empty_partitions=True,
            )
        return
    cu_rf_mg.fit(X, y)
    if detailed_text:
        text_output = cu_rf_mg.get_detailed_text()
    else:
        text_output = cu_rf_mg.get_summary_text()
    # Test 1. Output is non-zero
    assert "" != text_output
    # Count the number of trees printed
    tree_count = 0
    for line in text_output.split("\n"):
        if line.strip().startswith("Tree #"):
            tree_count += 1
    # Test 2. Correct number of trees are printed
    assert n_estimators == tree_count
@pytest.mark.parametrize("model_type", ["classification", "regression"])
@pytest.mark.parametrize("fit_broadcast", [True, False])
@pytest.mark.parametrize("transform_broadcast", [True, False])
def test_rf_broadcast(model_type, fit_broadcast, transform_broadcast, client):
    """Exercise the broadcast_data code paths of fit/predict for both RF
    flavors.

    Fix: the classification and regression branches previously duplicated
    the whole fit/predict/convert pipeline; only the data generator, model
    class, metric and acceptance threshold differ, so the common pipeline
    is now written once.
    """
    # Use CUDA_VISIBLE_DEVICES to control the number of workers
    workers = list(client.scheduler_info()["workers"].keys())
    n_workers = len(workers)

    if model_type == "classification":
        X, y = make_classification(
            n_samples=n_workers * 1000,
            n_features=20,
            n_informative=15,
            n_classes=4,
            n_clusters_per_class=1,
            random_state=999,
        )
        y = y.astype(np.int32)
    else:
        X, y = make_regression(
            n_samples=n_workers * 1000,
            n_features=20,
            n_informative=5,
            random_state=123,
        )
        y = y.astype(np.float32)

    X = X.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_workers * 100, random_state=123
    )
    X_train_df, y_train_df = _prep_training_data(client, X_train, y_train, 1)
    X_test_dask_array = from_array(X_test)

    n_estimators = n_workers * 8
    model_cls = cuRFC_mg if model_type == "classification" else cuRFR_mg
    cuml_mod = model_cls(
        n_estimators=n_estimators,
        max_depth=8,
        n_bins=16,
        ignore_empty_partitions=True,
    )
    cuml_mod.fit(X_train_df, y_train_df, broadcast_data=fit_broadcast)
    cuml_mod_predict = cuml_mod.predict(
        X_test_dask_array, broadcast_data=transform_broadcast
    )
    cuml_mod_predict = cp.asnumpy(cuml_mod_predict.compute())

    if model_type == "classification":
        acc_score = accuracy_score(cuml_mod_predict, y_test, normalize=True)
        assert acc_score >= 0.68
    else:
        acc_score = r2_score(cuml_mod_predict, y_test)
        assert acc_score >= 0.72

    if transform_broadcast:
        # Broadcast inference must not leave a concatenated local model
        # cached on the estimator.
        assert cuml_mod.internal_model is None
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_arr_utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.part_utils import _extract_partitions
import dask
from cuml.dask.common.dask_arr_utils import validate_dask_array
import pytest
from cuml.testing.utils import array_equal
from cuml.internals.safe_imports import gpu_only_import
dask_cudf = gpu_only_import("dask_cudf")
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
cupyx = gpu_only_import("cupyx")
@pytest.mark.parametrize(
    "input_type",
    [
        "dask_array",
        "dask_dataframe",
        "dataframe",
        "scipysparse",
        "cupysparse",
        "numpy",
        "cupy",
    ],
)
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [10])
def test_to_sparse_dask_array(input_type, nrows, ncols, client):
    """to_sparse_dask_array must accept every supported input container and
    preserve the matrix contents."""
    from cuml.dask.common import to_sparse_dask_array

    c = client
    # Reference matrix: random GPU CSR; each branch below re-encodes it as
    # the container under test.
    a = cupyx.scipy.sparse.random(nrows, ncols, format="csr", dtype=cp.float32)
    if input_type == "dask_dataframe":
        df = cudf.DataFrame(a.todense())
        inp = dask_cudf.from_cudf(df, npartitions=2)
    elif input_type == "dask_array":
        inp = dask.array.from_array(a.todense().get())
    elif input_type == "dataframe":
        inp = cudf.DataFrame(a.todense())
    elif input_type == "scipysparse":
        inp = a.get()
    elif input_type == "cupysparse":
        inp = a
    elif input_type == "numpy":
        inp = a.get().todense()
    elif input_type == "cupy":
        inp = a.todense()
    arr = to_sparse_dask_array(inp, c)
    arr.compute_chunk_sizes()
    assert arr.shape == (nrows, ncols)
    # We can't call compute directly on this array when it has multiple
    # partitions, so we manually concatenate the individual pieces.
    parts = c.sync(_extract_partitions, arr)
    local_parts = cp.vstack(
        [part[1].result().todense() for part in parts]
    ).get()
    assert array_equal(a.todense().get(), local_parts)
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [24])
@pytest.mark.parametrize("ncols", [1, 4, 8])
@pytest.mark.parametrize("n_parts", [2, 12])
@pytest.mark.parametrize("col_chunking", [True, False])
@pytest.mark.parametrize("n_col_chunks", [2, 4])
def test_validate_dask_array(
    nrows, ncols, n_parts, col_chunking, n_col_chunks, client
):
    """validate_dask_array must reject column-chunked 2d inputs only."""
    if ncols > 1:
        data = cp.random.standard_normal((nrows, ncols))
        data = dask.array.from_array(data, chunks=(nrows / n_parts, -1))
        if col_chunking:
            data = data.rechunk((nrows / n_parts, ncols / n_col_chunks))
    else:
        # 1d input: only row chunking is possible.
        data = cp.random.standard_normal(nrows)
        data = dask.array.from_array(data, chunks=(nrows / n_parts))

    if col_chunking and ncols > 1:
        # Column chunking on a 2d array is unsupported and must raise.
        with pytest.raises(Exception):
            validate_dask_array(data, client)
    else:
        # Must simply return without raising.
        validate_dask_array(data, client)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_pca.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.dask_arr_utils import to_dask_cudf
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [67])
@pytest.mark.parametrize("input_type", ["dataframe", "array"])
def test_pca_fit(nrows, ncols, n_parts, input_type, client):
    """Compare dask PCA fitted attributes against sklearn's full-SVD PCA.

    Fixes:
    * the fit call was wrapped in ``except Exception: print(str(e))``,
      which swallowed any failure and let the test crash later with an
      unrelated AttributeError; the exception now propagates at the
      faulty call.
    * the attribute conversion tested ``type(x) == np.ndarray`` and then
      called ``.to_numpy()`` — a method numpy arrays do not have; the
      branch could only ever crash. It now converts exactly the objects
      that expose ``to_numpy`` (cudf results).
    """
    from cuml.dask.decomposition import PCA as daskPCA
    from sklearn.decomposition import PCA
    from cuml.dask.datasets import make_blobs

    X, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=0.5,
        random_state=10,
        dtype=np.float32,
    )
    if input_type == "dataframe":
        X_train = to_dask_cudf(X)
        X_cpu = X_train.compute().to_pandas().values
    elif input_type == "array":
        X_train = X
        X_cpu = cp.asnumpy(X_train.compute())

    cupca = daskPCA(n_components=5, whiten=True)
    cupca.fit(X_train)

    skpca = PCA(n_components=5, whiten=True, svd_solver="full")
    skpca.fit(X_cpu)

    from cuml.testing.utils import array_equal

    all_attr = [
        "singular_values_",
        "components_",
        "explained_variance_",
        "explained_variance_ratio_",
    ]
    for attr in all_attr:
        # The sign of principal components is arbitrary; compare those
        # up to sign only.
        with_sign = False if attr in ["components_"] else True
        cuml_res = getattr(cupca, attr)
        if hasattr(cuml_res, "to_numpy"):
            cuml_res = cuml_res.to_numpy()
        skl_res = getattr(skpca, attr)
        assert array_equal(cuml_res, skl_res, 1e-1, with_sign=with_sign)
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [46])
def test_pca_fit_transform_fp32(nrows, ncols, n_parts, client):
    """fit_transform on float32 blobs must keep all requested components."""
    from cuml.dask.decomposition import PCA as daskPCA
    from cuml.dask.datasets import make_blobs

    X_cudf, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=1.5,
        random_state=10,
        dtype=np.float32,
    )

    transformer = daskPCA(n_components=20, whiten=True)
    transformed = transformer.fit_transform(X_cudf).compute()

    assert transformed.shape[0] == nrows
    assert transformed.shape[1] == 20
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [33])
def test_pca_fit_transform_fp64(nrows, ncols, n_parts, client):
    """fit_transform on float64 blobs must keep all requested components."""
    from cuml.dask.decomposition import PCA as daskPCA
    from cuml.dask.datasets import make_blobs

    X_cudf, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=1.5,
        random_state=10,
        dtype=np.float64,
    )

    transformer = daskPCA(n_components=30, whiten=False)
    transformed = transformer.fit_transform(X_cudf).compute()

    assert transformed.shape[0] == nrows
    assert transformed.shape[1] == 30
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1000])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [28])
def test_pca_fit_transform_fp32_noncomponents(nrows, ncols, n_parts, client):
    # Tests the case when n_components is not passed for MG scenarios
    from cuml.dask.decomposition import PCA as daskPCA
    from cuml.dask.datasets import make_blobs

    X_cudf, _ = make_blobs(
        n_samples=nrows,
        n_features=ncols,
        centers=1,
        n_parts=n_parts,
        cluster_std=1.5,
        random_state=10,
        dtype=np.float32,
    )

    # Without n_components the full feature dimensionality is retained.
    transformer = daskPCA(whiten=False)
    transformed = transformer.fit_transform(X_cudf).compute()

    assert transformed.shape[0] == nrows
    assert transformed.shape[1] == 20
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/conftest.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
import pytest
from dask_cuda import initialize
from dask_cuda import LocalCUDACluster
from dask_cuda.utils_test import IncreasedCloseTimeoutNanny
from dask.distributed import Client
# UCX transport feature flags, shared by the initialize() call and the
# LocalCUDACluster constructed in the ucx_cluster fixture below.
enable_tcp_over_ucx = True
enable_nvlink = False
enable_infiniband = False
@pytest.fixture(scope="module")
def cluster():
    """Module-scoped local CUDA cluster speaking plain TCP."""
    local_cluster = LocalCUDACluster(
        protocol="tcp",
        scheduler_port=0,
        worker_class=IncreasedCloseTimeoutNanny,
    )
    yield local_cluster
    # Teardown: shut the cluster down once the module's tests finish.
    local_cluster.close()
@pytest.fixture(scope="function")
def client(cluster):
    """Fresh Dask client per test, connected to the shared TCP cluster."""
    session = Client(cluster)
    yield session
    session.close()
@pytest.fixture(scope="module")
def ucx_cluster():
    """Module-scoped local CUDA cluster speaking UCX."""
    # Create the CUDA/UCX context before the cluster starts.
    initialize.initialize(
        create_cuda_context=True,
        enable_tcp_over_ucx=enable_tcp_over_ucx,
        enable_nvlink=enable_nvlink,
        enable_infiniband=enable_infiniband,
    )
    ucx_local_cluster = LocalCUDACluster(
        protocol="ucx",
        enable_tcp_over_ucx=enable_tcp_over_ucx,
        enable_nvlink=enable_nvlink,
        enable_infiniband=enable_infiniband,
        worker_class=IncreasedCloseTimeoutNanny,
    )
    yield ucx_local_cluster
    ucx_local_cluster.close()
@pytest.fixture(scope="function")
def ucx_client(ucx_cluster):
    """Fresh Dask client per test, connected to the shared UCX cluster."""
    session = Client(ucx_cluster)
    yield session
    session.close()
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_naive_bayes.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.metrics import accuracy_score
from cuml.testing.dask.utils import load_text_corpus
from cuml.naive_bayes.naive_bayes import MultinomialNB as SGNB
from cuml.dask.naive_bayes import MultinomialNB
import pytest
import dask.array
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def test_basic_fit_predict(client):
    """Distributed MultinomialNB should fit and score >97% on the corpus."""
    docs, labels = load_text_corpus(client)

    nb = MultinomialNB()
    nb.fit(docs, labels)

    predicted = nb.predict(docs).compute()
    expected = labels.compute()

    assert accuracy_score(predicted.get(), expected) > 0.97
def test_single_distributed_exact_results(client):
    """The distributed model must reproduce the single-GPU model exactly."""
    X, y = load_text_corpus(client)

    # Materialize the same corpus locally for the single-GPU reference.
    local_X, local_y = X.compute(), y.compute()

    dist_model = MultinomialNB()
    dist_model.fit(X, y)

    local_model = SGNB()
    local_model.fit(local_X, local_y)

    dist_preds = dist_model.predict(X).compute().get()
    local_preds = local_model.predict(local_X).get()

    assert accuracy_score(dist_preds, local_preds) == 1.0
def test_score(client):
    """score() must equal the accuracy of the model's own predictions."""
    X, y = load_text_corpus(client)

    nb = MultinomialNB()
    nb.fit(X, y)

    predicted = nb.predict(X)
    reported = nb.score(X, y)

    predicted_local = predicted.compute()
    observed_local = y.compute()

    assert accuracy_score(predicted_local.get(), observed_local) == reported
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64, cp.int32])
def test_model_multiple_chunks(client, dtype):
    # Naive Bayes with n_chunks greater than one; regression test for
    # https://github.com/rapidsai/cuml/issues/3150
    features = cp.array([[0, 0, 0, 1], [1, 0, 0, 1], [1, 0, 0, 0]])
    features = dask.array.from_array(
        features, chunks=((1, 1, 1), -1)
    ).astype(dtype)

    labels = dask.array.from_array(
        [1, 0, 0], asarray=False, fancy=False, chunks=(1)
    ).astype(cp.int32)

    nb = MultinomialNB()
    nb.fit(features, labels)

    # Coverage-oriented: the data is too small for a numeric check, but
    # scoring exercises the whole fit/predict path.
    assert 0 <= nb.score(features, labels) <= 1
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_tfidf.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.feature_extraction.text import (
TfidfTransformer as SkTfidfTransformer,
)
from cuml.dask.feature_extraction.text import TfidfTransformer
import dask
import dask.array as da
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals.safe_imports import cpu_only_import_from
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
scipy_csr_matrix = cpu_only_import_from("scipy.sparse", "csr_matrix")
cp_csr_matrix = gpu_only_import_from("cupyx.scipy.sparse", "csr_matrix")
# Testing Util Functions
def generate_dask_array(np_array, n_parts):
    """Wrap a 2d numpy array in a dask array split row-wise into n_parts.

    All chunks share the same height except the last one, which absorbs
    the remainder rows.
    """
    n_rows = np_array.shape[0]
    rows_per_part = n_rows // n_parts
    row_chunks = [rows_per_part] * (n_parts - 1)
    row_chunks.append(n_rows - rows_per_part * (n_parts - 1))
    return da.from_array(np_array, chunks=(tuple(row_chunks), -1))
def create_cp_sparse_ar_from_dense_np_ar(ar, dtype=np.float32):
    """Copy a dense host array into a CSR sparse matrix on the GPU."""
    host_sparse = scipy_csr_matrix(ar)
    return cp_csr_matrix(host_sparse, dtype=dtype)
def create_cp_sparse_dask_array(np_ar, n_parts):
    """Build a dask array of GPU CSR blocks from a dense numpy array."""
    dense_chunks = generate_dask_array(np_ar, n_parts)
    # The meta block tells dask each chunk will be a GPU CSR matrix.
    gpu_csr_meta = dask.array.from_array(
        cp_csr_matrix(cp.zeros(1, dtype=cp.float32))
    )
    return dense_chunks.map_blocks(
        create_cp_sparse_ar_from_dense_np_ar, meta=gpu_csr_meta
    )
def create_scipy_sparse_array_from_dask_cp_sparse_array(ar):
    """Materialize a dask array of GPU CSR blocks as a host scipy CSR matrix.

    Will probably not be needed once
    https://github.com/cupy/cupy/issues/3178 is resolved.
    """
    host_meta = dask.array.from_array(
        scipy_csr_matrix(np.zeros(1, dtype=ar.dtype))
    )
    # .get() copies each GPU block back to the host before computing.
    on_host = ar.map_blocks(lambda block: block.get(), meta=host_meta)
    return on_host.compute()
# data_ids correspond to data, order is important
data_ids = ["base_case", "diag", "empty_feature", "123", "empty_doc"]
# Small dense term-count matrices covering edge cases: a typical matrix,
# a triangular one, a feature column that never occurs, single-feature
# increasing counts, and a document containing no terms at all.
data = [
    np.array(
        [
            [0, 1, 1, 1, 0, 0, 1, 0, 1],
            [0, 2, 0, 1, 0, 1, 1, 0, 1],
            [1, 0, 0, 1, 1, 0, 1, 1, 1],
            [0, 1, 1, 1, 0, 0, 1, 0, 1],
        ]
    ),
    np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]]),
    np.array([[1, 1, 0], [1, 1, 0], [1, 0, 0]]),
    np.array([[1], [2], [3]]),
    np.array([[1, 1, 1], [1, 1, 0], [0, 0, 0]]),
]
@pytest.mark.mg
@pytest.mark.parametrize("data", data, ids=data_ids)
@pytest.mark.parametrize("norm", ["l1", "l2", None])
@pytest.mark.parametrize("use_idf", [True, False])
@pytest.mark.parametrize("smooth_idf", [True, False])
@pytest.mark.parametrize("sublinear_tf", [True, False])
@pytest.mark.filterwarnings(
    "ignore:divide by zero(.*):RuntimeWarning:" "sklearn[.*]"
)
def test_tfidf_transformer(
    data, norm, use_idf, smooth_idf, sublinear_tf, client
):
    """Distributed TfidfTransformer must match sklearn's transformer for
    every parameter combination, regardless of how the input is chunked."""
    # Testing across multiple-n_parts
    for n_parts in range(1, data.shape[0]):
        dask_sp_array = create_cp_sparse_dask_array(data, n_parts)
        tfidf = TfidfTransformer(
            norm=norm,
            use_idf=use_idf,
            smooth_idf=smooth_idf,
            sublinear_tf=sublinear_tf,
        )
        sk_tfidf = SkTfidfTransformer(
            norm=norm,
            use_idf=use_idf,
            smooth_idf=smooth_idf,
            sublinear_tf=sublinear_tf,
        )
        res = tfidf.fit_transform(dask_sp_array)
        # Bring the distributed GPU result back to a dense host matrix so
        # it can be compared with sklearn's output.
        res = create_scipy_sparse_array_from_dask_cp_sparse_array(
            res
        ).todense()
        ref = sk_tfidf.fit_transform(data).todense()
        cp.testing.assert_array_almost_equal(res, ref)
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_logistic_regression.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.dask.common import utils as dask_utils
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression as skLR
from cuml.internals.safe_imports import cpu_only_import
from cuml.testing.utils import array_equal
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
dask_cudf = gpu_only_import("dask_cudf")
cudf = gpu_only_import("cudf")
pytestmark = pytest.mark.mg
def _prep_training_data(c, X_train, y_train, partitions_per_worker):
    """Distribute host arrays as persisted dask-cuDF collections.

    The total partition count is ``partitions_per_worker`` times the
    number of workers known to client *c*; both collections are persisted
    across those workers before being returned.
    """
    worker_ids = c.has_what().keys()
    total_parts = partitions_per_worker * len(worker_ids)

    # Features: host data -> pandas -> cuDF -> distributed dask-cuDF frame.
    features_gdf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
    features_ddf = dask_cudf.from_cudf(features_gdf, npartitions=total_parts)

    # Labels: flatten to a single 1-d column before building the series.
    labels_host = np.array(pd.DataFrame(y_train).values)[:, 0]
    labels_ddf = dask_cudf.from_cudf(
        cudf.Series(labels_host), npartitions=total_parts
    )

    features_ddf, labels_ddf = dask_utils.persist_across_workers(
        c, [features_ddf, labels_ddf], workers=worker_ids
    )
    return features_ddf, labels_ddf
def make_classification_dataset(datatype, nrows, ncols, n_info, n_classes=2):
    """Build a deterministic sklearn classification problem.

    Both the feature matrix and the labels are cast to *datatype* before
    being returned, matching what the cuML estimators under test expect.
    """
    features, labels = make_classification(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_classes=n_classes,
        random_state=0,  # fixed seed keeps every parametrized run comparable
    )
    return features.astype(datatype), labels.astype(datatype)
def select_sk_solver(cuml_solver):
    """Map a cuML solver name onto the closest sklearn solver.

    Solvers without an sklearn counterpart xfail the calling test
    instead of raising.
    """
    equivalents = {
        "newton": "newton-cg",
        "admm": "lbfgs",
        "lbfgs": "lbfgs",
    }
    try:
        return equivalents[cuml_solver]
    except KeyError:
        pytest.xfail("No matched sklearn solver")
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1e5])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [2, 6])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("gpu_array_input", [False, True])
@pytest.mark.parametrize(
    "solver", ["admm", "gradient_descent", "newton", "lbfgs", "proximal_grad"]
)
def test_lr_fit_predict_score(
    nrows,
    ncols,
    n_parts,
    fit_intercept,
    datatype,
    gpu_array_input,
    solver,
    client,
):
    """Fit the dask extended LogisticRegression and compare accuracy, score
    and predicted probabilities against single-node sklearn."""
    sk_solver = select_sk_solver(cuml_solver=solver)

    # Register cuML's comm serializers on every worker before fitting.
    def imp():
        import cuml.comm.serialize  # NOQA

    client.run(imp)
    from cuml.dask.extended.linear_model import (
        LogisticRegression as cumlLR_dask,
    )
    n_info = 5
    nrows = int(nrows)
    ncols = int(ncols)
    X, y = make_classification_dataset(datatype, nrows, ncols, n_info)
    gX, gy = _prep_training_data(client, X, y, n_parts)
    if gpu_array_input:
        # Exercise the dask-array (rather than dask-cuDF) input path by
        # converting the collections' meta to cupy arrays.
        gX = gX.values
        gX._meta = cp.asarray(gX._meta)
        gy = gy.values
        gy._meta = cp.asarray(gy._meta)
    cuml_model = cumlLR_dask(
        fit_intercept=fit_intercept, solver=solver, max_iter=10
    )
    # test fit and predict
    cuml_model.fit(gX, gy)
    cu_preds = cuml_model.predict(gX)
    accuracy_cuml = accuracy_score(y, cu_preds.compute().get())
    sk_model = skLR(fit_intercept=fit_intercept, solver=sk_solver, max_iter=10)
    sk_model.fit(X, y)
    sk_preds = sk_model.predict(X)
    accuracy_sk = accuracy_score(y, sk_preds)
    # cuML must be at least as accurate as sklearn, or within 1e-3 of it.
    assert (accuracy_cuml >= accuracy_sk) | (
        np.abs(accuracy_cuml - accuracy_sk) < 1e-3
    )
    # score
    accuracy_cuml = cuml_model.score(gX, gy).compute().item()
    accuracy_sk = sk_model.score(X, y)
    assert (accuracy_cuml >= accuracy_sk) | (
        np.abs(accuracy_cuml - accuracy_sk) < 1e-3
    )
    # predicted probabilities should differ by <= 5%
    # even with different solvers (arbitrary)
    probs_cuml = cuml_model.predict_proba(gX).compute()
    probs_sk = sk_model.predict_proba(X)[:, 1]
    assert np.abs(probs_sk - probs_cuml.get()).max() <= 0.05
@pytest.mark.mg
@pytest.mark.parametrize("n_parts", [2])
@pytest.mark.parametrize("datatype", [np.float32])
def test_lbfgs_toy(n_parts, datatype, client):
    """Fit the dask LogisticRegression on a tiny linearly-separable toy
    problem and check coefficients, predictions, and the float64 error."""

    # Register cuML's comm serializers on every worker before fitting.
    def imp():
        import cuml.comm.serialize  # NOQA

    client.run(imp)
    X = np.array([(1, 2), (1, 3), (2, 1), (3, 1)], datatype)
    y = np.array([1.0, 1.0, 0.0, 0.0], datatype)
    from cuml.dask.linear_model import LogisticRegression as cumlLBFGS_dask
    X_df, y_df = _prep_training_data(client, X, y, n_parts)
    lr = cumlLBFGS_dask()
    lr.fit(X_df, y_df)
    lr_coef = lr.coef_.to_numpy()
    lr_intercept = lr.intercept_.to_numpy()
    # Golden values for this fixed toy dataset.
    assert len(lr_coef) == 1
    assert lr_coef[0] == pytest.approx([-0.71483153, 0.7148315], abs=1e-6)
    assert lr_intercept == pytest.approx([-2.2614916e-08], abs=1e-6)
    # test predict
    preds = lr.predict(X_df, delayed=True).compute().to_numpy()
    from numpy.testing import assert_array_equal
    assert_array_equal(preds, y, strict=True)
    # assert error on float64
    X = X.astype(np.float64)
    y = y.astype(np.float64)
    X_df, y_df = _prep_training_data(client, X, y, n_parts)
    with pytest.raises(
        RuntimeError,
        match="dtypes other than float32 are currently not supported yet. See issue: https://github.com/rapidsai/cuml/issues/5589",
    ):
        lr.fit(X_df, y_df)
def test_lbfgs_init(client):
    """Verify that constructor arguments of the dask LogisticRegression are
    propagated into the underlying QN solver parameters after fit."""

    # Register cuML's comm serializers on every worker before fitting.
    def imp():
        import cuml.comm.serialize  # NOQA

    client.run(imp)
    X = np.array([(1, 2), (1, 3), (2, 1), (3, 1)], dtype=np.float32)
    y = np.array([1.0, 1.0, 0.0, 0.0], dtype=np.float32)
    X_df, y_df = _prep_training_data(
        c=client, X_train=X, y_train=y, partitions_per_worker=2
    )
    from cuml.dask.linear_model.logistic_regression import (
        LogisticRegression as cumlLBFGS_dask,
    )

    def assert_params(
        tol,
        C,
        fit_intercept,
        max_iter,
        linesearch_max_iter,
        verbose,
        output_type,
    ):
        # Fit with the given constructor arguments and check that each one
        # surfaces in the QN solver parameters.
        lr = cumlLBFGS_dask(
            tol=tol,
            C=C,
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            linesearch_max_iter=linesearch_max_iter,
            verbose=verbose,
            output_type=output_type,
        )
        lr.fit(X_df, y_df)
        qnpams = lr.qnparams.params
        assert qnpams["grad_tol"] == tol
        assert qnpams["loss"] == 0  # "sigmoid" loss
        assert qnpams["penalty_l1"] == 0.0
        assert qnpams["penalty_l2"] == 1.0 / C
        assert qnpams["fit_intercept"] == fit_intercept
        assert qnpams["max_iter"] == max_iter
        assert qnpams["linesearch_max_iter"] == linesearch_max_iter
        # FIX: the previous `assert a == b if cond else c` form parsed as
        # `(a == b) if cond else c`, so the else-branch asserted a truthy
        # constant and checked nothing. Compute the expected value first.
        expected_verbosity = 5 if verbose is True else 4  # cuml Verbosity Levels
        assert qnpams["verbose"] == expected_verbosity
        # cuml.global_settings.output_type defaults to "input" when None.
        expected_output_type = "input" if output_type is None else output_type
        assert lr.output_type == expected_output_type

    assert_params(
        tol=1e-4,
        C=1.0,
        fit_intercept=True,
        max_iter=1000,
        linesearch_max_iter=50,
        verbose=False,
        output_type=None,
    )
    assert_params(
        tol=1e-6,
        C=1.5,
        fit_intercept=False,
        max_iter=200,
        linesearch_max_iter=100,
        verbose=True,
        output_type="cudf",
    )
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1e5])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("datatype", [np.float32])
@pytest.mark.parametrize("delayed", [True, False])
def test_lbfgs(
    nrows,
    ncols,
    n_parts,
    fit_intercept,
    datatype,
    delayed,
    client,
    penalty="l2",
    l1_ratio=None,
    C=1.0,
    n_classes=2,
):
    """Fit the dask qn LogisticRegression and compare against sklearn.

    Also called directly (not via pytest parametrization) by test_noreg,
    test_n_classes, test_l1 and test_elasticnet, which use the extra
    keyword-only defaults and inspect the returned fitted model.
    """
    # Coefficient comparison tolerance against sklearn.
    tolerance = 0.005

    # Register cuML's comm serializers on every worker before fitting.
    def imp():
        import cuml.comm.serialize  # NOQA

    client.run(imp)
    from cuml.dask.linear_model.logistic_regression import (
        LogisticRegression as cumlLBFGS_dask,
    )
    # set n_informative variable for calling sklearn.datasets.make_classification
    n_info = 5
    nrows = int(nrows)
    ncols = int(ncols)
    X, y = make_classification_dataset(
        datatype, nrows, ncols, n_info, n_classes=n_classes
    )
    X_df, y_df = _prep_training_data(client, X, y, n_parts)
    lr = cumlLBFGS_dask(
        solver="qn",
        fit_intercept=fit_intercept,
        penalty=penalty,
        l1_ratio=l1_ratio,
        C=C,
        verbose=True,
    )
    lr.fit(X_df, y_df)
    lr_coef = lr.coef_.to_numpy()
    lr_intercept = lr.intercept_.to_numpy()
    # Pick the sklearn solver that supports the requested penalty.
    if penalty == "l2" or penalty == "none":
        sk_solver = "lbfgs"
    elif penalty == "l1" or penalty == "elasticnet":
        sk_solver = "saga"
    else:
        raise ValueError(f"unexpected penalty {penalty}")
    sk_model = skLR(
        solver=sk_solver,
        fit_intercept=fit_intercept,
        penalty=penalty,
        l1_ratio=l1_ratio,
        C=C,
    )
    sk_model.fit(X, y)
    sk_coef = sk_model.coef_
    sk_intercept = sk_model.intercept_
    # Coefficients are only compared for the deterministic lbfgs solver;
    # saga's stochastic updates would not match within tolerance.
    if sk_solver == "lbfgs":
        assert len(lr_coef) == len(sk_coef)
        assert array_equal(lr_coef, sk_coef, tolerance, with_sign=True)
        assert array_equal(
            lr_intercept, sk_intercept, tolerance, with_sign=True
        )
    # test predict
    cu_preds = lr.predict(X_df, delayed=delayed).compute().to_numpy()
    accuracy_cuml = accuracy_score(y, cu_preds)
    sk_preds = sk_model.predict(X)
    accuracy_sk = accuracy_score(y, sk_preds)
    assert len(cu_preds) == len(sk_preds)
    # cuML must be at least as accurate as sklearn, or within 1e-3 of it.
    assert (accuracy_cuml >= accuracy_sk) | (
        np.abs(accuracy_cuml - accuracy_sk) < 1e-3
    )
    # Returned so wrapper tests can inspect the fitted model's parameters.
    return lr
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_noreg(fit_intercept, client):
    """With penalty="none" every regularization strength must be zero."""
    model = test_lbfgs(
        nrows=1e5,
        ncols=20,
        n_parts=23,
        fit_intercept=fit_intercept,
        datatype=np.float32,
        delayed=True,
        client=client,
        penalty="none",
    )

    solver_params = model.qnparams.params
    assert solver_params["penalty_l1"] == 0.0
    assert solver_params["penalty_l2"] == 0.0

    strength_l1, strength_l2 = model._get_qn_params()
    assert strength_l1 == 0.0
    assert strength_l2 == 0.0
def test_n_classes_small(client):
    """Check class discovery (_num_classes and sorted classes_) on tiny
    datasets with varying label orderings and values."""

    def assert_small(X, y, n_classes):
        # Fit on a single partition per worker and verify the detected
        # class count; the fitted model is returned for classes_ checks.
        X_df, y_df = _prep_training_data(client, X, y, partitions_per_worker=1)
        from cuml.dask.linear_model import LogisticRegression as cumlLBFGS_dask
        lr = cumlLBFGS_dask()
        lr.fit(X_df, y_df)
        assert lr._num_classes == n_classes
        return lr

    X = np.array([(1, 2), (1, 3)], np.float32)
    y = np.array([1.0, 0.0], np.float32)
    lr = assert_small(X=X, y=y, n_classes=2)
    assert np.array_equal(
        lr.classes_.to_numpy(), np.array([0.0, 1.0], np.float32)
    )
    X = np.array([(1, 2), (1, 3), (1, 2.5)], np.float32)
    y = np.array([1.0, 0.0, 1.0], np.float32)
    lr = assert_small(X=X, y=y, n_classes=2)
    assert np.array_equal(
        lr.classes_.to_numpy(), np.array([0.0, 1.0], np.float32)
    )
    X = np.array([(1, 2), (1, 2.5), (1, 3)], np.float32)
    y = np.array([1.0, 1.0, 0.0], np.float32)
    lr = assert_small(X=X, y=y, n_classes=2)
    assert np.array_equal(
        lr.classes_.to_numpy(), np.array([0.0, 1.0], np.float32)
    )
    # Non-contiguous label values: classes_ must still come back sorted.
    X = np.array([(1, 2), (1, 3), (1, 2.5)], np.float32)
    y = np.array([10.0, 50.0, 20.0], np.float32)
    lr = assert_small(X=X, y=y, n_classes=3)
    assert np.array_equal(
        lr.classes_.to_numpy(), np.array([10.0, 20.0, 50.0], np.float32)
    )
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("n_classes", [8])
def test_n_classes(n_parts, fit_intercept, n_classes, client):
    """A multinomial fit must discover the expected number of classes."""
    fitted_model = test_lbfgs(
        client=client,
        nrows=1e5,
        ncols=20,
        n_parts=n_parts,
        fit_intercept=fit_intercept,
        datatype=np.float32,
        delayed=True,
        penalty="l2",
        n_classes=n_classes,
    )
    assert fitted_model._num_classes == n_classes
@pytest.mark.mg
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("datatype", [np.float32])
@pytest.mark.parametrize("delayed", [True])
@pytest.mark.parametrize("n_classes", [2, 8])
@pytest.mark.parametrize("C", [1.0, 10.0])
def test_l1(fit_intercept, datatype, delayed, n_classes, C, client):
    """An L1 penalty must yield l1_strength == 1/C and zero L2 strength."""
    model = test_lbfgs(
        client=client,
        nrows=1e5,
        ncols=20,
        n_parts=2,
        fit_intercept=fit_intercept,
        datatype=datatype,
        delayed=delayed,
        penalty="l1",
        n_classes=n_classes,
        C=C,
    )

    strength_l1, strength_l2 = model._get_qn_params()
    assert strength_l1 == 1.0 / model.C
    assert strength_l2 == 0.0
@pytest.mark.mg
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("datatype", [np.float32])
@pytest.mark.parametrize("delayed", [True])
@pytest.mark.parametrize("n_classes", [2, 8])
@pytest.mark.parametrize("l1_ratio", [0.2, 0.8])
def test_elasticnet(
    fit_intercept, datatype, delayed, n_classes, l1_ratio, client
):
    """Elastic-net must split the total strength 1/C according to l1_ratio."""
    model = test_lbfgs(
        client=client,
        nrows=1e5,
        ncols=20,
        n_parts=2,
        fit_intercept=fit_intercept,
        datatype=datatype,
        delayed=delayed,
        penalty="elasticnet",
        n_classes=n_classes,
        l1_ratio=l1_ratio,
    )

    strength_l1, strength_l2 = model._get_qn_params()
    total_strength = 1.0 / model.C
    assert strength_l1 == model.l1_ratio * total_strength
    assert strength_l2 == (1.0 - model.l1_ratio) * total_strength
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_coordinate_descent.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.dask.datasets import make_regression
from cuml.dask.linear_model import ElasticNet
from cuml.dask.linear_model import Lasso
from cuml.metrics import r2_score
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@pytest.mark.mg
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("alpha", [0.001])
@pytest.mark.parametrize("algorithm", ["cyclic", "random"])
@pytest.mark.parametrize(
    "nrows", [unit_param(50), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(4), quality_param(32), stress_param(64)]
)
@pytest.mark.parametrize("delayed", [True, False])
def test_lasso(
    dtype, alpha, algorithm, nrows, column_info, n_parts, delayed, client
):
    """Fit the dask Lasso on a synthetic regression problem and require a
    near-perfect training R^2 for a tiny alpha and tight tolerance."""
    # column_info packs (n_features, n_informative).
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_parts=n_parts,
        client=client,
        dtype=dtype,
    )
    lasso = Lasso(
        alpha=np.array([alpha]),
        fit_intercept=True,
        normalize=False,
        max_iter=1000,
        selection=algorithm,
        tol=1e-10,
        client=client,
    )
    lasso.fit(X, y)
    # `delayed` toggles lazy vs. eager distributed prediction.
    y_hat = lasso.predict(X, delayed=delayed)
    assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.mg
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nrows", [unit_param(50), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(16), quality_param(32), stress_param(64)]
)
def test_lasso_default(dtype, nrows, column_info, n_parts, client):
    """Lasso with default hyperparameters should fit the data well.

    FIX: the parametrized ``n_parts`` was previously never used, so every
    case ran with make_regression's default partitioning; it is now passed
    through, matching test_elastic_net_default.
    """
    # column_info packs (n_features, n_informative).
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_parts=n_parts,
        client=client,
        dtype=dtype,
    )
    lasso = Lasso(client=client)
    lasso.fit(X, y)
    y_hat = lasso.predict(X)
    # High training R^2 indicates a sane default configuration.
    assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("alpha", [0.5])
@pytest.mark.parametrize("algorithm", ["cyclic", "random"])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(16), quality_param(32), stress_param(64)]
)
@pytest.mark.parametrize("delayed", [True, False])
def test_elastic_net(
    dtype, alpha, algorithm, nrows, column_info, n_parts, client, delayed
):
    """Fit the dask ElasticNet on a synthetic regression problem and require
    a minimum training R^2 that depends on the regularization strength."""
    # column_info packs (n_features, n_informative).
    ncols, n_info = column_info
    X, y = make_regression(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        n_parts=n_parts,
        client=client,
        dtype=dtype,
    )
    elasticnet = ElasticNet(
        alpha=np.array([alpha]),
        fit_intercept=True,
        normalize=False,
        max_iter=1000,
        selection=algorithm,
        tol=1e-10,
        client=client,
    )
    elasticnet.fit(X, y)
    # `delayed` toggles lazy vs. eager distributed prediction.
    y_hat = elasticnet.predict(X, delayed=delayed)
    # based on differences with scikit-learn 0.22
    # NOTE(review): alpha is currently parametrized only as [0.5], so the
    # alpha == 0.2 branch below is dead; only the 0.80 bound is exercised.
    if alpha == 0.2:
        assert r2_score(y.compute(), y_hat.compute()) >= 0.96
    else:
        assert r2_score(y.compute(), y_hat.compute()) >= 0.80
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize(
    "nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
    "column_info",
    [
        unit_param([20, 10]),
        quality_param([100, 50]),
        stress_param([1000, 500]),
    ],
)
@pytest.mark.parametrize(
    "n_parts", [unit_param(16), quality_param(32), stress_param(64)]
)
def test_elastic_net_default(dtype, nrows, column_info, n_parts, client):
    """ElasticNet with default hyperparameters should fit the data well."""
    # column_info packs (n_features, n_informative).
    n_features, n_informative = column_info
    X, y = make_regression(
        n_samples=nrows,
        n_features=n_features,
        n_informative=n_informative,
        n_parts=n_parts,
        client=client,
        dtype=dtype,
    )

    model = ElasticNet(client=client)
    model.fit(X, y)
    predictions = model.predict(X)

    assert r2_score(y.compute(), predictions.compute()) >= 0.96
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_global_settings.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
from time import sleep
import pytest
from dask import delayed
import cuml
from cuml import set_global_output_type, using_output_type
from cuml.internals.api_context_managers import _using_mirror_output_type
from cuml.internals.global_settings import (
_global_settings_data,
_GlobalSettingsData,
GlobalSettings,
)
# Output types exercised by the thread-safety tests below.
test_output_types_str = ("numpy", "numba", "cupy", "cudf")
# Shared instance used to verify that its shared_state stays untouched
# while per-thread attributes are manipulated.
test_global_settings_data_obj = _GlobalSettingsData()
def test_set_global_output_type():
    """Ensure that set_global_output_type is thread-safe"""

    def worker(i):
        expected = test_output_types_str[i]
        # Force a race condition
        if i == 0:
            sleep(0.1)
        set_global_output_type(expected)
        sleep(0.5)
        return cuml.global_settings.output_type == expected

    checks = [
        delayed(worker)(i) for i in range(len(test_output_types_str))
    ]
    assert delayed(all)(checks).compute()
def test_using_output_type():
    """Ensure that using_output_type is thread-safe"""

    def worker(i):
        expected = test_output_types_str[i]
        # Force a race condition
        if i == 0:
            sleep(0.1)
        with using_output_type(expected):
            sleep(0.5)
            return cuml.global_settings.output_type == expected

    checks = [
        delayed(worker)(i) for i in range(len(test_output_types_str))
    ]
    assert delayed(all)(checks).compute()
def test_using_mirror_output_type():
    """Ensure that _using_mirror_output_type is thread-safe"""

    def check_correct_type(index):
        # Force a race condition
        if index == 0:
            sleep(0.1)
        # Alternate between the mirror context manager and a regular
        # using_output_type context so concurrent tasks use both paths.
        if index % 2 == 0:
            with _using_mirror_output_type():
                sleep(0.5)
                return cuml.global_settings.output_type == "mirror"
        else:
            output_type = test_output_types_str[index]
            with using_output_type(output_type):
                sleep(0.5)
                return cuml.global_settings.output_type == output_type

    # Run the checks concurrently via dask.delayed; all must hold.
    results = [
        delayed(check_correct_type)(index)
        for index in range(len(test_output_types_str))
    ]
    assert (delayed(all)(results)).compute()
def test_global_settings_data():
    """Ensure that GlobalSettingsData objects are properly initialized
    per-thread"""

    def check_initialized(index):
        # Force a race condition
        if index == 0:
            sleep(0.1)
        # A fresh thread must not see attributes set by other threads.
        with pytest.raises(AttributeError):
            _global_settings_data.testing_index  # pylint: disable=W0104
        _global_settings_data.testing_index = index
        sleep(0.5)
        # The module-level object's shared_state stays at its defaults and
        # the per-thread attribute written above must still be visible here.
        return (
            test_global_settings_data_obj.shared_state["_output_type"] is None
            and test_global_settings_data_obj.shared_state["root_cm"] is None
            and _global_settings_data.testing_index == index
        )

    results = [delayed(check_initialized)(index) for index in range(5)]
    assert (delayed(all)(results)).compute()
def test_global_settings():
    """Ensure that GlobalSettings acts as a proper thread-local borg"""

    def worker(i):
        # Force a race condition
        if i == 0:
            sleep(0.1)
        cuml.global_settings.index = i
        sleep(0.5)
        # The value must survive in this thread AND be shared with a
        # freshly-constructed GlobalSettings instance (borg pattern).
        return (
            cuml.global_settings.index == i
            and GlobalSettings().index == cuml.global_settings.index
        )

    checks = [delayed(worker)(i) for i in range(5)]
    assert delayed(all)(checks).compute()
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_label_encoder.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.common.exceptions import NotFittedError
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.dask.preprocessing.LabelEncoder import LabelEncoder
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
dask_cudf = gpu_only_import("dask_cudf")
cp = gpu_only_import("cupy")
def _arr_to_similarity_mat(arr):
    """Stack *arr* as identical rows into a square (n, n) matrix.

    Flattens the input to one row of length n, then edge-pads upward so
    every row of the result equals the input; comparing the matrix with
    its transpose then exposes the pairwise equality structure of *arr*.
    """
    row = arr.reshape(1, -1)
    n_values = row.shape[1]
    return np.pad(row, [(n_values - 1, 0), (0, 0)], "edge")
@pytest.mark.parametrize("length", [10, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_fit_transform(length, cardinality, client):
    """Try encoding the entire df"""
    raw = cudf.Series(np.random.choice(cardinality, (length,)))
    ddf = dask_cudf.from_cudf(raw, npartitions=len(client.has_what()))
    codes = cuml.dask.preprocessing.LabelEncoder().fit_transform(ddf)

    # The encoding must preserve the pairwise equality structure of the
    # raw values: equal inputs map to equal codes and vice versa.
    raw_mat = _arr_to_similarity_mat(ddf.compute().to_numpy())
    code_mat = _arr_to_similarity_mat(cp.asnumpy(codes.compute().to_numpy()))
    assert ((code_mat == code_mat.T) == (raw_mat == raw_mat.T)).all()
@pytest.mark.parametrize("length", [10, 100, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_transform(length, cardinality, client):
    """Try fitting and then encoding a small subset of the df"""
    raw = cudf.Series(np.random.choice(cardinality, (length,)))
    ddf = dask_cudf.from_cudf(raw, npartitions=len(client.has_what()))

    encoder = LabelEncoder().fit(ddf)
    assert encoder._fitted

    codes = encoder.transform(ddf)

    # The encoding must preserve the pairwise equality structure of the
    # raw values: equal inputs map to equal codes and vice versa.
    raw_mat = _arr_to_similarity_mat(ddf.compute().to_numpy())
    code_mat = _arr_to_similarity_mat(cp.asnumpy(codes.compute().to_numpy()))
    assert ((code_mat == code_mat.T) == (raw_mat == raw_mat.T)).all()
def test_labelencoder_unseen(client):
    """Try encoding a value that was not present during fitting"""
    n_partitions = len(client.has_what())
    train = dask_cudf.from_cudf(
        cudf.Series(np.random.choice(10, (10,))),
        npartitions=n_partitions,
    )

    encoder = LabelEncoder().fit(train)
    assert encoder._fitted

    # Values that were never seen during fit must raise KeyError.
    with pytest.raises(KeyError):
        unseen = dask_cudf.from_cudf(
            cudf.Series([-100, -120]), npartitions=n_partitions
        )
        encoder.transform(unseen).compute()
def test_labelencoder_unfitted(client):
    """Try calling `.transform()` without fitting first"""
    values = cudf.Series(np.random.choice(10, (10,)))
    ddf = dask_cudf.from_cudf(values, npartitions=len(client.has_what()))

    encoder = LabelEncoder()
    # Transforming before fit must fail loudly.
    with pytest.raises(NotFittedError):
        encoder.transform(ddf).compute()
@pytest.mark.parametrize("use_fit_transform", [False, True])
@pytest.mark.parametrize(
    "orig_label, ord_label, expected_reverted, bad_ord_label",
    [
        (
            cudf.Series(["a", "b", "c"]),
            cudf.Series([2, 1, 2, 0]),
            cudf.Series(["c", "b", "c", "a"]),
            cudf.Series([-1, 1, 2, 0]),
        ),
        (
            cudf.Series(["Tokyo", "Paris", "Austin"]),
            cudf.Series([0, 2, 0]),
            cudf.Series(["Austin", "Tokyo", "Austin"]),
            cudf.Series([0, 1, 2, 3]),
        ),
        (
            cudf.Series(["a", "b", "c1"]),
            cudf.Series([2, 1]),
            cudf.Series(["c1", "b"]),
            cudf.Series([0, 1, 2, 3]),
        ),
        (
            cudf.Series(["1.09", "0.09", ".09", "09"]),
            cudf.Series([0, 1, 2, 3]),
            cudf.Series([".09", "0.09", "09", "1.09"]),
            cudf.Series([0, 1, 2, 3, 4]),
        ),
    ],
)
def test_inverse_transform(
    orig_label,
    ord_label,
    expected_reverted,
    bad_ord_label,
    use_fit_transform,
    client,
):
    """Round-trip check: inverse_transform must map ordinal codes back to
    the original labels, and reject codes outside the fitted range."""
    # Distribute every fixture series across the client's workers.
    n_workers = len(client.has_what())
    orig_label = dask_cudf.from_cudf(orig_label, npartitions=n_workers)
    ord_label = dask_cudf.from_cudf(ord_label, npartitions=n_workers)
    expected_reverted = dask_cudf.from_cudf(
        expected_reverted, npartitions=n_workers
    )
    bad_ord_label = dask_cudf.from_cudf(bad_ord_label, npartitions=n_workers)
    # prepare LabelEncoder (both fit paths must behave identically)
    le = LabelEncoder()
    if use_fit_transform:
        le.fit_transform(orig_label)
    else:
        le.fit(orig_label)
    assert le._fitted is True
    # test if inverse_transform is correct
    reverted = le.inverse_transform(ord_label)
    reverted = reverted.compute().reset_index(drop=True)
    expected_reverted = expected_reverted.compute()
    assert len(reverted) == len(expected_reverted)
    assert len(reverted) == len(reverted[reverted == expected_reverted])
    # test if it correctly raises ValueError on out-of-range codes
    with pytest.raises(ValueError, match="y contains previously unseen label"):
        le.inverse_transform(bad_ord_label).compute()
def test_unfitted_inverse_transform(client):
    """Try calling `.inverse_transform()` without fitting first"""
    tmp = cudf.Series(np.random.choice(10, (10,)))
    df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
    le = LabelEncoder()
    # FIX: this test previously called `le.transform(df)`, duplicating
    # test_labelencoder_unfitted and leaving inverse_transform untested;
    # exercise inverse_transform as the docstring states.
    with pytest.raises(NotFittedError):
        le.inverse_transform(df)
@pytest.mark.parametrize(
    "empty, ord_label", [(cudf.Series([]), cudf.Series([2, 1]))]
)
def test_empty_input(empty, ord_label, client):
    """Fitting on an empty series must succeed, reject any inverse lookup,
    and fit_transform must produce an empty result."""
    # prepare LabelEncoder
    n_workers = len(client.has_what())
    empty = dask_cudf.from_cudf(empty, npartitions=n_workers)
    ord_label = dask_cudf.from_cudf(ord_label, npartitions=n_workers)
    le = LabelEncoder()
    le.fit(empty)
    assert le._fitted is True
    # test if it correctly raises ValueError: with no fitted classes,
    # every ordinal code is unseen
    with pytest.raises(ValueError, match="y contains previously unseen label"):
        le.inverse_transform(ord_label).compute()
    # check fit_transform()
    le = LabelEncoder()
    transformed = le.fit_transform(empty).compute()
    assert le._fitted is True
    assert len(transformed) == 0
def test_masked_encode(client):
    """Encoding a filtered frame must agree with filtering an encoded frame."""
    n_partitions = len(client.has_what())
    host_frame = cudf.DataFrame(
        {
            "filter_col": [1, 1, 2, 3, 1, 1, 1, 1, 6, 5],
            "cat_col": ["a", "b", "c", "d", "a", "a", "a", "c", "b", "c"],
        }
    )
    ddf = dask_cudf.from_cudf(host_frame, npartitions=n_partitions)

    # Path 1: filter first, then encode the surviving categories.
    filtered = ddf[ddf["filter_col"] == 1]
    codes_after_filter = LabelEncoder().fit_transform(filtered["cat_col"])
    filtered = filtered.assign(filter_encoded=codes_after_filter.values)

    # Path 2: encode the full column, then filter.
    codes_full = LabelEncoder().fit_transform(ddf["cat_col"])
    ddf = ddf.assign(encoded_filter=codes_full.values)
    ddf = ddf[ddf.filter_col == 1]

    assert (ddf.encoded_filter == filtered.filter_encoded).compute().all()
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_dbscan.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.datasets import make_blobs
from sklearn.cluster import DBSCAN as skDBSCAN
from cuml.testing.utils import (
get_pattern,
unit_param,
quality_param,
stress_param,
array_equal,
assert_dbscan_equal,
)
import pytest
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
@pytest.mark.mg
@pytest.mark.parametrize("max_mbytes_per_batch", [1e3, None])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
"nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
@pytest.mark.parametrize(
"ncols", [unit_param(20), quality_param(100), stress_param(1000)]
)
@pytest.mark.parametrize(
"out_dtype",
[
unit_param("int32"),
unit_param(np.int32),
unit_param("int64"),
unit_param(np.int64),
quality_param("int32"),
stress_param("int32"),
],
)
def test_dbscan(
datatype, nrows, ncols, max_mbytes_per_batch, out_dtype, client
):
from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN
n_samples = nrows
n_feats = ncols
X, y = make_blobs(
n_samples=n_samples,
cluster_std=0.01,
n_features=n_feats,
random_state=0,
)
eps = 1
cuml_dbscan = cuDBSCAN(
eps=eps,
min_samples=2,
max_mbytes_per_batch=max_mbytes_per_batch,
output_type="numpy",
)
cu_labels = cuml_dbscan.fit_predict(X, out_dtype=out_dtype)
if nrows < 500000:
sk_dbscan = skDBSCAN(eps=1, min_samples=2, algorithm="brute")
sk_labels = sk_dbscan.fit_predict(X)
# Check the core points are equal
assert array_equal(
cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
)
# Check the labels are correct
assert_dbscan_equal(
sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
)
if out_dtype == "int32" or out_dtype == np.int32:
assert cu_labels.dtype == np.int32
elif out_dtype == "int64" or out_dtype == np.int64:
assert cu_labels.dtype == np.int64
@pytest.mark.mg
@pytest.mark.parametrize(
"max_mbytes_per_batch",
[unit_param(1), quality_param(1e2), stress_param(None)],
)
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize(
"nrows", [unit_param(500), quality_param(5000), stress_param(10000)]
)
@pytest.mark.parametrize("out_dtype", ["int32", "int64"])
def test_dbscan_precomputed(
datatype, nrows, max_mbytes_per_batch, out_dtype, client
):
from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN
# 2-dimensional dataset for easy distance matrix computation
X, y = make_blobs(
n_samples=nrows, cluster_std=0.01, n_features=2, random_state=0
)
# Precompute distances
X_dist = pairwise_distances(X).astype(datatype)
eps = 1
cuml_dbscan = cuDBSCAN(
eps=eps,
min_samples=2,
metric="precomputed",
max_mbytes_per_batch=max_mbytes_per_batch,
output_type="numpy",
)
cu_labels = cuml_dbscan.fit_predict(X_dist, out_dtype=out_dtype)
sk_dbscan = skDBSCAN(
eps=eps, min_samples=2, metric="precomputed", algorithm="brute"
)
sk_labels = sk_dbscan.fit_predict(X_dist)
# Check the core points are equal
assert array_equal(
cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
)
# Check the labels are correct
assert_dbscan_equal(
sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
)
@pytest.mark.mg
@pytest.mark.parametrize("name", ["noisy_moons", "blobs", "no_structure"])
@pytest.mark.parametrize(
"nrows", [unit_param(500), quality_param(5000), stress_param(500000)]
)
# Vary the eps to get a range of core point counts
@pytest.mark.parametrize("eps", [0.05, 0.1, 0.5])
def test_dbscan_sklearn_comparison(name, nrows, eps, client):
from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN
default_base = {
"quantile": 0.2,
"eps": eps,
"damping": 0.9,
"preference": -200,
"n_neighbors": 10,
"n_clusters": 2,
}
n_samples = nrows
pat = get_pattern(name, n_samples)
params = default_base.copy()
params.update(pat[1])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cuml_dbscan = cuDBSCAN(
eps=params["eps"], min_samples=5, output_type="numpy"
)
cu_labels = cuml_dbscan.fit_predict(X)
if nrows < 500000:
sk_dbscan = skDBSCAN(eps=params["eps"], min_samples=5)
sk_labels = sk_dbscan.fit_predict(X)
assert_dbscan_equal(
sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
)
# Check the core points are equal
assert array_equal(
cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
)
# Check the labels are correct
assert_dbscan_equal(
sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
)
@pytest.mark.mg
@pytest.mark.parametrize("name", ["noisy_moons", "blobs", "no_structure"])
def test_dbscan_default(name, client):
from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN
eps = 0.5
default_base = {
"quantile": 0.3,
"eps": eps,
"damping": 0.9,
"preference": -200,
"n_neighbors": 10,
"n_clusters": 2,
}
n_samples = 500
pat = get_pattern(name, n_samples)
params = default_base.copy()
params.update(pat[1])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cuml_dbscan = cuDBSCAN(output_type="numpy")
cu_labels = cuml_dbscan.fit_predict(X)
sk_dbscan = skDBSCAN(eps=params["eps"], min_samples=5)
sk_labels = sk_dbscan.fit_predict(X)
# Check the core points are equal
assert array_equal(
cuml_dbscan.core_sample_indices_, sk_dbscan.core_sample_indices_
)
# Check the labels are correct
assert_dbscan_equal(
sk_labels, cu_labels, X, cuml_dbscan.core_sample_indices_, eps
)
@pytest.mark.mg
@pytest.mark.xfail(strict=True, raises=ValueError)
def test_dbscan_out_dtype_fails_invalid_input(client):
    """An unrecognized ``out_dtype`` string must raise ``ValueError``."""
    from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN

    data, _ = make_blobs(n_samples=500)
    model = cuDBSCAN(output_type="numpy")
    model.fit_predict(data, out_dtype="bad_input")
@pytest.mark.mg
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", ["int32", np.int32, "int64", np.int64])
def test_dbscan_propagation(datatype, out_dtype, client):
    """Results must match scikit-learn regardless of the requested label
    ``out_dtype`` (string or numpy dtype) and input float precision."""
    from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN

    samples, _ = make_blobs(
        5000,
        centers=1,
        cluster_std=8.0,
        center_box=(-100.0, 100.0),
        random_state=8,
    )
    samples = samples.astype(datatype)
    eps = 0.5

    mg_model = cuDBSCAN(eps=eps, min_samples=5, output_type="numpy")
    mg_labels = mg_model.fit_predict(samples, out_dtype=out_dtype)

    reference = skDBSCAN(eps=eps, min_samples=5)
    ref_labels = reference.fit_predict(samples)

    # Core points must match the single-process scikit-learn run.
    assert array_equal(
        mg_model.core_sample_indices_, reference.core_sample_indices_
    )

    # Cluster assignments must agree up to label permutation.
    assert_dbscan_equal(
        ref_labels, mg_labels, samples, mg_model.core_sample_indices_, eps
    )
@pytest.mark.mg
def test_dbscan_no_calc_core_point_indices(client):
    """``core_sample_indices_`` must be None when its computation is
    explicitly disabled via ``calc_core_sample_indices=False``."""
    from cuml.dask.cluster.dbscan import DBSCAN as cuDBSCAN

    params = {"eps": 1.1, "min_samples": 4}
    data, _ = get_pattern("noisy_moons", 1000)[0]
    data = StandardScaler().fit_transform(data)

    # NOTE(review): params["min_samples"] is 4 but 5 is passed below —
    # preserved from the original; confirm whether this is intentional.
    model = cuDBSCAN(
        eps=params["eps"],
        min_samples=5,
        output_type="numpy",
        calc_core_sample_indices=False,
    )
    model.fit_predict(data)

    # The attribute must not have been populated.
    assert model.core_sample_indices_ is None
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_one_hot_encoder.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import cpu_only_import_from
from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder
from cuml.testing.utils import (
stress_param,
generate_inputs_from_categories,
assert_inverse_equal,
from_df_to_numpy,
)
from cuml.dask.preprocessing import OneHotEncoder
import dask.array as da
from cuml.internals.safe_imports import cpu_only_import
from cudf import DataFrame, Series
import pytest
from cuml.internals.safe_imports import gpu_only_import
dask_cudf = gpu_only_import("dask_cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
assert_frame_equal = cpu_only_import_from(
"pandas.testing", "assert_frame_equal"
)
@pytest.mark.mg
def test_onehot_vs_skonehot(client):
    """Dask OneHotEncoder dense output must match scikit-learn's."""
    frame = DataFrame(
        {"gender": ["Male", "Female", "Female"], "int": [1, 3, 2]}
    )
    reference_input = from_df_to_numpy(frame)
    ddf = dask_cudf.from_cudf(frame, npartitions=2)

    encoded = OneHotEncoder(sparse=False).fit_transform(ddf)
    expected = SkOneHotEncoder(sparse=False).fit_transform(reference_input)
    cp.testing.assert_array_equal(encoded.compute(), expected)
@pytest.mark.mg
@pytest.mark.parametrize(
    "drop", [None, "first", {"g": Series("F"), "i": Series(3)}]
)
def test_onehot_inverse_transform(client, drop):
    """``inverse_transform`` must round-trip the input for every drop mode."""
    original = DataFrame({"g": ["M", "F", "F"], "i": [1, 3, 2]})
    ddf = dask_cudf.from_cudf(original, npartitions=2)

    encoder = OneHotEncoder(drop=drop)
    recovered = encoder.inverse_transform(encoder.fit_transform(ddf))
    assert_frame_equal(
        recovered.compute().to_pandas().reset_index(drop=True),
        original.to_pandas(),
    )
@pytest.mark.mg
def test_onehot_categories(client):
    """User-supplied ``categories`` define the encoded column layout,
    including categories absent from the data."""
    data = dask_cudf.from_cudf(
        DataFrame({"chars": ["a", "b"], "int": [0, 2]}), npartitions=2
    )
    categories = DataFrame({"chars": ["a", "b", "c"], "int": [0, 1, 2]})

    encoder = OneHotEncoder(categories=categories, sparse=False)
    encoded = encoder.fit_transform(data)
    expected = cp.array(
        [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]]
    )
    cp.testing.assert_array_equal(encoded.compute(), expected)
@pytest.mark.mg
def test_onehot_fit_handle_unknown(client):
    """``fit`` must raise for values outside the given categories unless
    ``handle_unknown='ignore'`` is set."""
    data = dask_cudf.from_cudf(
        DataFrame({"chars": ["a", "b"], "int": [0, 2]}), npartitions=2
    )
    categories = DataFrame({"chars": ["c", "b"], "int": [0, 2]})

    with pytest.raises(KeyError):
        OneHotEncoder(handle_unknown="error", categories=categories).fit(data)

    # With 'ignore', unknown values are tolerated at fit time.
    OneHotEncoder(handle_unknown="ignore", categories=categories).fit(data)
@pytest.mark.mg
def test_onehot_transform_handle_unknown(client):
    """``transform`` of unseen categories raises under 'error' and encodes
    all-zero rows for the unknown value under 'ignore'."""
    train = dask_cudf.from_cudf(
        DataFrame({"chars": ["a", "b"], "int": [0, 2]}), npartitions=2
    )
    test = dask_cudf.from_cudf(
        DataFrame({"chars": ["c", "b"], "int": [0, 2]}), npartitions=2
    )

    strict = OneHotEncoder(handle_unknown="error", sparse=False).fit(train)
    with pytest.raises(KeyError):
        strict.transform(test).compute()

    lenient = OneHotEncoder(handle_unknown="ignore", sparse=False).fit(train)
    encoded = lenient.transform(test)
    # Unknown 'c' maps to zeros in the 'chars' columns.
    expected = cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
    cp.testing.assert_array_equal(encoded.compute(), expected)
@pytest.mark.mg
def test_onehot_inverse_transform_handle_unknown(client):
    """All-zero one-hot rows (unknown categories) must invert to None."""
    train = dask_cudf.from_cudf(
        DataFrame({"chars": ["a", "b"], "int": [0, 2]}), npartitions=2
    )
    encoded = da.from_array(
        cp.array([[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]])
    )

    encoder = OneHotEncoder(handle_unknown="ignore").fit(train)
    decoded = encoder.inverse_transform(encoded)

    expected = DataFrame({"chars": [None, "b"], "int": [0, 2]})
    assert_frame_equal(decoded.compute().to_pandas(), expected.to_pandas())
@pytest.mark.mg
@pytest.mark.parametrize("drop", [None, "first"])
@pytest.mark.parametrize("as_array", [True, False], ids=["cupy", "cudf"])
@pytest.mark.parametrize("sparse", [True, False], ids=["sparse", "dense"])
@pytest.mark.parametrize("n_samples", [10, 1000, stress_param(50000)])
def test_onehot_random_inputs(client, drop, as_array, sparse, n_samples):
    """Randomized encode/decode round-trip compared against scikit-learn
    for sparse/dense output, array/dataframe input, and both drop modes."""
    X, ary = generate_inputs_from_categories(
        n_samples=n_samples, as_array=as_array
    )
    distributed = (
        da.from_array(X)
        if as_array
        else dask_cudf.from_cudf(X, npartitions=1)
    )

    encoder = OneHotEncoder(sparse=sparse, drop=drop, categories="auto")
    reference = SkOneHotEncoder(sparse=sparse, drop=drop, categories="auto")
    encoded = encoder.fit_transform(distributed)
    expected = reference.fit_transform(ary)
    if sparse:
        cp.testing.assert_array_equal(
            encoded.compute().toarray(), expected.toarray()
        )
    else:
        cp.testing.assert_array_equal(encoded.compute(), expected)

    # Decoding must recover the original (distributed) input.
    decoded = encoder.inverse_transform(encoded)
    assert_inverse_equal(decoded.compute(), distributed.compute())
@pytest.mark.mg
def test_onehot_drop_idx_first(client):
    """``drop='first'`` must match scikit-learn and still invert exactly."""
    raw_rows = [["c", 2, "a"], ["b", 2, "b"]]
    frame = DataFrame(
        {"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]}
    )
    ddf = dask_cudf.from_cudf(frame, npartitions=2)

    encoder = OneHotEncoder(sparse=False, drop="first")
    encoded = encoder.fit_transform(ddf)
    expected = SkOneHotEncoder(sparse=False, drop="first").fit_transform(
        raw_rows
    )
    cp.testing.assert_array_equal(encoded.compute(), expected)

    recovered = encoder.inverse_transform(encoded)
    assert_frame_equal(
        recovered.compute().to_pandas().reset_index(drop=True),
        frame.to_pandas(),
    )
@pytest.mark.mg
def test_onehot_drop_one_of_each(client):
    """Dropping one explicit category per column must match scikit-learn's
    list-form ``drop`` and still invert to the original frame.

    Idiom fix: the redundant ``dict({...})`` wrapper around a dict literal
    is replaced with the literal itself.
    """
    X_ary = [["c", 2, "a"], ["b", 2, "b"]]
    X = DataFrame({"chars": ["c", "b"], "int": [2, 2], "letters": ["a", "b"]})
    ddf = dask_cudf.from_cudf(X, npartitions=2)

    # One category to drop for each encoded column.
    drop = {"chars": "b", "int": 2, "letters": "b"}
    enc = OneHotEncoder(sparse=False, drop=drop)
    sk_enc = SkOneHotEncoder(sparse=False, drop=["b", 2, "b"])
    ohe = enc.fit_transform(ddf)
    ref = sk_enc.fit_transform(X_ary)
    cp.testing.assert_array_equal(ohe.compute(), ref)

    inv = enc.inverse_transform(ohe)
    assert_frame_equal(
        inv.compute().to_pandas().reset_index(drop=True), X.to_pandas()
    )
@pytest.mark.mg
@pytest.mark.parametrize(
    "drop, pattern",
    [
        # Too few columns in `drop`.
        [{"chars": "b"}, "`drop` should have as many columns"],
        # More than one value per column.
        [
            {"chars": "b", "int": [2, 0]},
            "Trying to drop multiple values",
        ],
        # A value that is not a known category.
        [
            {"chars": "b", "int": 3},
            "Some categories [a-zA-Z, ]* were not found",
        ],
        # Entirely wrong container type.
        [
            DataFrame({"chars": "b", "int": 3}),
            "Wrong input for parameter `drop`.",
        ],
    ],
)
def test_onehot_drop_exceptions(client, drop, pattern):
    """Invalid ``drop`` specifications must raise ValueError whose message
    matches the expected pattern.

    Idiom fix: redundant ``dict({...})`` wrappers in the parametrize table
    are replaced with plain dict literals (identical values).
    """
    X = DataFrame({"chars": ["c", "b", "d"], "int": [2, 1, 0]})
    X = dask_cudf.from_cudf(X, npartitions=2)
    with pytest.raises(ValueError, match=pattern):
        OneHotEncoder(sparse=False, drop=drop).fit(X)
@pytest.mark.mg
def test_onehot_get_categories(client):
    """``categories_`` must hold the sorted unique values of each column."""
    frame = dask_cudf.from_cudf(
        DataFrame({"chars": ["c", "b", "d"], "ints": [2, 1, 0]}),
        npartitions=2,
    )
    expected = [np.array(["b", "c", "d"]), np.array([0, 1, 2])]

    categories = OneHotEncoder().fit(frame).categories_
    for want, got in zip(expected, categories):
        np.testing.assert_array_equal(want, got.to_numpy())
| 0 |
rapidsai_public_repos/cuml/python/cuml/tests | rapidsai_public_repos/cuml/python/cuml/tests/dask/test_dask_linear_regression.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
import pytest
from cuml.dask.common import utils as dask_utils
from sklearn.metrics import mean_squared_error
from sklearn.datasets import make_regression
from cuml.internals.safe_imports import cpu_only_import
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
dask_cudf = gpu_only_import("dask_cudf")
cudf = gpu_only_import("cudf")
pytestmark = pytest.mark.mg
def _prep_training_data(c, X_train, y_train, partitions_per_worker):
    """Convert numpy training data into dask-cudf collections persisted
    across all workers of client *c*.

    Returns the persisted (X, y) pair with ``partitions_per_worker``
    partitions per worker.
    """
    worker_addresses = c.has_what().keys()
    npartitions = partitions_per_worker * len(worker_addresses)

    # Features: numpy -> pandas -> cudf -> dask_cudf.
    X_dist = dask_cudf.from_cudf(
        cudf.DataFrame.from_pandas(pd.DataFrame(X_train)),
        npartitions=npartitions,
    )

    # Labels: flatten to a 1-D series before distributing.
    y_values = np.array(pd.DataFrame(y_train).values)[:, 0]
    y_dist = dask_cudf.from_cudf(cudf.Series(y_values), npartitions=npartitions)

    X_dist, y_dist = dask_utils.persist_across_workers(
        c, [X_dist, y_dist], workers=worker_addresses
    )
    return X_dist, y_dist
def make_regression_dataset(datatype, nrows, ncols, n_info):
    """Generate a deterministic regression problem cast to *datatype*.

    Parameters
    ----------
    datatype : numpy dtype for both X and y (e.g. np.float32).
    nrows, ncols : sample and feature counts.
    n_info : number of informative features.

    Fix: ``n_info`` was previously ignored — ``n_informative`` was
    hard-coded to 5. Existing callers pass 5, so their output is unchanged.
    """
    X, y = make_regression(
        n_samples=nrows,
        n_features=ncols,
        n_informative=n_info,
        random_state=0,
    )
    return X.astype(datatype), y.astype(datatype)
@pytest.mark.mg
@pytest.mark.parametrize("nrows", [1e5])
@pytest.mark.parametrize("ncols", [20])
@pytest.mark.parametrize("n_parts", [2, 23])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("normalize", [False])
@pytest.mark.parametrize("datatype", [np.float32, np.float64])
@pytest.mark.parametrize("delayed", [True, False])
def test_ols(
    nrows, ncols, n_parts, fit_intercept, normalize, datatype, delayed, client
):
    """Fit distributed OLS and check that the training-set prediction MSE
    is essentially zero (the data is noiseless)."""

    # Register cuML serializers on every worker before shipping data.
    def _register_serializers():
        import cuml.comm.serialize  # NOQA

    client.run(_register_serializers)
    from cuml.dask.linear_model import LinearRegression as cumlOLS_dask

    n_info = 5
    X, y = make_regression_dataset(datatype, int(nrows), int(ncols), n_info)
    X_df, y_df = _prep_training_data(client, X, y, n_parts)

    model = cumlOLS_dask(fit_intercept=fit_intercept, normalize=normalize)
    model.fit(X_df, y_df)
    predictions = model.predict(X_df, delayed=delayed)

    mse = mean_squared_error(y, predictions.compute().to_pandas().values)
    assert mse < 1e-6
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.