Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/descr/species_distributions.rst +40 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc +0 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt +5 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py +143 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py +284 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py +55 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py +89 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py +229 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py +26 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py +1618 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py +71 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py +686 -0
- openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py +613 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_ops.h +28 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_native.h +24 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_ops.h +39 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_cuda_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h +26 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_ops.h +50 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h +50 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_native.h +25 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar_native.h +21 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_native.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h +24 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_ops.h +28 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h +24 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl.h +39 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_native.h +26 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h +28 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_native.h +21 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_meta_dispatch.h +25 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_cuda_dispatch.h +23 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits.h +39 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h +26 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h +24 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h +28 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_ops.h +39 -0
- phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_native.h +22 -0
openflamingo/lib/python3.10/site-packages/sklearn/datasets/descr/species_distributions.rst
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. _species_distribution_dataset:
|
| 2 |
+
|
| 3 |
+
Species distribution dataset
|
| 4 |
+
----------------------------
|
| 5 |
+
|
| 6 |
+
This dataset represents the geographic distribution of two species in Central and
|
| 7 |
+
South America. The two species are:
|
| 8 |
+
|
| 9 |
+
- `"Bradypus variegatus" <http://www.iucnredlist.org/details/3038/0>`_ ,
|
| 10 |
+
the Brown-throated Sloth.
|
| 11 |
+
|
| 12 |
+
- `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ ,
|
| 13 |
+
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
|
| 14 |
+
Colombia, Ecuador, Peru, and Venezuela.
|
| 15 |
+
|
| 16 |
+
The dataset is not a typical dataset since a :class:`~sklearn.datasets.base.Bunch`
|
| 17 |
+
containing the attributes `data` and `target` is not returned. Instead, we have
|
| 18 |
+
information allowing to create a "density" map of the different species.
|
| 19 |
+
|
| 20 |
+
The grid for the map can be built using the attributes `x_left_lower_corner`,
|
| 21 |
+
`y_left_lower_corner`, `Nx`, `Ny` and `grid_size`, which respectively correspond
|
| 22 |
+
to the x and y coordinates of the lower left corner of the grid, the number of
|
| 23 |
+
points along the x- and y-axis and the size of the step on the grid.
|
| 24 |
+
|
| 25 |
+
The density at each location of the grid is contained in the `coverage` attribute.
|
| 26 |
+
|
| 27 |
+
Finally, the `train` and `test` attributes contain information regarding the location
|
| 28 |
+
of a species at a specific location.
|
| 29 |
+
|
| 30 |
+
The dataset is provided by Phillips et. al. (2006).
|
| 31 |
+
|
| 32 |
+
.. rubric:: References
|
| 33 |
+
|
| 34 |
+
* `"Maximum entropy modeling of species geographic distributions"
|
| 35 |
+
<http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips,
|
| 36 |
+
R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006.
|
| 37 |
+
|
| 38 |
+
.. rubric:: Examples
|
| 39 |
+
|
| 40 |
+
* :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py`
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_20news.cpython-310.pyc
ADDED
|
Binary file (4.22 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_california_housing.cpython-310.pyc
ADDED
|
Binary file (1.5 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_covtype.cpython-310.pyc
ADDED
|
Binary file (2.03 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_kddcup99.cpython-310.pyc
ADDED
|
Binary file (2.68 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_rcv1.cpython-310.pyc
ADDED
|
Binary file (1.91 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/__pycache__/test_samples_generator.cpython-310.pyc
ADDED
|
Binary file (18.4 kB). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/__init__.py
ADDED
|
File without changes
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__init__.py
ADDED
|
File without changes
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/__init__.py
ADDED
|
File without changes
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__init__.py
ADDED
|
File without changes
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (199 Bytes). View file
|
|
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/data/svmlight_multilabel.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# multilabel dataset in SVMlight format
|
| 2 |
+
1,0 2:2.5 10:-5.2 15:1.5
|
| 3 |
+
2 5:1.0 12:-3
|
| 4 |
+
2:3.5 11:26
|
| 5 |
+
1,2 20:27
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_20news.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the 20news downloader, if the data is available,
|
| 2 |
+
or if specifically requested via environment variable
|
| 3 |
+
(e.g. for CI jobs)."""
|
| 4 |
+
|
| 5 |
+
from functools import partial
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pytest
|
| 10 |
+
import scipy.sparse as sp
|
| 11 |
+
|
| 12 |
+
from sklearn.datasets.tests.test_common import (
|
| 13 |
+
check_as_frame,
|
| 14 |
+
check_pandas_dependency_message,
|
| 15 |
+
check_return_X_y,
|
| 16 |
+
)
|
| 17 |
+
from sklearn.preprocessing import normalize
|
| 18 |
+
from sklearn.utils._testing import assert_allclose_dense_sparse
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def test_20news(fetch_20newsgroups_fxt):
|
| 22 |
+
data = fetch_20newsgroups_fxt(subset="all", shuffle=False)
|
| 23 |
+
assert data.DESCR.startswith(".. _20newsgroups_dataset:")
|
| 24 |
+
|
| 25 |
+
# Extract a reduced dataset
|
| 26 |
+
data2cats = fetch_20newsgroups_fxt(
|
| 27 |
+
subset="all", categories=data.target_names[-1:-3:-1], shuffle=False
|
| 28 |
+
)
|
| 29 |
+
# Check that the ordering of the target_names is the same
|
| 30 |
+
# as the ordering in the full dataset
|
| 31 |
+
assert data2cats.target_names == data.target_names[-2:]
|
| 32 |
+
# Assert that we have only 0 and 1 as labels
|
| 33 |
+
assert np.unique(data2cats.target).tolist() == [0, 1]
|
| 34 |
+
|
| 35 |
+
# Check that the number of filenames is consistent with data/target
|
| 36 |
+
assert len(data2cats.filenames) == len(data2cats.target)
|
| 37 |
+
assert len(data2cats.filenames) == len(data2cats.data)
|
| 38 |
+
|
| 39 |
+
# Check that the first entry of the reduced dataset corresponds to
|
| 40 |
+
# the first entry of the corresponding category in the full dataset
|
| 41 |
+
entry1 = data2cats.data[0]
|
| 42 |
+
category = data2cats.target_names[data2cats.target[0]]
|
| 43 |
+
label = data.target_names.index(category)
|
| 44 |
+
entry2 = data.data[np.where(data.target == label)[0][0]]
|
| 45 |
+
assert entry1 == entry2
|
| 46 |
+
|
| 47 |
+
# check that return_X_y option
|
| 48 |
+
X, y = fetch_20newsgroups_fxt(subset="all", shuffle=False, return_X_y=True)
|
| 49 |
+
assert len(X) == len(data.data)
|
| 50 |
+
assert y.shape == data.target.shape
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def test_20news_length_consistency(fetch_20newsgroups_fxt):
|
| 54 |
+
"""Checks the length consistencies within the bunch
|
| 55 |
+
|
| 56 |
+
This is a non-regression test for a bug present in 0.16.1.
|
| 57 |
+
"""
|
| 58 |
+
# Extract the full dataset
|
| 59 |
+
data = fetch_20newsgroups_fxt(subset="all")
|
| 60 |
+
assert len(data["data"]) == len(data.data)
|
| 61 |
+
assert len(data["target"]) == len(data.target)
|
| 62 |
+
assert len(data["filenames"]) == len(data.filenames)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def test_20news_vectorized(fetch_20newsgroups_vectorized_fxt):
|
| 66 |
+
# test subset = train
|
| 67 |
+
bunch = fetch_20newsgroups_vectorized_fxt(subset="train")
|
| 68 |
+
assert sp.issparse(bunch.data) and bunch.data.format == "csr"
|
| 69 |
+
assert bunch.data.shape == (11314, 130107)
|
| 70 |
+
assert bunch.target.shape[0] == 11314
|
| 71 |
+
assert bunch.data.dtype == np.float64
|
| 72 |
+
assert bunch.DESCR.startswith(".. _20newsgroups_dataset:")
|
| 73 |
+
|
| 74 |
+
# test subset = test
|
| 75 |
+
bunch = fetch_20newsgroups_vectorized_fxt(subset="test")
|
| 76 |
+
assert sp.issparse(bunch.data) and bunch.data.format == "csr"
|
| 77 |
+
assert bunch.data.shape == (7532, 130107)
|
| 78 |
+
assert bunch.target.shape[0] == 7532
|
| 79 |
+
assert bunch.data.dtype == np.float64
|
| 80 |
+
assert bunch.DESCR.startswith(".. _20newsgroups_dataset:")
|
| 81 |
+
|
| 82 |
+
# test return_X_y option
|
| 83 |
+
fetch_func = partial(fetch_20newsgroups_vectorized_fxt, subset="test")
|
| 84 |
+
check_return_X_y(bunch, fetch_func)
|
| 85 |
+
|
| 86 |
+
# test subset = all
|
| 87 |
+
bunch = fetch_20newsgroups_vectorized_fxt(subset="all")
|
| 88 |
+
assert sp.issparse(bunch.data) and bunch.data.format == "csr"
|
| 89 |
+
assert bunch.data.shape == (11314 + 7532, 130107)
|
| 90 |
+
assert bunch.target.shape[0] == 11314 + 7532
|
| 91 |
+
assert bunch.data.dtype == np.float64
|
| 92 |
+
assert bunch.DESCR.startswith(".. _20newsgroups_dataset:")
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def test_20news_normalization(fetch_20newsgroups_vectorized_fxt):
|
| 96 |
+
X = fetch_20newsgroups_vectorized_fxt(normalize=False)
|
| 97 |
+
X_ = fetch_20newsgroups_vectorized_fxt(normalize=True)
|
| 98 |
+
X_norm = X_["data"][:100]
|
| 99 |
+
X = X["data"][:100]
|
| 100 |
+
|
| 101 |
+
assert_allclose_dense_sparse(X_norm, normalize(X))
|
| 102 |
+
assert np.allclose(np.linalg.norm(X_norm.todense(), axis=1), 1)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def test_20news_as_frame(fetch_20newsgroups_vectorized_fxt):
|
| 106 |
+
pd = pytest.importorskip("pandas")
|
| 107 |
+
|
| 108 |
+
bunch = fetch_20newsgroups_vectorized_fxt(as_frame=True)
|
| 109 |
+
check_as_frame(bunch, fetch_20newsgroups_vectorized_fxt)
|
| 110 |
+
|
| 111 |
+
frame = bunch.frame
|
| 112 |
+
assert frame.shape == (11314, 130108)
|
| 113 |
+
assert all([isinstance(col, pd.SparseDtype) for col in bunch.data.dtypes])
|
| 114 |
+
|
| 115 |
+
# Check a small subset of features
|
| 116 |
+
for expected_feature in [
|
| 117 |
+
"beginner",
|
| 118 |
+
"beginners",
|
| 119 |
+
"beginning",
|
| 120 |
+
"beginnings",
|
| 121 |
+
"begins",
|
| 122 |
+
"begley",
|
| 123 |
+
"begone",
|
| 124 |
+
]:
|
| 125 |
+
assert expected_feature in frame.keys()
|
| 126 |
+
assert "category_class" in frame.keys()
|
| 127 |
+
assert bunch.target.name == "category_class"
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def test_as_frame_no_pandas(fetch_20newsgroups_vectorized_fxt, hide_available_pandas):
|
| 131 |
+
check_pandas_dependency_message(fetch_20newsgroups_vectorized_fxt)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def test_outdated_pickle(fetch_20newsgroups_vectorized_fxt):
|
| 135 |
+
with patch("os.path.exists") as mock_is_exist:
|
| 136 |
+
with patch("joblib.load") as mock_load:
|
| 137 |
+
# mock that the dataset was cached
|
| 138 |
+
mock_is_exist.return_value = True
|
| 139 |
+
# mock that we have an outdated pickle with only X and y returned
|
| 140 |
+
mock_load.return_value = ("X", "y")
|
| 141 |
+
err_msg = "The cached dataset located in"
|
| 142 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 143 |
+
fetch_20newsgroups_vectorized_fxt(as_frame=True)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_arff_parser.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import textwrap
|
| 2 |
+
from io import BytesIO
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from sklearn.datasets._arff_parser import (
|
| 7 |
+
_liac_arff_parser,
|
| 8 |
+
_pandas_arff_parser,
|
| 9 |
+
_post_process_frame,
|
| 10 |
+
load_arff_from_gzip_file,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.mark.parametrize(
|
| 15 |
+
"feature_names, target_names",
|
| 16 |
+
[
|
| 17 |
+
(
|
| 18 |
+
[
|
| 19 |
+
"col_int_as_integer",
|
| 20 |
+
"col_int_as_numeric",
|
| 21 |
+
"col_float_as_real",
|
| 22 |
+
"col_float_as_numeric",
|
| 23 |
+
],
|
| 24 |
+
["col_categorical", "col_string"],
|
| 25 |
+
),
|
| 26 |
+
(
|
| 27 |
+
[
|
| 28 |
+
"col_int_as_integer",
|
| 29 |
+
"col_int_as_numeric",
|
| 30 |
+
"col_float_as_real",
|
| 31 |
+
"col_float_as_numeric",
|
| 32 |
+
],
|
| 33 |
+
["col_categorical"],
|
| 34 |
+
),
|
| 35 |
+
(
|
| 36 |
+
[
|
| 37 |
+
"col_int_as_integer",
|
| 38 |
+
"col_int_as_numeric",
|
| 39 |
+
"col_float_as_real",
|
| 40 |
+
"col_float_as_numeric",
|
| 41 |
+
],
|
| 42 |
+
[],
|
| 43 |
+
),
|
| 44 |
+
],
|
| 45 |
+
)
|
| 46 |
+
def test_post_process_frame(feature_names, target_names):
|
| 47 |
+
"""Check the behaviour of the post-processing function for splitting a dataframe."""
|
| 48 |
+
pd = pytest.importorskip("pandas")
|
| 49 |
+
|
| 50 |
+
X_original = pd.DataFrame(
|
| 51 |
+
{
|
| 52 |
+
"col_int_as_integer": [1, 2, 3],
|
| 53 |
+
"col_int_as_numeric": [1, 2, 3],
|
| 54 |
+
"col_float_as_real": [1.0, 2.0, 3.0],
|
| 55 |
+
"col_float_as_numeric": [1.0, 2.0, 3.0],
|
| 56 |
+
"col_categorical": ["a", "b", "c"],
|
| 57 |
+
"col_string": ["a", "b", "c"],
|
| 58 |
+
}
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
X, y = _post_process_frame(X_original, feature_names, target_names)
|
| 62 |
+
assert isinstance(X, pd.DataFrame)
|
| 63 |
+
if len(target_names) >= 2:
|
| 64 |
+
assert isinstance(y, pd.DataFrame)
|
| 65 |
+
elif len(target_names) == 1:
|
| 66 |
+
assert isinstance(y, pd.Series)
|
| 67 |
+
else:
|
| 68 |
+
assert y is None
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def test_load_arff_from_gzip_file_error_parser():
|
| 72 |
+
"""An error will be raised if the parser is not known."""
|
| 73 |
+
# None of the input parameters are required to be accurate since the check
|
| 74 |
+
# of the parser will be carried out first.
|
| 75 |
+
|
| 76 |
+
err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'"
|
| 77 |
+
with pytest.raises(ValueError, match=err_msg):
|
| 78 |
+
load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser])
|
| 82 |
+
def test_pandas_arff_parser_strip_single_quotes(parser_func):
|
| 83 |
+
"""Check that we properly strip single quotes from the data."""
|
| 84 |
+
pd = pytest.importorskip("pandas")
|
| 85 |
+
|
| 86 |
+
arff_file = BytesIO(
|
| 87 |
+
textwrap.dedent(
|
| 88 |
+
"""
|
| 89 |
+
@relation 'toy'
|
| 90 |
+
@attribute 'cat_single_quote' {'A', 'B', 'C'}
|
| 91 |
+
@attribute 'str_single_quote' string
|
| 92 |
+
@attribute 'str_nested_quote' string
|
| 93 |
+
@attribute 'class' numeric
|
| 94 |
+
@data
|
| 95 |
+
'A','some text','\"expect double quotes\"',0
|
| 96 |
+
"""
|
| 97 |
+
).encode("utf-8")
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
columns_info = {
|
| 101 |
+
"cat_single_quote": {
|
| 102 |
+
"data_type": "nominal",
|
| 103 |
+
"name": "cat_single_quote",
|
| 104 |
+
},
|
| 105 |
+
"str_single_quote": {
|
| 106 |
+
"data_type": "string",
|
| 107 |
+
"name": "str_single_quote",
|
| 108 |
+
},
|
| 109 |
+
"str_nested_quote": {
|
| 110 |
+
"data_type": "string",
|
| 111 |
+
"name": "str_nested_quote",
|
| 112 |
+
},
|
| 113 |
+
"class": {
|
| 114 |
+
"data_type": "numeric",
|
| 115 |
+
"name": "class",
|
| 116 |
+
},
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
feature_names = [
|
| 120 |
+
"cat_single_quote",
|
| 121 |
+
"str_single_quote",
|
| 122 |
+
"str_nested_quote",
|
| 123 |
+
]
|
| 124 |
+
target_names = ["class"]
|
| 125 |
+
|
| 126 |
+
# We don't strip single quotes for string columns with the pandas parser.
|
| 127 |
+
expected_values = {
|
| 128 |
+
"cat_single_quote": "A",
|
| 129 |
+
"str_single_quote": (
|
| 130 |
+
"some text" if parser_func is _liac_arff_parser else "'some text'"
|
| 131 |
+
),
|
| 132 |
+
"str_nested_quote": (
|
| 133 |
+
'"expect double quotes"'
|
| 134 |
+
if parser_func is _liac_arff_parser
|
| 135 |
+
else "'\"expect double quotes\"'"
|
| 136 |
+
),
|
| 137 |
+
"class": 0,
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
_, _, frame, _ = parser_func(
|
| 141 |
+
arff_file,
|
| 142 |
+
output_arrays_type="pandas",
|
| 143 |
+
openml_columns_info=columns_info,
|
| 144 |
+
feature_names_to_select=feature_names,
|
| 145 |
+
target_names_to_select=target_names,
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
assert frame.columns.tolist() == feature_names + target_names
|
| 149 |
+
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser])
|
| 153 |
+
def test_pandas_arff_parser_strip_double_quotes(parser_func):
|
| 154 |
+
"""Check that we properly strip double quotes from the data."""
|
| 155 |
+
pd = pytest.importorskip("pandas")
|
| 156 |
+
|
| 157 |
+
arff_file = BytesIO(
|
| 158 |
+
textwrap.dedent(
|
| 159 |
+
"""
|
| 160 |
+
@relation 'toy'
|
| 161 |
+
@attribute 'cat_double_quote' {"A", "B", "C"}
|
| 162 |
+
@attribute 'str_double_quote' string
|
| 163 |
+
@attribute 'str_nested_quote' string
|
| 164 |
+
@attribute 'class' numeric
|
| 165 |
+
@data
|
| 166 |
+
"A","some text","\'expect double quotes\'",0
|
| 167 |
+
"""
|
| 168 |
+
).encode("utf-8")
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
columns_info = {
|
| 172 |
+
"cat_double_quote": {
|
| 173 |
+
"data_type": "nominal",
|
| 174 |
+
"name": "cat_double_quote",
|
| 175 |
+
},
|
| 176 |
+
"str_double_quote": {
|
| 177 |
+
"data_type": "string",
|
| 178 |
+
"name": "str_double_quote",
|
| 179 |
+
},
|
| 180 |
+
"str_nested_quote": {
|
| 181 |
+
"data_type": "string",
|
| 182 |
+
"name": "str_nested_quote",
|
| 183 |
+
},
|
| 184 |
+
"class": {
|
| 185 |
+
"data_type": "numeric",
|
| 186 |
+
"name": "class",
|
| 187 |
+
},
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
feature_names = [
|
| 191 |
+
"cat_double_quote",
|
| 192 |
+
"str_double_quote",
|
| 193 |
+
"str_nested_quote",
|
| 194 |
+
]
|
| 195 |
+
target_names = ["class"]
|
| 196 |
+
|
| 197 |
+
expected_values = {
|
| 198 |
+
"cat_double_quote": "A",
|
| 199 |
+
"str_double_quote": "some text",
|
| 200 |
+
"str_nested_quote": "'expect double quotes'",
|
| 201 |
+
"class": 0,
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
_, _, frame, _ = parser_func(
|
| 205 |
+
arff_file,
|
| 206 |
+
output_arrays_type="pandas",
|
| 207 |
+
openml_columns_info=columns_info,
|
| 208 |
+
feature_names_to_select=feature_names,
|
| 209 |
+
target_names_to_select=target_names,
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
assert frame.columns.tolist() == feature_names + target_names
|
| 213 |
+
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
@pytest.mark.parametrize(
|
| 217 |
+
"parser_func",
|
| 218 |
+
[
|
| 219 |
+
# internal quotes are not considered to follow the ARFF spec in LIAC ARFF
|
| 220 |
+
pytest.param(_liac_arff_parser, marks=pytest.mark.xfail),
|
| 221 |
+
_pandas_arff_parser,
|
| 222 |
+
],
|
| 223 |
+
)
|
| 224 |
+
def test_pandas_arff_parser_strip_no_quotes(parser_func):
|
| 225 |
+
"""Check that we properly parse with no quotes characters."""
|
| 226 |
+
pd = pytest.importorskip("pandas")
|
| 227 |
+
|
| 228 |
+
arff_file = BytesIO(
|
| 229 |
+
textwrap.dedent(
|
| 230 |
+
"""
|
| 231 |
+
@relation 'toy'
|
| 232 |
+
@attribute 'cat_without_quote' {A, B, C}
|
| 233 |
+
@attribute 'str_without_quote' string
|
| 234 |
+
@attribute 'str_internal_quote' string
|
| 235 |
+
@attribute 'class' numeric
|
| 236 |
+
@data
|
| 237 |
+
A,some text,'internal' quote,0
|
| 238 |
+
"""
|
| 239 |
+
).encode("utf-8")
|
| 240 |
+
)
|
| 241 |
+
|
| 242 |
+
columns_info = {
|
| 243 |
+
"cat_without_quote": {
|
| 244 |
+
"data_type": "nominal",
|
| 245 |
+
"name": "cat_without_quote",
|
| 246 |
+
},
|
| 247 |
+
"str_without_quote": {
|
| 248 |
+
"data_type": "string",
|
| 249 |
+
"name": "str_without_quote",
|
| 250 |
+
},
|
| 251 |
+
"str_internal_quote": {
|
| 252 |
+
"data_type": "string",
|
| 253 |
+
"name": "str_internal_quote",
|
| 254 |
+
},
|
| 255 |
+
"class": {
|
| 256 |
+
"data_type": "numeric",
|
| 257 |
+
"name": "class",
|
| 258 |
+
},
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
feature_names = [
|
| 262 |
+
"cat_without_quote",
|
| 263 |
+
"str_without_quote",
|
| 264 |
+
"str_internal_quote",
|
| 265 |
+
]
|
| 266 |
+
target_names = ["class"]
|
| 267 |
+
|
| 268 |
+
expected_values = {
|
| 269 |
+
"cat_without_quote": "A",
|
| 270 |
+
"str_without_quote": "some text",
|
| 271 |
+
"str_internal_quote": "'internal' quote",
|
| 272 |
+
"class": 0,
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
_, _, frame, _ = parser_func(
|
| 276 |
+
arff_file,
|
| 277 |
+
output_arrays_type="pandas",
|
| 278 |
+
openml_columns_info=columns_info,
|
| 279 |
+
feature_names_to_select=feature_names,
|
| 280 |
+
target_names_to_select=target_names,
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
assert frame.columns.tolist() == feature_names + target_names
|
| 284 |
+
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_covtype.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the covtype loader, if the data is available,
|
| 2 |
+
or if specifically requested via environment variable
|
| 3 |
+
(e.g. for CI jobs)."""
|
| 4 |
+
|
| 5 |
+
from functools import partial
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
from sklearn.datasets.tests.test_common import check_return_X_y
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_fetch(fetch_covtype_fxt, global_random_seed):
|
| 13 |
+
data1 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed)
|
| 14 |
+
data2 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed + 1)
|
| 15 |
+
|
| 16 |
+
X1, X2 = data1["data"], data2["data"]
|
| 17 |
+
assert (581012, 54) == X1.shape
|
| 18 |
+
assert X1.shape == X2.shape
|
| 19 |
+
|
| 20 |
+
assert X1.sum() == X2.sum()
|
| 21 |
+
|
| 22 |
+
y1, y2 = data1["target"], data2["target"]
|
| 23 |
+
assert (X1.shape[0],) == y1.shape
|
| 24 |
+
assert (X1.shape[0],) == y2.shape
|
| 25 |
+
|
| 26 |
+
descr_prefix = ".. _covtype_dataset:"
|
| 27 |
+
assert data1.DESCR.startswith(descr_prefix)
|
| 28 |
+
assert data2.DESCR.startswith(descr_prefix)
|
| 29 |
+
|
| 30 |
+
# test return_X_y option
|
| 31 |
+
fetch_func = partial(fetch_covtype_fxt)
|
| 32 |
+
check_return_X_y(data1, fetch_func)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def test_fetch_asframe(fetch_covtype_fxt):
|
| 36 |
+
pytest.importorskip("pandas")
|
| 37 |
+
|
| 38 |
+
bunch = fetch_covtype_fxt(as_frame=True)
|
| 39 |
+
assert hasattr(bunch, "frame")
|
| 40 |
+
frame = bunch.frame
|
| 41 |
+
assert frame.shape == (581012, 55)
|
| 42 |
+
assert bunch.data.shape == (581012, 54)
|
| 43 |
+
assert bunch.target.shape == (581012,)
|
| 44 |
+
|
| 45 |
+
column_names = set(frame.columns)
|
| 46 |
+
|
| 47 |
+
# enumerated names are added correctly
|
| 48 |
+
assert set(f"Wilderness_Area_{i}" for i in range(4)) < column_names
|
| 49 |
+
assert set(f"Soil_Type_{i}" for i in range(40)) < column_names
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas):
|
| 53 |
+
expected_msg = "fetch_covtype with as_frame=True requires pandas"
|
| 54 |
+
with pytest.raises(ImportError, match=expected_msg):
|
| 55 |
+
fetch_covtype_fxt(as_frame=True)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_kddcup99.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test kddcup99 loader, if the data is available,
|
| 2 |
+
or if specifically requested via environment variable
|
| 3 |
+
(e.g. for CI jobs).
|
| 4 |
+
|
| 5 |
+
Only 'percent10' mode is tested, as the full data
|
| 6 |
+
is too big to use in unit-testing.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from functools import partial
|
| 10 |
+
|
| 11 |
+
import pytest
|
| 12 |
+
|
| 13 |
+
from sklearn.datasets.tests.test_common import (
|
| 14 |
+
check_as_frame,
|
| 15 |
+
check_pandas_dependency_message,
|
| 16 |
+
check_return_X_y,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@pytest.mark.parametrize("as_frame", [True, False])
|
| 21 |
+
@pytest.mark.parametrize(
|
| 22 |
+
"subset, n_samples, n_features",
|
| 23 |
+
[
|
| 24 |
+
(None, 494021, 41),
|
| 25 |
+
("SA", 100655, 41),
|
| 26 |
+
("SF", 73237, 4),
|
| 27 |
+
("http", 58725, 3),
|
| 28 |
+
("smtp", 9571, 3),
|
| 29 |
+
],
|
| 30 |
+
)
|
| 31 |
+
def test_fetch_kddcup99_percent10(
|
| 32 |
+
fetch_kddcup99_fxt, as_frame, subset, n_samples, n_features
|
| 33 |
+
):
|
| 34 |
+
data = fetch_kddcup99_fxt(subset=subset, as_frame=as_frame)
|
| 35 |
+
assert data.data.shape == (n_samples, n_features)
|
| 36 |
+
assert data.target.shape == (n_samples,)
|
| 37 |
+
if as_frame:
|
| 38 |
+
assert data.frame.shape == (n_samples, n_features + 1)
|
| 39 |
+
assert data.DESCR.startswith(".. _kddcup99_dataset:")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def test_fetch_kddcup99_return_X_y(fetch_kddcup99_fxt):
|
| 43 |
+
fetch_func = partial(fetch_kddcup99_fxt, subset="smtp")
|
| 44 |
+
data = fetch_func()
|
| 45 |
+
check_return_X_y(data, fetch_func)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_fetch_kddcup99_as_frame(fetch_kddcup99_fxt):
|
| 49 |
+
bunch = fetch_kddcup99_fxt()
|
| 50 |
+
check_as_frame(bunch, fetch_kddcup99_fxt)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def test_fetch_kddcup99_shuffle(fetch_kddcup99_fxt):
|
| 54 |
+
dataset = fetch_kddcup99_fxt(
|
| 55 |
+
random_state=0,
|
| 56 |
+
subset="SA",
|
| 57 |
+
percent10=True,
|
| 58 |
+
)
|
| 59 |
+
dataset_shuffled = fetch_kddcup99_fxt(
|
| 60 |
+
random_state=0,
|
| 61 |
+
subset="SA",
|
| 62 |
+
shuffle=True,
|
| 63 |
+
percent10=True,
|
| 64 |
+
)
|
| 65 |
+
assert set(dataset["target"]) == set(dataset_shuffled["target"])
|
| 66 |
+
assert dataset_shuffled.data.shape == dataset.data.shape
|
| 67 |
+
assert dataset_shuffled.target.shape == dataset.target.shape
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_pandas_dependency_message(fetch_kddcup99_fxt, hide_available_pandas):
|
| 71 |
+
check_pandas_dependency_message(fetch_kddcup99_fxt)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):
|
| 75 |
+
"""Check that a nice error message is raised when cache is corrupted."""
|
| 76 |
+
kddcup99_dir = tmp_path / "kddcup99_10-py3"
|
| 77 |
+
kddcup99_dir.mkdir()
|
| 78 |
+
samples_path = kddcup99_dir / "samples"
|
| 79 |
+
|
| 80 |
+
with samples_path.open("wb") as f:
|
| 81 |
+
f.write(b"THIS IS CORRUPTED")
|
| 82 |
+
|
| 83 |
+
msg = (
|
| 84 |
+
"The cache for fetch_kddcup99 is invalid, please "
|
| 85 |
+
f"delete {str(kddcup99_dir)} and run the fetch_kddcup99 again"
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
with pytest.raises(OSError, match=msg):
|
| 89 |
+
fetch_kddcup99_fxt(data_home=str(tmp_path))
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_lfw.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This test for the LFW require medium-size data downloading and processing
|
| 2 |
+
|
| 3 |
+
If the data has not been already downloaded by running the examples,
|
| 4 |
+
the tests won't run (skipped).
|
| 5 |
+
|
| 6 |
+
If the test are run, the first execution will be long (typically a bit
|
| 7 |
+
more than a couple of minutes) but as the dataset loader is leveraging
|
| 8 |
+
joblib, successive runs will be fast (less than 200ms).
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import random
|
| 12 |
+
from functools import partial
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import pytest
|
| 16 |
+
|
| 17 |
+
from sklearn.datasets import fetch_lfw_pairs, fetch_lfw_people
|
| 18 |
+
from sklearn.datasets.tests.test_common import check_return_X_y
|
| 19 |
+
from sklearn.utils._testing import assert_array_equal
|
| 20 |
+
|
| 21 |
+
FAKE_NAMES = [
|
| 22 |
+
"Abdelatif_Smith",
|
| 23 |
+
"Abhati_Kepler",
|
| 24 |
+
"Camara_Alvaro",
|
| 25 |
+
"Chen_Dupont",
|
| 26 |
+
"John_Lee",
|
| 27 |
+
"Lin_Bauman",
|
| 28 |
+
"Onur_Lopez",
|
| 29 |
+
]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@pytest.fixture(scope="module")
|
| 33 |
+
def mock_empty_data_home(tmp_path_factory):
|
| 34 |
+
data_dir = tmp_path_factory.mktemp("scikit_learn_empty_test")
|
| 35 |
+
|
| 36 |
+
yield data_dir
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@pytest.fixture(scope="module")
|
| 40 |
+
def mock_data_home(tmp_path_factory):
|
| 41 |
+
"""Test fixture run once and common to all tests of this module"""
|
| 42 |
+
Image = pytest.importorskip("PIL.Image")
|
| 43 |
+
|
| 44 |
+
data_dir = tmp_path_factory.mktemp("scikit_learn_lfw_test")
|
| 45 |
+
lfw_home = data_dir / "lfw_home"
|
| 46 |
+
lfw_home.mkdir(parents=True, exist_ok=True)
|
| 47 |
+
|
| 48 |
+
random_state = random.Random(42)
|
| 49 |
+
np_rng = np.random.RandomState(42)
|
| 50 |
+
|
| 51 |
+
# generate some random jpeg files for each person
|
| 52 |
+
counts = {}
|
| 53 |
+
for name in FAKE_NAMES:
|
| 54 |
+
folder_name = lfw_home / "lfw_funneled" / name
|
| 55 |
+
folder_name.mkdir(parents=True, exist_ok=True)
|
| 56 |
+
|
| 57 |
+
n_faces = np_rng.randint(1, 5)
|
| 58 |
+
counts[name] = n_faces
|
| 59 |
+
for i in range(n_faces):
|
| 60 |
+
file_path = folder_name / (name + "_%04d.jpg" % i)
|
| 61 |
+
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
|
| 62 |
+
img = Image.fromarray(uniface.astype(np.uint8))
|
| 63 |
+
img.save(file_path)
|
| 64 |
+
|
| 65 |
+
# add some random file pollution to test robustness
|
| 66 |
+
(lfw_home / "lfw_funneled" / ".test.swp").write_bytes(
|
| 67 |
+
b"Text file to be ignored by the dataset loader."
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
# generate some pairing metadata files using the same format as LFW
|
| 71 |
+
with open(lfw_home / "pairsDevTrain.txt", "wb") as f:
|
| 72 |
+
f.write(b"10\n")
|
| 73 |
+
more_than_two = [name for name, count in counts.items() if count >= 2]
|
| 74 |
+
for i in range(5):
|
| 75 |
+
name = random_state.choice(more_than_two)
|
| 76 |
+
first, second = random_state.sample(range(counts[name]), 2)
|
| 77 |
+
f.write(("%s\t%d\t%d\n" % (name, first, second)).encode())
|
| 78 |
+
|
| 79 |
+
for i in range(5):
|
| 80 |
+
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
|
| 81 |
+
first_index = np_rng.choice(np.arange(counts[first_name]))
|
| 82 |
+
second_index = np_rng.choice(np.arange(counts[second_name]))
|
| 83 |
+
f.write(
|
| 84 |
+
(
|
| 85 |
+
"%s\t%d\t%s\t%d\n"
|
| 86 |
+
% (first_name, first_index, second_name, second_index)
|
| 87 |
+
).encode()
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
(lfw_home / "pairsDevTest.txt").write_bytes(
|
| 91 |
+
b"Fake place holder that won't be tested"
|
| 92 |
+
)
|
| 93 |
+
(lfw_home / "pairs.txt").write_bytes(b"Fake place holder that won't be tested")
|
| 94 |
+
|
| 95 |
+
yield data_dir
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def test_load_empty_lfw_people(mock_empty_data_home):
|
| 99 |
+
with pytest.raises(OSError):
|
| 100 |
+
fetch_lfw_people(data_home=mock_empty_data_home, download_if_missing=False)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_load_fake_lfw_people(mock_data_home):
|
| 104 |
+
lfw_people = fetch_lfw_people(
|
| 105 |
+
data_home=mock_data_home, min_faces_per_person=3, download_if_missing=False
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
# The data is croped around the center as a rectangular bounding box
|
| 109 |
+
# around the face. Colors are converted to gray levels:
|
| 110 |
+
assert lfw_people.images.shape == (10, 62, 47)
|
| 111 |
+
assert lfw_people.data.shape == (10, 2914)
|
| 112 |
+
|
| 113 |
+
# the target is array of person integer ids
|
| 114 |
+
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
|
| 115 |
+
|
| 116 |
+
# names of the persons can be found using the target_names array
|
| 117 |
+
expected_classes = ["Abdelatif Smith", "Abhati Kepler", "Onur Lopez"]
|
| 118 |
+
assert_array_equal(lfw_people.target_names, expected_classes)
|
| 119 |
+
|
| 120 |
+
# It is possible to ask for the original data without any croping or color
|
| 121 |
+
# conversion and not limit on the number of picture per person
|
| 122 |
+
lfw_people = fetch_lfw_people(
|
| 123 |
+
data_home=mock_data_home,
|
| 124 |
+
resize=None,
|
| 125 |
+
slice_=None,
|
| 126 |
+
color=True,
|
| 127 |
+
download_if_missing=False,
|
| 128 |
+
)
|
| 129 |
+
assert lfw_people.images.shape == (17, 250, 250, 3)
|
| 130 |
+
assert lfw_people.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
|
| 131 |
+
|
| 132 |
+
# the ids and class names are the same as previously
|
| 133 |
+
assert_array_equal(
|
| 134 |
+
lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2]
|
| 135 |
+
)
|
| 136 |
+
assert_array_equal(
|
| 137 |
+
lfw_people.target_names,
|
| 138 |
+
[
|
| 139 |
+
"Abdelatif Smith",
|
| 140 |
+
"Abhati Kepler",
|
| 141 |
+
"Camara Alvaro",
|
| 142 |
+
"Chen Dupont",
|
| 143 |
+
"John Lee",
|
| 144 |
+
"Lin Bauman",
|
| 145 |
+
"Onur Lopez",
|
| 146 |
+
],
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
# test return_X_y option
|
| 150 |
+
fetch_func = partial(
|
| 151 |
+
fetch_lfw_people,
|
| 152 |
+
data_home=mock_data_home,
|
| 153 |
+
resize=None,
|
| 154 |
+
slice_=None,
|
| 155 |
+
color=True,
|
| 156 |
+
download_if_missing=False,
|
| 157 |
+
)
|
| 158 |
+
check_return_X_y(lfw_people, fetch_func)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def test_load_fake_lfw_people_too_restrictive(mock_data_home):
|
| 162 |
+
with pytest.raises(ValueError):
|
| 163 |
+
fetch_lfw_people(
|
| 164 |
+
data_home=mock_data_home,
|
| 165 |
+
min_faces_per_person=100,
|
| 166 |
+
download_if_missing=False,
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def test_load_empty_lfw_pairs(mock_empty_data_home):
|
| 171 |
+
with pytest.raises(OSError):
|
| 172 |
+
fetch_lfw_pairs(data_home=mock_empty_data_home, download_if_missing=False)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def test_load_fake_lfw_pairs(mock_data_home):
|
| 176 |
+
lfw_pairs_train = fetch_lfw_pairs(
|
| 177 |
+
data_home=mock_data_home, download_if_missing=False
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
# The data is croped around the center as a rectangular bounding box
|
| 181 |
+
# around the face. Colors are converted to gray levels:
|
| 182 |
+
assert lfw_pairs_train.pairs.shape == (10, 2, 62, 47)
|
| 183 |
+
|
| 184 |
+
# the target is whether the person is the same or not
|
| 185 |
+
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
|
| 186 |
+
|
| 187 |
+
# names of the persons can be found using the target_names array
|
| 188 |
+
expected_classes = ["Different persons", "Same person"]
|
| 189 |
+
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
| 190 |
+
|
| 191 |
+
# It is possible to ask for the original data without any croping or color
|
| 192 |
+
# conversion
|
| 193 |
+
lfw_pairs_train = fetch_lfw_pairs(
|
| 194 |
+
data_home=mock_data_home,
|
| 195 |
+
resize=None,
|
| 196 |
+
slice_=None,
|
| 197 |
+
color=True,
|
| 198 |
+
download_if_missing=False,
|
| 199 |
+
)
|
| 200 |
+
assert lfw_pairs_train.pairs.shape == (10, 2, 250, 250, 3)
|
| 201 |
+
|
| 202 |
+
# the ids and class names are the same as previously
|
| 203 |
+
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
|
| 204 |
+
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
| 205 |
+
|
| 206 |
+
assert lfw_pairs_train.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def test_fetch_lfw_people_internal_cropping(mock_data_home):
|
| 210 |
+
"""Check that we properly crop the images.
|
| 211 |
+
|
| 212 |
+
Non-regression test for:
|
| 213 |
+
https://github.com/scikit-learn/scikit-learn/issues/24942
|
| 214 |
+
"""
|
| 215 |
+
# If cropping was not done properly and we don't resize the images, the images would
|
| 216 |
+
# have their original size (250x250) and the image would not fit in the NumPy array
|
| 217 |
+
# pre-allocated based on `slice_` parameter.
|
| 218 |
+
slice_ = (slice(70, 195), slice(78, 172))
|
| 219 |
+
lfw = fetch_lfw_people(
|
| 220 |
+
data_home=mock_data_home,
|
| 221 |
+
min_faces_per_person=3,
|
| 222 |
+
download_if_missing=False,
|
| 223 |
+
resize=None,
|
| 224 |
+
slice_=slice_,
|
| 225 |
+
)
|
| 226 |
+
assert lfw.images[0].shape == (
|
| 227 |
+
slice_[0].stop - slice_[0].start,
|
| 228 |
+
slice_[1].stop - slice_[1].start,
|
| 229 |
+
)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_olivetti_faces.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test Olivetti faces fetcher, if the data is available,
|
| 2 |
+
or if specifically requested via environment variable
|
| 3 |
+
(e.g. for CI jobs)."""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from sklearn.datasets.tests.test_common import check_return_X_y
|
| 8 |
+
from sklearn.utils import Bunch
|
| 9 |
+
from sklearn.utils._testing import assert_array_equal
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_olivetti_faces(fetch_olivetti_faces_fxt):
|
| 13 |
+
data = fetch_olivetti_faces_fxt(shuffle=True, random_state=0)
|
| 14 |
+
|
| 15 |
+
assert isinstance(data, Bunch)
|
| 16 |
+
for expected_keys in ("data", "images", "target", "DESCR"):
|
| 17 |
+
assert expected_keys in data.keys()
|
| 18 |
+
|
| 19 |
+
assert data.data.shape == (400, 4096)
|
| 20 |
+
assert data.images.shape == (400, 64, 64)
|
| 21 |
+
assert data.target.shape == (400,)
|
| 22 |
+
assert_array_equal(np.unique(np.sort(data.target)), np.arange(40))
|
| 23 |
+
assert data.DESCR.startswith(".. _olivetti_faces_dataset:")
|
| 24 |
+
|
| 25 |
+
# test the return_X_y option
|
| 26 |
+
check_return_X_y(data, fetch_olivetti_faces_fxt)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_openml.py
ADDED
|
@@ -0,0 +1,1618 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the openml loader."""
|
| 2 |
+
|
| 3 |
+
import gzip
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
from functools import partial
|
| 8 |
+
from importlib import resources
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from urllib.error import HTTPError
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import pytest
|
| 14 |
+
import scipy.sparse
|
| 15 |
+
|
| 16 |
+
import sklearn
|
| 17 |
+
from sklearn import config_context
|
| 18 |
+
from sklearn.datasets import fetch_openml as fetch_openml_orig
|
| 19 |
+
from sklearn.datasets._openml import (
|
| 20 |
+
_OPENML_PREFIX,
|
| 21 |
+
_get_local_path,
|
| 22 |
+
_open_openml_url,
|
| 23 |
+
_retry_with_clean_cache,
|
| 24 |
+
)
|
| 25 |
+
from sklearn.utils import Bunch
|
| 26 |
+
from sklearn.utils._optional_dependencies import check_pandas_support
|
| 27 |
+
from sklearn.utils._testing import (
|
| 28 |
+
SkipTest,
|
| 29 |
+
assert_allclose,
|
| 30 |
+
assert_array_equal,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml"
|
| 34 |
+
# if True, urlopen will be monkey patched to only use local files
|
| 35 |
+
test_offline = True
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class _MockHTTPResponse:
|
| 39 |
+
def __init__(self, data, is_gzip):
|
| 40 |
+
self.data = data
|
| 41 |
+
self.is_gzip = is_gzip
|
| 42 |
+
|
| 43 |
+
def read(self, amt=-1):
|
| 44 |
+
return self.data.read(amt)
|
| 45 |
+
|
| 46 |
+
def close(self):
|
| 47 |
+
self.data.close()
|
| 48 |
+
|
| 49 |
+
def info(self):
|
| 50 |
+
if self.is_gzip:
|
| 51 |
+
return {"Content-Encoding": "gzip"}
|
| 52 |
+
return {}
|
| 53 |
+
|
| 54 |
+
def __iter__(self):
|
| 55 |
+
return iter(self.data)
|
| 56 |
+
|
| 57 |
+
def __enter__(self):
|
| 58 |
+
return self
|
| 59 |
+
|
| 60 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 61 |
+
return False
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Disable the disk-based cache when testing `fetch_openml`:
|
| 65 |
+
# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent
|
| 66 |
+
# with the version on openml.org. If one were to load the dataset outside of
|
| 67 |
+
# the tests, it may result in data that does not represent openml.org.
|
| 68 |
+
fetch_openml = partial(fetch_openml_orig, data_home=None)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
|
| 72 |
+
# monkey patches the urlopen function. Important note: Do NOT use this
|
| 73 |
+
# in combination with a regular cache directory, as the files that are
|
| 74 |
+
# stored as cache should not be mixed up with real openml datasets
|
| 75 |
+
url_prefix_data_description = "https://api.openml.org/api/v1/json/data/"
|
| 76 |
+
url_prefix_data_features = "https://api.openml.org/api/v1/json/data/features/"
|
| 77 |
+
url_prefix_download_data = "https://api.openml.org/data/v1/"
|
| 78 |
+
url_prefix_data_list = "https://api.openml.org/api/v1/json/data/list/"
|
| 79 |
+
|
| 80 |
+
path_suffix = ".gz"
|
| 81 |
+
read_fn = gzip.open
|
| 82 |
+
|
| 83 |
+
data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
|
| 84 |
+
|
| 85 |
+
def _file_name(url, suffix):
|
| 86 |
+
output = (
|
| 87 |
+
re.sub(r"\W", "-", url[len("https://api.openml.org/") :])
|
| 88 |
+
+ suffix
|
| 89 |
+
+ path_suffix
|
| 90 |
+
)
|
| 91 |
+
# Shorten the filenames to have better compatibility with windows 10
|
| 92 |
+
# and filenames > 260 characters
|
| 93 |
+
return (
|
| 94 |
+
output.replace("-json-data-list", "-jdl")
|
| 95 |
+
.replace("-json-data-features", "-jdf")
|
| 96 |
+
.replace("-json-data-qualities", "-jdq")
|
| 97 |
+
.replace("-json-data", "-jd")
|
| 98 |
+
.replace("-data_name", "-dn")
|
| 99 |
+
.replace("-download", "-dl")
|
| 100 |
+
.replace("-limit", "-l")
|
| 101 |
+
.replace("-data_version", "-dv")
|
| 102 |
+
.replace("-status", "-s")
|
| 103 |
+
.replace("-deactivated", "-dact")
|
| 104 |
+
.replace("-active", "-act")
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
|
| 108 |
+
assert url.startswith(expected_prefix)
|
| 109 |
+
|
| 110 |
+
data_file_name = _file_name(url, suffix)
|
| 111 |
+
data_file_path = resources.files(data_module) / data_file_name
|
| 112 |
+
|
| 113 |
+
with data_file_path.open("rb") as f:
|
| 114 |
+
if has_gzip_header and gzip_response:
|
| 115 |
+
fp = BytesIO(f.read())
|
| 116 |
+
return _MockHTTPResponse(fp, True)
|
| 117 |
+
else:
|
| 118 |
+
decompressed_f = read_fn(f, "rb")
|
| 119 |
+
fp = BytesIO(decompressed_f.read())
|
| 120 |
+
return _MockHTTPResponse(fp, False)
|
| 121 |
+
|
| 122 |
+
def _mock_urlopen_data_description(url, has_gzip_header):
|
| 123 |
+
return _mock_urlopen_shared(
|
| 124 |
+
url=url,
|
| 125 |
+
has_gzip_header=has_gzip_header,
|
| 126 |
+
expected_prefix=url_prefix_data_description,
|
| 127 |
+
suffix=".json",
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
def _mock_urlopen_data_features(url, has_gzip_header):
|
| 131 |
+
return _mock_urlopen_shared(
|
| 132 |
+
url=url,
|
| 133 |
+
has_gzip_header=has_gzip_header,
|
| 134 |
+
expected_prefix=url_prefix_data_features,
|
| 135 |
+
suffix=".json",
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
def _mock_urlopen_download_data(url, has_gzip_header):
|
| 139 |
+
return _mock_urlopen_shared(
|
| 140 |
+
url=url,
|
| 141 |
+
has_gzip_header=has_gzip_header,
|
| 142 |
+
expected_prefix=url_prefix_download_data,
|
| 143 |
+
suffix=".arff",
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
def _mock_urlopen_data_list(url, has_gzip_header):
|
| 147 |
+
assert url.startswith(url_prefix_data_list)
|
| 148 |
+
|
| 149 |
+
data_file_name = _file_name(url, ".json")
|
| 150 |
+
data_file_path = resources.files(data_module) / data_file_name
|
| 151 |
+
|
| 152 |
+
# load the file itself, to simulate a http error
|
| 153 |
+
with data_file_path.open("rb") as f:
|
| 154 |
+
decompressed_f = read_fn(f, "rb")
|
| 155 |
+
decoded_s = decompressed_f.read().decode("utf-8")
|
| 156 |
+
json_data = json.loads(decoded_s)
|
| 157 |
+
if "error" in json_data:
|
| 158 |
+
raise HTTPError(
|
| 159 |
+
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
with data_file_path.open("rb") as f:
|
| 163 |
+
if has_gzip_header:
|
| 164 |
+
fp = BytesIO(f.read())
|
| 165 |
+
return _MockHTTPResponse(fp, True)
|
| 166 |
+
else:
|
| 167 |
+
decompressed_f = read_fn(f, "rb")
|
| 168 |
+
fp = BytesIO(decompressed_f.read())
|
| 169 |
+
return _MockHTTPResponse(fp, False)
|
| 170 |
+
|
| 171 |
+
def _mock_urlopen(request, *args, **kwargs):
    """Dispatch a mocked `urlopen` call to the handler matching the URL prefix."""
    url = request.get_full_url()
    has_gzip_header = request.get_header("Accept-encoding") == "gzip"

    # Ordered prefix -> handler table; first match wins, mirroring the
    # original if/elif chain.
    dispatch_table = (
        (url_prefix_data_list, _mock_urlopen_data_list),
        (url_prefix_data_features, _mock_urlopen_data_features),
        (url_prefix_download_data, _mock_urlopen_download_data),
        (url_prefix_data_description, _mock_urlopen_data_description),
    )
    for prefix, handler in dispatch_table:
        if url.startswith(prefix):
            return handler(url, has_gzip_header)
    raise ValueError("Unknown mocking URL pattern: %s" % url)
+
# XXX: Global variable
# Replace sklearn's real urlopen with the mock dispatcher above so the tests
# never touch the network when offline testing is enabled.
if test_offline:
    context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
| 189 |
+
|
| 190 |
+
###############################################################################
|
| 191 |
+
# Test the behaviour of `fetch_openml` depending of the input parameters.
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
        # titanic
        (40945, {"data_id": 40945}, 1309, 13, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_as_frame_true(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
    gzip_response,
):
    """Check the behaviour of `fetch_openml` with `as_frame=True`.

    Fetch by ID and/or name (depending if the file was previously cached).
    """
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
    bunch = fetch_openml(
        as_frame=True,
        cache=False,
        parser=parser,
        **dataset_params,
    )

    assert int(bunch.details["id"]) == data_id
    assert isinstance(bunch, Bunch)

    # `frame` holds features and target(s) together.
    assert isinstance(bunch.frame, pd.DataFrame)
    assert bunch.frame.shape == (n_samples, n_features + n_targets)

    assert isinstance(bunch.data, pd.DataFrame)
    assert bunch.data.shape == (n_samples, n_features)

    # A single target is returned as a Series, multiple targets as a DataFrame.
    if n_targets == 1:
        assert isinstance(bunch.target, pd.Series)
        assert bunch.target.shape == (n_samples,)
    else:
        assert isinstance(bunch.target, pd.DataFrame)
        assert bunch.target.shape == (n_samples, n_targets)

    # `categories` is only populated when as_frame=False (see the sibling test).
    assert bunch.categories is None
|
| 262 |
+
|
| 263 |
+
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_as_frame_false(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
):
    """Check the behaviour of `fetch_openml` with `as_frame=False`.

    Fetch both by ID and/or name + version.
    """
    pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch = fetch_openml(
        as_frame=False,
        cache=False,
        parser=parser,
        **dataset_params,
    )
    assert int(bunch.details["id"]) == data_id
    assert isinstance(bunch, Bunch)

    # No DataFrame is built when as_frame=False.
    assert bunch.frame is None

    assert isinstance(bunch.data, np.ndarray)
    assert bunch.data.shape == (n_samples, n_features)

    assert isinstance(bunch.target, np.ndarray)
    if n_targets == 1:
        assert bunch.target.shape == (n_samples,)
    else:
        assert bunch.target.shape == (n_samples, n_targets)

    # Unlike as_frame=True, categorical metadata is exposed as a dict here.
    assert isinstance(bunch.categories, dict)
|
| 324 |
+
|
| 325 |
+
@pytest.mark.parametrize("data_id", [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
    """Check the consistency of the LIAC-ARFF and pandas parsers."""
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch_liac = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="liac-arff",
    )
    bunch_pandas = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="pandas",
    )

    # The data frames for the input features should match up to some numerical
    # dtype conversions (e.g. float64 <=> Int64) due to limitations of the
    # LIAC-ARFF parser.
    data_liac, data_pandas = bunch_liac.data, bunch_pandas.data

    def convert_numerical_dtypes(series):
        # Cast LIAC-ARFF numeric columns to the dtype the pandas parser
        # inferred so the two frames can be compared directly.
        pandas_series = data_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        else:
            return series

    data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
    pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)

    # Let's also check that the .frame attributes also match
    frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame

    # Note that the .frame attribute is a superset of the .data attribute:
    pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)

    # However the remaining columns, typically the target(s), are not necessarily
    # dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser.
    # Therefore, extra dtype conversions are required for those columns:

    def convert_numerical_and_categorical_dtypes(series):
        pandas_series = frame_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        elif isinstance(pandas_series.dtype, pd.CategoricalDtype):
            # Compare categorical features by converting categorical liac uses
            # strings to denote the categories, we rename the categories to make
            # them comparable to the pandas parser. Fixing this behavior in
            # LIAC-ARFF would allow to check the consistency in the future but
            # we do not plan to maintain the LIAC-ARFF on the long term.
            return series.cat.rename_categories(pandas_series.cat.categories)
        else:
            return series

    frame_liac_with_fixed_dtypes = frame_liac.apply(
        convert_numerical_and_categorical_dtypes
    )
    pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
| 388 |
+
|
| 389 |
+
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
    """Check the equivalence of the dataset when using `as_frame=False` and
    `as_frame=True`.
    """
    pytest.importorskip("pandas")

    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    # Fetch the same dataset twice, varying only `as_frame`.
    common_kwargs = dict(data_id=data_id, cache=False, parser=parser)
    bunch_as_frame_true = fetch_openml(as_frame=True, **common_kwargs)
    bunch_as_frame_false = fetch_openml(as_frame=False, **common_kwargs)

    # Both representations must carry numerically identical data and targets.
    assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)
    assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)
|
| 415 |
+
|
| 416 |
+
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_iris_pandas(monkeypatch, parser):
    """Check fetching on a numerical only dataset with string labels."""
    pd = pytest.importorskip("pandas")
    CategoricalDtype = pd.api.types.CategoricalDtype
    # Expected metadata for the iris dataset (OpenML id=61).
    data_id = 61
    data_shape = (150, 4)
    target_shape = (150,)
    frame_shape = (150, 5)

    target_dtype = CategoricalDtype(
        ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
    )
    data_dtypes = [np.float64] * 4
    data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
    target_name = "class"

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    # Features: all-float DataFrame with the expected column names.
    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == [target_name]

    # Target: a categorical Series with the three iris species labels.
    assert isinstance(target, pd.Series)
    assert target.dtype == target_dtype
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.index.is_unique

    # Frame: features plus the target column.
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == data_dtypes + [target_dtype])
    assert frame.index.is_unique
|
| 463 |
+
|
| 464 |
+
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
    """Check that we can force the target to not be the default target."""
    pd = pytest.importorskip("pandas")

    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch_forcing_target = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        target_column=target_column,
        parser=parser,
    )
    bunch_default = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )

    # Forcing the target leaves the overall frame untouched; only the
    # data/target split changes.
    pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
    if isinstance(target_column, list):
        # Two forced targets: 2 target columns, 3 remaining feature columns.
        pd.testing.assert_index_equal(
            bunch_forcing_target.target.columns, pd.Index(target_column)
        )
        assert bunch_forcing_target.data.shape == (150, 3)
    else:
        # A single forced target: Series target, 4 remaining feature columns.
        assert bunch_forcing_target.target.name == target_column
        assert bunch_forcing_target.data.shape == (150, 4)
|
| 496 |
+
|
| 497 |
+
@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=True`."""
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    # Fetch twice, varying only `return_X_y`.
    shared_kwargs = dict(data_id=data_id, as_frame=True, cache=False, parser=parser)
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)

    # The (X, y) tuple must mirror the Bunch's data/target attributes.
    pd.testing.assert_frame_equal(bunch.data, X)
    if isinstance(y, pd.Series):
        pd.testing.assert_series_equal(bunch.target, y)
    else:
        pd.testing.assert_frame_equal(bunch.target, y)
|
| 525 |
+
|
| 526 |
+
@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=False`."""
    pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    # Fetch twice, varying only `return_X_y`.
    shared_kwargs = dict(data_id=data_id, as_frame=False, cache=False, parser=parser)
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)

    # The (X, y) tuple must mirror the Bunch's data/target arrays.
    assert_array_equal(bunch.data, X)
    assert_array_equal(bunch.target, y)
|
| 551 |
+
|
| 552 |
+
def test_fetch_openml_difference_parsers(monkeypatch):
    """Check the difference between liac-arff and pandas parser."""
    pytest.importorskip("pandas")

    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    # When `as_frame=False`, the categories will be ordinally encoded with
    # liac-arff parser while this is not the case with pandas parser.
    fetch_kwargs = dict(data_id=data_id, as_frame=False, cache=False)
    bunch_liac_arff = fetch_openml(parser="liac-arff", **fetch_kwargs)
    bunch_pandas = fetch_openml(parser="pandas", **fetch_kwargs)

    # liac-arff yields a float array; pandas keeps an object array.
    assert bunch_liac_arff.data.dtype.kind == "f"
    assert bunch_pandas.data.dtype == "O"
|
| 577 |
+
|
| 578 |
+
###############################################################################
|
| 579 |
+
# Test the ARFF parsing on several dataset to check if detect the correct
|
| 580 |
+
# types (categories, integers, floats).
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
@pytest.fixture(scope="module")
def datasets_column_names():
    """Returns the columns names for each dataset."""
    # Mapping: OpenML data_id -> expected column names (features + target)
    # in the order produced by `fetch_openml(..., as_frame=True).frame`.
    return {
        # iris
        61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"],
        # anneal
        2: [
            "family",
            "product-type",
            "steel",
            "carbon",
            "hardness",
            "temper_rolling",
            "condition",
            "formability",
            "strength",
            "non-ageing",
            "surface-finish",
            "surface-quality",
            "enamelability",
            "bc",
            "bf",
            "bt",
            "bw%2Fme",
            "bl",
            "m",
            "chrom",
            "phos",
            "cbond",
            "marvi",
            "exptl",
            "ferro",
            "corr",
            "blue%2Fbright%2Fvarn%2Fclean",
            "lustre",
            "jurofm",
            "s",
            "p",
            "shape",
            "thick",
            "width",
            "len",
            "oil",
            "bore",
            "packing",
            "class",
        ],
        # cpu
        561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"],
        # emotions (multi-label: six target columns at the end)
        40589: [
            "Mean_Acc1298_Mean_Mem40_Centroid",
            "Mean_Acc1298_Mean_Mem40_Rolloff",
            "Mean_Acc1298_Mean_Mem40_Flux",
            "Mean_Acc1298_Mean_Mem40_MFCC_0",
            "Mean_Acc1298_Mean_Mem40_MFCC_1",
            "Mean_Acc1298_Mean_Mem40_MFCC_2",
            "Mean_Acc1298_Mean_Mem40_MFCC_3",
            "Mean_Acc1298_Mean_Mem40_MFCC_4",
            "Mean_Acc1298_Mean_Mem40_MFCC_5",
            "Mean_Acc1298_Mean_Mem40_MFCC_6",
            "Mean_Acc1298_Mean_Mem40_MFCC_7",
            "Mean_Acc1298_Mean_Mem40_MFCC_8",
            "Mean_Acc1298_Mean_Mem40_MFCC_9",
            "Mean_Acc1298_Mean_Mem40_MFCC_10",
            "Mean_Acc1298_Mean_Mem40_MFCC_11",
            "Mean_Acc1298_Mean_Mem40_MFCC_12",
            "Mean_Acc1298_Std_Mem40_Centroid",
            "Mean_Acc1298_Std_Mem40_Rolloff",
            "Mean_Acc1298_Std_Mem40_Flux",
            "Mean_Acc1298_Std_Mem40_MFCC_0",
            "Mean_Acc1298_Std_Mem40_MFCC_1",
            "Mean_Acc1298_Std_Mem40_MFCC_2",
            "Mean_Acc1298_Std_Mem40_MFCC_3",
            "Mean_Acc1298_Std_Mem40_MFCC_4",
            "Mean_Acc1298_Std_Mem40_MFCC_5",
            "Mean_Acc1298_Std_Mem40_MFCC_6",
            "Mean_Acc1298_Std_Mem40_MFCC_7",
            "Mean_Acc1298_Std_Mem40_MFCC_8",
            "Mean_Acc1298_Std_Mem40_MFCC_9",
            "Mean_Acc1298_Std_Mem40_MFCC_10",
            "Mean_Acc1298_Std_Mem40_MFCC_11",
            "Mean_Acc1298_Std_Mem40_MFCC_12",
            "Std_Acc1298_Mean_Mem40_Centroid",
            "Std_Acc1298_Mean_Mem40_Rolloff",
            "Std_Acc1298_Mean_Mem40_Flux",
            "Std_Acc1298_Mean_Mem40_MFCC_0",
            "Std_Acc1298_Mean_Mem40_MFCC_1",
            "Std_Acc1298_Mean_Mem40_MFCC_2",
            "Std_Acc1298_Mean_Mem40_MFCC_3",
            "Std_Acc1298_Mean_Mem40_MFCC_4",
            "Std_Acc1298_Mean_Mem40_MFCC_5",
            "Std_Acc1298_Mean_Mem40_MFCC_6",
            "Std_Acc1298_Mean_Mem40_MFCC_7",
            "Std_Acc1298_Mean_Mem40_MFCC_8",
            "Std_Acc1298_Mean_Mem40_MFCC_9",
            "Std_Acc1298_Mean_Mem40_MFCC_10",
            "Std_Acc1298_Mean_Mem40_MFCC_11",
            "Std_Acc1298_Mean_Mem40_MFCC_12",
            "Std_Acc1298_Std_Mem40_Centroid",
            "Std_Acc1298_Std_Mem40_Rolloff",
            "Std_Acc1298_Std_Mem40_Flux",
            "Std_Acc1298_Std_Mem40_MFCC_0",
            "Std_Acc1298_Std_Mem40_MFCC_1",
            "Std_Acc1298_Std_Mem40_MFCC_2",
            "Std_Acc1298_Std_Mem40_MFCC_3",
            "Std_Acc1298_Std_Mem40_MFCC_4",
            "Std_Acc1298_Std_Mem40_MFCC_5",
            "Std_Acc1298_Std_Mem40_MFCC_6",
            "Std_Acc1298_Std_Mem40_MFCC_7",
            "Std_Acc1298_Std_Mem40_MFCC_8",
            "Std_Acc1298_Std_Mem40_MFCC_9",
            "Std_Acc1298_Std_Mem40_MFCC_10",
            "Std_Acc1298_Std_Mem40_MFCC_11",
            "Std_Acc1298_Std_Mem40_MFCC_12",
            "BH_LowPeakAmp",
            "BH_LowPeakBPM",
            "BH_HighPeakAmp",
            "BH_HighPeakBPM",
            "BH_HighLowRatio",
            "BHSUM1",
            "BHSUM2",
            "BHSUM3",
            "amazed.suprised",
            "happy.pleased",
            "relaxing.calm",
            "quiet.still",
            "sad.lonely",
            "angry.aggresive",
        ],
        # adult-census (note: most column names carry a trailing ':' in the ARFF)
        1119: [
            "age",
            "workclass",
            "fnlwgt:",
            "education:",
            "education-num:",
            "marital-status:",
            "occupation:",
            "relationship:",
            "race:",
            "sex:",
            "capital-gain:",
            "capital-loss:",
            "hours-per-week:",
            "native-country:",
            "class",
        ],
        # miceprotein
        40966: [
            "DYRK1A_N",
            "ITSN1_N",
            "BDNF_N",
            "NR1_N",
            "NR2A_N",
            "pAKT_N",
            "pBRAF_N",
            "pCAMKII_N",
            "pCREB_N",
            "pELK_N",
            "pERK_N",
            "pJNK_N",
            "PKCA_N",
            "pMEK_N",
            "pNR1_N",
            "pNR2A_N",
            "pNR2B_N",
            "pPKCAB_N",
            "pRSK_N",
            "AKT_N",
            "BRAF_N",
            "CAMKII_N",
            "CREB_N",
            "ELK_N",
            "ERK_N",
            "GSK3B_N",
            "JNK_N",
            "MEK_N",
            "TRKA_N",
            "RSK_N",
            "APP_N",
            "Bcatenin_N",
            "SOD1_N",
            "MTOR_N",
            "P38_N",
            "pMTOR_N",
            "DSCR1_N",
            "AMPKA_N",
            "NR2B_N",
            "pNUMB_N",
            "RAPTOR_N",
            "TIAM1_N",
            "pP70S6_N",
            "NUMB_N",
            "P70S6_N",
            "pGSK3B_N",
            "pPKCG_N",
            "CDK5_N",
            "S6_N",
            "ADARB1_N",
            "AcetylH3K9_N",
            "RRP1_N",
            "BAX_N",
            "ARC_N",
            "ERBB4_N",
            "nNOS_N",
            "Tau_N",
            "GFAP_N",
            "GluR3_N",
            "GluR4_N",
            "IL1B_N",
            "P3525_N",
            "pCASP9_N",
            "PSD95_N",
            "SNCA_N",
            "Ubiquitin_N",
            "pGSK3B_Tyr216_N",
            "SHH_N",
            "BAD_N",
            "BCL2_N",
            "pS6_N",
            "pCFOS_N",
            "SYP_N",
            "H3AcK18_N",
            "EGR1_N",
            "H3MeK4_N",
            "CaNA_N",
            "class",
        ],
        # titanic
        40945: [
            "pclass",
            "survived",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
            "boat",
            "body",
            "home.dest",
        ],
    }
|
| 825 |
+
|
| 826 |
+
@pytest.fixture(scope="module")
def datasets_missing_values():
    """Expected number of missing values per column, keyed by dataset id.

    Columns not listed are expected to be complete (0 missing values) — see
    the `.get(name, 0)` lookup in `test_fetch_openml_types_inference`.
    """
    return {
        61: {},
        2: {
            "family": 11,
            "temper_rolling": 9,
            "condition": 2,
            "formability": 4,
            "non-ageing": 10,
            "surface-finish": 11,
            "enamelability": 11,
            "bc": 11,
            "bf": 10,
            "bt": 11,
            "bw%2Fme": 8,
            "bl": 9,
            "m": 11,
            "chrom": 11,
            "phos": 11,
            "cbond": 10,
            "marvi": 11,
            "exptl": 11,
            "ferro": 11,
            "corr": 11,
            "blue%2Fbright%2Fvarn%2Fclean": 11,
            "lustre": 8,
            "jurofm": 11,
            "s": 11,
            "p": 11,
            "oil": 10,
            "packing": 11,
        },
        561: {},
        40589: {},
        1119: {},
        40966: {"BCL2_N": 7},
        40945: {
            "age": 263,
            "fare": 1,
            "cabin": 1014,
            "embarked": 2,
            "boat": 823,
            "body": 1188,
            "home.dest": 564,
        },
    }
|
| 874 |
+
|
| 875 |
+
@pytest.mark.parametrize(
    "data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints",
    [
        # iris dataset
        (61, "liac-arff", 1, 4, 0),
        (61, "pandas", 1, 4, 0),
        # anneal dataset
        (2, "liac-arff", 33, 6, 0),
        (2, "pandas", 33, 2, 4),
        # cpu dataset
        (561, "liac-arff", 1, 7, 0),
        (561, "pandas", 1, 0, 7),
        # emotions dataset
        (40589, "liac-arff", 6, 72, 0),
        (40589, "pandas", 6, 69, 3),
        # adult-census dataset
        (1119, "liac-arff", 9, 6, 0),
        (1119, "pandas", 9, 0, 6),
        # miceprotein
        (40966, "liac-arff", 1, 77, 0),
        (40966, "pandas", 1, 77, 0),
        # titanic
        (40945, "liac-arff", 3, 6, 0),
        (40945, "pandas", 3, 3, 3),
    ],
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_types_inference(
    monkeypatch,
    data_id,
    parser,
    expected_n_categories,
    expected_n_floats,
    expected_n_ints,
    gzip_response,
    datasets_column_names,
    datasets_missing_values,
):
    """Check that `fetch_openml` infer the right number of categories, integers, and
    floats."""
    pd = pytest.importorskip("pandas")
    CategoricalDtype = pd.api.types.CategoricalDtype

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)

    bunch = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )
    frame = bunch.frame

    # Tally the inferred dtypes by kind: categorical, float, integer.
    n_categories = len(
        [dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
    )
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
    n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"])

    assert n_categories == expected_n_categories
    assert n_floats == expected_n_floats
    assert n_ints == expected_n_ints

    assert frame.columns.tolist() == datasets_column_names[data_id]

    # Columns absent from `datasets_missing_values` default to 0 missing.
    frame_feature_to_n_nan = frame.isna().sum().to_dict()
    for name, n_missing in frame_feature_to_n_nan.items():
        expected_missing = datasets_missing_values[data_id].get(name, 0)
        assert n_missing == expected_missing
|
| 945 |
+
|
| 946 |
+
###############################################################################
|
| 947 |
+
# Test some more specific behaviour
|
| 948 |
+
|
| 949 |
+
|
| 950 |
+
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"parser": "unknown"},
            "The 'parser' parameter of fetch_openml must be a str among",
        ),
        (
            {"as_frame": "unknown"},
            "The 'as_frame' parameter of fetch_openml must be an instance",
        ),
    ],
)
def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg):
    """Invalid `parser`/`as_frame` values must raise an informative ValueError."""
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    with pytest.raises(ValueError, match=err_msg):
        fetch_openml(data_id=data_id, **params)
|
| 969 |
+
|
| 970 |
+
@pytest.mark.parametrize(
    "params",
    [
        {"as_frame": True, "parser": "auto"},
        {"as_frame": "auto", "parser": "auto"},
        {"as_frame": False, "parser": "pandas"},
        {"as_frame": False, "parser": "auto"},
    ],
)
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
    """Check that we raise the proper errors when we require pandas."""
    data_id = 1119
    try:
        check_pandas_support("test_fetch_openml_requires_pandas")
    except ImportError:
        # pandas is missing: each parameter combination above needs it and
        # must therefore raise an informative ImportError.
        _monkey_patch_webbased_functions(monkeypatch, data_id, True)
        err_msg = "requires pandas to be installed. Alternatively, explicitly"
        with pytest.raises(ImportError, match=err_msg):
            fetch_openml(data_id=data_id, **params)
    else:
        # This test is only meaningful in an environment without pandas.
        raise SkipTest("This test requires pandas to not be installed.")
|
| 992 |
+
|
| 993 |
+
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"parser": "pandas"},
            "Sparse ARFF datasets cannot be loaded with parser='pandas'",
        ),
        (
            {"as_frame": True},
            "Sparse ARFF datasets cannot be loaded with as_frame=True.",
        ),
        (
            {"parser": "pandas", "as_frame": True},
            "Sparse ARFF datasets cannot be loaded with as_frame=True.",
        ),
    ],
)
def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg):
    """Check that we raise the expected error for sparse ARFF datasets and
    a wrong set of incompatible parameters.
    """
    pytest.importorskip("pandas")
    # data_id 292 is served as a sparse ARFF dataset by the mock.
    data_id = 292

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    with pytest.raises(ValueError, match=err_msg):
        fetch_openml(
            data_id=data_id,
            cache=False,
            **params,
        )
|
| 1026 |
+
|
| 1027 |
+
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
    "data_id, data_type",
    [
        (61, "dataframe"),  # iris dataset version 1
        (292, "sparse"),  # Australian dataset version 1
    ],
)
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
    """Check the auto mode of `fetch_openml`."""
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    data = fetch_openml(data_id=data_id, as_frame="auto", cache=False)
    # as_frame="auto" yields a DataFrame for dense data, a CSR matrix for sparse.
    klass = pd.DataFrame if data_type == "dataframe" else scipy.sparse.csr_matrix
    assert isinstance(data.data, klass)
|
| 1044 |
+
|
| 1045 |
+
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
|
| 1046 |
+
"""Check that we raise a warning regarding the working memory when using
|
| 1047 |
+
LIAC-ARFF parser."""
|
| 1048 |
+
pytest.importorskip("pandas")
|
| 1049 |
+
|
| 1050 |
+
data_id = 1119
|
| 1051 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
|
| 1052 |
+
|
| 1053 |
+
msg = "Could not adhere to working_memory config."
|
| 1054 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1055 |
+
with config_context(working_memory=1e-6):
|
| 1056 |
+
fetch_openml(
|
| 1057 |
+
data_id=data_id,
|
| 1058 |
+
as_frame=True,
|
| 1059 |
+
cache=False,
|
| 1060 |
+
parser="liac-arff",
|
| 1061 |
+
)
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1065 |
+
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
|
| 1066 |
+
"""Check that a warning is raised when multiple versions exist and no version is
|
| 1067 |
+
requested."""
|
| 1068 |
+
data_id = 61
|
| 1069 |
+
data_name = "iris"
|
| 1070 |
+
|
| 1071 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1072 |
+
|
| 1073 |
+
msg = re.escape(
|
| 1074 |
+
"Multiple active versions of the dataset matching the name"
|
| 1075 |
+
" iris exist. Versions may be fundamentally different, "
|
| 1076 |
+
"returning version 1. Available versions:\n"
|
| 1077 |
+
"- version 1, status: active\n"
|
| 1078 |
+
" url: https://www.openml.org/search?type=data&id=61\n"
|
| 1079 |
+
"- version 3, status: active\n"
|
| 1080 |
+
" url: https://www.openml.org/search?type=data&id=969\n"
|
| 1081 |
+
)
|
| 1082 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1083 |
+
fetch_openml(
|
| 1084 |
+
name=data_name,
|
| 1085 |
+
as_frame=False,
|
| 1086 |
+
cache=False,
|
| 1087 |
+
parser="liac-arff",
|
| 1088 |
+
)
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1092 |
+
def test_fetch_openml_no_target(monkeypatch, gzip_response):
|
| 1093 |
+
"""Check that we can get a dataset without target."""
|
| 1094 |
+
data_id = 61
|
| 1095 |
+
target_column = None
|
| 1096 |
+
expected_observations = 150
|
| 1097 |
+
expected_features = 5
|
| 1098 |
+
|
| 1099 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1100 |
+
data = fetch_openml(
|
| 1101 |
+
data_id=data_id,
|
| 1102 |
+
target_column=target_column,
|
| 1103 |
+
cache=False,
|
| 1104 |
+
as_frame=False,
|
| 1105 |
+
parser="liac-arff",
|
| 1106 |
+
)
|
| 1107 |
+
assert data.data.shape == (expected_observations, expected_features)
|
| 1108 |
+
assert data.target is None
|
| 1109 |
+
|
| 1110 |
+
|
| 1111 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1112 |
+
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
|
| 1113 |
+
def test_missing_values_pandas(monkeypatch, gzip_response, parser):
|
| 1114 |
+
"""check that missing values in categories are compatible with pandas
|
| 1115 |
+
categorical"""
|
| 1116 |
+
pytest.importorskip("pandas")
|
| 1117 |
+
|
| 1118 |
+
data_id = 42585
|
| 1119 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
|
| 1120 |
+
penguins = fetch_openml(
|
| 1121 |
+
data_id=data_id,
|
| 1122 |
+
cache=False,
|
| 1123 |
+
as_frame=True,
|
| 1124 |
+
parser=parser,
|
| 1125 |
+
)
|
| 1126 |
+
|
| 1127 |
+
cat_dtype = penguins.data.dtypes["sex"]
|
| 1128 |
+
# there are nans in the categorical
|
| 1129 |
+
assert penguins.data["sex"].isna().any()
|
| 1130 |
+
assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"])
|
| 1131 |
+
|
| 1132 |
+
|
| 1133 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1134 |
+
@pytest.mark.parametrize(
|
| 1135 |
+
"dataset_params",
|
| 1136 |
+
[
|
| 1137 |
+
{"data_id": 40675},
|
| 1138 |
+
{"data_id": None, "name": "glass2", "version": 1},
|
| 1139 |
+
],
|
| 1140 |
+
)
|
| 1141 |
+
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params):
|
| 1142 |
+
"""Check that we raise a warning when the dataset is inactive."""
|
| 1143 |
+
data_id = 40675
|
| 1144 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1145 |
+
msg = "Version 1 of dataset glass2 is inactive,"
|
| 1146 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1147 |
+
glass2 = fetch_openml(
|
| 1148 |
+
cache=False, as_frame=False, parser="liac-arff", **dataset_params
|
| 1149 |
+
)
|
| 1150 |
+
assert glass2.data.shape == (163, 9)
|
| 1151 |
+
assert glass2.details["id"] == "40675"
|
| 1152 |
+
|
| 1153 |
+
|
| 1154 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1155 |
+
@pytest.mark.parametrize(
|
| 1156 |
+
"data_id, params, err_type, err_msg",
|
| 1157 |
+
[
|
| 1158 |
+
(40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"),
|
| 1159 |
+
(
|
| 1160 |
+
61,
|
| 1161 |
+
{"data_id": 61, "target_column": ["sepalwidth", "class"]},
|
| 1162 |
+
ValueError,
|
| 1163 |
+
"Can only handle homogeneous multi-target datasets",
|
| 1164 |
+
),
|
| 1165 |
+
(
|
| 1166 |
+
40945,
|
| 1167 |
+
{"data_id": 40945, "as_frame": False},
|
| 1168 |
+
ValueError,
|
| 1169 |
+
(
|
| 1170 |
+
"STRING attributes are not supported for array representation. Try"
|
| 1171 |
+
" as_frame=True"
|
| 1172 |
+
),
|
| 1173 |
+
),
|
| 1174 |
+
(
|
| 1175 |
+
2,
|
| 1176 |
+
{"data_id": 2, "target_column": "family", "as_frame": True},
|
| 1177 |
+
ValueError,
|
| 1178 |
+
"Target column 'family'",
|
| 1179 |
+
),
|
| 1180 |
+
(
|
| 1181 |
+
2,
|
| 1182 |
+
{"data_id": 2, "target_column": "family", "as_frame": False},
|
| 1183 |
+
ValueError,
|
| 1184 |
+
"Target column 'family'",
|
| 1185 |
+
),
|
| 1186 |
+
(
|
| 1187 |
+
61,
|
| 1188 |
+
{"data_id": 61, "target_column": "undefined"},
|
| 1189 |
+
KeyError,
|
| 1190 |
+
"Could not find target_column='undefined'",
|
| 1191 |
+
),
|
| 1192 |
+
(
|
| 1193 |
+
61,
|
| 1194 |
+
{"data_id": 61, "target_column": ["undefined", "class"]},
|
| 1195 |
+
KeyError,
|
| 1196 |
+
"Could not find target_column='undefined'",
|
| 1197 |
+
),
|
| 1198 |
+
],
|
| 1199 |
+
)
|
| 1200 |
+
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
|
| 1201 |
+
def test_fetch_openml_error(
|
| 1202 |
+
monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser
|
| 1203 |
+
):
|
| 1204 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1205 |
+
if params.get("as_frame", True) or parser == "pandas":
|
| 1206 |
+
pytest.importorskip("pandas")
|
| 1207 |
+
with pytest.raises(err_type, match=err_msg):
|
| 1208 |
+
fetch_openml(cache=False, parser=parser, **params)
|
| 1209 |
+
|
| 1210 |
+
|
| 1211 |
+
@pytest.mark.parametrize(
|
| 1212 |
+
"params, err_type, err_msg",
|
| 1213 |
+
[
|
| 1214 |
+
(
|
| 1215 |
+
{"data_id": -1, "name": None, "version": "version"},
|
| 1216 |
+
ValueError,
|
| 1217 |
+
"The 'version' parameter of fetch_openml must be an int in the range",
|
| 1218 |
+
),
|
| 1219 |
+
(
|
| 1220 |
+
{"data_id": -1, "name": "nAmE"},
|
| 1221 |
+
ValueError,
|
| 1222 |
+
"The 'data_id' parameter of fetch_openml must be an int in the range",
|
| 1223 |
+
),
|
| 1224 |
+
(
|
| 1225 |
+
{"data_id": -1, "name": "nAmE", "version": "version"},
|
| 1226 |
+
ValueError,
|
| 1227 |
+
"The 'version' parameter of fetch_openml must be an int",
|
| 1228 |
+
),
|
| 1229 |
+
(
|
| 1230 |
+
{},
|
| 1231 |
+
ValueError,
|
| 1232 |
+
"Neither name nor data_id are provided. Please provide name or data_id.",
|
| 1233 |
+
),
|
| 1234 |
+
],
|
| 1235 |
+
)
|
| 1236 |
+
def test_fetch_openml_raises_illegal_argument(params, err_type, err_msg):
|
| 1237 |
+
with pytest.raises(err_type, match=err_msg):
|
| 1238 |
+
fetch_openml(**params)
|
| 1239 |
+
|
| 1240 |
+
|
| 1241 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1242 |
+
def test_warn_ignore_attribute(monkeypatch, gzip_response):
|
| 1243 |
+
data_id = 40966
|
| 1244 |
+
expected_row_id_msg = "target_column='{}' has flag is_row_identifier."
|
| 1245 |
+
expected_ignore_msg = "target_column='{}' has flag is_ignore."
|
| 1246 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1247 |
+
# single column test
|
| 1248 |
+
target_col = "MouseID"
|
| 1249 |
+
msg = expected_row_id_msg.format(target_col)
|
| 1250 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1251 |
+
fetch_openml(
|
| 1252 |
+
data_id=data_id,
|
| 1253 |
+
target_column=target_col,
|
| 1254 |
+
cache=False,
|
| 1255 |
+
as_frame=False,
|
| 1256 |
+
parser="liac-arff",
|
| 1257 |
+
)
|
| 1258 |
+
target_col = "Genotype"
|
| 1259 |
+
msg = expected_ignore_msg.format(target_col)
|
| 1260 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1261 |
+
fetch_openml(
|
| 1262 |
+
data_id=data_id,
|
| 1263 |
+
target_column=target_col,
|
| 1264 |
+
cache=False,
|
| 1265 |
+
as_frame=False,
|
| 1266 |
+
parser="liac-arff",
|
| 1267 |
+
)
|
| 1268 |
+
# multi column test
|
| 1269 |
+
target_col = "MouseID"
|
| 1270 |
+
msg = expected_row_id_msg.format(target_col)
|
| 1271 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1272 |
+
fetch_openml(
|
| 1273 |
+
data_id=data_id,
|
| 1274 |
+
target_column=[target_col, "class"],
|
| 1275 |
+
cache=False,
|
| 1276 |
+
as_frame=False,
|
| 1277 |
+
parser="liac-arff",
|
| 1278 |
+
)
|
| 1279 |
+
target_col = "Genotype"
|
| 1280 |
+
msg = expected_ignore_msg.format(target_col)
|
| 1281 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1282 |
+
fetch_openml(
|
| 1283 |
+
data_id=data_id,
|
| 1284 |
+
target_column=[target_col, "class"],
|
| 1285 |
+
cache=False,
|
| 1286 |
+
as_frame=False,
|
| 1287 |
+
parser="liac-arff",
|
| 1288 |
+
)
|
| 1289 |
+
|
| 1290 |
+
|
| 1291 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1292 |
+
def test_dataset_with_openml_error(monkeypatch, gzip_response):
|
| 1293 |
+
data_id = 1
|
| 1294 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1295 |
+
msg = "OpenML registered a problem with the dataset. It might be unusable. Error:"
|
| 1296 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1297 |
+
fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
|
| 1298 |
+
|
| 1299 |
+
|
| 1300 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1301 |
+
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
|
| 1302 |
+
data_id = 3
|
| 1303 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1304 |
+
msg = "OpenML raised a warning on the dataset. It might be unusable. Warning:"
|
| 1305 |
+
with pytest.warns(UserWarning, match=msg):
|
| 1306 |
+
fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
|
| 1307 |
+
|
| 1308 |
+
|
| 1309 |
+
def test_fetch_openml_overwrite_default_params_read_csv(monkeypatch):
|
| 1310 |
+
"""Check that we can overwrite the default parameters of `read_csv`."""
|
| 1311 |
+
pytest.importorskip("pandas")
|
| 1312 |
+
data_id = 1590
|
| 1313 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
|
| 1314 |
+
|
| 1315 |
+
common_params = {
|
| 1316 |
+
"data_id": data_id,
|
| 1317 |
+
"as_frame": True,
|
| 1318 |
+
"cache": False,
|
| 1319 |
+
"parser": "pandas",
|
| 1320 |
+
}
|
| 1321 |
+
|
| 1322 |
+
# By default, the initial spaces are skipped. We checked that setting the parameter
|
| 1323 |
+
# `skipinitialspace` to False will have an effect.
|
| 1324 |
+
adult_without_spaces = fetch_openml(**common_params)
|
| 1325 |
+
adult_with_spaces = fetch_openml(
|
| 1326 |
+
**common_params, read_csv_kwargs={"skipinitialspace": False}
|
| 1327 |
+
)
|
| 1328 |
+
assert all(
|
| 1329 |
+
cat.startswith(" ") for cat in adult_with_spaces.frame["class"].cat.categories
|
| 1330 |
+
)
|
| 1331 |
+
assert not any(
|
| 1332 |
+
cat.startswith(" ")
|
| 1333 |
+
for cat in adult_without_spaces.frame["class"].cat.categories
|
| 1334 |
+
)
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
###############################################################################
|
| 1338 |
+
# Test cache, retry mechanisms, checksum, etc.
|
| 1339 |
+
|
| 1340 |
+
|
| 1341 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1342 |
+
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
|
| 1343 |
+
data_id = 61
|
| 1344 |
+
|
| 1345 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1346 |
+
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
|
| 1347 |
+
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
|
| 1348 |
+
# first fill the cache
|
| 1349 |
+
response1 = _open_openml_url(openml_path, cache_directory)
|
| 1350 |
+
# assert file exists
|
| 1351 |
+
location = _get_local_path(openml_path, cache_directory)
|
| 1352 |
+
assert os.path.isfile(location)
|
| 1353 |
+
# redownload, to utilize cache
|
| 1354 |
+
response2 = _open_openml_url(openml_path, cache_directory)
|
| 1355 |
+
assert response1.read() == response2.read()
|
| 1356 |
+
|
| 1357 |
+
|
| 1358 |
+
@pytest.mark.parametrize("write_to_disk", [True, False])
|
| 1359 |
+
def test_open_openml_url_unlinks_local_path(monkeypatch, tmpdir, write_to_disk):
|
| 1360 |
+
data_id = 61
|
| 1361 |
+
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
|
| 1362 |
+
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
|
| 1363 |
+
location = _get_local_path(openml_path, cache_directory)
|
| 1364 |
+
|
| 1365 |
+
def _mock_urlopen(request, *args, **kwargs):
|
| 1366 |
+
if write_to_disk:
|
| 1367 |
+
with open(location, "w") as f:
|
| 1368 |
+
f.write("")
|
| 1369 |
+
raise ValueError("Invalid request")
|
| 1370 |
+
|
| 1371 |
+
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
|
| 1372 |
+
|
| 1373 |
+
with pytest.raises(ValueError, match="Invalid request"):
|
| 1374 |
+
_open_openml_url(openml_path, cache_directory)
|
| 1375 |
+
|
| 1376 |
+
assert not os.path.exists(location)
|
| 1377 |
+
|
| 1378 |
+
|
| 1379 |
+
def test_retry_with_clean_cache(tmpdir):
|
| 1380 |
+
data_id = 61
|
| 1381 |
+
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
|
| 1382 |
+
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
|
| 1383 |
+
location = _get_local_path(openml_path, cache_directory)
|
| 1384 |
+
os.makedirs(os.path.dirname(location))
|
| 1385 |
+
|
| 1386 |
+
with open(location, "w") as f:
|
| 1387 |
+
f.write("")
|
| 1388 |
+
|
| 1389 |
+
@_retry_with_clean_cache(openml_path, cache_directory)
|
| 1390 |
+
def _load_data():
|
| 1391 |
+
# The first call will raise an error since location exists
|
| 1392 |
+
if os.path.exists(location):
|
| 1393 |
+
raise Exception("File exist!")
|
| 1394 |
+
return 1
|
| 1395 |
+
|
| 1396 |
+
warn_msg = "Invalid cache, redownloading file"
|
| 1397 |
+
with pytest.warns(RuntimeWarning, match=warn_msg):
|
| 1398 |
+
result = _load_data()
|
| 1399 |
+
assert result == 1
|
| 1400 |
+
|
| 1401 |
+
|
| 1402 |
+
def test_retry_with_clean_cache_http_error(tmpdir):
|
| 1403 |
+
data_id = 61
|
| 1404 |
+
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
|
| 1405 |
+
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
|
| 1406 |
+
|
| 1407 |
+
@_retry_with_clean_cache(openml_path, cache_directory)
|
| 1408 |
+
def _load_data():
|
| 1409 |
+
raise HTTPError(
|
| 1410 |
+
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
|
| 1411 |
+
)
|
| 1412 |
+
|
| 1413 |
+
error_msg = "Simulated mock error"
|
| 1414 |
+
with pytest.raises(HTTPError, match=error_msg):
|
| 1415 |
+
_load_data()
|
| 1416 |
+
|
| 1417 |
+
|
| 1418 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1419 |
+
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
|
| 1420 |
+
def _mock_urlopen_raise(request, *args, **kwargs):
|
| 1421 |
+
raise ValueError(
|
| 1422 |
+
"This mechanism intends to test correct cache"
|
| 1423 |
+
"handling. As such, urlopen should never be "
|
| 1424 |
+
"accessed. URL: %s" % request.get_full_url()
|
| 1425 |
+
)
|
| 1426 |
+
|
| 1427 |
+
data_id = 61
|
| 1428 |
+
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
|
| 1429 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1430 |
+
X_fetched, y_fetched = fetch_openml(
|
| 1431 |
+
data_id=data_id,
|
| 1432 |
+
cache=True,
|
| 1433 |
+
data_home=cache_directory,
|
| 1434 |
+
return_X_y=True,
|
| 1435 |
+
as_frame=False,
|
| 1436 |
+
parser="liac-arff",
|
| 1437 |
+
)
|
| 1438 |
+
|
| 1439 |
+
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise)
|
| 1440 |
+
|
| 1441 |
+
X_cached, y_cached = fetch_openml(
|
| 1442 |
+
data_id=data_id,
|
| 1443 |
+
cache=True,
|
| 1444 |
+
data_home=cache_directory,
|
| 1445 |
+
return_X_y=True,
|
| 1446 |
+
as_frame=False,
|
| 1447 |
+
parser="liac-arff",
|
| 1448 |
+
)
|
| 1449 |
+
np.testing.assert_array_equal(X_fetched, X_cached)
|
| 1450 |
+
np.testing.assert_array_equal(y_fetched, y_cached)
|
| 1451 |
+
|
| 1452 |
+
|
| 1453 |
+
@pytest.mark.parametrize(
|
| 1454 |
+
"as_frame, parser",
|
| 1455 |
+
[
|
| 1456 |
+
(True, "liac-arff"),
|
| 1457 |
+
(False, "liac-arff"),
|
| 1458 |
+
(True, "pandas"),
|
| 1459 |
+
(False, "pandas"),
|
| 1460 |
+
],
|
| 1461 |
+
)
|
| 1462 |
+
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir, parser):
|
| 1463 |
+
"""Check that the checksum is working as expected."""
|
| 1464 |
+
if as_frame or parser == "pandas":
|
| 1465 |
+
pytest.importorskip("pandas")
|
| 1466 |
+
|
| 1467 |
+
data_id = 2
|
| 1468 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
|
| 1469 |
+
|
| 1470 |
+
# create a temporary modified arff file
|
| 1471 |
+
original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
|
| 1472 |
+
original_data_file_name = "data-v1-dl-1666876.arff.gz"
|
| 1473 |
+
original_data_path = resources.files(original_data_module) / original_data_file_name
|
| 1474 |
+
corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
|
| 1475 |
+
with original_data_path.open("rb") as orig_file:
|
| 1476 |
+
orig_gzip = gzip.open(orig_file, "rb")
|
| 1477 |
+
data = bytearray(orig_gzip.read())
|
| 1478 |
+
data[len(data) - 1] = 37
|
| 1479 |
+
|
| 1480 |
+
with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
|
| 1481 |
+
modified_gzip.write(data)
|
| 1482 |
+
|
| 1483 |
+
# Requests are already mocked by monkey_patch_webbased_functions.
|
| 1484 |
+
# We want to reuse that mock for all requests except file download,
|
| 1485 |
+
# hence creating a thin mock over the original mock
|
| 1486 |
+
mocked_openml_url = sklearn.datasets._openml.urlopen
|
| 1487 |
+
|
| 1488 |
+
def swap_file_mock(request, *args, **kwargs):
|
| 1489 |
+
url = request.get_full_url()
|
| 1490 |
+
if url.endswith("data/v1/download/1666876"):
|
| 1491 |
+
with open(corrupt_copy_path, "rb") as f:
|
| 1492 |
+
corrupted_data = f.read()
|
| 1493 |
+
return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
|
| 1494 |
+
else:
|
| 1495 |
+
return mocked_openml_url(request)
|
| 1496 |
+
|
| 1497 |
+
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)
|
| 1498 |
+
|
| 1499 |
+
# validate failed checksum
|
| 1500 |
+
with pytest.raises(ValueError) as exc:
|
| 1501 |
+
sklearn.datasets.fetch_openml(
|
| 1502 |
+
data_id=data_id, cache=False, as_frame=as_frame, parser=parser
|
| 1503 |
+
)
|
| 1504 |
+
# exception message should have file-path
|
| 1505 |
+
assert exc.match("1666876")
|
| 1506 |
+
|
| 1507 |
+
|
| 1508 |
+
def test_open_openml_url_retry_on_network_error(monkeypatch):
|
| 1509 |
+
def _mock_urlopen_network_error(request, *args, **kwargs):
|
| 1510 |
+
raise HTTPError(
|
| 1511 |
+
url=None, code=404, msg="Simulated network error", hdrs=None, fp=BytesIO()
|
| 1512 |
+
)
|
| 1513 |
+
|
| 1514 |
+
monkeypatch.setattr(
|
| 1515 |
+
sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error
|
| 1516 |
+
)
|
| 1517 |
+
|
| 1518 |
+
invalid_openml_url = "invalid-url"
|
| 1519 |
+
|
| 1520 |
+
with pytest.warns(
|
| 1521 |
+
UserWarning,
|
| 1522 |
+
match=re.escape(
|
| 1523 |
+
"A network error occurred while downloading"
|
| 1524 |
+
f" {_OPENML_PREFIX + invalid_openml_url}. Retrying..."
|
| 1525 |
+
),
|
| 1526 |
+
) as record:
|
| 1527 |
+
with pytest.raises(HTTPError, match="Simulated network error"):
|
| 1528 |
+
_open_openml_url(invalid_openml_url, None, delay=0)
|
| 1529 |
+
assert len(record) == 3
|
| 1530 |
+
|
| 1531 |
+
|
| 1532 |
+
###############################################################################
|
| 1533 |
+
# Non-regressiont tests
|
| 1534 |
+
|
| 1535 |
+
|
| 1536 |
+
@pytest.mark.parametrize("gzip_response", [True, False])
|
| 1537 |
+
@pytest.mark.parametrize("parser", ("liac-arff", "pandas"))
|
| 1538 |
+
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser):
|
| 1539 |
+
"""Check that we can load the "zoo" dataset.
|
| 1540 |
+
Non-regression test for:
|
| 1541 |
+
https://github.com/scikit-learn/scikit-learn/issues/14340
|
| 1542 |
+
"""
|
| 1543 |
+
if parser == "pandas":
|
| 1544 |
+
pytest.importorskip("pandas")
|
| 1545 |
+
data_id = 62
|
| 1546 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
|
| 1547 |
+
|
| 1548 |
+
dataset = sklearn.datasets.fetch_openml(
|
| 1549 |
+
data_id=data_id, cache=False, as_frame=False, parser=parser
|
| 1550 |
+
)
|
| 1551 |
+
assert dataset is not None
|
| 1552 |
+
# The dataset has 17 features, including 1 ignored (animal),
|
| 1553 |
+
# so we assert that we don't have the ignored feature in the final Bunch
|
| 1554 |
+
assert dataset["data"].shape == (101, 16)
|
| 1555 |
+
assert "animal" not in dataset["feature_names"]
|
| 1556 |
+
|
| 1557 |
+
|
| 1558 |
+
def test_fetch_openml_strip_quotes(monkeypatch):
|
| 1559 |
+
"""Check that we strip the single quotes when used as a string delimiter.
|
| 1560 |
+
|
| 1561 |
+
Non-regression test for:
|
| 1562 |
+
https://github.com/scikit-learn/scikit-learn/issues/23381
|
| 1563 |
+
"""
|
| 1564 |
+
pd = pytest.importorskip("pandas")
|
| 1565 |
+
data_id = 40966
|
| 1566 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
|
| 1567 |
+
|
| 1568 |
+
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
|
| 1569 |
+
mice_pandas = fetch_openml(parser="pandas", **common_params)
|
| 1570 |
+
mice_liac_arff = fetch_openml(parser="liac-arff", **common_params)
|
| 1571 |
+
pd.testing.assert_series_equal(mice_pandas.target, mice_liac_arff.target)
|
| 1572 |
+
assert not mice_pandas.target.str.startswith("'").any()
|
| 1573 |
+
assert not mice_pandas.target.str.endswith("'").any()
|
| 1574 |
+
|
| 1575 |
+
# similar behaviour should be observed when the column is not the target
|
| 1576 |
+
mice_pandas = fetch_openml(parser="pandas", target_column="NUMB_N", **common_params)
|
| 1577 |
+
mice_liac_arff = fetch_openml(
|
| 1578 |
+
parser="liac-arff", target_column="NUMB_N", **common_params
|
| 1579 |
+
)
|
| 1580 |
+
pd.testing.assert_series_equal(
|
| 1581 |
+
mice_pandas.frame["class"], mice_liac_arff.frame["class"]
|
| 1582 |
+
)
|
| 1583 |
+
assert not mice_pandas.frame["class"].str.startswith("'").any()
|
| 1584 |
+
assert not mice_pandas.frame["class"].str.endswith("'").any()
|
| 1585 |
+
|
| 1586 |
+
|
| 1587 |
+
def test_fetch_openml_leading_whitespace(monkeypatch):
|
| 1588 |
+
"""Check that we can strip leading whitespace in pandas parser.
|
| 1589 |
+
|
| 1590 |
+
Non-regression test for:
|
| 1591 |
+
https://github.com/scikit-learn/scikit-learn/issues/25311
|
| 1592 |
+
"""
|
| 1593 |
+
pd = pytest.importorskip("pandas")
|
| 1594 |
+
data_id = 1590
|
| 1595 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
|
| 1596 |
+
|
| 1597 |
+
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
|
| 1598 |
+
adult_pandas = fetch_openml(parser="pandas", **common_params)
|
| 1599 |
+
adult_liac_arff = fetch_openml(parser="liac-arff", **common_params)
|
| 1600 |
+
pd.testing.assert_series_equal(
|
| 1601 |
+
adult_pandas.frame["class"], adult_liac_arff.frame["class"]
|
| 1602 |
+
)
|
| 1603 |
+
|
| 1604 |
+
|
| 1605 |
+
def test_fetch_openml_quotechar_escapechar(monkeypatch):
|
| 1606 |
+
"""Check that we can handle escapechar and single/double quotechar.
|
| 1607 |
+
|
| 1608 |
+
Non-regression test for:
|
| 1609 |
+
https://github.com/scikit-learn/scikit-learn/issues/25478
|
| 1610 |
+
"""
|
| 1611 |
+
pd = pytest.importorskip("pandas")
|
| 1612 |
+
data_id = 42074
|
| 1613 |
+
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
|
| 1614 |
+
|
| 1615 |
+
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
|
| 1616 |
+
adult_pandas = fetch_openml(parser="pandas", **common_params)
|
| 1617 |
+
adult_liac_arff = fetch_openml(parser="liac-arff", **common_params)
|
| 1618 |
+
pd.testing.assert_frame_equal(adult_pandas.frame, adult_liac_arff.frame)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_rcv1.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the rcv1 loader, if the data is available,
|
| 2 |
+
or if specifically requested via environment variable
|
| 3 |
+
(e.g. for CI jobs)."""
|
| 4 |
+
|
| 5 |
+
from functools import partial
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import scipy.sparse as sp
|
| 9 |
+
|
| 10 |
+
from sklearn.datasets.tests.test_common import check_return_X_y
|
| 11 |
+
from sklearn.utils._testing import assert_almost_equal, assert_array_equal
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_fetch_rcv1(fetch_rcv1_fxt, global_random_seed):
|
| 15 |
+
data1 = fetch_rcv1_fxt(shuffle=False)
|
| 16 |
+
X1, Y1 = data1.data, data1.target
|
| 17 |
+
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
|
| 18 |
+
|
| 19 |
+
# test sparsity
|
| 20 |
+
assert sp.issparse(X1)
|
| 21 |
+
assert sp.issparse(Y1)
|
| 22 |
+
assert 60915113 == X1.data.size
|
| 23 |
+
assert 2606875 == Y1.data.size
|
| 24 |
+
|
| 25 |
+
# test shapes
|
| 26 |
+
assert (804414, 47236) == X1.shape
|
| 27 |
+
assert (804414, 103) == Y1.shape
|
| 28 |
+
assert (804414,) == s1.shape
|
| 29 |
+
assert 103 == len(cat_list)
|
| 30 |
+
|
| 31 |
+
# test descr
|
| 32 |
+
assert data1.DESCR.startswith(".. _rcv1_dataset:")
|
| 33 |
+
|
| 34 |
+
# test ordering of categories
|
| 35 |
+
first_categories = ["C11", "C12", "C13", "C14", "C15", "C151"]
|
| 36 |
+
assert_array_equal(first_categories, cat_list[:6])
|
| 37 |
+
|
| 38 |
+
# test number of sample for some categories
|
| 39 |
+
some_categories = ("GMIL", "E143", "CCAT")
|
| 40 |
+
number_non_zero_in_cat = (5, 1206, 381327)
|
| 41 |
+
for num, cat in zip(number_non_zero_in_cat, some_categories):
|
| 42 |
+
j = cat_list.index(cat)
|
| 43 |
+
assert num == Y1[:, j].data.size
|
| 44 |
+
|
| 45 |
+
# test shuffling and subset
|
| 46 |
+
data2 = fetch_rcv1_fxt(
|
| 47 |
+
shuffle=True, subset="train", random_state=global_random_seed
|
| 48 |
+
)
|
| 49 |
+
X2, Y2 = data2.data, data2.target
|
| 50 |
+
s2 = data2.sample_id
|
| 51 |
+
|
| 52 |
+
# test return_X_y option
|
| 53 |
+
fetch_func = partial(fetch_rcv1_fxt, shuffle=False, subset="train")
|
| 54 |
+
check_return_X_y(data2, fetch_func)
|
| 55 |
+
|
| 56 |
+
# The first 23149 samples are the training samples
|
| 57 |
+
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
|
| 58 |
+
|
| 59 |
+
# test some precise values
|
| 60 |
+
some_sample_ids = (2286, 3274, 14042)
|
| 61 |
+
for sample_id in some_sample_ids:
|
| 62 |
+
idx1 = s1.tolist().index(sample_id)
|
| 63 |
+
idx2 = s2.tolist().index(sample_id)
|
| 64 |
+
|
| 65 |
+
feature_values_1 = X1[idx1, :].toarray()
|
| 66 |
+
feature_values_2 = X2[idx2, :].toarray()
|
| 67 |
+
assert_almost_equal(feature_values_1, feature_values_2)
|
| 68 |
+
|
| 69 |
+
target_values_1 = Y1[idx1, :].toarray()
|
| 70 |
+
target_values_2 = Y2[idx2, :].toarray()
|
| 71 |
+
assert_almost_equal(target_values_1, target_values_2)
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_samples_generator.py
ADDED
|
@@ -0,0 +1,686 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from functools import partial
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
import scipy.sparse as sp
|
| 8 |
+
|
| 9 |
+
from sklearn.datasets import (
|
| 10 |
+
make_biclusters,
|
| 11 |
+
make_blobs,
|
| 12 |
+
make_checkerboard,
|
| 13 |
+
make_circles,
|
| 14 |
+
make_classification,
|
| 15 |
+
make_friedman1,
|
| 16 |
+
make_friedman2,
|
| 17 |
+
make_friedman3,
|
| 18 |
+
make_hastie_10_2,
|
| 19 |
+
make_low_rank_matrix,
|
| 20 |
+
make_moons,
|
| 21 |
+
make_multilabel_classification,
|
| 22 |
+
make_regression,
|
| 23 |
+
make_s_curve,
|
| 24 |
+
make_sparse_coded_signal,
|
| 25 |
+
make_sparse_spd_matrix,
|
| 26 |
+
make_sparse_uncorrelated,
|
| 27 |
+
make_spd_matrix,
|
| 28 |
+
make_swiss_roll,
|
| 29 |
+
)
|
| 30 |
+
from sklearn.utils._testing import (
|
| 31 |
+
assert_allclose,
|
| 32 |
+
assert_allclose_dense_sparse,
|
| 33 |
+
assert_almost_equal,
|
| 34 |
+
assert_array_almost_equal,
|
| 35 |
+
assert_array_equal,
|
| 36 |
+
)
|
| 37 |
+
from sklearn.utils.validation import assert_all_finite
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_make_classification():
    """Check output shapes and per-class sample counts of make_classification,
    and that the caller's ``weights`` list is left untouched."""
    weights = [0.1, 0.25]
    X, y = make_classification(
        n_samples=100,
        n_features=20,
        n_informative=5,
        n_redundant=1,
        n_repeated=1,
        n_classes=3,
        n_clusters_per_class=1,
        hypercube=False,
        shift=None,
        scale=None,
        weights=weights,
        random_state=0,
    )

    # The generator must not mutate the weights sequence passed in.
    assert weights == [0.1, 0.25]
    assert X.shape == (100, 20), "X shape mismatch"
    assert y.shape == (100,), "y shape mismatch"
    assert np.unique(y).shape == (3,), "Unexpected number of classes"
    # Expected counts follow from weights [0.1, 0.25] and n_samples=100:
    # 10, 25 and the remaining 65 samples.
    for label, count in ((0, 10), (1, 25), (2, 65)):
        assert sum(y == label) == count, (
            "Unexpected number of samples in class #%d" % label
        )

    # Test for n_features > 30
    X, y = make_classification(
        n_samples=2000,
        n_features=31,
        n_informative=31,
        n_redundant=0,
        n_repeated=0,
        hypercube=True,
        scale=0.5,
        random_state=0,
    )

    assert X.shape == (2000, 31), "X shape mismatch"
    assert y.shape == (2000,), "y shape mismatch"
    # View each row as a single structured scalar so np.unique counts
    # distinct rows; all 2000 rows must be unique.
    row_view = np.unique(X.view([("", X.dtype)] * X.shape[1]))
    unique_rows = row_view.view(X.dtype).reshape(-1, X.shape[1])
    assert unique_rows.shape[0] == 2000, "Unexpected number of unique rows"
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def test_make_classification_informative_features():
    """Test the construction of informative features in make_classification

    Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
    fully-specified `weights`.
    """
    # Create very separate clusters; check that vertices are unique and
    # correspond to classes
    class_sep = 1e6
    make = partial(
        make_classification,
        class_sep=class_sep,
        n_redundant=0,
        n_repeated=0,
        flip_y=0,
        shift=0,
        scale=1,
        shuffle=False,
    )

    # Each tuple exercises a different (dimensionality, class balance,
    # clusters-per-class) combination.
    for n_informative, weights, n_clusters_per_class in [
        (2, [1], 1),
        (2, [1 / 3] * 3, 1),
        (2, [1 / 4] * 4, 1),
        (2, [1 / 2] * 2, 2),
        (2, [3 / 4, 1 / 4], 2),
        (10, [1 / 3] * 3, 10),
        (int(64), [1], 1),
    ]:
        n_classes = len(weights)
        n_clusters = n_classes * n_clusters_per_class
        # 50 samples per cluster keeps the test fast but statistically stable.
        n_samples = n_clusters * 50

        for hypercube in (False, True):
            X, y = make(
                n_samples=n_samples,
                n_classes=n_classes,
                weights=weights,
                n_features=n_informative,
                n_informative=n_informative,
                n_clusters_per_class=n_clusters_per_class,
                hypercube=hypercube,
                random_state=0,
            )

            assert X.shape == (n_samples, n_informative)
            assert y.shape == (n_samples,)

            # Cluster by sign, viewed as strings to allow uniquing
            signs = np.sign(X)
            signs = signs.view(dtype="|S{0}".format(signs.strides[0])).ravel()
            unique_signs, cluster_index = np.unique(signs, return_inverse=True)

            assert (
                len(unique_signs) == n_clusters
            ), "Wrong number of clusters, or not in distinct quadrants"

            # Map each class label to the set of sign-clusters it occupies.
            clusters_by_class = defaultdict(set)
            for cluster, cls in zip(cluster_index, y):
                clusters_by_class[cls].add(cluster)
            for clusters in clusters_by_class.values():
                assert (
                    len(clusters) == n_clusters_per_class
                ), "Wrong number of clusters per class"
            assert len(clusters_by_class) == n_classes, "Wrong number of classes"

            # Empirical class frequencies must match the requested weights
            # (floor-division against weights gives ~1 when they agree).
            assert_array_almost_equal(
                np.bincount(y) / len(y) // weights,
                [1] * n_classes,
                err_msg="Wrong number of samples per class",
            )

            # Ensure on vertices of hypercube
            for cluster in range(len(unique_signs)):
                centroid = X[cluster_index == cluster].mean(axis=0)
                if hypercube:
                    assert_array_almost_equal(
                        np.abs(centroid) / class_sep,
                        np.ones(n_informative),
                        decimal=5,
                        err_msg="Clusters are not centered on hypercube vertices",
                    )
                else:
                    # With hypercube=False the centroids should NOT land on
                    # the vertices, so the same check must fail.
                    with pytest.raises(AssertionError):
                        assert_array_almost_equal(
                            np.abs(centroid) / class_sep,
                            np.ones(n_informative),
                            decimal=5,
                            err_msg=(
                                "Clusters should not be centered on hypercube vertices"
                            ),
                        )

    # Too many classes/clusters for the number of informative features.
    with pytest.raises(ValueError):
        make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1)
    with pytest.raises(ValueError):
        make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2)
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
# Shared expected error for every incompatible-weights case below.
_WEIGHTS_ERR_MSG = "Weights specified but incompatible with number of classes."


@pytest.mark.parametrize(
    "weights, err_type, err_msg",
    [
        ([], ValueError, _WEIGHTS_ERR_MSG),
        ([0.25, 0.75, 0.1], ValueError, _WEIGHTS_ERR_MSG),
        (np.array([]), ValueError, _WEIGHTS_ERR_MSG),
        (np.array([0.25, 0.75, 0.1]), ValueError, _WEIGHTS_ERR_MSG),
        (np.random.random(3), ValueError, _WEIGHTS_ERR_MSG),
    ],
)
def test_make_classification_weights_type(weights, err_type, err_msg):
    """Weights whose length cannot match the (default) number of classes —
    empty or too long, list or ndarray — must raise ValueError."""
    with pytest.raises(err_type, match=err_msg):
        make_classification(weights=weights)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}])
def test_make_classification_weights_array_or_list_ok(kwargs):
    """Passing weights as a list or as an ndarray must yield identical data."""
    from_list = make_classification(weights=[0.1, 0.9], random_state=0, **kwargs)
    from_array = make_classification(
        weights=np.array([0.1, 0.9]), random_state=0, **kwargs
    )
    assert_almost_equal(from_list[0], from_array[0])
    assert_almost_equal(from_list[1], from_array[1])
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def test_make_multilabel_classification_return_sequences():
    """With return_indicator=False, Y is a sequence of label tuples whose
    lengths respect ``allow_unlabeled``."""
    for allow_unlabeled, min_length in [(True, 0), (False, 1)]:
        X, Y = make_multilabel_classification(
            n_samples=100,
            n_features=20,
            n_classes=3,
            random_state=0,
            return_indicator=False,
            allow_unlabeled=allow_unlabeled,
        )
        assert X.shape == (100, 20), "X shape mismatch"
        if not allow_unlabeled:
            # The highest class index (n_classes - 1) must be reached.
            assert max(max(y) for y in Y) == 2
        assert min(len(y) for y in Y) == min_length
        assert max(len(y) for y in Y) <= 3
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def test_make_multilabel_classification_return_indicator():
    """Dense indicator output: shapes, minimum per-class counts, and the
    return_distributions extras (p_c, p_w_c)."""
    for allow_unlabeled, min_length in [(True, 0), (False, 1)]:
        X, Y = make_multilabel_classification(
            n_samples=25,
            n_features=20,
            n_classes=3,
            random_state=0,
            allow_unlabeled=allow_unlabeled,
        )
        assert X.shape == (25, 20), "X shape mismatch"
        assert Y.shape == (25, 3), "Y shape mismatch"
        assert np.all(np.sum(Y, axis=0) > min_length)

    # Also test return_distributions and return_indicator with True.
    # NOTE: ``allow_unlabeled`` deliberately keeps its last loop value (False)
    # so X2/Y2 are generated with the same settings as the final iteration.
    X2, Y2, p_c, p_w_c = make_multilabel_classification(
        n_samples=25,
        n_features=20,
        n_classes=3,
        random_state=0,
        allow_unlabeled=allow_unlabeled,
        return_distributions=True,
    )

    # Same seed and settings => same data as the last loop iteration.
    assert_array_almost_equal(X, X2)
    assert_array_equal(Y, Y2)
    # Class prior is a proper distribution over the 3 classes.
    assert p_c.shape == (3,)
    assert_almost_equal(p_c.sum(), 1)
    # Word-given-class distributions are column-normalized.
    assert p_w_c.shape == (20, 3)
    assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def test_make_multilabel_classification_return_indicator_sparse():
    """return_indicator="sparse" yields a scipy sparse label indicator."""
    for allow_unlabeled in (True, False):
        X, Y = make_multilabel_classification(
            n_samples=25,
            n_features=20,
            n_classes=3,
            random_state=0,
            return_indicator="sparse",
            allow_unlabeled=allow_unlabeled,
        )
        assert X.shape == (25, 20), "X shape mismatch"
        assert Y.shape == (25, 3), "Y shape mismatch"
        assert sp.issparse(Y)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def test_make_hastie_10_2():
    """Hastie 10.2 data: 100x10 features with a binary target."""
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert X.shape == (100, 10), "X shape mismatch"
    assert y.shape == (100,), "y shape mismatch"
    labels = np.unique(y)
    assert labels.shape == (2,), "Unexpected number of classes"
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def test_make_regression():
    """Shapes, coefficient sparsity, and noise level of make_regression."""
    X, y, coef = make_regression(
        n_samples=100,
        n_features=10,
        n_informative=3,
        effective_rank=5,
        coef=True,
        bias=0.0,
        noise=1.0,
        random_state=0,
    )

    assert X.shape == (100, 10), "X shape mismatch"
    assert y.shape == (100,), "y shape mismatch"
    assert coef.shape == (10,), "coef shape mismatch"
    # Only n_informative coefficients should be non-zero.
    assert (coef != 0.0).sum() == 3, "Unexpected number of informative features"

    # Test that y ~= np.dot(X, coef) + bias + N(0, 1.0).
    assert_almost_equal(np.std(y - np.dot(X, coef)), 1.0, decimal=1)

    # Test with small number of features.
    X, y = make_regression(n_samples=100, n_features=1)  # n_informative=3
    assert X.shape == (100, 1)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def test_make_regression_multitarget():
    """Multi-target regression: y and coef gain a targets axis."""
    X, y, coef = make_regression(
        n_samples=100,
        n_features=10,
        n_informative=3,
        n_targets=3,
        coef=True,
        noise=1.0,
        random_state=0,
    )

    assert X.shape == (100, 10), "X shape mismatch"
    assert y.shape == (100, 3), "y shape mismatch"
    assert coef.shape == (10, 3), "coef shape mismatch"
    # Each target column has exactly n_informative non-zero coefficients.
    assert_array_equal(
        (coef != 0.0).sum(axis=0), 3, "Unexpected number of informative features"
    )

    # Test that y ~= np.dot(X, coef) + bias + N(0, 1.0)
    assert_almost_equal(np.std(y - np.dot(X, coef)), 1.0, decimal=1)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def test_make_blobs():
    """Blob shapes, label set, and per-cluster standard deviations."""
    cluster_stds = np.array([0.05, 0.2, 0.4])
    cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    X, y = make_blobs(
        random_state=0,
        n_samples=50,
        n_features=2,
        centers=cluster_centers,
        cluster_std=cluster_stds,
    )

    assert X.shape == (50, 2), "X shape mismatch"
    assert y.shape == (50,), "y shape mismatch"
    assert np.unique(y).shape == (3,), "Unexpected number of blobs"
    # Each blob's spread around its center matches the requested std.
    for label, (center, std) in enumerate(zip(cluster_centers, cluster_stds)):
        assert_almost_equal((X[y == label] - center).std(), std, 1, "Unexpected std")
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def test_make_blobs_n_samples_list():
    """A list n_samples yields exactly that many samples per blob."""
    n_samples = [50, 30, 20]
    X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0)

    assert X.shape == (sum(n_samples), 2), "X shape mismatch"
    counts = np.bincount(y, minlength=len(n_samples))
    assert all(counts == n_samples), "Incorrect number of samples per blob"
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def test_make_blobs_n_samples_list_with_centers():
    """List n_samples combined with explicit centers and stds."""
    n_samples = [20, 20, 20]
    centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    cluster_stds = np.array([0.05, 0.2, 0.4])
    X, y = make_blobs(
        n_samples=n_samples, centers=centers, cluster_std=cluster_stds, random_state=0
    )

    assert X.shape == (sum(n_samples), 2), "X shape mismatch"
    counts = np.bincount(y, minlength=len(n_samples))
    assert all(counts == n_samples), "Incorrect number of samples per blob"
    # Per-blob spread matches the requested std.
    for label, (center, std) in enumerate(zip(centers, cluster_stds)):
        assert_almost_equal((X[y == label] - center).std(), std, 1, "Unexpected std")
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
@pytest.mark.parametrize(
    "n_samples", [[5, 3, 0], np.array([5, 3, 0]), tuple([5, 3, 0])]
)
def test_make_blobs_n_samples_centers_none(n_samples):
    """With centers=None, per-blob counts (any sequence type, zeros allowed)
    are honored."""
    X, y = make_blobs(n_samples=n_samples, centers=None, random_state=0)

    assert X.shape == (sum(n_samples), 2), "X shape mismatch"
    counts = np.bincount(y, minlength=len(n_samples))
    assert all(counts == n_samples), "Incorrect number of samples per blob"
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def test_make_blobs_return_centers():
    """return_centers=True yields the (n_blobs, n_features) center matrix."""
    n_samples = [10, 20]
    n_features = 3
    X, y, centers = make_blobs(
        n_samples=n_samples, n_features=n_features, return_centers=True, random_state=0
    )

    assert centers.shape == (len(n_samples), n_features)
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def test_make_blobs_error():
    """Inconsistent lengths or a non-array-like ``centers`` raise ValueError
    with a precise message."""
    n_samples = [20, 20, 20]
    centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    cluster_stds = np.array([0.05, 0.2, 0.4])

    # One center too few for three sample counts.
    wrong_centers_msg = re.escape(
        "Length of `n_samples` not consistent with number of centers. "
        f"Got n_samples = {n_samples} and centers = {centers[:-1]}"
    )
    with pytest.raises(ValueError, match=wrong_centers_msg):
        make_blobs(n_samples, centers=centers[:-1])

    # One std too few for three centers.
    wrong_std_msg = re.escape(
        "Length of `clusters_std` not consistent with number of centers. "
        f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}"
    )
    with pytest.raises(ValueError, match=wrong_std_msg):
        make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1])

    # A bare int is not a valid centers argument when n_samples is a list.
    wrong_type_msg = f"Parameter `centers` must be array-like. Got {3!r} instead"
    with pytest.raises(ValueError, match=wrong_type_msg):
        make_blobs(n_samples, centers=3)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def test_make_friedman1():
    """Targets match the noiseless Friedman #1 formula."""
    X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0, random_state=0)

    assert X.shape == (5, 10), "X shape mismatch"
    assert y.shape == (5,), "y shape mismatch"

    expected = (
        10 * np.sin(np.pi * X[:, 0] * X[:, 1])
        + 20 * (X[:, 2] - 0.5) ** 2
        + 10 * X[:, 3]
        + 5 * X[:, 4]
    )
    assert_array_almost_equal(y, expected)
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
def test_make_friedman2():
    """Targets match the noiseless Friedman #2 formula."""
    X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)

    assert X.shape == (5, 4), "X shape mismatch"
    assert y.shape == (5,), "y shape mismatch"

    expected = (
        X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
    ) ** 0.5
    assert_array_almost_equal(y, expected)
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def test_make_friedman3():
    """Targets match the noiseless Friedman #3 formula."""
    X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)

    assert X.shape == (5, 4), "X shape mismatch"
    assert y.shape == (5,), "y shape mismatch"

    expected = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0])
    assert_array_almost_equal(y, expected)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def test_make_low_rank_matrix():
    """Matrix has the requested shape and an effective rank of roughly 5
    (singular value mass concentrated in ~5 components)."""
    X = make_low_rank_matrix(
        n_samples=50,
        n_features=25,
        effective_rank=5,
        tail_strength=0.01,
        random_state=0,
    )

    assert X.shape == (50, 25), "X shape mismatch"

    _, singular_values, _ = np.linalg.svd(X)
    assert sum(singular_values) - 5 < 0.1, "X rank is not approximately 5"
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def test_make_sparse_coded_signal():
    """Each code row has exactly n_nonzero_coefs nonzeros, Y reconstructs as
    X @ D, and dictionary atoms are unit-norm."""
    Y, D, X = make_sparse_coded_signal(
        n_samples=5,
        n_components=8,
        n_features=10,
        n_nonzero_coefs=3,
        random_state=0,
    )
    assert Y.shape == (5, 10), "Y shape mismatch"
    assert D.shape == (8, 10), "D shape mismatch"
    assert X.shape == (5, 8), "X shape mismatch"
    for code_row in X:
        assert np.count_nonzero(code_row) == 3, "Non-zero coefs mismatch"
    assert_allclose(Y, X @ D)
    # Rows of the dictionary (atoms) have unit l2 norm.
    assert_allclose(np.sqrt((D**2).sum(axis=1)), np.ones(D.shape[0]))
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def test_make_sparse_uncorrelated():
    """Smoke test: only output shapes are checked."""
    X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)

    assert X.shape == (5, 10), "X shape mismatch"
    assert y.shape == (5,), "y shape mismatch"
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def test_make_spd_matrix():
    """Result is square, symmetric, and has strictly positive eigenvalues."""
    X = make_spd_matrix(n_dim=5, random_state=0)

    assert X.shape == (5, 5), "X shape mismatch"
    assert_array_almost_equal(X, X.T)

    eigenvalues, _ = np.linalg.eig(X)
    assert np.all(eigenvalues > 0), "X is not positive-definite"
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
@pytest.mark.parametrize("norm_diag", [True, False])
@pytest.mark.parametrize(
    "sparse_format", [None, "bsr", "coo", "csc", "csr", "dia", "dok", "lil"]
)
def test_make_sparse_spd_matrix(norm_diag, sparse_format, global_random_seed):
    """Output is symmetric positive-definite in the requested dense or sparse
    format; with norm_diag=True the diagonal is all ones."""
    n_dim = 5
    X = make_sparse_spd_matrix(
        n_dim=n_dim,
        norm_diag=norm_diag,
        sparse_format=sparse_format,
        random_state=global_random_seed,
    )

    assert X.shape == (n_dim, n_dim), "X shape mismatch"
    if sparse_format is None:
        assert not sp.issparse(X)
        assert_allclose(X, X.T)
        dense = X
    else:
        assert sp.issparse(X) and X.format == sparse_format
        assert_allclose_dense_sparse(X, X.T)
        dense = X.toarray()

    # scipy.sparse.linalg.eigs cannot return the full spectrum, so check
    # positive-definiteness on the dense version.
    eigenvalues, _ = np.linalg.eig(dense)
    assert np.all(eigenvalues > 0), "X is not positive-definite"

    if norm_diag:
        # Check that leading diagonal elements are 1
        assert_array_almost_equal(dense.diagonal(), np.ones(n_dim))
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@pytest.mark.parametrize("hole", [False, True])
def test_make_swiss_roll(hole):
    """Coordinates follow the swiss-roll parametrization (t·cos t, ·, t·sin t),
    with and without the hole variant."""
    X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0, hole=hole)

    assert X.shape == (5, 3)
    assert t.shape == (5,)
    assert_array_almost_equal(X[:, 0], t * np.cos(t))
    assert_array_almost_equal(X[:, 2], t * np.sin(t))
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def test_make_s_curve():
    """Coordinates follow the S-curve parametrization."""
    X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)

    assert X.shape == (5, 3), "X shape mismatch"
    assert t.shape == (5,), "t shape mismatch"
    assert_array_almost_equal(X[:, 0], np.sin(t))
    assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def test_make_biclusters():
    """Shapes, finiteness, and random_state determinism of make_biclusters."""
    X, rows, cols = make_biclusters(
        shape=(100, 100), n_clusters=4, shuffle=True, random_state=0
    )
    assert X.shape == (100, 100), "X shape mismatch"
    assert rows.shape == (4, 100), "rows shape mismatch"
    assert cols.shape == (4, 100), "columns shape mismatch"
    for arr in (X, rows, cols):
        assert_all_finite(arr)

    # Same seed must reproduce the same data.
    X2, _, _ = make_biclusters(
        shape=(100, 100), n_clusters=4, shuffle=True, random_state=0
    )
    assert_array_almost_equal(X, X2)
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
def test_make_checkerboard():
    """Shapes, finiteness, and random_state determinism of make_checkerboard."""
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=(20, 5), shuffle=True, random_state=0
    )
    assert X.shape == (100, 100), "X shape mismatch"
    assert rows.shape == (100, 100), "rows shape mismatch"
    assert cols.shape == (100, 100), "columns shape mismatch"

    # An int n_clusters is accepted too.
    X, rows, cols = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
    )
    for arr in (X, rows, cols):
        assert_all_finite(arr)

    # Same seed must reproduce the same data.
    X1, _, _ = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
    )
    X2, _, _ = make_checkerboard(
        shape=(100, 100), n_clusters=2, shuffle=True, random_state=0
    )
    assert_array_almost_equal(X1, X2)
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def test_make_moons():
    """Every generated point lies on one of the two unit half-circles."""
    X, y = make_moons(3, shuffle=False)
    for point, label in zip(X, y):
        # Moon 0 is centered at the origin, moon 1 at (1, 0.5).
        center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
        dist_sqr = ((point - center) ** 2).sum()
        assert_almost_equal(
            dist_sqr, 1.0, err_msg="Point is not on expected unit circle"
        )
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def test_make_moons_unbalanced():
    """A two-element n_samples tuple splits samples between the two moons;
    other tuple lengths raise ValueError."""
    X, y = make_moons(n_samples=(7, 5))
    assert (
        np.sum(y == 0) == 7 and np.sum(y == 1) == 5
    ), "Number of samples in a moon is wrong"
    assert X.shape == (12, 2), "X shape mismatch"
    assert y.shape == (12,), "y shape mismatch"

    with pytest.raises(
        ValueError,
        match=r"`n_samples` can be either an int or a two-element tuple.",
    ):
        make_moons(n_samples=(10,))
|
| 644 |
+
|
| 645 |
+
|
| 646 |
+
def test_make_circles():
    """Points lie exactly on the outer unit circle (label 0) or the inner
    circle of radius ``factor`` (label 1), for both odd and even sample
    counts, and samples are split correctly between the circles.

    Fix: the original repeated the ``dist_exp = ...`` assignment twice in a
    row; the duplicate (dead) statement is removed.
    """
    factor = 0.3

    for n_samples, n_outer, n_inner in [(7, 3, 4), (8, 4, 4)]:
        # Testing odd and even case, because in the past make_circles always
        # created an even number of samples.
        X, y = make_circles(n_samples, shuffle=False, noise=None, factor=factor)
        assert X.shape == (n_samples, 2), "X shape mismatch"
        assert y.shape == (n_samples,), "y shape mismatch"
        center = [0.0, 0.0]
        for x, label in zip(X, y):
            dist_sqr = ((x - center) ** 2).sum()
            # Compare squared distances: radius 1 outside, ``factor`` inside.
            dist_exp = 1.0 if label == 0 else factor**2
            assert_almost_equal(
                dist_sqr, dist_exp, err_msg="Point is not on expected circle"
            )

        assert X[y == 0].shape == (
            n_outer,
            2,
        ), "Samples not correctly distributed across circles."
        assert X[y == 1].shape == (
            n_inner,
            2,
        ), "Samples not correctly distributed across circles."
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
def test_make_circles_unbalanced():
    """A two-element n_samples tuple splits samples between the circles;
    other tuple lengths raise ValueError."""
    X, y = make_circles(n_samples=(2, 8))

    assert np.sum(y == 0) == 2, "Number of samples in inner circle is wrong"
    assert np.sum(y == 1) == 8, "Number of samples in outer circle is wrong"
    assert X.shape == (10, 2), "X shape mismatch"
    assert y.shape == (10,), "y shape mismatch"

    with pytest.raises(
        ValueError,
        match="When a tuple, n_samples must have exactly two elements.",
    ):
        make_circles(n_samples=(10,))
|
openflamingo/lib/python3.10/site-packages/sklearn/datasets/tests/test_svmlight_format.py
ADDED
|
@@ -0,0 +1,613 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gzip
|
| 2 |
+
import os
|
| 3 |
+
import shutil
|
| 4 |
+
from bz2 import BZ2File
|
| 5 |
+
from importlib import resources
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
from tempfile import NamedTemporaryFile
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
import scipy.sparse as sp
|
| 12 |
+
|
| 13 |
+
import sklearn
|
| 14 |
+
from sklearn.datasets import dump_svmlight_file, load_svmlight_file, load_svmlight_files
|
| 15 |
+
from sklearn.utils._testing import (
|
| 16 |
+
assert_allclose,
|
| 17 |
+
assert_array_almost_equal,
|
| 18 |
+
assert_array_equal,
|
| 19 |
+
create_memmap_backed_data,
|
| 20 |
+
)
|
| 21 |
+
from sklearn.utils.fixes import CSR_CONTAINERS
|
| 22 |
+
|
| 23 |
+
TEST_DATA_MODULE = "sklearn.datasets.tests.data"
|
| 24 |
+
datafile = "svmlight_classification.txt"
|
| 25 |
+
multifile = "svmlight_multilabel.txt"
|
| 26 |
+
invalidfile = "svmlight_invalid.txt"
|
| 27 |
+
invalidfile2 = "svmlight_invalid_order.txt"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _svmlight_local_test_file_path(filename):
    """Return a traversable path to ``filename`` inside the test data module."""
    data_dir = resources.files(TEST_DATA_MODULE)
    return data_dir / filename
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _load_svmlight_local_test_file(filename, **kwargs):
    """Open packaged test resource ``filename`` via ``importlib.resources``
    and parse it with ``load_svmlight_file``, forwarding ``kwargs``."""
    with _svmlight_local_test_file_path(filename).open("rb") as stream:
        return load_svmlight_file(stream, **kwargs)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_load_svmlight_file():
|
| 44 |
+
X, y = _load_svmlight_local_test_file(datafile)
|
| 45 |
+
|
| 46 |
+
# test X's shape
|
| 47 |
+
assert X.indptr.shape[0] == 7
|
| 48 |
+
assert X.shape[0] == 6
|
| 49 |
+
assert X.shape[1] == 21
|
| 50 |
+
assert y.shape[0] == 6
|
| 51 |
+
|
| 52 |
+
# test X's non-zero values
|
| 53 |
+
for i, j, val in (
|
| 54 |
+
(0, 2, 2.5),
|
| 55 |
+
(0, 10, -5.2),
|
| 56 |
+
(0, 15, 1.5),
|
| 57 |
+
(1, 5, 1.0),
|
| 58 |
+
(1, 12, -3),
|
| 59 |
+
(2, 20, 27),
|
| 60 |
+
):
|
| 61 |
+
assert X[i, j] == val
|
| 62 |
+
|
| 63 |
+
# tests X's zero values
|
| 64 |
+
assert X[0, 3] == 0
|
| 65 |
+
assert X[0, 5] == 0
|
| 66 |
+
assert X[1, 8] == 0
|
| 67 |
+
assert X[1, 16] == 0
|
| 68 |
+
assert X[2, 18] == 0
|
| 69 |
+
|
| 70 |
+
# test can change X's values
|
| 71 |
+
X[0, 2] *= 2
|
| 72 |
+
assert X[0, 2] == 5
|
| 73 |
+
|
| 74 |
+
# test y
|
| 75 |
+
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def test_load_svmlight_file_fd():
|
| 79 |
+
# test loading from file descriptor
|
| 80 |
+
|
| 81 |
+
# GH20081: testing equality between path-based and
|
| 82 |
+
# fd-based load_svmlight_file
|
| 83 |
+
|
| 84 |
+
data_path = resources.files(TEST_DATA_MODULE) / datafile
|
| 85 |
+
data_path = str(data_path)
|
| 86 |
+
X1, y1 = load_svmlight_file(data_path)
|
| 87 |
+
|
| 88 |
+
fd = os.open(data_path, os.O_RDONLY)
|
| 89 |
+
try:
|
| 90 |
+
X2, y2 = load_svmlight_file(fd)
|
| 91 |
+
assert_array_almost_equal(X1.data, X2.data)
|
| 92 |
+
assert_array_almost_equal(y1, y2)
|
| 93 |
+
finally:
|
| 94 |
+
os.close(fd)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def test_load_svmlight_pathlib():
|
| 98 |
+
# test loading from file descriptor
|
| 99 |
+
data_path = _svmlight_local_test_file_path(datafile)
|
| 100 |
+
X1, y1 = load_svmlight_file(str(data_path))
|
| 101 |
+
X2, y2 = load_svmlight_file(data_path)
|
| 102 |
+
|
| 103 |
+
assert_allclose(X1.data, X2.data)
|
| 104 |
+
assert_allclose(y1, y2)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_load_svmlight_file_multilabel():
|
| 108 |
+
X, y = _load_svmlight_local_test_file(multifile, multilabel=True)
|
| 109 |
+
assert y == [(0, 1), (2,), (), (1, 2)]
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def test_load_svmlight_files():
|
| 113 |
+
data_path = _svmlight_local_test_file_path(datafile)
|
| 114 |
+
X_train, y_train, X_test, y_test = load_svmlight_files(
|
| 115 |
+
[str(data_path)] * 2, dtype=np.float32
|
| 116 |
+
)
|
| 117 |
+
assert_array_equal(X_train.toarray(), X_test.toarray())
|
| 118 |
+
assert_array_almost_equal(y_train, y_test)
|
| 119 |
+
assert X_train.dtype == np.float32
|
| 120 |
+
assert X_test.dtype == np.float32
|
| 121 |
+
|
| 122 |
+
X1, y1, X2, y2, X3, y3 = load_svmlight_files([str(data_path)] * 3, dtype=np.float64)
|
| 123 |
+
assert X1.dtype == X2.dtype
|
| 124 |
+
assert X2.dtype == X3.dtype
|
| 125 |
+
assert X3.dtype == np.float64
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def test_load_svmlight_file_n_features():
|
| 129 |
+
X, y = _load_svmlight_local_test_file(datafile, n_features=22)
|
| 130 |
+
|
| 131 |
+
# test X'shape
|
| 132 |
+
assert X.indptr.shape[0] == 7
|
| 133 |
+
assert X.shape[0] == 6
|
| 134 |
+
assert X.shape[1] == 22
|
| 135 |
+
|
| 136 |
+
# test X's non-zero values
|
| 137 |
+
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)):
|
| 138 |
+
assert X[i, j] == val
|
| 139 |
+
|
| 140 |
+
# 21 features in file
|
| 141 |
+
with pytest.raises(ValueError):
|
| 142 |
+
_load_svmlight_local_test_file(datafile, n_features=20)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def test_load_compressed():
|
| 146 |
+
X, y = _load_svmlight_local_test_file(datafile)
|
| 147 |
+
|
| 148 |
+
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
|
| 149 |
+
tmp.close() # necessary under windows
|
| 150 |
+
with _svmlight_local_test_file_path(datafile).open("rb") as f:
|
| 151 |
+
with gzip.open(tmp.name, "wb") as fh_out:
|
| 152 |
+
shutil.copyfileobj(f, fh_out)
|
| 153 |
+
Xgz, ygz = load_svmlight_file(tmp.name)
|
| 154 |
+
# because we "close" it manually and write to it,
|
| 155 |
+
# we need to remove it manually.
|
| 156 |
+
os.remove(tmp.name)
|
| 157 |
+
assert_array_almost_equal(X.toarray(), Xgz.toarray())
|
| 158 |
+
assert_array_almost_equal(y, ygz)
|
| 159 |
+
|
| 160 |
+
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
|
| 161 |
+
tmp.close() # necessary under windows
|
| 162 |
+
with _svmlight_local_test_file_path(datafile).open("rb") as f:
|
| 163 |
+
with BZ2File(tmp.name, "wb") as fh_out:
|
| 164 |
+
shutil.copyfileobj(f, fh_out)
|
| 165 |
+
Xbz, ybz = load_svmlight_file(tmp.name)
|
| 166 |
+
# because we "close" it manually and write to it,
|
| 167 |
+
# we need to remove it manually.
|
| 168 |
+
os.remove(tmp.name)
|
| 169 |
+
assert_array_almost_equal(X.toarray(), Xbz.toarray())
|
| 170 |
+
assert_array_almost_equal(y, ybz)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def test_load_invalid_file():
|
| 174 |
+
with pytest.raises(ValueError):
|
| 175 |
+
_load_svmlight_local_test_file(invalidfile)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def test_load_invalid_order_file():
|
| 179 |
+
with pytest.raises(ValueError):
|
| 180 |
+
_load_svmlight_local_test_file(invalidfile2)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def test_load_zero_based():
|
| 184 |
+
f = BytesIO(b"-1 4:1.\n1 0:1\n")
|
| 185 |
+
with pytest.raises(ValueError):
|
| 186 |
+
load_svmlight_file(f, zero_based=False)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def test_load_zero_based_auto():
|
| 190 |
+
data1 = b"-1 1:1 2:2 3:3\n"
|
| 191 |
+
data2 = b"-1 0:0 1:1\n"
|
| 192 |
+
|
| 193 |
+
f1 = BytesIO(data1)
|
| 194 |
+
X, y = load_svmlight_file(f1, zero_based="auto")
|
| 195 |
+
assert X.shape == (1, 3)
|
| 196 |
+
|
| 197 |
+
f1 = BytesIO(data1)
|
| 198 |
+
f2 = BytesIO(data2)
|
| 199 |
+
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
|
| 200 |
+
assert X1.shape == (1, 4)
|
| 201 |
+
assert X2.shape == (1, 4)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def test_load_with_qid():
|
| 205 |
+
# load svmfile with qid attribute
|
| 206 |
+
data = b"""
|
| 207 |
+
3 qid:1 1:0.53 2:0.12
|
| 208 |
+
2 qid:1 1:0.13 2:0.1
|
| 209 |
+
7 qid:2 1:0.87 2:0.12"""
|
| 210 |
+
X, y = load_svmlight_file(BytesIO(data), query_id=False)
|
| 211 |
+
assert_array_equal(y, [3, 2, 7])
|
| 212 |
+
assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
|
| 213 |
+
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
|
| 214 |
+
res2 = load_svmlight_file(BytesIO(data), query_id=True)
|
| 215 |
+
for X, y, qid in (res1, res2):
|
| 216 |
+
assert_array_equal(y, [3, 2, 7])
|
| 217 |
+
assert_array_equal(qid, [1, 1, 2])
|
| 218 |
+
assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@pytest.mark.skip(
|
| 222 |
+
"testing the overflow of 32 bit sparse indexing requires a large amount of memory"
|
| 223 |
+
)
|
| 224 |
+
def test_load_large_qid():
|
| 225 |
+
"""
|
| 226 |
+
load large libsvm / svmlight file with qid attribute. Tests 64-bit query ID
|
| 227 |
+
"""
|
| 228 |
+
data = b"\n".join(
|
| 229 |
+
(
|
| 230 |
+
"3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode()
|
| 231 |
+
for i in range(1, 40 * 1000 * 1000)
|
| 232 |
+
)
|
| 233 |
+
)
|
| 234 |
+
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
|
| 235 |
+
assert_array_equal(y[-4:], [3, 2, 3, 2])
|
| 236 |
+
assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000))
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def test_load_invalid_file2():
|
| 240 |
+
with pytest.raises(ValueError):
|
| 241 |
+
data_path = _svmlight_local_test_file_path(datafile)
|
| 242 |
+
invalid_path = _svmlight_local_test_file_path(invalidfile)
|
| 243 |
+
load_svmlight_files([str(data_path), str(invalid_path), str(data_path)])
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def test_not_a_filename():
|
| 247 |
+
# in python 3 integers are valid file opening arguments (taken as unix
|
| 248 |
+
# file descriptors)
|
| 249 |
+
with pytest.raises(TypeError):
|
| 250 |
+
load_svmlight_file(0.42)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def test_invalid_filename():
|
| 254 |
+
with pytest.raises(OSError):
|
| 255 |
+
load_svmlight_file("trou pic nic douille")
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 259 |
+
def test_dump(csr_container):
|
| 260 |
+
X_sparse, y_dense = _load_svmlight_local_test_file(datafile)
|
| 261 |
+
X_dense = X_sparse.toarray()
|
| 262 |
+
y_sparse = csr_container(np.atleast_2d(y_dense))
|
| 263 |
+
|
| 264 |
+
# slicing a csr_matrix can unsort its .indices, so test that we sort
|
| 265 |
+
# those correctly
|
| 266 |
+
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
|
| 267 |
+
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
|
| 268 |
+
|
| 269 |
+
for X in (X_sparse, X_dense, X_sliced):
|
| 270 |
+
for y in (y_sparse, y_dense, y_sliced):
|
| 271 |
+
for zero_based in (True, False):
|
| 272 |
+
for dtype in [np.float32, np.float64, np.int32, np.int64]:
|
| 273 |
+
f = BytesIO()
|
| 274 |
+
# we need to pass a comment to get the version info in;
|
| 275 |
+
# LibSVM doesn't grok comments so they're not put in by
|
| 276 |
+
# default anymore.
|
| 277 |
+
|
| 278 |
+
if sp.issparse(y) and y.shape[0] == 1:
|
| 279 |
+
# make sure y's shape is: (n_samples, n_labels)
|
| 280 |
+
# when it is sparse
|
| 281 |
+
y = y.T
|
| 282 |
+
|
| 283 |
+
# Note: with dtype=np.int32 we are performing unsafe casts,
|
| 284 |
+
# where X.astype(dtype) overflows. The result is
|
| 285 |
+
# then platform dependent and X_dense.astype(dtype) may be
|
| 286 |
+
# different from X_sparse.astype(dtype).asarray().
|
| 287 |
+
X_input = X.astype(dtype)
|
| 288 |
+
|
| 289 |
+
dump_svmlight_file(
|
| 290 |
+
X_input, y, f, comment="test", zero_based=zero_based
|
| 291 |
+
)
|
| 292 |
+
f.seek(0)
|
| 293 |
+
|
| 294 |
+
comment = f.readline()
|
| 295 |
+
comment = str(comment, "utf-8")
|
| 296 |
+
|
| 297 |
+
assert "scikit-learn %s" % sklearn.__version__ in comment
|
| 298 |
+
|
| 299 |
+
comment = f.readline()
|
| 300 |
+
comment = str(comment, "utf-8")
|
| 301 |
+
|
| 302 |
+
assert ["one", "zero"][zero_based] + "-based" in comment
|
| 303 |
+
|
| 304 |
+
X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based)
|
| 305 |
+
assert X2.dtype == dtype
|
| 306 |
+
assert_array_equal(X2.sorted_indices().indices, X2.indices)
|
| 307 |
+
|
| 308 |
+
X2_dense = X2.toarray()
|
| 309 |
+
if sp.issparse(X_input):
|
| 310 |
+
X_input_dense = X_input.toarray()
|
| 311 |
+
else:
|
| 312 |
+
X_input_dense = X_input
|
| 313 |
+
|
| 314 |
+
if dtype == np.float32:
|
| 315 |
+
# allow a rounding error at the last decimal place
|
| 316 |
+
assert_array_almost_equal(X_input_dense, X2_dense, 4)
|
| 317 |
+
assert_array_almost_equal(
|
| 318 |
+
y_dense.astype(dtype, copy=False), y2, 4
|
| 319 |
+
)
|
| 320 |
+
else:
|
| 321 |
+
# allow a rounding error at the last decimal place
|
| 322 |
+
assert_array_almost_equal(X_input_dense, X2_dense, 15)
|
| 323 |
+
assert_array_almost_equal(
|
| 324 |
+
y_dense.astype(dtype, copy=False), y2, 15
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 329 |
+
def test_dump_multilabel(csr_container):
|
| 330 |
+
X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]]
|
| 331 |
+
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
|
| 332 |
+
y_sparse = csr_container(y_dense)
|
| 333 |
+
for y in [y_dense, y_sparse]:
|
| 334 |
+
f = BytesIO()
|
| 335 |
+
dump_svmlight_file(X, y, f, multilabel=True)
|
| 336 |
+
f.seek(0)
|
| 337 |
+
# make sure it dumps multilabel correctly
|
| 338 |
+
assert f.readline() == b"1 0:1 2:3 4:5\n"
|
| 339 |
+
assert f.readline() == b"0,2 \n"
|
| 340 |
+
assert f.readline() == b"0,1 1:5 3:1\n"
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def test_dump_concise():
|
| 344 |
+
one = 1
|
| 345 |
+
two = 2.1
|
| 346 |
+
three = 3.01
|
| 347 |
+
exact = 1.000000000000001
|
| 348 |
+
# loses the last decimal place
|
| 349 |
+
almost = 1.0000000000000001
|
| 350 |
+
X = [
|
| 351 |
+
[one, two, three, exact, almost],
|
| 352 |
+
[1e9, 2e18, 3e27, 0, 0],
|
| 353 |
+
[0, 0, 0, 0, 0],
|
| 354 |
+
[0, 0, 0, 0, 0],
|
| 355 |
+
[0, 0, 0, 0, 0],
|
| 356 |
+
]
|
| 357 |
+
y = [one, two, three, exact, almost]
|
| 358 |
+
f = BytesIO()
|
| 359 |
+
dump_svmlight_file(X, y, f)
|
| 360 |
+
f.seek(0)
|
| 361 |
+
# make sure it's using the most concise format possible
|
| 362 |
+
assert f.readline() == b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"
|
| 363 |
+
assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n"
|
| 364 |
+
assert f.readline() == b"3.01 \n"
|
| 365 |
+
assert f.readline() == b"1.000000000000001 \n"
|
| 366 |
+
assert f.readline() == b"1 \n"
|
| 367 |
+
f.seek(0)
|
| 368 |
+
# make sure it's correct too :)
|
| 369 |
+
X2, y2 = load_svmlight_file(f)
|
| 370 |
+
assert_array_almost_equal(X, X2.toarray())
|
| 371 |
+
assert_array_almost_equal(y, y2)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def test_dump_comment():
|
| 375 |
+
X, y = _load_svmlight_local_test_file(datafile)
|
| 376 |
+
X = X.toarray()
|
| 377 |
+
|
| 378 |
+
f = BytesIO()
|
| 379 |
+
ascii_comment = "This is a comment\nspanning multiple lines."
|
| 380 |
+
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
|
| 381 |
+
f.seek(0)
|
| 382 |
+
|
| 383 |
+
X2, y2 = load_svmlight_file(f, zero_based=False)
|
| 384 |
+
assert_array_almost_equal(X, X2.toarray())
|
| 385 |
+
assert_array_almost_equal(y, y2)
|
| 386 |
+
|
| 387 |
+
# XXX we have to update this to support Python 3.x
|
| 388 |
+
utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc"
|
| 389 |
+
f = BytesIO()
|
| 390 |
+
with pytest.raises(UnicodeDecodeError):
|
| 391 |
+
dump_svmlight_file(X, y, f, comment=utf8_comment)
|
| 392 |
+
|
| 393 |
+
unicode_comment = utf8_comment.decode("utf-8")
|
| 394 |
+
f = BytesIO()
|
| 395 |
+
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
|
| 396 |
+
f.seek(0)
|
| 397 |
+
|
| 398 |
+
X2, y2 = load_svmlight_file(f, zero_based=False)
|
| 399 |
+
assert_array_almost_equal(X, X2.toarray())
|
| 400 |
+
assert_array_almost_equal(y, y2)
|
| 401 |
+
|
| 402 |
+
f = BytesIO()
|
| 403 |
+
with pytest.raises(ValueError):
|
| 404 |
+
dump_svmlight_file(X, y, f, comment="I've got a \0.")
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def test_dump_invalid():
|
| 408 |
+
X, y = _load_svmlight_local_test_file(datafile)
|
| 409 |
+
|
| 410 |
+
f = BytesIO()
|
| 411 |
+
y2d = [y]
|
| 412 |
+
with pytest.raises(ValueError):
|
| 413 |
+
dump_svmlight_file(X, y2d, f)
|
| 414 |
+
|
| 415 |
+
f = BytesIO()
|
| 416 |
+
with pytest.raises(ValueError):
|
| 417 |
+
dump_svmlight_file(X, y[:-1], f)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def test_dump_query_id():
|
| 421 |
+
# test dumping a file with query_id
|
| 422 |
+
X, y = _load_svmlight_local_test_file(datafile)
|
| 423 |
+
X = X.toarray()
|
| 424 |
+
query_id = np.arange(X.shape[0]) // 2
|
| 425 |
+
f = BytesIO()
|
| 426 |
+
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
|
| 427 |
+
|
| 428 |
+
f.seek(0)
|
| 429 |
+
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
|
| 430 |
+
assert_array_almost_equal(X, X1.toarray())
|
| 431 |
+
assert_array_almost_equal(y, y1)
|
| 432 |
+
assert_array_almost_equal(query_id, query_id1)
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def test_load_with_long_qid():
|
| 436 |
+
# load svmfile with longint qid attribute
|
| 437 |
+
data = b"""
|
| 438 |
+
1 qid:0 0:1 1:2 2:3
|
| 439 |
+
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
|
| 440 |
+
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
|
| 441 |
+
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985"""
|
| 442 |
+
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
|
| 443 |
+
|
| 444 |
+
true_X = [
|
| 445 |
+
[1, 2, 3],
|
| 446 |
+
[1440446648, 72048431380967004, 236784985],
|
| 447 |
+
[1440446648, 72048431380967004, 236784985],
|
| 448 |
+
[1440446648, 72048431380967004, 236784985],
|
| 449 |
+
]
|
| 450 |
+
|
| 451 |
+
true_y = [1, 0, 0, 3]
|
| 452 |
+
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
|
| 453 |
+
assert_array_equal(y, true_y)
|
| 454 |
+
assert_array_equal(X.toarray(), true_X)
|
| 455 |
+
assert_array_equal(qid, trueQID)
|
| 456 |
+
|
| 457 |
+
f = BytesIO()
|
| 458 |
+
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
|
| 459 |
+
f.seek(0)
|
| 460 |
+
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
|
| 461 |
+
assert_array_equal(y, true_y)
|
| 462 |
+
assert_array_equal(X.toarray(), true_X)
|
| 463 |
+
assert_array_equal(qid, trueQID)
|
| 464 |
+
|
| 465 |
+
f.seek(0)
|
| 466 |
+
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
|
| 467 |
+
assert_array_equal(y, true_y)
|
| 468 |
+
assert_array_equal(X.toarray(), true_X)
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 472 |
+
def test_load_zeros(csr_container):
|
| 473 |
+
f = BytesIO()
|
| 474 |
+
true_X = csr_container(np.zeros(shape=(3, 4)))
|
| 475 |
+
true_y = np.array([0, 1, 0])
|
| 476 |
+
dump_svmlight_file(true_X, true_y, f)
|
| 477 |
+
|
| 478 |
+
for zero_based in ["auto", True, False]:
|
| 479 |
+
f.seek(0)
|
| 480 |
+
X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based)
|
| 481 |
+
assert_array_almost_equal(y, true_y)
|
| 482 |
+
assert_array_almost_equal(X.toarray(), true_X.toarray())
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
@pytest.mark.parametrize("sparsity", [0, 0.1, 0.5, 0.99, 1])
|
| 486 |
+
@pytest.mark.parametrize("n_samples", [13, 101])
|
| 487 |
+
@pytest.mark.parametrize("n_features", [2, 7, 41])
|
| 488 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 489 |
+
def test_load_with_offsets(sparsity, n_samples, n_features, csr_container):
|
| 490 |
+
rng = np.random.RandomState(0)
|
| 491 |
+
X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
|
| 492 |
+
if sparsity:
|
| 493 |
+
X[X < sparsity] = 0.0
|
| 494 |
+
X = csr_container(X)
|
| 495 |
+
y = rng.randint(low=0, high=2, size=n_samples)
|
| 496 |
+
|
| 497 |
+
f = BytesIO()
|
| 498 |
+
dump_svmlight_file(X, y, f)
|
| 499 |
+
f.seek(0)
|
| 500 |
+
|
| 501 |
+
size = len(f.getvalue())
|
| 502 |
+
|
| 503 |
+
# put some marks that are likely to happen anywhere in a row
|
| 504 |
+
mark_0 = 0
|
| 505 |
+
mark_1 = size // 3
|
| 506 |
+
length_0 = mark_1 - mark_0
|
| 507 |
+
mark_2 = 4 * size // 5
|
| 508 |
+
length_1 = mark_2 - mark_1
|
| 509 |
+
|
| 510 |
+
# load the original sparse matrix into 3 independent CSR matrices
|
| 511 |
+
X_0, y_0 = load_svmlight_file(
|
| 512 |
+
f, n_features=n_features, offset=mark_0, length=length_0
|
| 513 |
+
)
|
| 514 |
+
X_1, y_1 = load_svmlight_file(
|
| 515 |
+
f, n_features=n_features, offset=mark_1, length=length_1
|
| 516 |
+
)
|
| 517 |
+
X_2, y_2 = load_svmlight_file(f, n_features=n_features, offset=mark_2)
|
| 518 |
+
|
| 519 |
+
y_concat = np.concatenate([y_0, y_1, y_2])
|
| 520 |
+
X_concat = sp.vstack([X_0, X_1, X_2])
|
| 521 |
+
assert_array_almost_equal(y, y_concat)
|
| 522 |
+
assert_array_almost_equal(X.toarray(), X_concat.toarray())
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 526 |
+
def test_load_offset_exhaustive_splits(csr_container):
|
| 527 |
+
rng = np.random.RandomState(0)
|
| 528 |
+
X = np.array(
|
| 529 |
+
[
|
| 530 |
+
[0, 0, 0, 0, 0, 0],
|
| 531 |
+
[1, 2, 3, 4, 0, 6],
|
| 532 |
+
[1, 2, 3, 4, 0, 6],
|
| 533 |
+
[0, 0, 0, 0, 0, 0],
|
| 534 |
+
[1, 0, 3, 0, 0, 0],
|
| 535 |
+
[0, 0, 0, 0, 0, 1],
|
| 536 |
+
[1, 0, 0, 0, 0, 0],
|
| 537 |
+
]
|
| 538 |
+
)
|
| 539 |
+
X = csr_container(X)
|
| 540 |
+
n_samples, n_features = X.shape
|
| 541 |
+
y = rng.randint(low=0, high=2, size=n_samples)
|
| 542 |
+
query_id = np.arange(n_samples) // 2
|
| 543 |
+
|
| 544 |
+
f = BytesIO()
|
| 545 |
+
dump_svmlight_file(X, y, f, query_id=query_id)
|
| 546 |
+
f.seek(0)
|
| 547 |
+
|
| 548 |
+
size = len(f.getvalue())
|
| 549 |
+
|
| 550 |
+
# load the same data in 2 parts with all the possible byte offsets to
|
| 551 |
+
# locate the split so has to test for particular boundary cases
|
| 552 |
+
for mark in range(size):
|
| 553 |
+
f.seek(0)
|
| 554 |
+
X_0, y_0, q_0 = load_svmlight_file(
|
| 555 |
+
f, n_features=n_features, query_id=True, offset=0, length=mark
|
| 556 |
+
)
|
| 557 |
+
X_1, y_1, q_1 = load_svmlight_file(
|
| 558 |
+
f, n_features=n_features, query_id=True, offset=mark, length=-1
|
| 559 |
+
)
|
| 560 |
+
q_concat = np.concatenate([q_0, q_1])
|
| 561 |
+
y_concat = np.concatenate([y_0, y_1])
|
| 562 |
+
X_concat = sp.vstack([X_0, X_1])
|
| 563 |
+
assert_array_almost_equal(y, y_concat)
|
| 564 |
+
assert_array_equal(query_id, q_concat)
|
| 565 |
+
assert_array_almost_equal(X.toarray(), X_concat.toarray())
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def test_load_with_offsets_error():
|
| 569 |
+
with pytest.raises(ValueError, match="n_features is required"):
|
| 570 |
+
_load_svmlight_local_test_file(datafile, offset=3, length=3)
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
|
| 574 |
+
def test_multilabel_y_explicit_zeros(tmp_path, csr_container):
|
| 575 |
+
"""
|
| 576 |
+
Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
|
| 577 |
+
0) then those explicit zeros are not encoded.
|
| 578 |
+
"""
|
| 579 |
+
save_path = str(tmp_path / "svm_explicit_zero")
|
| 580 |
+
rng = np.random.RandomState(42)
|
| 581 |
+
X = rng.randn(3, 5).astype(np.float64)
|
| 582 |
+
indptr = np.array([0, 2, 3, 6])
|
| 583 |
+
indices = np.array([0, 2, 2, 0, 1, 2])
|
| 584 |
+
# The first and last element are explicit zeros.
|
| 585 |
+
data = np.array([0, 1, 1, 1, 1, 0])
|
| 586 |
+
y = csr_container((data, indices, indptr), shape=(3, 3))
|
| 587 |
+
# y as a dense array would look like
|
| 588 |
+
# [[0, 0, 1],
|
| 589 |
+
# [0, 0, 1],
|
| 590 |
+
# [1, 1, 0]]
|
| 591 |
+
|
| 592 |
+
dump_svmlight_file(X, y, save_path, multilabel=True)
|
| 593 |
+
|
| 594 |
+
_, y_load = load_svmlight_file(save_path, multilabel=True)
|
| 595 |
+
y_true = [(2.0,), (2.0,), (0.0, 1.0)]
|
| 596 |
+
assert y_load == y_true
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def test_dump_read_only(tmp_path):
|
| 600 |
+
"""Ensure that there is no ValueError when dumping a read-only `X`.
|
| 601 |
+
|
| 602 |
+
Non-regression test for:
|
| 603 |
+
https://github.com/scikit-learn/scikit-learn/issues/28026
|
| 604 |
+
"""
|
| 605 |
+
rng = np.random.RandomState(42)
|
| 606 |
+
X = rng.randn(5, 2)
|
| 607 |
+
y = rng.randn(5)
|
| 608 |
+
|
| 609 |
+
# Convert to memmap-backed which are read-only
|
| 610 |
+
X, y = create_memmap_backed_data([X, y])
|
| 611 |
+
|
| 612 |
+
save_path = str(tmp_path / "svm_read_only")
|
| 613 |
+
dump_svmlight_file(X, y, save_path)
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _autocast_to_reduced_precision {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, bool, bool, at::ScalarType, at::ScalarType);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_autocast_to_reduced_precision";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)";
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _chunk_cat(at::TensorList tensors, int64_t dim, int64_t num_chunks);
|
| 20 |
+
TORCH_API at::Tensor & _chunk_cat_out(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out);
|
| 21 |
+
TORCH_API at::Tensor _chunk_cat_cuda(at::TensorList tensors, int64_t dim, int64_t num_chunks);
|
| 22 |
+
TORCH_API at::Tensor & _chunk_cat_out_cuda(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _dirichlet_grad {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_dirichlet_grad";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor";
|
| 24 |
+
static at::Tensor call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _dirichlet_grad_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
static constexpr const char* name = "aten::_dirichlet_grad";
|
| 33 |
+
static constexpr const char* overload_name = "out";
|
| 34 |
+
static constexpr const char* schema_str = "_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)";
|
| 35 |
+
static at::Tensor & call(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const ::std::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_cos(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_cos_out(at::TensorList out, at::TensorList self);
|
| 22 |
+
TORCH_API void _foreach_cos_outf(at::TensorList self, at::TensorList out);
|
| 23 |
+
TORCH_API void _foreach_cos_(at::TensorList self);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_log2 {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_foreach_log2";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_foreach_log2(Tensor[] self) -> Tensor[]";
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_log2_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
static constexpr const char* name = "aten::_foreach_log2_";
|
| 33 |
+
static constexpr const char* overload_name = "";
|
| 34 |
+
static constexpr const char* schema_str = "_foreach_log2_(Tensor(a!)[] self) -> ()";
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_log2_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
static constexpr const char* name = "aten::_foreach_log2";
|
| 44 |
+
static constexpr const char* overload_name = "out";
|
| 45 |
+
static constexpr const char* schema_str = "_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()";
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_sqrt {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_foreach_sqrt";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_foreach_sqrt(Tensor[] self) -> Tensor[]";
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_sqrt_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
static constexpr const char* name = "aten::_foreach_sqrt_";
|
| 33 |
+
static constexpr const char* overload_name = "";
|
| 34 |
+
static constexpr const char* schema_str = "_foreach_sqrt_(Tensor(a!)[] self) -> ()";
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_sqrt_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
static constexpr const char* name = "aten::_foreach_sqrt";
|
| 44 |
+
static constexpr const char* overload_name = "out";
|
| 45 |
+
static constexpr const char* schema_str = "_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()";
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_tanh_native.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_tanh_slow(at::TensorList self);
|
| 20 |
+
TORCH_API void _foreach_tanh_out(at::TensorList self, at::TensorList out);
|
| 21 |
+
TORCH_API void foreach_tensor_tanh_slow_(at::TensorList self);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> foreach_tensor_tanh_cuda(at::TensorList self);
|
| 23 |
+
TORCH_API void foreach_tensor_tanh_cuda_(at::TensorList self);
|
| 24 |
+
} // namespace native
|
| 25 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _functional_assert_scalar(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_native.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API int64_t _fused_sdp_choice_cpp(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
|
| 20 |
+
TORCH_API int64_t _fused_sdp_choice_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
|
| 21 |
+
TORCH_API int64_t _fused_sdp_choice_meta(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
|
| 22 |
+
} // namespace native
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={});
|
| 21 |
+
TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeimplicitautograd
|
| 24 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_semi_structured_linear_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _sparse_semi_structured_linear {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, ::std::optional<c10::string_view>, ::std::optional<at::ScalarType>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_sparse_semi_structured_linear";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor";
|
| 24 |
+
static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation, ::std::optional<at::ScalarType> out_dtype);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const ::std::optional<at::Tensor> & bias, ::std::optional<c10::string_view> activation, ::std::optional<at::ScalarType> out_dtype);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_filled_intlist_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _test_optional_filled_intlist_out(at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends);
|
| 21 |
+
TORCH_API at::Tensor & _test_optional_filled_intlist_outf(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
|
| 27 |
+
return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
|
| 31 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
|
| 32 |
+
return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
|
| 33 |
+
}
|
| 34 |
+
// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
|
| 35 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_outf(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
|
| 36 |
+
return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_native.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/_upsample_nearest_exact3d_backward_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured__upsample_nearest_exact3d_backward_out_cpu : public at::meta::structured__upsample_nearest_exact3d_backward {
|
| 20 |
+
void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
|
| 21 |
+
};
|
| 22 |
+
struct TORCH_API structured__upsample_nearest_exact3d_backward_out_cuda : public at::meta::structured__upsample_nearest_exact3d_backward {
|
| 23 |
+
void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
|
| 24 |
+
};
|
| 25 |
+
} // namespace native
|
| 26 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace meta
|
| 28 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> align_tensors(at::TensorList tensors);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, ::std::optional<int64_t> divisor_override=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor & avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, ::std::optional<int64_t> divisor_override=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/binary_cross_entropy_with_logits_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
|
| 26 |
+
inline at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
|
| 27 |
+
return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & binary_cross_entropy_with_logits_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
|
| 32 |
+
return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & binary_cross_entropy_with_logits_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) {
|
| 36 |
+
return at::_ops::binary_cross_entropy_with_logits_out::call(self, target, weight, pos_weight, reduction, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_dense_backward_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & embedding_dense_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
|
| 21 |
+
TORCH_API at::Tensor & embedding_dense_backward_outf(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out);
|
| 22 |
+
TORCH_API at::Tensor & embedding_dense_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq);
|
| 23 |
+
TORCH_API at::Tensor & embedding_dense_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, at::Tensor & out);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional<c10::string_view> norm=::std::nullopt);
|
| 21 |
+
TORCH_API at::Tensor fft_fftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional<c10::string_view> norm=::std::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional<c10::string_view> norm=::std::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & fft_fftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional<c10::string_view> norm=::std::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & fft_fftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace compositeimplicitautograd
|
| 28 |
+
} // namespace at
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API fft_hfft {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::fft_hfft";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor";
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API fft_hfft_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
static constexpr const char* name = "aten::fft_hfft";
|
| 33 |
+
static constexpr const char* overload_name = "out";
|
| 34 |
+
static constexpr const char* schema_str = "fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)";
|
| 35 |
+
static at::Tensor & call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
phi4/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor fft_ifft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
|
| 20 |
+
TORCH_API at::Tensor & fft_ifft_symint_out(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|