repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_lfw.py | sklearn/datasets/tests/test_lfw.py | """This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
from functools import partial
import numpy as np
import pytest
from sklearn.datasets import fetch_lfw_pairs, fetch_lfw_people
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.utils._testing import assert_array_equal
# Fabricated person names used to build the fake LFW directory tree; the
# underscores mirror the on-disk layout of the real LFW dataset folders.
FAKE_NAMES = [
    "Abdelatif_Smith",
    "Abhati_Kepler",
    "Camara_Alvaro",
    "Chen_Dupont",
    "John_Lee",
    "Lin_Bauman",
    "Onur_Lopez",
]
@pytest.fixture(scope="module")
def mock_empty_data_home(tmp_path_factory):
    """Yield a data home directory that contains no dataset at all."""
    yield tmp_path_factory.mktemp("scikit_learn_empty_test")
@pytest.fixture(scope="module")
def mock_data_home(tmp_path_factory):
    """Test fixture run once and common to all tests of this module.

    Builds a fake LFW-style ``data_home``: random 250x250 JPEGs for each
    fake person plus pairs metadata files. Both RNGs are seeded with 42 and
    later tests assert exact target arrays derived from these streams, so
    the order and number of RNG calls below must not change.
    """
    Image = pytest.importorskip("PIL.Image")
    data_dir = tmp_path_factory.mktemp("scikit_learn_lfw_test")
    lfw_home = data_dir / "lfw_home"
    lfw_home.mkdir(parents=True, exist_ok=True)
    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)
    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = lfw_home / "lfw_funneled" / name
        folder_name.mkdir(parents=True, exist_ok=True)
        # each person gets between 1 and 4 face images
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = folder_name / (name + "_%04d.jpg" % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            img = Image.fromarray(uniface.astype(np.uint8))
            img.save(file_path)
    # add some random file pollution to test robustness
    (lfw_home / "lfw_funneled" / ".test.swp").write_bytes(
        b"Text file to be ignored by the dataset loader."
    )
    # generate some pairing metadata files using the same format as LFW
    with open(lfw_home / "pairsDevTrain.txt", "wb") as f:
        f.write(b"10\n")
        # 5 "same person" pairs: two distinct image indices of one person
        more_than_two = [name for name, count in counts.items() if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(("%s\t%d\t%d\n" % (name, first, second)).encode())
        # 5 "different persons" pairs: one image index per distinct person
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = np_rng.choice(np.arange(counts[first_name]))
            second_index = np_rng.choice(np.arange(counts[second_name]))
            f.write(
                (
                    "%s\t%d\t%s\t%d\n"
                    % (first_name, first_index, second_name, second_index)
                ).encode()
            )
    (lfw_home / "pairsDevTest.txt").write_bytes(
        b"Fake place holder that won't be tested"
    )
    (lfw_home / "pairs.txt").write_bytes(b"Fake place holder that won't be tested")
    yield data_dir
def test_load_empty_lfw_people(mock_empty_data_home):
    """An empty data home with downloads disabled must raise OSError."""
    with pytest.raises(OSError):
        fetch_lfw_people(
            data_home=mock_empty_data_home, download_if_missing=False
        )
def test_load_fake_lfw_people(mock_data_home):
    """Check shapes, targets and class names of fetch_lfw_people on fake data."""
    lfw_people = fetch_lfw_people(
        data_home=mock_data_home, min_faces_per_person=3, download_if_missing=False
    )
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert lfw_people.images.shape == (10, 62, 47)
    assert lfw_people.data.shape == (10, 2914)
    # the target is an array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
    # names of the persons can be found using the target_names array
    expected_classes = ["Abdelatif Smith", "Abhati Kepler", "Onur Lopez"]
    assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
    lfw_people = fetch_lfw_people(
        data_home=mock_data_home,
        resize=None,
        slice_=None,
        color=True,
        download_if_missing=False,
    )
    assert lfw_people.images.shape == (17, 250, 250, 3)
    assert lfw_people.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
    # the ids and class names are the same as previously
    assert_array_equal(
        lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2]
    )
    assert_array_equal(
        lfw_people.target_names,
        [
            "Abdelatif Smith",
            "Abhati Kepler",
            "Camara Alvaro",
            "Chen Dupont",
            "John Lee",
            "Lin Bauman",
            "Onur Lopez",
        ],
    )
    # test return_X_y option
    fetch_func = partial(
        fetch_lfw_people,
        data_home=mock_data_home,
        resize=None,
        slice_=None,
        color=True,
        download_if_missing=False,
    )
    check_return_X_y(lfw_people, fetch_func)
def test_load_fake_lfw_people_too_restrictive(mock_data_home):
    """A min_faces_per_person above every fake count must raise ValueError."""
    fetch_kwargs = dict(
        data_home=mock_data_home,
        min_faces_per_person=100,
        download_if_missing=False,
    )
    with pytest.raises(ValueError):
        fetch_lfw_people(**fetch_kwargs)
def test_load_empty_lfw_pairs(mock_empty_data_home):
    """An empty data home with downloads disabled must raise OSError."""
    with pytest.raises(OSError):
        fetch_lfw_pairs(
            data_home=mock_empty_data_home, download_if_missing=False
        )
def test_load_fake_lfw_pairs(mock_data_home):
    """Check shapes, targets and class names of fetch_lfw_pairs on fake data."""
    lfw_pairs_train = fetch_lfw_pairs(
        data_home=mock_data_home, download_if_missing=False
    )
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert lfw_pairs_train.pairs.shape == (10, 2, 62, 47)
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ["Different persons", "Same person"]
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
    lfw_pairs_train = fetch_lfw_pairs(
        data_home=mock_data_home,
        resize=None,
        slice_=None,
        color=True,
        download_if_missing=False,
    )
    assert lfw_pairs_train.pairs.shape == (10, 2, 250, 250, 3)
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    assert lfw_pairs_train.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
def test_fetch_lfw_people_internal_cropping(mock_data_home):
    """Check that we properly crop the images.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/24942
    """
    # If cropping was not done properly and we don't resize the images, the images would
    # have their original size (250x250) and the image would not fit in the NumPy array
    # pre-allocated based on `slice_` parameter.
    slice_ = (slice(70, 195), slice(78, 172))
    lfw = fetch_lfw_people(
        data_home=mock_data_home,
        min_faces_per_person=3,
        download_if_missing=False,
        resize=None,
        slice_=slice_,
    )
    # each image must match the slice extent exactly (125 x 94 here)
    assert lfw.images[0].shape == (
        slice_[0].stop - slice_[0].start,
        slice_[1].stop - slice_[1].start,
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_olivetti_faces.py | sklearn/datasets/tests/test_olivetti_faces.py | """Test Olivetti faces fetcher, if the data is available,
or if specifically requested via environment variable
(e.g. for CI jobs)."""
import numpy as np
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.utils import Bunch
from sklearn.utils._testing import assert_array_equal
def test_olivetti_faces(fetch_olivetti_faces_fxt):
    """Smoke-test the Olivetti faces fetcher: keys, shapes, targets, DESCR."""
    data = fetch_olivetti_faces_fxt(shuffle=True, random_state=0)
    assert isinstance(data, Bunch)
    for key in ("data", "images", "target", "DESCR"):
        assert key in data.keys()
    n_samples, image_side, n_classes = 400, 64, 40
    assert data.data.shape == (n_samples, image_side * image_side)
    assert data.images.shape == (n_samples, image_side, image_side)
    assert data.target.shape == (n_samples,)
    assert_array_equal(np.unique(np.sort(data.target)), np.arange(n_classes))
    assert data.DESCR.startswith(".. _olivetti_faces_dataset:")
    # test the return_X_y option
    check_return_X_y(data, fetch_olivetti_faces_fxt)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_california_housing.py | sklearn/datasets/tests/test_california_housing.py | """Test the california_housing loader, if the data is available,
or if specifically requested via environment variable
(e.g. for CI jobs)."""
from functools import partial
import pytest
from sklearn.datasets.tests.test_common import check_return_X_y
def test_fetch(fetch_california_housing_fxt):
    """Check shapes and DESCR of the fetched California housing dataset."""
    data = fetch_california_housing_fxt()
    assert data.data.shape == (20640, 8)
    assert data.target.shape == (20640,)
    assert data.DESCR.startswith(".. _california_housing_dataset:")
    # test return_X_y option
    check_return_X_y(data, partial(fetch_california_housing_fxt))
def test_fetch_asframe(fetch_california_housing_fxt):
    """Check the pandas output of fetch_california_housing(as_frame=True)."""
    pd = pytest.importorskip("pandas")
    bunch = fetch_california_housing_fxt(as_frame=True)
    # Assert the attribute exists *before* dereferencing it (the original
    # accessed bunch.frame first, which would raise AttributeError and
    # defeat the hasattr check), and drop the unidiomatic `is True`
    # comparison on the bool returned by hasattr.
    assert hasattr(bunch, "frame")
    frame = bunch.frame
    # 8 feature columns + 1 target column
    assert frame.shape == (20640, 9)
    assert isinstance(bunch.data, pd.DataFrame)
    assert isinstance(bunch.target, pd.Series)
def test_pandas_dependency_message(fetch_california_housing_fxt, hide_available_pandas):
    """Check that pandas is imported lazily and that a helpful ImportError
    is raised when pandas is missing."""
    msg = "fetch_california_housing with as_frame=True requires pandas"
    with pytest.raises(ImportError, match=msg):
        fetch_california_housing_fxt(as_frame=True)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_openml.py | sklearn/datasets/tests/test_openml.py | """Test the openml loader."""
import gzip
import json
import os
import re
from functools import partial
from importlib import resources
from io import BytesIO
from urllib.error import HTTPError
import numpy as np
import pytest
import scipy.sparse
import sklearn
from sklearn import config_context
from sklearn.datasets import fetch_openml as fetch_openml_orig
from sklearn.datasets._openml import (
_get_local_path,
_open_openml_url,
_retry_with_clean_cache,
)
from sklearn.utils import Bunch
from sklearn.utils._optional_dependencies import check_pandas_support
from sklearn.utils._testing import (
SkipTest,
assert_allclose,
assert_array_equal,
)
# Resource package holding the gzipped mock OpenML payloads used by the tests.
OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml"
# if True, urlopen will be monkey patched to only use local files
test_offline = True
# NOTE(review): this template is not referenced anywhere in this chunk --
# presumably a relative path pattern for mocked download files; confirm usage
# elsewhere before relying on (or removing) it.
_MONKEY_PATCH_LOCAL_OPENML_PATH = "data/v1/download/{}"
class _MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {"Content-Encoding": "gzip"}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
# Disable the disk-based cache when testing `fetch_openml`:
# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent
# with the version on openml.org. If one were to load the dataset outside of
# the tests, it may result in data that does not represent openml.org.
# All tests in this module call this wrapper instead of the public function.
fetch_openml = partial(fetch_openml_orig, data_home=None)
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
    """Patch ``sklearn.datasets._openml.urlopen`` to serve local mock files.

    Parameters
    ----------
    context : pytest MonkeyPatch
        Used to install the replacement ``urlopen`` via ``setattr``.
    data_id : int
        OpenML dataset id; selects the ``id_{data_id}`` mock data module.
    gzip_response : bool
        When False, responses are always served decompressed even if the
        client sent an ``Accept-encoding: gzip`` header.
    """
    # monkey patches the urlopen function. Important note: Do NOT use this
    # in combination with a regular cache directory, as the files that are
    # stored as cache should not be mixed up with real openml datasets
    url_prefix_data_description = "https://api.openml.org/api/v1/json/data/"
    url_prefix_data_features = "https://api.openml.org/api/v1/json/data/features/"
    url_prefix_download_data = "https://www.openml.org/data/v1/download"
    url_prefix_data_list = "https://api.openml.org/api/v1/json/data/list/"
    path_suffix = ".gz"
    read_fn = gzip.open
    data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"

    def _file_name(url, suffix):
        """Map a mocked URL to its on-disk mock file name."""
        output = (
            re.sub(r"\W", "-", url[len("https://api.openml.org/") :])
            + suffix
            + path_suffix
        )
        # Shorten the filenames to have better compatibility with windows 10
        # and filenames > 260 characters
        return (
            output.replace("-json-data-list", "-jdl")
            .replace("-json-data-features", "-jdf")
            .replace("-json-data-qualities", "-jdq")
            .replace("-json-data", "-jd")
            .replace("-data_name", "-dn")
            .replace("-download", "-dl")
            .replace("-limit", "-l")
            .replace("-data_version", "-dv")
            .replace("-status", "-s")
            .replace("-deactivated", "-dact")
            .replace("-active", "-act")
        )

    def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
        """Serve the mock file for ``url``, honoring the gzip negotiation."""
        assert url.startswith(expected_prefix), (
            f"{expected_prefix!r} does not match {url!r}"
        )
        data_file_name = _file_name(url, suffix)
        data_file_path = resources.files(data_module) / data_file_name
        with data_file_path.open("rb") as f:
            if has_gzip_header and gzip_response:
                # serve the stored gzip bytes as-is
                fp = BytesIO(f.read())
                return _MockHTTPResponse(fp, True)
            else:
                # decompress on the fly and serve plain bytes
                decompressed_f = read_fn(f, "rb")
                fp = BytesIO(decompressed_f.read())
                return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_description(url, has_gzip_header):
        return _mock_urlopen_shared(
            url=url,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_data_description,
            suffix=".json",
        )

    def _mock_urlopen_data_features(url, has_gzip_header):
        return _mock_urlopen_shared(
            url=url,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_data_features,
            suffix=".json",
        )

    def _mock_urlopen_download_data(url, has_gzip_header):
        # For simplicity the mock filenames don't contain the filename, i.e.
        # the last part of the data description url after the last /.
        # For example for id_1, data description download url is:
        # gunzip -c sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz | grep '"url" # noqa: E501
        # "https:\/\/www.openml.org\/data\/v1\/download\/1\/anneal.arff"
        # but the mock filename does not contain anneal.arff and is:
        # sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz.
        # We only keep the part of the url before the last /
        url_without_filename = url.rsplit("/", 1)[0]
        return _mock_urlopen_shared(
            url=url_without_filename,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_download_data,
            suffix=".arff",
        )

    def _mock_urlopen_data_list(url, has_gzip_header):
        assert url.startswith(url_prefix_data_list), (
            f"{url_prefix_data_list!r} does not match {url!r}"
        )
        data_file_name = _file_name(url, ".json")
        data_file_path = resources.files(data_module) / data_file_name
        # load the file itself, to simulate a http error
        with data_file_path.open("rb") as f:
            decompressed_f = read_fn(f, "rb")
            decoded_s = decompressed_f.read().decode("utf-8")
            json_data = json.loads(decoded_s)
        if "error" in json_data:
            # mock payloads that contain an "error" key simulate a server
            # failure with a synthetic 412 response
            raise HTTPError(
                url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
            )
        with data_file_path.open("rb") as f:
            if has_gzip_header:
                fp = BytesIO(f.read())
                return _MockHTTPResponse(fp, True)
            else:
                decompressed_f = read_fn(f, "rb")
                fp = BytesIO(decompressed_f.read())
                return _MockHTTPResponse(fp, False)

    def _mock_urlopen(request, *args, **kwargs):
        """Dispatch a mocked request to the handler matching its URL prefix."""
        url = request.get_full_url()
        has_gzip_header = request.get_header("Accept-encoding") == "gzip"
        if url.startswith(url_prefix_data_list):
            return _mock_urlopen_data_list(url, has_gzip_header)
        elif url.startswith(url_prefix_data_features):
            return _mock_urlopen_data_features(url, has_gzip_header)
        elif url.startswith(url_prefix_download_data):
            return _mock_urlopen_download_data(url, has_gzip_header)
        elif url.startswith(url_prefix_data_description):
            return _mock_urlopen_data_description(url, has_gzip_header)
        else:
            raise ValueError("Unknown mocking URL pattern: %s" % url)

    # XXX: Global variable
    if test_offline:
        context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
###############################################################################
# Test the behaviour of `fetch_openml` depending of the input parameters.
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
        # titanic
        (40945, {"data_id": 40945}, 1309, 13, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_as_frame_true(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
    gzip_response,
):
    """Check the behaviour of `fetch_openml` with `as_frame=True`.

    Fetch by ID and/or name (depending if the file was previously cached).
    """
    pd = pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
    bunch = fetch_openml(
        as_frame=True,
        cache=False,
        parser=parser,
        **dataset_params,
    )
    assert int(bunch.details["id"]) == data_id
    assert isinstance(bunch, Bunch)
    # the frame contains both features and target column(s)
    assert isinstance(bunch.frame, pd.DataFrame)
    assert bunch.frame.shape == (n_samples, n_features + n_targets)
    assert isinstance(bunch.data, pd.DataFrame)
    assert bunch.data.shape == (n_samples, n_features)
    # single-target problems yield a Series, multi-target a DataFrame
    if n_targets == 1:
        assert isinstance(bunch.target, pd.Series)
        assert bunch.target.shape == (n_samples,)
    else:
        assert isinstance(bunch.target, pd.DataFrame)
        assert bunch.target.shape == (n_samples, n_targets)
    assert bunch.categories is None
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_as_frame_false(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
):
    """Check the behaviour of `fetch_openml` with `as_frame=False`.

    Fetch both by ID and/or name + version.
    """
    pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch = fetch_openml(
        as_frame=False,
        cache=False,
        parser=parser,
        **dataset_params,
    )
    assert isinstance(bunch, Bunch)
    assert int(bunch.details["id"]) == data_id
    # with as_frame=False no dataframe is materialized ...
    assert bunch.frame is None
    # ... and data/target come back as plain NumPy arrays
    assert isinstance(bunch.data, np.ndarray)
    assert bunch.data.shape == (n_samples, n_features)
    assert isinstance(bunch.target, np.ndarray)
    expected_target_shape = (
        (n_samples,) if n_targets == 1 else (n_samples, n_targets)
    )
    assert bunch.target.shape == expected_target_shape
    assert isinstance(bunch.categories, dict)
@pytest.mark.parametrize("data_id", [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
    """Check the consistency of the LIAC-ARFF and pandas parsers."""
    pd = pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch_liac = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="liac-arff",
    )
    bunch_pandas = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="pandas",
    )
    # The data frames for the input features should match up to some numerical
    # dtype conversions (e.g. float64 <=> Int64) due to limitations of the
    # LIAC-ARFF parser.
    data_liac, data_pandas = bunch_liac.data, bunch_pandas.data

    def convert_numerical_dtypes(series):
        # Align the liac-arff dtype of numerical columns with the pandas
        # parser's dtype; non-numerical columns are returned unchanged.
        pandas_series = data_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        else:
            return series

    data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
    pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
    # Let's also check that the .frame attributes also match
    frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame
    # Note that the .frame attribute is a superset of the .data attribute:
    pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
    # However the remaining columns, typically the target(s), are not necessarily
    # dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser.
    # Therefore, extra dtype conversions are required for those columns:

    def convert_numerical_and_categorical_dtypes(series):
        pandas_series = frame_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        elif isinstance(pandas_series.dtype, pd.CategoricalDtype):
            # Compare categorical features by converting categorical liac uses
            # strings to denote the categories, we rename the categories to make
            # them comparable to the pandas parser. Fixing this behavior in
            # LIAC-ARFF would allow to check the consistency in the future but
            # we do not plan to maintain the LIAC-ARFF on the long term.
            return series.cat.rename_categories(pandas_series.cat.categories)
        else:
            return series

    frame_liac_with_fixed_dtypes = frame_liac.apply(
        convert_numerical_and_categorical_dtypes
    )
    pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
    """Check the equivalence of the dataset when using `as_frame=False` and
    `as_frame=True`.
    """
    pytest.importorskip("pandas")
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    shared_kwargs = dict(data_id=data_id, cache=False, parser=parser)
    bunch_as_frame_true = fetch_openml(as_frame=True, **shared_kwargs)
    bunch_as_frame_false = fetch_openml(as_frame=False, **shared_kwargs)
    # Numerical features and targets must agree across both output formats.
    assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)
    assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_iris_pandas(monkeypatch, parser):
    """Check fetching on a numerical only dataset with string labels."""
    pd = pytest.importorskip("pandas")
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    # expected shapes/dtypes for the mocked iris dataset (150 rows, 4 features)
    data_shape = (150, 4)
    target_shape = (150,)
    frame_shape = (150, 5)
    target_dtype = CategoricalDtype(
        ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
    )
    data_dtypes = [np.float64] * 4
    data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
    target_name = "class"
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )
    data = bunch.data
    target = bunch.target
    frame = bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == [target_name]
    # the string labels become a categorical Series named after the target
    assert isinstance(target, pd.Series)
    assert target.dtype == target_dtype
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.index.is_unique
    # the frame concatenates features and target with matching dtypes
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == data_dtypes + [target_dtype])
    assert frame.index.is_unique
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
    """Check that we can force the target to not be the default target."""
    pd = pytest.importorskip("pandas")
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    shared_kwargs = dict(data_id=data_id, as_frame=True, cache=False, parser=parser)
    bunch_forcing_target = fetch_openml(target_column=target_column, **shared_kwargs)
    bunch_default = fetch_openml(**shared_kwargs)
    # The full frame is independent of the chosen target column(s).
    pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
    if isinstance(target_column, list):
        # Multiple targets: a DataFrame target; remaining columns are data.
        pd.testing.assert_index_equal(
            bunch_forcing_target.target.columns, pd.Index(target_column)
        )
        assert bunch_forcing_target.data.shape == (150, 3)
    else:
        # Single target: a named Series; the other 4 columns are data.
        assert bunch_forcing_target.target.name == target_column
        assert bunch_forcing_target.data.shape == (150, 4)
@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=True`."""
    pd = pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    shared_kwargs = dict(data_id=data_id, as_frame=True, cache=False, parser=parser)
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)
    pd.testing.assert_frame_equal(bunch.data, X)
    if isinstance(y, pd.Series):
        pd.testing.assert_series_equal(bunch.target, y)
    else:
        # multi-target problems return a DataFrame target
        pd.testing.assert_frame_equal(bunch.target, y)
@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=False`."""
    pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    shared_kwargs = dict(data_id=data_id, as_frame=False, cache=False, parser=parser)
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)
    # Both call styles must produce identical arrays.
    assert_array_equal(bunch.data, X)
    assert_array_equal(bunch.target, y)
def test_fetch_openml_difference_parsers(monkeypatch):
    """Check the difference between liac-arff and pandas parser."""
    pytest.importorskip("pandas")
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    # When `as_frame=False`, the categories will be ordinally encoded with
    # liac-arff parser while this is not the case with pandas parser.
    shared_kwargs = dict(data_id=data_id, as_frame=False, cache=False)
    bunch_liac_arff = fetch_openml(parser="liac-arff", **shared_kwargs)
    bunch_pandas = fetch_openml(parser="pandas", **shared_kwargs)
    # liac-arff: ordinal encoding -> homogeneous float array;
    # pandas: raw category strings -> object array.
    assert bunch_liac_arff.data.dtype.kind == "f"
    assert bunch_pandas.data.dtype == "O"
###############################################################################
# Test the ARFF parsing on several datasets to check that it detects the correct
# types (categories, integers, floats).
@pytest.fixture(scope="module")
def datasets_column_names():
    """Return the expected column names, in order, for each OpenML dataset id.

    Keys are OpenML dataset ids; values list every column of the mocked
    dataset (feature columns followed by the target column(s)).
    """
    return {
        61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"],
        2: [
            "family",
            "product-type",
            "steel",
            "carbon",
            "hardness",
            "temper_rolling",
            "condition",
            "formability",
            "strength",
            "non-ageing",
            "surface-finish",
            "surface-quality",
            "enamelability",
            "bc",
            "bf",
            "bt",
            "bw%2Fme",
            "bl",
            "m",
            "chrom",
            "phos",
            "cbond",
            "marvi",
            "exptl",
            "ferro",
            "corr",
            "blue%2Fbright%2Fvarn%2Fclean",
            "lustre",
            "jurofm",
            "s",
            "p",
            "shape",
            "thick",
            "width",
            "len",
            "oil",
            "bore",
            "packing",
            "class",
        ],
        561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"],
        40589: [
            "Mean_Acc1298_Mean_Mem40_Centroid",
            "Mean_Acc1298_Mean_Mem40_Rolloff",
            "Mean_Acc1298_Mean_Mem40_Flux",
            "Mean_Acc1298_Mean_Mem40_MFCC_0",
            "Mean_Acc1298_Mean_Mem40_MFCC_1",
            "Mean_Acc1298_Mean_Mem40_MFCC_2",
            "Mean_Acc1298_Mean_Mem40_MFCC_3",
            "Mean_Acc1298_Mean_Mem40_MFCC_4",
            "Mean_Acc1298_Mean_Mem40_MFCC_5",
            "Mean_Acc1298_Mean_Mem40_MFCC_6",
            "Mean_Acc1298_Mean_Mem40_MFCC_7",
            "Mean_Acc1298_Mean_Mem40_MFCC_8",
            "Mean_Acc1298_Mean_Mem40_MFCC_9",
            "Mean_Acc1298_Mean_Mem40_MFCC_10",
            "Mean_Acc1298_Mean_Mem40_MFCC_11",
            "Mean_Acc1298_Mean_Mem40_MFCC_12",
            "Mean_Acc1298_Std_Mem40_Centroid",
            "Mean_Acc1298_Std_Mem40_Rolloff",
            "Mean_Acc1298_Std_Mem40_Flux",
            "Mean_Acc1298_Std_Mem40_MFCC_0",
            "Mean_Acc1298_Std_Mem40_MFCC_1",
            "Mean_Acc1298_Std_Mem40_MFCC_2",
            "Mean_Acc1298_Std_Mem40_MFCC_3",
            "Mean_Acc1298_Std_Mem40_MFCC_4",
            "Mean_Acc1298_Std_Mem40_MFCC_5",
            "Mean_Acc1298_Std_Mem40_MFCC_6",
            "Mean_Acc1298_Std_Mem40_MFCC_7",
            "Mean_Acc1298_Std_Mem40_MFCC_8",
            "Mean_Acc1298_Std_Mem40_MFCC_9",
            "Mean_Acc1298_Std_Mem40_MFCC_10",
            "Mean_Acc1298_Std_Mem40_MFCC_11",
            "Mean_Acc1298_Std_Mem40_MFCC_12",
            "Std_Acc1298_Mean_Mem40_Centroid",
            "Std_Acc1298_Mean_Mem40_Rolloff",
            "Std_Acc1298_Mean_Mem40_Flux",
            "Std_Acc1298_Mean_Mem40_MFCC_0",
            "Std_Acc1298_Mean_Mem40_MFCC_1",
            "Std_Acc1298_Mean_Mem40_MFCC_2",
            "Std_Acc1298_Mean_Mem40_MFCC_3",
            "Std_Acc1298_Mean_Mem40_MFCC_4",
            "Std_Acc1298_Mean_Mem40_MFCC_5",
            "Std_Acc1298_Mean_Mem40_MFCC_6",
            "Std_Acc1298_Mean_Mem40_MFCC_7",
            "Std_Acc1298_Mean_Mem40_MFCC_8",
            "Std_Acc1298_Mean_Mem40_MFCC_9",
            "Std_Acc1298_Mean_Mem40_MFCC_10",
            "Std_Acc1298_Mean_Mem40_MFCC_11",
            "Std_Acc1298_Mean_Mem40_MFCC_12",
            "Std_Acc1298_Std_Mem40_Centroid",
            "Std_Acc1298_Std_Mem40_Rolloff",
            "Std_Acc1298_Std_Mem40_Flux",
            "Std_Acc1298_Std_Mem40_MFCC_0",
            "Std_Acc1298_Std_Mem40_MFCC_1",
            "Std_Acc1298_Std_Mem40_MFCC_2",
            "Std_Acc1298_Std_Mem40_MFCC_3",
            "Std_Acc1298_Std_Mem40_MFCC_4",
            "Std_Acc1298_Std_Mem40_MFCC_5",
            "Std_Acc1298_Std_Mem40_MFCC_6",
            "Std_Acc1298_Std_Mem40_MFCC_7",
            "Std_Acc1298_Std_Mem40_MFCC_8",
            "Std_Acc1298_Std_Mem40_MFCC_9",
            "Std_Acc1298_Std_Mem40_MFCC_10",
            "Std_Acc1298_Std_Mem40_MFCC_11",
            "Std_Acc1298_Std_Mem40_MFCC_12",
            "BH_LowPeakAmp",
            "BH_LowPeakBPM",
            "BH_HighPeakAmp",
            "BH_HighPeakBPM",
            "BH_HighLowRatio",
            "BHSUM1",
            "BHSUM2",
            "BHSUM3",
            "amazed.suprised",
            "happy.pleased",
            "relaxing.calm",
            "quiet.still",
            "sad.lonely",
            "angry.aggresive",
        ],
        1119: [
            "age",
            "workclass",
            "fnlwgt:",
            "education:",
            "education-num:",
            "marital-status:",
            "occupation:",
            "relationship:",
            "race:",
            "sex:",
            "capital-gain:",
            "capital-loss:",
            "hours-per-week:",
            "native-country:",
            "class",
        ],
        40966: [
            "DYRK1A_N",
            "ITSN1_N",
            "BDNF_N",
            "NR1_N",
            "NR2A_N",
            "pAKT_N",
            "pBRAF_N",
            "pCAMKII_N",
            "pCREB_N",
            "pELK_N",
            "pERK_N",
            "pJNK_N",
            "PKCA_N",
            "pMEK_N",
            "pNR1_N",
            "pNR2A_N",
            "pNR2B_N",
            "pPKCAB_N",
            "pRSK_N",
            "AKT_N",
            "BRAF_N",
            "CAMKII_N",
            "CREB_N",
            "ELK_N",
            "ERK_N",
            "GSK3B_N",
            "JNK_N",
            "MEK_N",
            "TRKA_N",
            "RSK_N",
            "APP_N",
            "Bcatenin_N",
            "SOD1_N",
            "MTOR_N",
            "P38_N",
            "pMTOR_N",
            "DSCR1_N",
            "AMPKA_N",
            "NR2B_N",
            "pNUMB_N",
            "RAPTOR_N",
            "TIAM1_N",
            "pP70S6_N",
            "NUMB_N",
            "P70S6_N",
            "pGSK3B_N",
            "pPKCG_N",
            "CDK5_N",
            "S6_N",
            "ADARB1_N",
            "AcetylH3K9_N",
            "RRP1_N",
            "BAX_N",
            "ARC_N",
            "ERBB4_N",
            "nNOS_N",
            "Tau_N",
            "GFAP_N",
            "GluR3_N",
            "GluR4_N",
            "IL1B_N",
            "P3525_N",
            "pCASP9_N",
            "PSD95_N",
            "SNCA_N",
            "Ubiquitin_N",
            "pGSK3B_Tyr216_N",
            "SHH_N",
            "BAD_N",
            "BCL2_N",
            "pS6_N",
            "pCFOS_N",
            "SYP_N",
            "H3AcK18_N",
            "EGR1_N",
            "H3MeK4_N",
            "CaNA_N",
            "class",
        ],
        40945: [
            "pclass",
            "survived",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
            "boat",
            "body",
            "home.dest",
        ],
    }
@pytest.fixture(scope="module")
def datasets_missing_values():
    """Return, per OpenML dataset id, the expected missing-value count by column.

    Columns without missing values are omitted; an empty dict means the
    dataset contains no missing values at all.
    """
    return {
        61: {},
        2: {
            "family": 11,
            "temper_rolling": 9,
            "condition": 2,
            "formability": 4,
            "non-ageing": 10,
            "surface-finish": 11,
            "enamelability": 11,
            "bc": 11,
            "bf": 10,
            "bt": 11,
            "bw%2Fme": 8,
            "bl": 9,
            "m": 11,
            "chrom": 11,
            "phos": 11,
            "cbond": 10,
            "marvi": 11,
            "exptl": 11,
            "ferro": 11,
            "corr": 11,
            "blue%2Fbright%2Fvarn%2Fclean": 11,
            "lustre": 8,
            "jurofm": 11,
            "s": 11,
            "p": 11,
            "oil": 10,
            "packing": 11,
        },
        561: {},
        40589: {},
        1119: {},
        40966: {"BCL2_N": 7},
        40945: {
            "age": 263,
            "fare": 1,
            "cabin": 1014,
            "embarked": 2,
            "boat": 823,
            "body": 1188,
            "home.dest": 564,
        },
    }
@pytest.mark.parametrize(
"data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints",
[
# iris dataset
(61, "liac-arff", 1, 4, 0),
(61, "pandas", 1, 4, 0),
# anneal dataset
(2, "liac-arff", 33, 6, 0),
(2, "pandas", 33, 2, 4),
# cpu dataset
(561, "liac-arff", 1, 7, 0),
(561, "pandas", 1, 0, 7),
# emotions dataset
(40589, "liac-arff", 6, 72, 0),
(40589, "pandas", 6, 69, 3),
# adult-census dataset
(1119, "liac-arff", 9, 6, 0),
(1119, "pandas", 9, 0, 6),
# miceprotein
(40966, "liac-arff", 1, 77, 0),
(40966, "pandas", 1, 77, 0),
# titanic
(40945, "liac-arff", 3, 6, 0),
(40945, "pandas", 3, 3, 3),
],
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_types_inference(
monkeypatch,
data_id,
parser,
expected_n_categories,
expected_n_floats,
expected_n_ints,
gzip_response,
datasets_column_names,
datasets_missing_values,
):
"""Check that `fetch_openml` infer the right number of categories, integers, and
floats."""
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
frame = bunch.frame
n_categories = len(
[dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"])
assert n_categories == expected_n_categories
assert n_floats == expected_n_floats
assert n_ints == expected_n_ints
assert frame.columns.tolist() == datasets_column_names[data_id]
frame_feature_to_n_nan = frame.isna().sum().to_dict()
for name, n_missing in frame_feature_to_n_nan.items():
expected_missing = datasets_missing_values[data_id].get(name, 0)
assert n_missing == expected_missing
###############################################################################
# Test some more specific behaviour
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"parser": "unknown"},
"The 'parser' parameter of fetch_openml must be a str among",
),
(
{"as_frame": "unknown"},
"The 'as_frame' parameter of fetch_openml must be an instance",
),
],
)
def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg):
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
with pytest.raises(ValueError, match=err_msg):
fetch_openml(data_id=data_id, **params)
@pytest.mark.parametrize(
"params",
[
{"as_frame": True, "parser": "auto"},
{"as_frame": "auto", "parser": "auto"},
{"as_frame": False, "parser": "pandas"},
{"as_frame": False, "parser": "auto"},
],
)
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
"""Check that we raise the proper errors when we require pandas."""
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
err_msg = "requires pandas to be installed. Alternatively, explicitly"
with pytest.raises(ImportError, match=err_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest("This test requires pandas to not be installed.")
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"parser": "pandas"},
"Sparse ARFF datasets cannot be loaded with parser='pandas'",
),
(
{"as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
(
{"parser": "pandas", "as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_svmlight_format.py | sklearn/datasets/tests/test_svmlight_format.py | import gzip
import os
import shutil
from bz2 import BZ2File
from importlib import resources
from io import BytesIO
from tempfile import NamedTemporaryFile
import numpy as np
import pytest
import scipy.sparse as sp
import sklearn
from sklearn.datasets import dump_svmlight_file, load_svmlight_file, load_svmlight_files
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
create_memmap_backed_data,
)
from sklearn.utils.fixes import CSR_CONTAINERS
TEST_DATA_MODULE = "sklearn.datasets.tests.data"
datafile = "svmlight_classification.txt"
multifile = "svmlight_multilabel.txt"
invalidfile = "svmlight_invalid.txt"
invalidfile2 = "svmlight_invalid_order.txt"
def _svmlight_local_test_file_path(filename):
return resources.files(TEST_DATA_MODULE) / filename
def _load_svmlight_local_test_file(filename, **kwargs):
"""
Helper to load resource `filename` with `importlib.resources`
"""
data_path = _svmlight_local_test_file_path(filename)
with data_path.open("rb") as f:
return load_svmlight_file(f, **kwargs)
def test_load_svmlight_file():
X, y = _load_svmlight_local_test_file(datafile)
# test X's shape
assert X.indptr.shape[0] == 7
assert X.shape[0] == 6
assert X.shape[1] == 21
assert y.shape[0] == 6
# test X's non-zero values
for i, j, val in (
(0, 2, 2.5),
(0, 10, -5.2),
(0, 15, 1.5),
(1, 5, 1.0),
(1, 12, -3),
(2, 20, 27),
):
assert X[i, j] == val
# tests X's zero values
assert X[0, 3] == 0
assert X[0, 5] == 0
assert X[1, 8] == 0
assert X[1, 16] == 0
assert X[2, 18] == 0
# test can change X's values
X[0, 2] *= 2
assert X[0, 2] == 5
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
# GH20081: testing equality between path-based and
# fd-based load_svmlight_file
data_path = resources.files(TEST_DATA_MODULE) / datafile
data_path = str(data_path)
X1, y1 = load_svmlight_file(data_path)
fd = os.open(data_path, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_almost_equal(X1.data, X2.data)
assert_array_almost_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_pathlib():
# test loading from file descriptor
data_path = _svmlight_local_test_file_path(datafile)
X1, y1 = load_svmlight_file(str(data_path))
X2, y2 = load_svmlight_file(data_path)
assert_allclose(X1.data, X2.data)
assert_allclose(y1, y2)
def test_load_svmlight_file_multilabel():
X, y = _load_svmlight_local_test_file(multifile, multilabel=True)
assert y == [(0, 1), (2,), (), (1, 2)]
def test_load_svmlight_files():
data_path = _svmlight_local_test_file_path(datafile)
X_train, y_train, X_test, y_test = load_svmlight_files(
[str(data_path)] * 2, dtype=np.float32
)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_almost_equal(y_train, y_test)
assert X_train.dtype == np.float32
assert X_test.dtype == np.float32
X1, y1, X2, y2, X3, y3 = load_svmlight_files([str(data_path)] * 3, dtype=np.float64)
assert X1.dtype == X2.dtype
assert X2.dtype == X3.dtype
assert X3.dtype == np.float64
def test_load_svmlight_file_n_features():
X, y = _load_svmlight_local_test_file(datafile, n_features=22)
# test X'shape
assert X.indptr.shape[0] == 7
assert X.shape[0] == 6
assert X.shape[1] == 22
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)):
assert X[i, j] == val
# 21 features in file
with pytest.raises(ValueError):
_load_svmlight_local_test_file(datafile, n_features=20)
def test_load_compressed():
X, y = _load_svmlight_local_test_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with _svmlight_local_test_file_path(datafile).open("rb") as f:
with gzip.open(tmp.name, "wb") as fh_out:
shutil.copyfileobj(f, fh_out)
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xgz.toarray())
assert_array_almost_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with _svmlight_local_test_file_path(datafile).open("rb") as f:
with BZ2File(tmp.name, "wb") as fh_out:
shutil.copyfileobj(f, fh_out)
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xbz.toarray())
assert_array_almost_equal(y, ybz)
def test_load_invalid_file():
with pytest.raises(ValueError):
_load_svmlight_local_test_file(invalidfile)
def test_load_invalid_order_file():
with pytest.raises(ValueError):
_load_svmlight_local_test_file(invalidfile2)
def test_load_zero_based():
f = BytesIO(b"-1 4:1.\n1 0:1\n")
with pytest.raises(ValueError):
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b"-1 1:1 2:2 3:3\n"
data2 = b"-1 0:0 1:1\n"
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert X.shape == (1, 3)
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert X1.shape == (1, 4)
assert X2.shape == (1, 4)
def test_load_with_qid():
# load svmfile with qid attribute
data = b"""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12"""
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[0.53, 0.12], [0.13, 0.1], [0.87, 0.12]])
@pytest.mark.skip(
"testing the overflow of 32 bit sparse indexing requires a large amount of memory"
)
def test_load_large_qid():
"""
load large libsvm / svmlight file with qid attribute. Tests 64-bit query ID
"""
data = b"\n".join(
(
"3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode()
for i in range(1, 40 * 1000 * 1000)
)
)
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
assert_array_equal(y[-4:], [3, 2, 3, 2])
assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000))
def test_load_invalid_file2():
with pytest.raises(ValueError):
data_path = _svmlight_local_test_file_path(datafile)
invalid_path = _svmlight_local_test_file_path(invalidfile)
load_svmlight_files([str(data_path), str(invalid_path), str(data_path)])
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
with pytest.raises(TypeError):
load_svmlight_file(0.42)
def test_invalid_filename():
with pytest.raises(OSError):
load_svmlight_file("trou pic nic douille")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dump(csr_container):
X_sparse, y_dense = _load_svmlight_local_test_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = csr_container(np.atleast_2d(y_dense))
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if sp.issparse(y) and y.shape[0] == 1:
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
# Note: with dtype=np.int32 we are performing unsafe casts,
# where X.astype(dtype) overflows. The result is
# then platform dependent and X_dense.astype(dtype) may be
# different from X_sparse.astype(dtype).asarray().
X_input = X.astype(dtype)
dump_svmlight_file(
X_input, y, f, comment="test", zero_based=zero_based
)
f.seek(0)
comment = f.readline()
comment = str(comment, "utf-8")
assert "scikit-learn %s" % sklearn.__version__ in comment
comment = f.readline()
comment = str(comment, "utf-8")
assert ["one", "zero"][zero_based] + "-based" in comment
X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based)
assert X2.dtype == dtype
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if sp.issparse(X_input):
X_input_dense = X_input.toarray()
else:
X_input_dense = X_input
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(X_input_dense, X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype, copy=False), y2, 4
)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(X_input_dense, X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype, copy=False), y2, 15
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dump_multilabel(csr_container):
X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = csr_container(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert f.readline() == b"1 0:1 2:3 4:5\n"
assert f.readline() == b"0,2 \n"
assert f.readline() == b"0,1 1:5 3:1\n"
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [
[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert f.readline() == b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"
assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n"
assert f.readline() == b"3.01 \n"
assert f.readline() == b"1.000000000000001 \n"
assert f.readline() == b"1 \n"
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
def test_dump_comment():
X, y = _load_svmlight_local_test_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc"
f = BytesIO()
with pytest.raises(UnicodeDecodeError):
dump_svmlight_file(X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
f = BytesIO()
with pytest.raises(ValueError):
dump_svmlight_file(X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = _load_svmlight_local_test_file(datafile)
f = BytesIO()
y2d = [y]
with pytest.raises(ValueError):
dump_svmlight_file(X, y2d, f)
f = BytesIO()
with pytest.raises(ValueError):
dump_svmlight_file(X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = _load_svmlight_local_test_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b"""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985"""
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [
[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_zeros(csr_container):
f = BytesIO()
true_X = csr_container(np.zeros(shape=(3, 4)))
true_y = np.array([0, 1, 0])
dump_svmlight_file(true_X, true_y, f)
for zero_based in ["auto", True, False]:
f.seek(0)
X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based)
assert_array_almost_equal(y, true_y)
assert_array_almost_equal(X.toarray(), true_X.toarray())
@pytest.mark.parametrize("sparsity", [0, 0.1, 0.5, 0.99, 1])
@pytest.mark.parametrize("n_samples", [13, 101])
@pytest.mark.parametrize("n_features", [2, 7, 41])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_with_offsets(sparsity, n_samples, n_features, csr_container):
rng = np.random.RandomState(0)
X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
if sparsity:
X[X < sparsity] = 0.0
X = csr_container(X)
y = rng.randint(low=0, high=2, size=n_samples)
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
size = len(f.getvalue())
# put some marks that are likely to happen anywhere in a row
mark_0 = 0
mark_1 = size // 3
length_0 = mark_1 - mark_0
mark_2 = 4 * size // 5
length_1 = mark_2 - mark_1
# load the original sparse matrix into 3 independent CSR matrices
X_0, y_0 = load_svmlight_file(
f, n_features=n_features, offset=mark_0, length=length_0
)
X_1, y_1 = load_svmlight_file(
f, n_features=n_features, offset=mark_1, length=length_1
)
X_2, y_2 = load_svmlight_file(f, n_features=n_features, offset=mark_2)
y_concat = np.concatenate([y_0, y_1, y_2])
X_concat = sp.vstack([X_0, X_1, X_2])
assert_array_almost_equal(y, y_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_load_offset_exhaustive_splits(csr_container):
rng = np.random.RandomState(0)
X = np.array(
[
[0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 6],
[1, 2, 3, 4, 0, 6],
[0, 0, 0, 0, 0, 0],
[1, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0],
]
)
X = csr_container(X)
n_samples, n_features = X.shape
y = rng.randint(low=0, high=2, size=n_samples)
query_id = np.arange(n_samples) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id)
f.seek(0)
size = len(f.getvalue())
# load the same data in 2 parts with all the possible byte offsets to
# locate the split so has to test for particular boundary cases
for mark in range(size):
f.seek(0)
X_0, y_0, q_0 = load_svmlight_file(
f, n_features=n_features, query_id=True, offset=0, length=mark
)
X_1, y_1, q_1 = load_svmlight_file(
f, n_features=n_features, query_id=True, offset=mark, length=-1
)
q_concat = np.concatenate([q_0, q_1])
y_concat = np.concatenate([y_0, y_1])
X_concat = sp.vstack([X_0, X_1])
assert_array_almost_equal(y, y_concat)
assert_array_equal(query_id, q_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
def test_load_with_offsets_error():
with pytest.raises(ValueError, match="n_features is required"):
_load_svmlight_local_test_file(datafile, offset=3, length=3)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_multilabel_y_explicit_zeros(tmp_path, csr_container):
"""
Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
0) then those explicit zeros are not encoded.
"""
save_path = str(tmp_path / "svm_explicit_zero")
rng = np.random.RandomState(42)
X = rng.randn(3, 5).astype(np.float64)
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
# The first and last element are explicit zeros.
data = np.array([0, 1, 1, 1, 1, 0])
y = csr_container((data, indices, indptr), shape=(3, 3))
# y as a dense array would look like
# [[0, 0, 1],
# [0, 0, 1],
# [1, 1, 0]]
dump_svmlight_file(X, y, save_path, multilabel=True)
_, y_load = load_svmlight_file(save_path, multilabel=True)
y_true = [(2.0,), (2.0,), (0.0, 1.0)]
assert y_load == y_true
def test_dump_read_only(tmp_path):
"""Ensure that there is no ValueError when dumping a read-only `X`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28026
"""
rng = np.random.RandomState(42)
X = rng.randn(5, 2)
y = rng.randn(5)
# Convert to memmap-backed which are read-only
X, y = create_memmap_backed_data([X, y])
save_path = str(tmp_path / "svm_read_only")
dump_svmlight_file(X, y, save_path)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/__init__.py | sklearn/datasets/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_covtype.py | sklearn/datasets/tests/test_covtype.py | """Test the covtype loader, if the data is available,
or if specifically requested via environment variable
(e.g. for CI jobs)."""
from functools import partial
import pytest
from sklearn.datasets.tests.test_common import check_return_X_y
def test_fetch(fetch_covtype_fxt, global_random_seed):
data1 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed)
data2 = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed + 1)
X1, X2 = data1["data"], data2["data"]
assert (581012, 54) == X1.shape
assert X1.shape == X2.shape
assert X1.sum() == X2.sum()
y1, y2 = data1["target"], data2["target"]
assert (X1.shape[0],) == y1.shape
assert (X1.shape[0],) == y2.shape
descr_prefix = ".. _covtype_dataset:"
assert data1.DESCR.startswith(descr_prefix)
assert data2.DESCR.startswith(descr_prefix)
# test return_X_y option
fetch_func = partial(fetch_covtype_fxt)
check_return_X_y(data1, fetch_func)
def test_fetch_asframe(fetch_covtype_fxt):
pytest.importorskip("pandas")
bunch = fetch_covtype_fxt(as_frame=True)
assert hasattr(bunch, "frame")
frame = bunch.frame
assert frame.shape == (581012, 55)
assert bunch.data.shape == (581012, 54)
assert bunch.target.shape == (581012,)
column_names = set(frame.columns)
# enumerated names are added correctly
assert set(f"Wilderness_Area_{i}" for i in range(4)) < column_names
assert set(f"Soil_Type_{i}" for i in range(40)) < column_names
def test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas):
expected_msg = "fetch_covtype with as_frame=True requires pandas"
with pytest.raises(ImportError, match=expected_msg):
fetch_covtype_fxt(as_frame=True)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_arff_parser.py | sklearn/datasets/tests/test_arff_parser.py | import textwrap
from io import BytesIO
import pytest
from sklearn.datasets._arff_parser import (
_liac_arff_parser,
_pandas_arff_parser,
_post_process_frame,
load_arff_from_gzip_file,
)
@pytest.mark.parametrize(
"feature_names, target_names",
[
(
[
"col_int_as_integer",
"col_int_as_numeric",
"col_float_as_real",
"col_float_as_numeric",
],
["col_categorical", "col_string"],
),
(
[
"col_int_as_integer",
"col_int_as_numeric",
"col_float_as_real",
"col_float_as_numeric",
],
["col_categorical"],
),
(
[
"col_int_as_integer",
"col_int_as_numeric",
"col_float_as_real",
"col_float_as_numeric",
],
[],
),
],
)
def test_post_process_frame(feature_names, target_names):
"""Check the behaviour of the post-processing function for splitting a dataframe."""
pd = pytest.importorskip("pandas")
X_original = pd.DataFrame(
{
"col_int_as_integer": [1, 2, 3],
"col_int_as_numeric": [1, 2, 3],
"col_float_as_real": [1.0, 2.0, 3.0],
"col_float_as_numeric": [1.0, 2.0, 3.0],
"col_categorical": ["a", "b", "c"],
"col_string": ["a", "b", "c"],
}
)
X, y = _post_process_frame(X_original, feature_names, target_names)
assert isinstance(X, pd.DataFrame)
if len(target_names) >= 2:
assert isinstance(y, pd.DataFrame)
elif len(target_names) == 1:
assert isinstance(y, pd.Series)
else:
assert y is None
def test_load_arff_from_gzip_file_error_parser():
"""An error will be raised if the parser is not known."""
# None of the input parameters are required to be accurate since the check
# of the parser will be carried out first.
err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'"
with pytest.raises(ValueError, match=err_msg):
load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx")
@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser])
def test_pandas_arff_parser_strip_single_quotes(parser_func):
"""Check that we properly strip single quotes from the data."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_single_quote' {'A', 'B', 'C'}
@attribute 'str_single_quote' string
@attribute 'str_nested_quote' string
@attribute 'class' numeric
@data
'A','some text','\"expect double quotes\"',0
"""
).encode("utf-8")
)
columns_info = {
"cat_single_quote": {
"data_type": "nominal",
"name": "cat_single_quote",
},
"str_single_quote": {
"data_type": "string",
"name": "str_single_quote",
},
"str_nested_quote": {
"data_type": "string",
"name": "str_nested_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_single_quote",
"str_single_quote",
"str_nested_quote",
]
target_names = ["class"]
# We don't strip single quotes for string columns with the pandas parser.
expected_values = {
"cat_single_quote": "A",
"str_single_quote": (
"some text" if parser_func is _liac_arff_parser else "'some text'"
),
"str_nested_quote": (
'"expect double quotes"'
if parser_func is _liac_arff_parser
else "'\"expect double quotes\"'"
),
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
@pytest.mark.parametrize("parser_func", [_liac_arff_parser, _pandas_arff_parser])
def test_pandas_arff_parser_strip_double_quotes(parser_func):
"""Check that we properly strip double quotes from the data."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_double_quote' {"A", "B", "C"}
@attribute 'str_double_quote' string
@attribute 'str_nested_quote' string
@attribute 'class' numeric
@data
"A","some text","\'expect double quotes\'",0
"""
).encode("utf-8")
)
columns_info = {
"cat_double_quote": {
"data_type": "nominal",
"name": "cat_double_quote",
},
"str_double_quote": {
"data_type": "string",
"name": "str_double_quote",
},
"str_nested_quote": {
"data_type": "string",
"name": "str_nested_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_double_quote",
"str_double_quote",
"str_nested_quote",
]
target_names = ["class"]
expected_values = {
"cat_double_quote": "A",
"str_double_quote": "some text",
"str_nested_quote": "'expect double quotes'",
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
@pytest.mark.parametrize(
"parser_func",
[
# internal quotes are not considered to follow the ARFF spec in LIAC ARFF
pytest.param(_liac_arff_parser, marks=pytest.mark.xfail),
_pandas_arff_parser,
],
)
def test_pandas_arff_parser_strip_no_quotes(parser_func):
"""Check that we properly parse with no quotes characters."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_without_quote' {A, B, C}
@attribute 'str_without_quote' string
@attribute 'str_internal_quote' string
@attribute 'class' numeric
@data
A,some text,'internal' quote,0
"""
).encode("utf-8")
)
columns_info = {
"cat_without_quote": {
"data_type": "nominal",
"name": "cat_without_quote",
},
"str_without_quote": {
"data_type": "string",
"name": "str_without_quote",
},
"str_internal_quote": {
"data_type": "string",
"name": "str_internal_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_without_quote",
"str_without_quote",
"str_internal_quote",
]
target_names = ["class"]
expected_values = {
"cat_without_quote": "A",
"str_without_quote": "some text",
"str_internal_quote": "'internal' quote",
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/test_kddcup99.py | sklearn/datasets/tests/test_kddcup99.py | """Test kddcup99 loader, if the data is available,
or if specifically requested via environment variable
(e.g. for CI jobs).
Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
"""
from functools import partial
import pytest
from sklearn.datasets.tests.test_common import (
check_as_frame,
check_pandas_dependency_message,
check_return_X_y,
)
@pytest.mark.parametrize("as_frame", [True, False])
@pytest.mark.parametrize(
"subset, n_samples, n_features",
[
(None, 494021, 41),
("SA", 100655, 41),
("SF", 73237, 4),
("http", 58725, 3),
("smtp", 9571, 3),
],
)
def test_fetch_kddcup99_percent10(
fetch_kddcup99_fxt, as_frame, subset, n_samples, n_features
):
data = fetch_kddcup99_fxt(subset=subset, as_frame=as_frame)
assert data.data.shape == (n_samples, n_features)
assert data.target.shape == (n_samples,)
if as_frame:
assert data.frame.shape == (n_samples, n_features + 1)
assert data.DESCR.startswith(".. _kddcup99_dataset:")
def test_fetch_kddcup99_return_X_y(fetch_kddcup99_fxt):
fetch_func = partial(fetch_kddcup99_fxt, subset="smtp")
data = fetch_func()
check_return_X_y(data, fetch_func)
def test_fetch_kddcup99_as_frame(fetch_kddcup99_fxt):
bunch = fetch_kddcup99_fxt()
check_as_frame(bunch, fetch_kddcup99_fxt)
def test_fetch_kddcup99_shuffle(fetch_kddcup99_fxt):
dataset = fetch_kddcup99_fxt(
random_state=0,
subset="SA",
percent10=True,
)
dataset_shuffled = fetch_kddcup99_fxt(
random_state=0,
subset="SA",
shuffle=True,
percent10=True,
)
assert set(dataset["target"]) == set(dataset_shuffled["target"])
assert dataset_shuffled.data.shape == dataset.data.shape
assert dataset_shuffled.target.shape == dataset.target.shape
def test_pandas_dependency_message(fetch_kddcup99_fxt, hide_available_pandas):
check_pandas_dependency_message(fetch_kddcup99_fxt)
def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):
"""Check that a nice error message is raised when cache is corrupted."""
kddcup99_dir = tmp_path / "kddcup99_10-py3"
kddcup99_dir.mkdir()
samples_path = kddcup99_dir / "samples"
with samples_path.open("wb") as f:
f.write(b"THIS IS CORRUPTED")
msg = (
"The cache for fetch_kddcup99 is invalid, please "
f"delete {kddcup99_dir} and run the fetch_kddcup99 again"
)
with pytest.raises(OSError, match=msg):
fetch_kddcup99_fxt(data_home=str(tmp_path))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/__init__.py | sklearn/datasets/tests/data/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/__init__.py | sklearn/datasets/tests/data/openml/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_1119/__init__.py | sklearn/datasets/tests/data/openml/id_1119/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_61/__init__.py | sklearn/datasets/tests/data/openml/id_61/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_1590/__init__.py | sklearn/datasets/tests/data/openml/id_1590/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_42074/__init__.py | sklearn/datasets/tests/data/openml/id_42074/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_561/__init__.py | sklearn/datasets/tests/data/openml/id_561/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_40945/__init__.py | sklearn/datasets/tests/data/openml/id_40945/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_2/__init__.py | sklearn/datasets/tests/data/openml/id_2/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_42585/__init__.py | sklearn/datasets/tests/data/openml/id_42585/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_62/__init__.py | sklearn/datasets/tests/data/openml/id_62/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_40675/__init__.py | sklearn/datasets/tests/data/openml/id_40675/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_292/__init__.py | sklearn/datasets/tests/data/openml/id_292/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_40589/__init__.py | sklearn/datasets/tests/data/openml/id_40589/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_3/__init__.py | sklearn/datasets/tests/data/openml/id_3/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_1/__init__.py | sklearn/datasets/tests/data/openml/id_1/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/tests/data/openml/id_40966/__init__.py | sklearn/datasets/tests/data/openml/id_40966/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/data/__init__.py | sklearn/datasets/data/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/images/__init__.py | sklearn/datasets/images/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/datasets/descr/__init__.py | sklearn/datasets/descr/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_base.py | sklearn/metrics/_base.py | """
Common code for all metrics.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import combinations
import numpy as np
from sklearn.utils import check_array, check_consistent_length
from sklearn.utils.multiclass import type_of_target
def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):
"""Average a binary metric for multilabel classification.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If not ``None``, average the score, else return the score for each
classes.
"""
average_options = (None, "micro", "macro", "weighted", "samples")
if average not in average_options:
raise ValueError("average has to be one of {0}".format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == "weighted":
if score_weight is not None:
average_weight = np.sum(
np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0
)
else:
average_weight = np.sum(y_true, axis=0)
if np.isclose(average_weight.sum(), 0.0):
return 0
elif average == "samples":
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)
# Average the results
if average is not None:
if average_weight is not None:
# Scores with 0 weights are forced to be 0, preventing the average
# score from being affected by 0-weighted NaN elements.
average_weight = np.asarray(average_weight)
score[average_weight == 0] = 0
return float(np.average(score, weights=average_weight))
else:
return score
def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average="macro"):
"""Average one-versus-one scores for multiclass classification.
Uses the binary metric for one-vs-one multiclass classification,
where the score is computed according to the Hand & Till (2001) algorithm.
Parameters
----------
binary_metric : callable
The binary metric function to use that accepts the following as input:
y_true_target : array, shape = [n_samples_target]
Some sub-array of y_true for a pair of classes designated
positive and negative in the one-vs-one scheme.
y_score_target : array, shape = [n_samples_target]
Scores corresponding to the probability estimates
of a sample belonging to the designated positive class label
y_true : array-like of shape (n_samples,)
True multiclass labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores corresponding to probability estimates of a sample
belonging to a particular class.
average : {'macro', 'weighted'}, default='macro'
Determines the type of averaging performed on the pairwise binary
metric scores:
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account. Classes
are assumed to be uniformly distributed.
``'weighted'``:
Calculate metrics for each label, taking into account the
prevalence of the classes.
Returns
-------
score : float
Average of the pairwise binary metric scores.
"""
check_consistent_length(y_true, y_score)
y_true_unique = np.unique(y_true)
n_classes = y_true_unique.shape[0]
n_pairs = n_classes * (n_classes - 1) // 2
pair_scores = np.empty(n_pairs)
is_weighted = average == "weighted"
prevalence = np.empty(n_pairs) if is_weighted else None
# Compute scores treating a as positive class and b as negative class,
# then b as positive class and a as negative class
for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):
a_mask = y_true == a
b_mask = y_true == b
ab_mask = np.logical_or(a_mask, b_mask)
if is_weighted:
prevalence[ix] = np.average(ab_mask)
a_true = a_mask[ab_mask]
b_true = b_mask[ab_mask]
a_true_score = binary_metric(a_true, y_score[ab_mask, a])
b_true_score = binary_metric(b_true, y_score[ab_mask, b])
pair_scores[ix] = (a_true_score + b_true_score) / 2
return np.average(pair_scores, weights=prevalence)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_classification.py | sklearn/metrics/_classification.py | """Metrics to assess performance on classification task given class prediction.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from contextlib import nullcontext
from math import sqrt
from numbers import Integral, Real
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, issparse
from scipy.special import xlogy
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils import (
assert_all_finite,
check_array,
check_consistent_length,
check_scalar,
column_or_1d,
)
from sklearn.utils._array_api import (
_average,
_bincount,
_convert_to_numpy,
_count_nonzero,
_fill_diagonal,
_find_matching_floating_dtype,
_is_numpy_namespace,
_is_xp_namespace,
_isin,
_max_precision_float_dtype,
_union1d,
get_namespace,
get_namespace_and_device,
move_to,
supported_float_dtypes,
xpx,
)
from sklearn.utils._param_validation import (
Hidden,
Interval,
Options,
StrOptions,
validate_params,
)
from sklearn.utils._unique import attach_unique
from sklearn.utils.extmath import _nanaverage
from sklearn.utils.multiclass import type_of_target, unique_labels
from sklearn.utils.validation import (
_check_pos_label_consistency,
_check_sample_weight,
_num_samples,
)
def _check_zero_division(zero_division):
if isinstance(zero_division, str) and zero_division == "warn":
return np.float64(0.0)
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
return np.float64(zero_division)
else: # np.isnan(zero_division)
return np.nan
def _check_targets(y_true, y_pred, sample_weight=None):
"""Check that y_true and y_pred belong to the same classification task.
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
sample_weight : array-like, default=None
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``.
y_true : array or indicator matrix
y_pred : array or indicator matrix
sample_weight : array or None
"""
xp, _ = get_namespace(y_true, y_pred, sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
type_true = type_of_target(y_true, input_name="y_true")
type_pred = type_of_target(y_pred, input_name="y_pred")
for array in [y_true, y_pred]:
if _num_samples(array) < 1:
raise ValueError(
"Found empty input array (e.g., `y_true` or `y_pred`) while a minimum "
"of 1 sample is required."
)
if sample_weight is not None:
sample_weight = _check_sample_weight(
sample_weight, y_true, force_float_dtype=False
)
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError(
"Classification metrics can't handle a mix of {0} and {1} targets".format(
type_true, type_pred
)
)
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if y_type not in ["binary", "multiclass", "multilabel-indicator"]:
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
try:
y_true = column_or_1d(y_true, input_name="y_true")
y_pred = column_or_1d(y_pred, input_name="y_pred")
except TypeError as e:
if "Sparse data was passed" in str(e):
raise TypeError(
"Sparse input is only supported when targets are of multilabel type"
) from e
else:
raise
xp, _ = get_namespace(y_true, y_pred)
if y_type == "binary":
try:
unique_values = _union1d(y_true, y_pred, xp)
except TypeError as e:
# We expect y_true and y_pred to be of the same data type.
# If `y_true` was provided to the classifier as strings,
# `y_pred` given by the classifier will also be encoded with
# strings. So we raise a meaningful error
raise TypeError(
"Labels in y_true and y_pred should be of the same type. "
f"Got y_true={xp.unique(y_true)} and "
f"y_pred={xp.unique(y_pred)}. Make sure that the "
"predictions provided by the classifier coincides with "
"the true labels."
) from e
if unique_values.shape[0] > 2:
y_type = "multiclass"
if y_type.startswith("multilabel"):
if _is_numpy_namespace(xp):
# XXX: do we really want to sparse-encode multilabel indicators when
# they are passed as a dense arrays? This is not possible for array
# API inputs in general hence we only do it for NumPy inputs. But even
# for NumPy the usefulness is questionable.
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = "multilabel-indicator"
return y_type, y_true, y_pred, sample_weight
def _one_hot_encoding_multiclass_target(y_true, labels, target_xp, target_device):
"""Convert multi-class `y_true` into a one-hot encoded array and also ensure
that the encoded array is placed on the target API namespace and device.
Also return the classes provided by `LabelBinarizer` in additional to the
integer encoded array.
"""
xp, _ = get_namespace(y_true)
lb = LabelBinarizer()
if labels is not None:
lb = lb.fit(labels)
# LabelBinarizer does not respect the order implied by labels, which
# can be misleading.
if not xp.all(lb.classes_ == labels):
warnings.warn(
f"Labels passed were {labels}. But this function "
"assumes labels are ordered lexicographically. "
f"Pass the ordered labels={lb.classes_.tolist()} and ensure that "
"the columns of y_prob correspond to this ordering.",
UserWarning,
)
if not xp.all(_isin(y_true, labels, xp=xp)):
undeclared_labels = set(y_true) - set(labels)
raise ValueError(
f"y_true contains values {undeclared_labels} not belonging "
f"to the passed labels {labels}."
)
else:
lb = lb.fit(y_true)
if lb.classes_.shape[0] == 1:
if labels is None:
raise ValueError(
"y_true contains only one label ({0}). Please "
"provide the list of all expected class labels explicitly through the "
"labels argument.".format(lb.classes_[0])
)
else:
raise ValueError(
"The labels array needs to contain at least two "
"labels, got {0}.".format(lb.classes_)
)
transformed_labels = lb.transform(y_true)
transformed_labels = target_xp.asarray(transformed_labels, device=target_device)
if transformed_labels.shape[1] == 1:
transformed_labels = target_xp.concat(
(1 - transformed_labels, transformed_labels), axis=1
)
return transformed_labels, lb.classes_
def _validate_multiclass_probabilistic_prediction(
y_true, y_prob, sample_weight, labels
):
r"""Convert y_true and y_prob to shape (n_samples, n_classes)
1. Verify that y_true, y_prob, and sample_weights have the same first dim
2. Ensure 2 or more classes in y_true i.e. valid classification task. The
classes are provided by the labels argument, or inferred using y_true.
When inferring y_true is assumed binary if it has shape (n_samples, ).
3. Validate y_true, and y_prob have the same number of classes. Convert to
shape (n_samples, n_classes)
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_prob : array of floats, shape=(n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If `y_prob.shape = (n_samples,)`
the probabilities provided are assumed to be that of the
positive class. The labels in `y_prob` are assumed to be
ordered lexicographically, as done by
:class:`preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If `labels`
is `None` and `y_prob` has shape `(n_samples,)` the labels are
assumed to be binary and are inferred from `y_true`.
Returns
-------
transformed_labels : array of shape (n_samples, n_classes)
y_prob : array of shape (n_samples, n_classes)
"""
xp, _, device_ = get_namespace_and_device(y_prob)
if xp.max(y_prob) > 1:
raise ValueError(f"y_prob contains values greater than 1: {xp.max(y_prob)}")
if xp.min(y_prob) < 0:
raise ValueError(f"y_prob contains values lower than 0: {xp.min(y_prob)}")
check_consistent_length(y_prob, y_true, sample_weight)
if sample_weight is not None:
_check_sample_weight(sample_weight, y_prob, force_float_dtype=False)
transformed_labels, lb_classes = _one_hot_encoding_multiclass_target(
y_true=y_true, labels=labels, target_xp=xp, target_device=device_
)
# If y_prob is of single dimension, assume y_true to be binary
# and then check.
if y_prob.ndim == 1:
y_prob = y_prob[:, xp.newaxis]
if y_prob.shape[1] == 1:
y_prob = xp.concat([1 - y_prob, y_prob], axis=1)
eps = xp.finfo(y_prob.dtype).eps
# Make sure y_prob is normalized
y_prob_sum = xp.sum(y_prob, axis=1)
if not xp.all(
xpx.isclose(
y_prob_sum,
xp.asarray(1, dtype=y_prob_sum.dtype, device=device_),
rtol=sqrt(eps),
)
):
warnings.warn(
"The y_prob values do not sum to one. Make sure to pass probabilities.",
UserWarning,
)
# Check if dimensions are consistent.
if lb_classes.shape[0] != y_prob.shape[1]:
if labels is None:
raise ValueError(
"y_true and y_prob contain different number of "
"classes: {0} vs {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(
transformed_labels.shape[1], y_prob.shape[1], lb_classes
)
)
else:
raise ValueError(
"The number of classes in labels is different "
"from that in y_prob. Classes found in "
"labels: {0}".format(lb_classes)
)
return transformed_labels, y_prob
@validate_params(
{
"y_true": ["array-like", "sparse matrix"],
"y_pred": ["array-like", "sparse matrix"],
"normalize": ["boolean"],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels. Sparse matrix is only supported when
labels are of :term:`multilabel` type.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier. Sparse matrix is only
supported when labels are of :term:`multilabel` type.
normalize : bool, default=True
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
If ``normalize == True``, returns the fraction of correctly classified samples,
else returns the number of correctly classified samples.
The best performance is 1.0 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See Also
--------
balanced_accuracy_score : Compute the balanced accuracy to deal with
imbalanced datasets.
jaccard_score : Compute the Jaccard similarity coefficient score.
hamming_loss : Compute the average Hamming loss or Hamming distance between
two sets of samples.
zero_one_loss : Compute the Zero-one classification loss. By default, the
function will return the percentage of imperfectly predicted subsets.
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2.0
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
xp, _, device = get_namespace_and_device(y_pred)
y_true, sample_weight = move_to(y_true, sample_weight, xp=xp, device=device)
# Compute accuracy for each possible representation
y_true, y_pred = attach_unique(y_true, y_pred)
y_type, y_true, y_pred, sample_weight = _check_targets(
y_true, y_pred, sample_weight
)
if y_type.startswith("multilabel"):
differing_labels = _count_nonzero(y_true - y_pred, xp=xp, device=device, axis=1)
score = xp.asarray(differing_labels == 0, device=device)
else:
score = y_true == y_pred
return float(_average(score, weights=sample_weight, normalize=normalize, xp=xp))
@validate_params(
{
"y_true": ["array-like"],
"y_pred": ["array-like"],
"labels": ["array-like", None],
"sample_weight": ["array-like", None],
"normalize": [StrOptions({"true", "pred", "all"}), None],
},
prefer_skip_nested_validation=True,
)
def confusion_matrix(
y_true, y_pred, *, labels=None, sample_weight=None, normalize=None
):
"""Compute confusion matrix to evaluate the accuracy of a classification.
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If ``None`` is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix whose i-th row and j-th
column entry indicates the number of
samples with true label being i-th class
and predicted label being j-th class.
See Also
--------
ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
given an estimator, the data, and the label.
ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
given the true and predicted labels.
ConfusionMatrixDisplay : Confusion Matrix visualization.
confusion_matrix_at_thresholds : For binary classification, compute true negative,
false positive, false negative and true positive counts per threshold.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes).
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc. as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel().tolist()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
xp, _, device_ = get_namespace_and_device(y_true, y_pred, labels, sample_weight)
y_true = check_array(
y_true,
dtype=None,
ensure_2d=False,
ensure_all_finite=False,
ensure_min_samples=0,
)
y_pred = check_array(
y_pred,
dtype=None,
ensure_2d=False,
ensure_all_finite=False,
ensure_min_samples=0,
)
# Convert the input arrays to NumPy (on CPU) irrespective of the original
# namespace and device so as to be able to leverage the the efficient
# counting operations implemented by SciPy in the coo_matrix constructor.
# The final results will be converted back to the input namespace and device
# for the sake of consistency with other metric functions with array API support.
y_true = _convert_to_numpy(y_true, xp)
y_pred = _convert_to_numpy(y_pred, xp)
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = _convert_to_numpy(sample_weight, xp)
if len(sample_weight) > 0:
y_type, y_true, y_pred, sample_weight = _check_targets(
y_true, y_pred, sample_weight
)
else:
# This is needed to handle the special case where y_true, y_pred and
# sample_weight are all empty.
# In this case we don't pass sample_weight to _check_targets that would
# check that sample_weight is not empty and we don't reuse the returned
# sample_weight
y_type, y_true, y_pred, _ = _check_targets(y_true, y_pred)
y_true, y_pred = attach_unique(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = _convert_to_numpy(labels, xp)
n_labels = labels.size
if n_labels == 0:
raise ValueError("'labels' should contain at least one label.")
elif y_true.size == 0:
return np.zeros((n_labels, n_labels), dtype=int)
elif len(np.intersect1d(y_true, labels)) == 0:
raise ValueError("At least one label specified must be in y_true")
n_labels = labels.size
# If labels are not consecutive integers starting from zero, then
# y_true and y_pred must be converted into index form
need_index_conversion = not (
labels.dtype.kind in {"i", "u", "b"}
and np.all(labels == np.arange(n_labels))
and y_true.min() >= 0
and y_pred.min() >= 0
)
if need_index_conversion:
label_to_ind = {label: index for index, label in enumerate(labels)}
y_pred = np.array([label_to_ind.get(label, n_labels + 1) for label in y_pred])
y_true = np.array([label_to_ind.get(label, n_labels + 1) for label in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
if not np.all(ind):
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {"i", "u", "b"}:
dtype = np.int64
else:
dtype = np.float32 if str(device_).startswith("mps") else np.float64
cm = coo_matrix(
(sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels),
dtype=dtype,
).toarray()
with np.errstate(all="ignore"):
if normalize == "true":
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == "pred":
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == "all":
cm = cm / cm.sum()
cm = xpx.nan_to_num(cm)
if cm.shape == (1, 1):
warnings.warn(
(
"A single label was found in 'y_true' and 'y_pred'. For the confusion "
"matrix to have the correct shape, use the 'labels' parameter to pass "
"all known labels."
),
UserWarning,
)
return xp.asarray(cm, device=device_)
@validate_params(
    {
        "y_true": ["array-like", "sparse matrix"],
        "y_pred": ["array-like", "sparse matrix"],
        "sample_weight": ["array-like", None],
        "labels": ["array-like", None],
        "samplewise": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def multilabel_confusion_matrix(
    y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False
):
    """Compute a confusion matrix for each class or sample.

    .. versionadded:: 0.21

    Compute class-wise (default) or sample-wise (samplewise=True) multilabel
    confusion matrix to evaluate the accuracy of a classification, and output
    confusion matrices for each class or sample.

    In multilabel confusion matrix :math:`MCM`, the count of true negatives
    is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
    true positives is :math:`MCM_{:,1,1}` and false positives is
    :math:`MCM_{:,0,1}`.

    Multiclass data will be treated as if binarized under a one-vs-rest
    transformation. Returned confusion matrices will be in the order of
    sorted unique labels in the union of (y_true, y_pred).

    Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.

    Parameters
    ----------
    y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
            (n_samples,)
        Ground truth (correct) target values. Sparse matrix is only supported when
        labels are of :term:`multilabel` type.

    y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \
            (n_samples,)
        Estimated targets as returned by a classifier. Sparse matrix is only
        supported when labels are of :term:`multilabel` type.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    labels : array-like of shape (n_classes,), default=None
        A list of classes or column indices to select some (or to force
        inclusion of classes absent from the data).

    samplewise : bool, default=False
        In the multilabel case, this calculates a confusion matrix per sample.

    Returns
    -------
    multi_confusion : ndarray of shape (n_outputs, 2, 2)
        A 2x2 confusion matrix corresponding to each output in the input.
        When calculating class-wise multi_confusion (default), then
        n_outputs = n_labels; when calculating sample-wise multi_confusion
        (samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
        the results will be returned in the order specified in ``labels``,
        otherwise the results will be returned in sorted order by default.

    See Also
    --------
    confusion_matrix : Compute confusion matrix to evaluate the accuracy of a
        classifier.

    Notes
    -----
    The `multilabel_confusion_matrix` calculates class-wise or sample-wise
    multilabel confusion matrices, and in multiclass tasks, labels are
    binarized under a one-vs-rest way; while
    :func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix
    for confusion between every two classes.

    Examples
    --------
    Multilabel-indicator case:

    >>> import numpy as np
    >>> from sklearn.metrics import multilabel_confusion_matrix
    >>> y_true = np.array([[1, 0, 1],
    ...                    [0, 1, 0]])
    >>> y_pred = np.array([[1, 0, 0],
    ...                    [0, 1, 1]])
    >>> multilabel_confusion_matrix(y_true, y_pred)
    array([[[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[0, 1],
            [1, 0]]])

    Multiclass case:

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> multilabel_confusion_matrix(y_true, y_pred,
    ...                             labels=["ant", "bird", "cat"])
    array([[[3, 1],
            [0, 2]],
    <BLANKLINE>
           [[5, 0],
            [1, 0]],
    <BLANKLINE>
           [[2, 1],
            [1, 2]]])
    """
    y_true, y_pred = attach_unique(y_true, y_pred)
    # xp/device_: array-API namespace and device shared by the inputs.
    xp, _, device_ = get_namespace_and_device(y_true, y_pred, sample_weight)
    y_type, y_true, y_pred, sample_weight = _check_targets(
        y_true, y_pred, sample_weight
    )
    if y_type not in ("binary", "multiclass", "multilabel-indicator"):
        raise ValueError("%s is not supported" % y_type)
    present_labels = unique_labels(y_true, y_pred)
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        labels = xp.asarray(labels, device=device_)
        n_labels = labels.shape[0]
        # Put the user-requested labels first; append any remaining observed
        # labels so that label encoding below covers every class, while the
        # first `n_labels` entries still drive the output order/length.
        labels = xp.concat(
            [labels, xpx.setdiff1d(present_labels, labels, assume_unique=True, xp=xp)],
            axis=-1,
        )
    if y_true.ndim == 1:
        # Binary / multiclass targets: binarize one-vs-rest by encoding the
        # labels to integers and counting with bincount.
        if samplewise:
            raise ValueError(
                "Samplewise metrics are not available outside of "
                "multilabel classification."
            )
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_
        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = sample_weight[tp]
        else:
            tp_bins_weights = None
        if tp_bins.shape[0]:
            tp_sum = _bincount(
                tp_bins, weights=tp_bins_weights, minlength=labels.shape[0], xp=xp
            )
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = xp.zeros(labels.shape[0])
        if y_pred.shape[0]:
            pred_sum = _bincount(
                y_pred, weights=sample_weight, minlength=labels.shape[0], xp=xp
            )
        if y_true.shape[0]:
            true_sum = _bincount(
                y_true, weights=sample_weight, minlength=labels.shape[0], xp=xp
            )
        # Retain only selected labels
        indices = xp.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = xp.take(tp_sum, indices, axis=0)
        true_sum = xp.take(true_sum, indices, axis=0)
        pred_sum = xp.take(pred_sum, indices, axis=0)
    else:
        # Multilabel-indicator targets: sum over classes (axis 0) by default,
        # or over samples (axis 1) when samplewise=True.
        sum_axis = 1 if samplewise else 0
        # All labels are index integers for multilabel.
        # Select labels:
        if labels.shape != present_labels.shape or xp.any(
            xp.not_equal(labels, present_labels)
        ):
            if xp.max(labels) > xp.max(present_labels):
                raise ValueError(
                    "All labels must be in [0, n labels) for "
                    "multilabel targets. "
                    "Got %d > %d" % (xp.max(labels), xp.max(present_labels))
                )
            if xp.min(labels) < 0:
                raise ValueError(
                    "All labels must be in [0, n labels) for "
                    "multilabel targets. "
                    "Got %d < 0" % xp.min(labels)
                )
        if n_labels is not None:
            # Restrict to the requested columns, in the requested order.
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]
        if issparse(y_true) or issparse(y_pred):
            true_and_pred = y_true.multiply(y_pred)
        else:
            true_and_pred = xp.multiply(y_true, y_pred)
        # calculate weighted counts
        tp_sum = _count_nonzero(
            true_and_pred,
            axis=sum_axis,
            sample_weight=sample_weight,
            xp=xp,
            device=device_,
        )
        pred_sum = _count_nonzero(
            y_pred,
            axis=sum_axis,
            sample_weight=sample_weight,
            xp=xp,
            device=device_,
        )
        true_sum = _count_nonzero(
            y_true,
            axis=sum_axis,
            sample_weight=sample_weight,
            xp=xp,
            device=device_,
        )
    # Derive the per-output error counts from the three sums.
    fp = pred_sum - tp_sum
    fn = true_sum - tp_sum
    tp = tp_sum
    # True negatives are everything that is left once tp, fp and fn are
    # removed from the (possibly weighted) total per class/sample.
    if sample_weight is not None and samplewise:
        tp = xp.asarray(tp)
        fp = xp.asarray(fp)
        fn = xp.asarray(fn)
        tn = sample_weight * y_true.shape[1] - tp - fp - fn
    elif sample_weight is not None:
        tn = xp.sum(sample_weight) - tp - fp - fn
    elif samplewise:
        tn = y_true.shape[1] - tp - fp - fn
    else:
        tn = y_true.shape[0] - tp - fp - fn
    # Columns [tn, fp, fn, tp] per output, reshaped to one 2x2 matrix each:
    # [[tn, fp], [fn, tp]].
    return xp.reshape(xp.stack([tn, fp, fn, tp]).T, (-1, 2, 2))
@validate_params(
{
"y1": ["array-like"],
"y2": ["array-like"],
"labels": ["array-like", None],
"weights": [StrOptions({"linear", "quadratic"}), None],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def cohen_kappa_score(y1, y2, *, labels=None, weights=None, sample_weight=None):
r"""Compute Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array-like of shape (n_samples,)
Labels assigned by the first annotator.
y2 : array-like of shape (n_samples,)
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array-like of shape (n_classes,), default=None
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_scorer.py | sklearn/metrics/_scorer.py | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`~sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import copy
import warnings
from collections import Counter
from functools import partial
from inspect import signature
from numbers import Integral
from traceback import format_exc
import numpy as np
from sklearn.base import is_regressor
from sklearn.metrics import (
accuracy_score,
average_precision_score,
balanced_accuracy_score,
brier_score_loss,
class_likelihood_ratios,
d2_absolute_error_score,
d2_brier_score,
d2_log_loss_score,
explained_variance_score,
f1_score,
jaccard_score,
log_loss,
matthews_corrcoef,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_gamma_deviance,
mean_poisson_deviance,
mean_squared_error,
mean_squared_log_error,
median_absolute_error,
precision_score,
r2_score,
recall_score,
roc_auc_score,
root_mean_squared_error,
root_mean_squared_log_error,
top_k_accuracy_score,
)
from sklearn.metrics.cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
fowlkes_mallows_score,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
rand_score,
v_measure_score,
)
from sklearn.utils import Bunch
from sklearn.utils._param_validation import (
HasMethods,
StrOptions,
validate_params,
)
from sklearn.utils._response import _get_response_values
from sklearn.utils.metadata_routing import (
MetadataRequest,
MetadataRouter,
MethodMapping,
_MetadataRequester,
_raise_for_params,
_routing_enabled,
get_routing_for_object,
process_routing,
)
from sklearn.utils.validation import _check_response_method
def _cached_call(cache, estimator, response_method, *args, **kwargs):
"""Call estimator with method and args and kwargs."""
if cache is not None and response_method in cache:
return cache[response_method]
result, _ = _get_response_values(
estimator, *args, response_method=response_method, **kwargs
)
if cache is not None:
cache[response_method] = result
return result
def _get_func_repr_or_name(func):
"""Returns the name of the function or repr of a partial."""
if isinstance(func, partial):
return repr(func)
return func.__name__
class _MultimetricScorer:
    """Callable for multimetric scoring used to avoid repeated calls
    to `predict_proba`, `predict`, and `decision_function`.

    `_MultimetricScorer` will return a dictionary of scores corresponding to
    the scorers in the dictionary. Note that `_MultimetricScorer` can be
    created with a dictionary with one key (i.e. only one actual scorer).

    Parameters
    ----------
    scorers : dict
        Dictionary mapping names to callable scorers.

    raise_exc : bool, default=True
        Whether to raise the exception in `__call__` or not. If set to `False`
        a formatted string of the exception details is passed as result of
        the failing scorer.
    """

    def __init__(self, *, scorers, raise_exc=True):
        self._scorers = scorers
        self._raise_exc = raise_exc

    def __call__(self, estimator, *args, **kwargs):
        """Evaluate predicted target values.

        Returns a dict mapping each scorer name to its score, or (when
        `raise_exc=False`) to the formatted traceback of a failing scorer.
        """
        scores = {}
        # Share response-method results between scorers only when at least
        # two of them would call the same method (see _use_cache).
        cache = {} if self._use_cache(estimator) else None
        cached_call = partial(_cached_call, cache)
        if _routing_enabled():
            # Metadata routing decides, per scorer, which kwargs to forward.
            routed_params = process_routing(self, "score", **kwargs)
        else:
            # Scorers all get the same args, and get all of them except sample_weight.
            # Only the ones having `sample_weight` in their signature will receive it.
            # This does not work for metadata other than sample_weight, and for those
            # users have to enable metadata routing.
            common_kwargs = {
                arg: value for arg, value in kwargs.items() if arg != "sample_weight"
            }
            routed_params = Bunch(
                **{name: Bunch(score=common_kwargs.copy()) for name in self._scorers}
            )
            if "sample_weight" in kwargs:
                for name, scorer in self._scorers.items():
                    if scorer._accept_sample_weight():
                        routed_params[name].score["sample_weight"] = kwargs[
                            "sample_weight"
                        ]
        for name, scorer in self._scorers.items():
            try:
                if isinstance(scorer, _BaseScorer):
                    # Internal scorers accept the caching callable directly.
                    score = scorer._score(
                        cached_call, estimator, *args, **routed_params.get(name).score
                    )
                else:
                    score = scorer(estimator, *args, **routed_params.get(name).score)
                scores[name] = score
            except Exception as e:
                if self._raise_exc:
                    raise e
                else:
                    # Best-effort mode: record the traceback string as result.
                    scores[name] = format_exc()
        return scores

    def __repr__(self):
        # e.g. MultiMetricScorer("accuracy", "r2")
        scorers = ", ".join([f'"{s}"' for s in self._scorers])
        return f"MultiMetricScorer({scorers})"

    def _accept_sample_weight(self):
        """Return True if at least one wrapped scorer accepts sample_weight."""
        # TODO(slep006): remove when metadata routing is the only way
        return any(scorer._accept_sample_weight() for scorer in self._scorers.values())

    def _use_cache(self, estimator):
        """Return True if using a cache is beneficial, thus when a response method will
        be called several time.
        """
        if len(self._scorers) == 1:  # Only one scorer
            return False
        # Count how many internal scorers resolve to each response method.
        counter = Counter(
            [
                _check_response_method(estimator, scorer._response_method).__name__
                for scorer in self._scorers.values()
                if isinstance(scorer, _BaseScorer)
            ]
        )
        if any(val > 1 for val in counter.values()):
            # The exact same response method or iterable of response methods
            # will be called more than once.
            return True
        return False

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.3

        Returns
        -------
        routing : MetadataRouter
            A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        return MetadataRouter(owner=self).add(
            **self._scorers,
            method_mapping=MethodMapping().add(caller="score", callee="score"),
        )
class _BaseScorer(_MetadataRequester):
    """Base scorer that is used as `scorer(estimator, X, y_true)`.

    Parameters
    ----------
    score_func : callable
        Metric invoked as `score_func(y_true, y_pred, **kwargs)`.

    sign : int
        Either 1 or -1; the reported score is `sign * score_func(...)`, so a
        negative sign turns a loss (lower is better) into a score (higher is
        better).

    kwargs : dict
        Extra keyword arguments forwarded to `score_func`.

    response_method : str
        Name of the estimator method whose output is fed to `score_func`
        (e.g. "predict", "predict_proba").
    """

    def __init__(self, score_func, sign, kwargs, response_method="predict"):
        self._score_func = score_func
        self._sign = sign
        self._kwargs = kwargs
        self._response_method = response_method

    def _get_pos_label(self):
        # Prefer an explicitly supplied pos_label; otherwise fall back to the
        # default declared in the metric's own signature, if any.
        if "pos_label" in self._kwargs:
            return self._kwargs["pos_label"]
        metric_params = signature(self._score_func).parameters
        if "pos_label" in metric_params:
            return metric_params["pos_label"].default
        return None

    def _accept_sample_weight(self):
        # TODO(slep006): remove when metadata routing is the only way
        return "sample_weight" in signature(self._score_func).parameters

    def __repr__(self):
        pieces = [_get_func_repr_or_name(self._score_func)]
        if self._sign <= 0:
            pieces.append("greater_is_better=False")
        pieces.append(f"response_method={self._response_method!r}")
        pieces.extend(f"{key}={value}" for key, value in self._kwargs.items())
        return f"make_scorer({', '.join(pieces)})"

    def _routing_repr(self):
        return self.__repr__()

    def __call__(self, estimator, X, y_true, sample_weight=None, **kwargs):
        """Score `estimator`'s predictions on `X` against `y_true`.

        Parameters
        ----------
        estimator : object
            Fitted estimator whose response method is evaluated.

        X : {array-like, sparse matrix}
            Data passed to the estimator's response method.

        y_true : array-like
            Ground-truth targets for `X`.

        sample_weight : array-like of shape (n_samples,), default=None
            Optional per-sample weights forwarded to the metric.

        **kwargs : dict
            Additional metadata forwarded to the metric. Refer to
            :func:`set_score_request` for more details.

            Only available if `enable_metadata_routing=True`. See the
            :ref:`User Guide <metadata_routing>`.

            .. versionadded:: 1.3

        Returns
        -------
        score : float
            `sign * score_func(...)` evaluated on the estimator's output.
        """
        # Reject extra kwargs when metadata routing is disabled.
        _raise_for_params(kwargs, self, None)
        forwarded = copy.deepcopy(kwargs)
        if sample_weight is not None:
            forwarded["sample_weight"] = sample_weight
        # No cache here: a single scorer calls its response method only once.
        return self._score(
            partial(_cached_call, None), estimator, X, y_true, **forwarded
        )

    def _warn_overlap(self, message, kwargs):
        """Warn when metadata keys collide with kwargs fixed at construction.

        Compares the keys of ``self._kwargs`` against the metadata ``kwargs``
        and emits a ``UserWarning`` listing any overlap.
        """
        fixed = set() if self._kwargs is None else set(self._kwargs)
        overlap = fixed & set(kwargs)
        if overlap:
            warnings.warn(
                f"{message} Overlapping parameters are: {overlap}", UserWarning
            )

    def set_score_request(self, **kwargs):
        """Set requested parameters by the scorer.

        Please see :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.3

        Parameters
        ----------
        kwargs : dict
            Arguments should be of the form ``param_name=alias``, and `alias`
            can be one of ``{True, False, None, str}``.
        """
        if not _routing_enabled():
            raise RuntimeError(
                "This method is only available when metadata routing is enabled."
                " You can enable it using"
                " sklearn.set_config(enable_metadata_routing=True)."
            )
        self._warn_overlap(
            message=(
                "You are setting metadata request for parameters which are "
                "already set as kwargs for this metric. These set values will be "
                "overridden by passed metadata if provided. Please pass them either "
                "as metadata or kwargs to `make_scorer`."
            ),
            kwargs=kwargs,
        )
        # A fresh request each call, so repeated calls do not accumulate.
        self._metadata_request = MetadataRequest(owner=self)
        for param, alias in kwargs.items():
            self._metadata_request.score.add_request(param=param, alias=alias)
        return self
class _Scorer(_BaseScorer):
    """Concrete scorer feeding one estimator response method into the metric."""

    def _score(self, method_caller, estimator, X, y_true, **kwargs):
        """Compute `sign * score_func` on `estimator`'s predictions for `X`.

        Parameters
        ----------
        method_caller : callable
            Callable `(estimator, method_name, X, **kwargs)` returning the
            estimator's predictions, possibly served from a cache.

        estimator : object
            Fitted estimator to evaluate.

        X : {array-like, sparse matrix}
            Data handed to the estimator's response method.

        y_true : array-like
            Ground-truth targets for `X`. These must be class labels, not
            decision function values.

        **kwargs : dict
            Routed metadata forwarded to the metric. Refer to
            :func:`set_score_request` for more details.

        Returns
        -------
        score : float
            Signed metric value.
        """
        self._warn_overlap(
            message=(
                "There is an overlap between set kwargs of this scorer instance and"
                " passed metadata. Please pass them either as kwargs to `make_scorer`"
                " or metadata, but not both."
            ),
            kwargs=kwargs,
        )
        # pos_label is meaningless for regressors.
        if is_regressor(estimator):
            pos_label = None
        else:
            pos_label = self._get_pos_label()
        response_method = _check_response_method(estimator, self._response_method)
        y_pred = method_caller(
            estimator,
            _get_response_method_name(response_method),
            X,
            pos_label=pos_label,
        )
        # Routed metadata takes precedence over kwargs fixed at construction.
        merged_kwargs = dict(self._kwargs)
        merged_kwargs.update(kwargs)
        return self._sign * self._score_func(y_true, y_pred, **merged_kwargs)
@validate_params(
    {
        "scoring": [str, callable, None],
    },
    prefer_skip_nested_validation=True,
)
def get_scorer(scoring):
    """Get a scorer from string.

    Read more in the :ref:`User Guide <scoring_parameter>`.

    :func:`~sklearn.metrics.get_scorer_names` lists every name accepted by
    this function.

    Parameters
    ----------
    scoring : str, callable or None
        Name of a predefined scorer. Callables and `None` are returned
        unchanged.

    Returns
    -------
    scorer : callable
        The scorer.

    Notes
    -----
    A string always yields a fresh copy of the registered scorer object:
    calling `get_scorer` twice with the same name returns two distinct
    scorer objects.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.dummy import DummyClassifier
    >>> from sklearn.metrics import get_scorer
    >>> X = np.reshape([0, 1, -1, -0.5, 2], (-1, 1))
    >>> y = np.array([0, 1, 1, 0, 1])
    >>> classifier = DummyClassifier(strategy="constant", constant=0).fit(X, y)
    >>> accuracy = get_scorer("accuracy")
    >>> accuracy(classifier, X, y)
    0.4
    """
    if not isinstance(scoring, str):
        # Callables and None pass through untouched.
        return scoring
    try:
        return copy.deepcopy(_SCORERS[scoring])
    except KeyError:
        raise ValueError(
            "%r is not a valid scoring value. "
            "Use sklearn.metrics.get_scorer_names() "
            "to get valid options." % scoring
        )
class _PassthroughScorer(_MetadataRequester):
    """Scorer used when ``scoring`` is `None`: defer to ``estimator.score``."""

    def __init__(self, estimator):
        self._estimator = estimator

    def __call__(self, estimator, *args, **kwargs):
        """Forward the call to `estimator.score`."""
        return estimator.score(*args, **kwargs)

    def __repr__(self):
        return f"{type(self._estimator).__name__}.score"

    def _routing_repr(self):
        return self.__repr__()

    def _accept_sample_weight(self):
        # TODO(slep006): remove when metadata routing is the only way
        score_params = signature(self._estimator.score).parameters
        return "sample_weight" in score_params

    def get_metadata_routing(self):
        """Get requested data properties.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.3

        Returns
        -------
        routing : MetadataRouter
            A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        return get_routing_for_object(self._estimator)
def _check_multimetric_scoring(estimator, scoring):
"""Check the scoring parameter in cases when multiple metrics are allowed.
In addition, multimetric scoring leverages a caching mechanism to not call the same
estimator response method multiple times. Hence, the scorer is modified to only use
a single response method given a list of response methods and the estimator.
Parameters
----------
estimator : sklearn estimator instance
The estimator for which the scoring will be applied.
scoring : list, tuple or dict
Strategy to evaluate the performance of the cross-validated model on
the test set.
The possibilities are:
- a list or tuple of unique strings;
- a callable returning a dictionary where they keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables a values.
See :ref:`multimetric_grid_search` for an example.
Returns
-------
scorers_dict : dict
A dict mapping each scorer name to its validated scorer.
"""
err_msg_generic = (
f"scoring is invalid (got {scoring!r}). Refer to the "
"scoring glossary for details: "
"https://scikit-learn.org/stable/glossary.html#term-scoring"
)
if isinstance(scoring, (list, tuple, set)):
err_msg = (
"The list/tuple elements must be unique strings of predefined scorers. "
)
try:
keys = set(scoring)
except TypeError as e:
raise ValueError(err_msg) from e
if len(keys) != len(scoring):
raise ValueError(
f"{err_msg} Duplicate elements were found in"
f" the given list. {scoring!r}"
)
elif len(keys) > 0:
if not all(isinstance(k, str) for k in keys):
if any(callable(k) for k in keys):
raise ValueError(
f"{err_msg} One or more of the elements "
"were callables. Use a dict of score "
"name mapped to the scorer callable. "
f"Got {scoring!r}"
)
else:
raise ValueError(
f"{err_msg} Non-string types were found "
f"in the given list. Got {scoring!r}"
)
scorers = {
scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring
}
else:
raise ValueError(f"{err_msg} Empty list was given. {scoring!r}")
elif isinstance(scoring, dict):
keys = set(scoring)
if not all(isinstance(k, str) for k in keys):
raise ValueError(
"Non-string types were found in the keys of "
f"the given dict. scoring={scoring!r}"
)
if len(keys) == 0:
raise ValueError(f"An empty dict was passed. {scoring!r}")
scorers = {
key: check_scoring(estimator, scoring=scorer)
for key, scorer in scoring.items()
}
else:
raise ValueError(err_msg_generic)
return scorers
def _get_response_method_name(response_method):
try:
return response_method.__name__
except AttributeError:
return _get_response_method_name(response_method.func)
@validate_params(
    {
        "score_func": [callable],
        "response_method": [
            list,
            tuple,
            StrOptions({"predict", "predict_proba", "decision_function"}),
        ],
        "greater_is_better": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_scorer(
    score_func, *, response_method="predict", greater_is_better=True, **kwargs
):
    """Make a scorer from a performance metric or loss function.

    The returned object wraps `score_func` so that it can be called as
    ``scorer(estimator, X, y_true, **kwargs)`` and used anywhere scikit-learn
    accepts a ``scoring`` parameter. The `response_method` parameter selects
    which estimator method produces the predictions handed to `score_func`.

    Read more in the :ref:`User Guide <scoring_callable>`.

    Parameters
    ----------
    score_func : callable
        Score function (or loss function) with signature
        ``score_func(y, y_pred, **kwargs)``.

    response_method : {"predict_proba", "decision_function", "predict"} or \
            list/tuple of such str, default="predict"
        Estimator method used to obtain the predictions
        (:term:`predict_proba`, :term:`decision_function` or
        :term:`predict`). A list or tuple gives method names in order of
        preference; the first one implemented by the estimator is used.

        .. versionadded:: 1.4

    greater_is_better : bool, default=True
        Whether `score_func` is a score function (default), meaning high is
        good, or a loss function, meaning low is good. In the latter case, the
        scorer object will sign-flip the outcome of the `score_func`.

    **kwargs : additional arguments
        Additional parameters to be passed to `score_func`.

    Returns
    -------
    scorer : callable
        Callable object that returns a scalar score; greater is better.

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score, make_scorer
    >>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
    >>> ftwo_scorer
    make_scorer(fbeta_score, response_method='predict', beta=2)
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
    ...                     scoring=ftwo_scorer)
    """
    # A negative sign turns a loss (lower is better) into a score.
    if greater_is_better:
        sign = 1
    else:
        sign = -1
    return _Scorer(score_func, sign, kwargs, response_method)
# Standard regression scores
# The `neg_*` scorers wrap error/loss metrics with greater_is_better=False so
# that, after the sign flip, a larger scorer value is always better.
explained_variance_scorer = make_scorer(explained_variance_score)
r2_scorer = make_scorer(r2_score)
neg_max_error_scorer = make_scorer(max_error, greater_is_better=False)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False)
neg_mean_squared_log_error_scorer = make_scorer(
    mean_squared_log_error, greater_is_better=False
)
neg_mean_absolute_error_scorer = make_scorer(
    mean_absolute_error, greater_is_better=False
)
neg_mean_absolute_percentage_error_scorer = make_scorer(
    mean_absolute_percentage_error, greater_is_better=False
)
neg_median_absolute_error_scorer = make_scorer(
    median_absolute_error, greater_is_better=False
)
neg_root_mean_squared_error_scorer = make_scorer(
    root_mean_squared_error, greater_is_better=False
)
neg_root_mean_squared_log_error_scorer = make_scorer(
    root_mean_squared_log_error, greater_is_better=False
)
neg_mean_poisson_deviance_scorer = make_scorer(
    mean_poisson_deviance, greater_is_better=False
)
neg_mean_gamma_deviance_scorer = make_scorer(
    mean_gamma_deviance, greater_is_better=False
)
# D2 scores; the Brier/log-loss variants need probability estimates.
d2_absolute_error_scorer = make_scorer(d2_absolute_error_score)
d2_brier_score_scorer = make_scorer(d2_brier_score, response_method="predict_proba")
d2_log_loss_scorer = make_scorer(d2_log_loss_score, response_method="predict_proba")
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
balanced_accuracy_scorer = make_scorer(balanced_accuracy_score)
matthews_corrcoef_scorer = make_scorer(matthews_corrcoef)
def positive_likelihood_ratio(y_true, y_pred):
    """Return LR+, with undefined ratios replaced by 1.0."""
    ratios = class_likelihood_ratios(y_true, y_pred, replace_undefined_by=1.0)
    return ratios[0]
def negative_likelihood_ratio(y_true, y_pred):
    """Return LR-, with undefined ratios replaced by 1.0."""
    ratios = class_likelihood_ratios(y_true, y_pred, replace_undefined_by=1.0)
    return ratios[1]
positive_likelihood_ratio_scorer = make_scorer(positive_likelihood_ratio)
neg_negative_likelihood_ratio_scorer = make_scorer(
    negative_likelihood_ratio, greater_is_better=False
)
# Score functions that need decision values
# These prefer `decision_function` and fall back to `predict_proba`.
top_k_accuracy_scorer = make_scorer(
    top_k_accuracy_score,
    greater_is_better=True,
    response_method=("decision_function", "predict_proba"),
)
roc_auc_scorer = make_scorer(
    roc_auc_score,
    greater_is_better=True,
    response_method=("decision_function", "predict_proba"),
)
average_precision_scorer = make_scorer(
    average_precision_score,
    response_method=("decision_function", "predict_proba"),
)
# Multiclass ROC-AUC variants (one-vs-one / one-vs-rest) require
# probability estimates.
roc_auc_ovo_scorer = make_scorer(
    roc_auc_score, response_method="predict_proba", multi_class="ovo"
)
roc_auc_ovo_weighted_scorer = make_scorer(
    roc_auc_score,
    response_method="predict_proba",
    multi_class="ovo",
    average="weighted",
)
roc_auc_ovr_scorer = make_scorer(
    roc_auc_score, response_method="predict_proba", multi_class="ovr"
)
roc_auc_ovr_weighted_scorer = make_scorer(
    roc_auc_score,
    response_method="predict_proba",
    multi_class="ovr",
    average="weighted",
)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(
    log_loss, greater_is_better=False, response_method="predict_proba"
)
neg_brier_score_scorer = make_scorer(
    brier_score_loss, greater_is_better=False, response_method="predict_proba"
)
# NOTE(review): constructed identically to neg_brier_score_scorer above —
# presumably kept as an alias for backward compatibility; confirm.
brier_score_loss_scorer = make_scorer(
    brier_score_loss, greater_is_better=False, response_method="predict_proba"
)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
rand_scorer = make_scorer(rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
# Registry mapping public scoring-string names to scorer objects; extended
# further below with precision/recall/f1/jaccard variants.
_SCORERS = dict(
    explained_variance=explained_variance_scorer,
    r2=r2_scorer,
    neg_max_error=neg_max_error_scorer,
    matthews_corrcoef=matthews_corrcoef_scorer,
    neg_median_absolute_error=neg_median_absolute_error_scorer,
    neg_mean_absolute_error=neg_mean_absolute_error_scorer,
    neg_mean_absolute_percentage_error=neg_mean_absolute_percentage_error_scorer,
    neg_mean_squared_error=neg_mean_squared_error_scorer,
    neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
    neg_root_mean_squared_error=neg_root_mean_squared_error_scorer,
    neg_root_mean_squared_log_error=neg_root_mean_squared_log_error_scorer,
    neg_mean_poisson_deviance=neg_mean_poisson_deviance_scorer,
    neg_mean_gamma_deviance=neg_mean_gamma_deviance_scorer,
    d2_absolute_error_score=d2_absolute_error_scorer,
    d2_log_loss_score=d2_log_loss_scorer,
    d2_brier_score=d2_brier_score_scorer,
    accuracy=accuracy_scorer,
    top_k_accuracy=top_k_accuracy_scorer,
    roc_auc=roc_auc_scorer,
    roc_auc_ovr=roc_auc_ovr_scorer,
    roc_auc_ovo=roc_auc_ovo_scorer,
    roc_auc_ovr_weighted=roc_auc_ovr_weighted_scorer,
    roc_auc_ovo_weighted=roc_auc_ovo_weighted_scorer,
    balanced_accuracy=balanced_accuracy_scorer,
    average_precision=average_precision_scorer,
    neg_log_loss=neg_log_loss_scorer,
    neg_brier_score=neg_brier_score_scorer,
    positive_likelihood_ratio=positive_likelihood_ratio_scorer,
    neg_negative_likelihood_ratio=neg_negative_likelihood_ratio_scorer,
    # Cluster metrics that use supervised evaluation
    adjusted_rand_score=adjusted_rand_scorer,
    rand_score=rand_scorer,
    homogeneity_score=homogeneity_scorer,
    completeness_score=completeness_scorer,
    v_measure_score=v_measure_scorer,
    mutual_info_score=mutual_info_scorer,
    adjusted_mutual_info_score=adjusted_mutual_info_scorer,
    normalized_mutual_info_score=normalized_mutual_info_scorer,
    fowlkes_mallows_score=fowlkes_mallows_scorer,
)
def get_scorer_names():
    """Get the names of all available scorers.

    These names can be passed to :func:`~sklearn.metrics.get_scorer` to
    retrieve the scorer object.

    Returns
    -------
    list of str
        Names of all available scorers.

    Examples
    --------
    >>> from sklearn.metrics import get_scorer_names
    >>> all_scorers = get_scorer_names()
    >>> type(all_scorers)
    <class 'list'>
    >>> all_scorers[:3]
    ['accuracy', 'adjusted_mutual_info_score', 'adjusted_rand_score']
    >>> "roc_auc" in all_scorers
    True
    """
    # Iterating a dict yields its keys, so `.keys()` is unnecessary.
    return sorted(_SCORERS)
# Register the binary version of each thresholded classification metric under
# its bare name, plus one averaged variant per multiclass/multilabel scheme.
for name, metric in (
    ("precision", precision_score),
    ("recall", recall_score),
    ("f1", f1_score),
    ("jaccard", jaccard_score),
):
    _SCORERS[name] = make_scorer(metric, average="binary")
    for average in ("macro", "micro", "samples", "weighted"):
        _SCORERS[f"{name}_{average}"] = make_scorer(
            metric, pos_label=None, average=average
        )
@validate_params(
{
"estimator": [HasMethods("fit"), None],
"scoring": [
StrOptions(set(get_scorer_names())),
callable,
list,
set,
tuple,
dict,
None,
],
"allow_none": ["boolean"],
"raise_exc": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def check_scoring(estimator=None, scoring=None, *, allow_none=False, raise_exc=True):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit' or None, default=None
The object to use to fit the data. If `None`, then this function may error
depending on `allow_none`.
scoring : str, callable, list, tuple, set, or dict, default=None
Scorer to use. If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_string_names`);
- a callable (see :ref:`scoring_callable`) that returns a single value;
- `None`, the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list, tuple or set of unique strings;
- a callable returning a dictionary where the keys are the metric names and the
values are the metric scorers;
- a dictionary with metric names as keys and callables a values. The callables
need to have the signature `callable(estimator, X, y)`.
allow_none : bool, default=False
Whether to return None or raise an error if no `scoring` is specified and the
estimator has no `score` method.
raise_exc : bool, default=True
Whether to raise an exception (if a subset of the scorers in multimetric scoring
fails) or to return an error code.
- If set to `True`, raises the failing scorer's exception.
- If set to `False`, a formatted string of the exception details is passed as
result of the failing scorer(s).
This applies if `scoring` is list, tuple, set, or dict. Ignored if `scoring` is
a str or a callable.
.. versionadded:: 1.6
Returns
-------
scoring : callable
A scorer callable object / function with signature ``scorer(estimator, X, y)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.metrics import check_scoring
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> classifier = DecisionTreeClassifier(max_depth=2).fit(X, y)
>>> scorer = check_scoring(classifier, scoring='accuracy')
>>> scorer(classifier, X, y)
0.96...
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/__init__.py | sklearn/metrics/__init__.py | """Score functions, performance metrics, pairwise metrics and distance computations."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.metrics import cluster
from sklearn.metrics._classification import (
accuracy_score,
balanced_accuracy_score,
brier_score_loss,
class_likelihood_ratios,
classification_report,
cohen_kappa_score,
confusion_matrix,
d2_brier_score,
d2_log_loss_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_score,
log_loss,
matthews_corrcoef,
multilabel_confusion_matrix,
precision_recall_fscore_support,
precision_score,
recall_score,
zero_one_loss,
)
from sklearn.metrics._dist_metrics import DistanceMetric
from sklearn.metrics._plot.confusion_matrix import ConfusionMatrixDisplay
from sklearn.metrics._plot.det_curve import DetCurveDisplay
from sklearn.metrics._plot.precision_recall_curve import PrecisionRecallDisplay
from sklearn.metrics._plot.regression import PredictionErrorDisplay
from sklearn.metrics._plot.roc_curve import RocCurveDisplay
from sklearn.metrics._ranking import (
auc,
average_precision_score,
confusion_matrix_at_thresholds,
coverage_error,
dcg_score,
det_curve,
label_ranking_average_precision_score,
label_ranking_loss,
ndcg_score,
precision_recall_curve,
roc_auc_score,
roc_curve,
top_k_accuracy_score,
)
from sklearn.metrics._regression import (
d2_absolute_error_score,
d2_pinball_score,
d2_tweedie_score,
explained_variance_score,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_gamma_deviance,
mean_pinball_loss,
mean_poisson_deviance,
mean_squared_error,
mean_squared_log_error,
mean_tweedie_deviance,
median_absolute_error,
r2_score,
root_mean_squared_error,
root_mean_squared_log_error,
)
from sklearn.metrics._scorer import (
check_scoring,
get_scorer,
get_scorer_names,
make_scorer,
)
from sklearn.metrics.cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
calinski_harabasz_score,
completeness_score,
consensus_score,
davies_bouldin_score,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
silhouette_samples,
silhouette_score,
v_measure_score,
)
from sklearn.metrics.pairwise import (
euclidean_distances,
nan_euclidean_distances,
pairwise_distances,
pairwise_distances_argmin,
pairwise_distances_argmin_min,
pairwise_distances_chunked,
pairwise_kernels,
)
# Public API of :mod:`sklearn.metrics`; entries are kept sorted (ASCII order,
# so the Display classes come before the lowercase function names).
__all__ = [
    "ConfusionMatrixDisplay",
    "DetCurveDisplay",
    "DistanceMetric",
    "PrecisionRecallDisplay",
    "PredictionErrorDisplay",
    "RocCurveDisplay",
    "accuracy_score",
    "adjusted_mutual_info_score",
    "adjusted_rand_score",
    "auc",
    "average_precision_score",
    "balanced_accuracy_score",
    "brier_score_loss",
    "calinski_harabasz_score",
    "check_scoring",
    "class_likelihood_ratios",
    "classification_report",
    "cluster",
    "cohen_kappa_score",
    "completeness_score",
    "confusion_matrix",
    "confusion_matrix_at_thresholds",
    "consensus_score",
    "coverage_error",
    "d2_absolute_error_score",
    "d2_brier_score",
    "d2_log_loss_score",
    "d2_pinball_score",
    "d2_tweedie_score",
    "davies_bouldin_score",
    "dcg_score",
    "det_curve",
    "euclidean_distances",
    "explained_variance_score",
    "f1_score",
    "fbeta_score",
    "fowlkes_mallows_score",
    "get_scorer",
    "get_scorer_names",
    "hamming_loss",
    "hinge_loss",
    "homogeneity_completeness_v_measure",
    "homogeneity_score",
    "jaccard_score",
    "label_ranking_average_precision_score",
    "label_ranking_loss",
    "log_loss",
    "make_scorer",
    "matthews_corrcoef",
    "max_error",
    "mean_absolute_error",
    "mean_absolute_percentage_error",
    "mean_gamma_deviance",
    "mean_pinball_loss",
    "mean_poisson_deviance",
    "mean_squared_error",
    "mean_squared_log_error",
    "mean_tweedie_deviance",
    "median_absolute_error",
    "multilabel_confusion_matrix",
    "mutual_info_score",
    "nan_euclidean_distances",
    "ndcg_score",
    "normalized_mutual_info_score",
    "pair_confusion_matrix",
    "pairwise_distances",
    "pairwise_distances_argmin",
    "pairwise_distances_argmin_min",
    "pairwise_distances_chunked",
    "pairwise_kernels",
    "precision_recall_curve",
    "precision_recall_fscore_support",
    "precision_score",
    "r2_score",
    "rand_score",
    "recall_score",
    "roc_auc_score",
    "roc_curve",
    "root_mean_squared_error",
    "root_mean_squared_log_error",
    "silhouette_samples",
    "silhouette_score",
    "top_k_accuracy_score",
    "v_measure_score",
    "zero_one_loss",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/pairwise.py | sklearn/metrics/pairwise.py | """Metrics for pairwise distances and affinity of sets of samples."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import math
import warnings
from functools import partial
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy.sparse import csr_matrix, issparse
from scipy.spatial import distance
from sklearn import config_context
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics._pairwise_distances_reduction import ArgKmin
from sklearn.metrics._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from sklearn.preprocessing import normalize
from sklearn.utils import check_array, gen_batches, gen_even_slices
from sklearn.utils._array_api import (
_fill_diagonal,
_find_matching_floating_dtype,
_is_numpy_namespace,
_max_precision_float_dtype,
_modify_in_place_if_numpy,
get_namespace,
get_namespace_and_device,
)
from sklearn.utils._chunking import get_chunk_n_rows
from sklearn.utils._mask import _get_mask
from sklearn.utils._missing import is_scalar_nan
from sklearn.utils._param_validation import (
Hidden,
Interval,
MissingValues,
Options,
StrOptions,
validate_params,
)
from sklearn.utils.extmath import row_norms, safe_sparse_dot
from sklearn.utils.fixes import parse_version, sp_base_version
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import _num_samples, check_non_negative
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def _find_floating_dtype_allow_sparse(X, Y, xp=None):
    """Find matching floating type, allowing for sparse input.

    Sparse inputs (and plain-numpy namespaces) go through the legacy
    `_return_float_dtype` path; other array-API namespaces delegate to
    `_find_matching_floating_dtype`.
    """
    if issparse(X) or issparse(Y) or _is_numpy_namespace(xp):
        X, Y, dtype_float = _return_float_dtype(X, Y)
    else:
        dtype_float = _find_matching_floating_dtype(X, Y, xp=xp)
    return X, Y, dtype_float
def check_pairwise_arrays(
    X,
    Y,
    *,
    precomputed=False,
    dtype="infer_float",
    accept_sparse="csr",
    ensure_all_finite=True,
    ensure_2d=True,
    copy=False,
):
    """Set X and Y appropriately and checks inputs.

    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats (or dtype if provided). Finally, the function
    checks that the size of the second dimension of the two arrays is equal, or
    the equivalent check for a precomputed distance matrix.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    precomputed : bool, default=False
        True if X is to be treated as precomputed distances to the samples in
        Y.

    dtype : str, type, list of type or None default="infer_float"
        Data type required for X and Y. If "infer_float", the dtype will be an
        appropriate float type selected by _return_float_dtype. If None, the
        dtype of the input is preserved.

        .. versionadded:: 0.18

    accept_sparse : str, bool or list/tuple of str, default='csr'
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

    ensure_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.

        .. versionadded:: 1.6
           `force_all_finite` was renamed to `ensure_all_finite`.

    ensure_2d : bool, default=True
        Whether to raise an error when the input arrays are not 2-dimensional. Setting
        this to `False` is necessary when using a custom metric with certain
        non-numerical inputs (e.g. a list of strings).

        .. versionadded:: 1.5

    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

        .. versionadded:: 0.22

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    xp, _ = get_namespace(X, Y)
    X, Y, dtype_float = _find_floating_dtype_allow_sparse(X, Y, xp=xp)
    if dtype == "infer_float":
        dtype = dtype_float
    # The same validation options apply to X and Y; build them once.
    check_kwargs = {
        "accept_sparse": accept_sparse,
        "dtype": dtype,
        "copy": copy,
        "ensure_all_finite": ensure_all_finite,
        "estimator": "check_pairwise_arrays",
        "ensure_2d": ensure_2d,
    }
    if Y is None or Y is X:
        # Validate once and alias, so `Y is X` keeps holding downstream.
        X = Y = check_array(X, **check_kwargs)
    else:
        X = check_array(X, **check_kwargs)
        Y = check_array(Y, **check_kwargs)
    if precomputed:
        if X.shape[1] != Y.shape[0]:
            raise ValueError(
                "Precomputed metric requires shape "
                "(n_queries, n_indexed). Got (%d, %d) "
                "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
            )
    elif ensure_2d and X.shape[1] != Y.shape[1]:
        # Only check the number of features if 2d arrays are enforced. Otherwise,
        # validation is left to the user for custom metrics.
        raise ValueError(
            "Incompatible dimension for X and Y matrices: "
            "X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
        )
    return X, Y
def check_paired_arrays(X, Y):
    """Set X and Y appropriately and checks inputs for paired distances.

    All paired distance metrics should use this function first to assert that
    the given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats. Finally, the function checks that the size
    of the dimensions of the two arrays are equal.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    # Paired metrics require one-to-one correspondence between rows.
    raise ValueError(
        "X and Y should be of same shape. They were respectively %r and %r long."
        % (X.shape, Y.shape)
    )
# Pairwise distances
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "Y": ["array-like", "sparse matrix", None],
        "Y_norm_squared": ["array-like", None],
        "squared": ["boolean"],
        "X_norm_squared": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def euclidean_distances(
    X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None
):
    """
    Compute the distance matrix between each pair from a feature array X and Y.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if one argument varies but the other remains unchanged, then
    `dot(x, x)` and/or `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation,
    because this equation potentially suffers from "catastrophic cancellation".
    Also, the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., :mod:`scipy.spatial.distance` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array where each row is a sample and each column is a feature.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        An array where each row is a sample and each column is a feature.
        If `None`, method uses `Y=X`.

    Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
            or (1, n_samples_Y), default=None
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
        May be ignored in some cases, see the note below.

    squared : bool, default=False
        Return squared Euclidean distances.

    X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
            or (1, n_samples_X), default=None
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``)
        May be ignored in some cases, see the note below.

    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.

    See Also
    --------
    paired_distances : Distances between pairs of elements of X and Y.

    Notes
    -----
    To achieve a better accuracy, `X_norm_squared` and `Y_norm_squared` may be
    unused if they are passed as `np.float32`.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[0., 1.],
           [1., 0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    """
    xp, _ = get_namespace(X, Y)
    X, Y = check_pairwise_arrays(X, Y)

    def _validated_norms(norms, array, name, as_row):
        # Coerce precomputed squared norms of `array` into the expected
        # orientation: a row vector (1, n) when `as_row`, else a column (n, 1).
        norms = check_array(norms, ensure_2d=False)
        original_shape = norms.shape
        n_samples = array.shape[0]
        wanted = (1, n_samples) if as_row else (n_samples, 1)
        if norms.shape == (n_samples,):
            norms = xp.reshape(norms, (1, -1) if as_row else (-1, 1))
        if norms.shape == wanted[::-1]:
            norms = norms.T
        if norms.shape != wanted:
            raise ValueError(
                f"Incompatible dimensions for {name} of shape {array.shape} and "
                f"{name}_norm_squared of shape {original_shape}."
            )
        return norms

    if X_norm_squared is not None:
        X_norm_squared = _validated_norms(X_norm_squared, X, "X", as_row=False)
    if Y_norm_squared is not None:
        Y_norm_squared = _validated_norms(Y_norm_squared, Y, "Y", as_row=True)
    return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False):
    """Computational part of euclidean_distances
    Assumes inputs are already checked.
    If norms are passed as float32, they are unused. If arrays are passed as
    float32, norms needs to be recomputed on upcast chunks.
    TODO: use a float64 accumulator in row_norms to avoid the latter.
    """
    xp, _, device_ = get_namespace_and_device(X, Y)
    # XX holds the per-row squared norms of X as a column vector, or None when
    # X is float32 — in that case norms are recomputed on upcast chunks below.
    if X_norm_squared is not None and X_norm_squared.dtype != xp.float32:
        XX = xp.reshape(X_norm_squared, (-1, 1))
    elif X.dtype != xp.float32:
        XX = row_norms(X, squared=True)[:, None]
    else:
        XX = None
    if Y is X:
        # When Y aliases X, reuse X's norms transposed into a row vector.
        YY = None if XX is None else XX.T
    else:
        # YY is the row-vector counterpart of XX, for Y.
        if Y_norm_squared is not None and Y_norm_squared.dtype != xp.float32:
            YY = xp.reshape(Y_norm_squared, (1, -1))
        elif Y.dtype != xp.float32:
            YY = row_norms(Y, squared=True)[None, :]
        else:
            YY = None
    if X.dtype == xp.float32 or Y.dtype == xp.float32:
        # To minimize precision issues with float32, we compute the distance
        # matrix on chunks of X and Y upcast to float64
        distances = _euclidean_distances_upcast(X, XX, Y, YY)
    else:
        # if dtype is already float64, no need to chunk and upcast
        # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, via broadcasting of XX/YY.
        distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True)
        distances += XX
        distances += YY
    # Clamp tiny negative values produced by floating point cancellation to 0.
    xp_zero = xp.asarray(0, device=device_, dtype=distances.dtype)
    distances = _modify_in_place_if_numpy(
        xp, xp.maximum, distances, xp_zero, out=distances
    )
    # Ensure that distances between vectors and themselves are set to 0.0.
    # This may not be the case due to floating point rounding errors.
    if X is Y:
        _fill_diagonal(distances, 0, xp=xp)
    if squared:
        return distances
    # Take the square root in place (for numpy) to obtain actual distances.
    distances = _modify_in_place_if_numpy(xp, xp.sqrt, distances, out=distances)
    return distances
@validate_params(
    {
        "X": ["array-like"],
        "Y": ["array-like", None],
        "squared": ["boolean"],
        "missing_values": [MissingValues(numeric_only=True)],
        "copy": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def nan_euclidean_distances(
    X, Y=None, *, squared=False, missing_values=np.nan, copy=True
):
    """Calculate the euclidean distances in the presence of missing values.
    Compute the euclidean distance between each pair of samples in X and Y,
    where Y=X is assumed if Y=None. When calculating the distance between a
    pair of samples, this formulation ignores feature coordinates with a
    missing value in either sample and scales up the weight of the remaining
    coordinates:
    .. code-block:: text
        dist(x,y) = sqrt(weight * sq. distance from present coordinates)
    where:
    .. code-block:: text
        weight = Total # of coordinates / # of present coordinates
    For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]`` is:
    .. math::
        \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
    If all the coordinates are missing or if there are no common present
    coordinates then NaN is returned for that pair.
    Read more in the :ref:`User Guide <metrics>`.
    .. versionadded:: 0.22
    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        An array where each row is a sample and each column is a feature.
    Y : array-like of shape (n_samples_Y, n_features), default=None
        An array where each row is a sample and each column is a feature.
        If `None`, method uses `Y=X`.
    squared : bool, default=False
        Return squared Euclidean distances.
    missing_values : np.nan, float or int, default=np.nan
        Representation of missing value.
    copy : bool, default=True
        Make and use a deep copy of X and Y (if Y exists).
    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.
    See Also
    --------
    paired_distances : Distances between pairs of elements of X and Y.
    References
    ----------
    * John K. Dixon, "Pattern Recognition with Partly Missing Data",
      IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
      10, pp. 617 - 621, Oct. 1979.
      http://ieeexplore.ieee.org/abstract/document/4310090/
    Examples
    --------
    >>> from sklearn.metrics.pairwise import nan_euclidean_distances
    >>> nan = float("NaN")
    >>> X = [[0, 1], [1, nan]]
    >>> nan_euclidean_distances(X, X) # distance between rows of X
    array([[0.        , 1.41421356],
           [1.41421356, 0.        ]])
    >>> # get distance to origin
    >>> nan_euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    """
    # NaN entries only pass validation when missing_values is NaN itself.
    ensure_all_finite = "allow-nan" if is_scalar_nan(missing_values) else True
    X, Y = check_pairwise_arrays(
        X, Y, accept_sparse=False, ensure_all_finite=ensure_all_finite, copy=copy
    )
    # Get missing mask for X
    missing_X = _get_mask(X, missing_values)
    # Get missing mask for Y
    missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
    # set missing values to zero
    X[missing_X] = 0
    Y[missing_Y] = 0
    distances = euclidean_distances(X, Y, squared=True)
    # Adjust distances for missing values
    # Zeroed-out missing entries contributed a spurious x_i**2 (resp. y_i**2)
    # term to the squared distances above; subtract those back out.
    XX = X * X
    YY = Y * Y
    distances -= np.dot(XX, missing_Y.T)
    distances -= np.dot(missing_X, YY.T)
    # Guard against small negative values from floating point round-off.
    np.clip(distances, 0, None, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        np.fill_diagonal(distances, 0.0)
    present_X = 1 - missing_X
    present_Y = present_X if Y is X else ~missing_Y
    # Number of coordinates present in *both* samples of each pair.
    present_count = np.dot(present_X, present_Y.T)
    # Pairs sharing no present coordinate have an undefined distance.
    distances[present_count == 0] = np.nan
    # avoid divide by zero
    np.maximum(1, present_count, out=present_count)
    # Scale up by (total coordinates / present coordinates), per the docstring.
    distances /= present_count
    distances *= X.shape[1]
    if not squared:
        np.sqrt(distances, out=distances)
    return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
    """Euclidean distances between X and Y.
    Assumes X and Y have float32 dtype.
    Assumes XX and YY have float64 dtype or are None.
    X and Y are upcast to float64 by chunks, which size is chosen to limit
    memory increase by approximately 10% (at least 10MiB).
    """
    xp, _, device_ = get_namespace_and_device(X, Y)
    n_samples_X = X.shape[0]
    n_samples_Y = Y.shape[0]
    n_features = X.shape[1]
    # The result stays float32; only per-chunk arithmetic runs at max precision.
    distances = xp.empty((n_samples_X, n_samples_Y), dtype=xp.float32, device=device_)
    if batch_size is None:
        # Density (fraction of stored entries) scales the memory estimate for
        # sparse inputs; dense inputs count as density 1.
        x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
        y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
        # Allow 10% more memory than X, Y and the distance matrix take (at
        # least 10MiB)
        maxmem = max(
            (
                (x_density * n_samples_X + y_density * n_samples_Y) * n_features
                + (x_density * n_samples_X * y_density * n_samples_Y)
            )
            / 10,
            10 * 2**17,
        )
        # The increase amount of memory in 8-byte blocks is:
        # - x_density * batch_size * n_features (copy of chunk of X)
        # - y_density * batch_size * n_features (copy of chunk of Y)
        # - batch_size * batch_size (chunk of distance matrix)
        # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
        # xd=x_density and yd=y_density
        tmp = (x_density + y_density) * n_features
        # Positive root of the quadratic above.
        batch_size = (-tmp + math.sqrt(tmp**2 + 4 * maxmem)) / 2
        batch_size = max(int(batch_size), 1)
    x_batches = gen_batches(n_samples_X, batch_size)
    xp_max_float = _max_precision_float_dtype(xp=xp, device=device_)
    for i, x_slice in enumerate(x_batches):
        X_chunk = xp.astype(X[x_slice, :], xp_max_float)
        if XX is None:
            # Norms were not precomputed (float32 input): recompute on the
            # upcast chunk for better precision.
            XX_chunk = row_norms(X_chunk, squared=True)[:, None]
        else:
            XX_chunk = XX[x_slice]
        y_batches = gen_batches(n_samples_Y, batch_size)
        for j, y_slice in enumerate(y_batches):
            if X is Y and j < i:
                # when X is Y the distance matrix is symmetric so we only need
                # to compute half of it.
                d = distances[y_slice, x_slice].T
            else:
                Y_chunk = xp.astype(Y[y_slice, :], xp_max_float)
                if YY is None:
                    YY_chunk = row_norms(Y_chunk, squared=True)[None, :]
                else:
                    YY_chunk = YY[:, y_slice]
                # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 on the chunk.
                d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
                d += XX_chunk
                d += YY_chunk
            # Downcast the finished chunk back to float32 in the output.
            distances[x_slice, y_slice] = xp.astype(d, xp.float32, copy=False)
    return distances
def _argmin_min_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def _argmin_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
return dist.argmin(axis=1)
# Metric names accepted by the generic (scipy/joblib) pairwise-distance
# backend: scikit-learn's own metrics plus scipy.spatial.distance metrics.
_VALID_METRICS = [
    "euclidean",
    "l2",
    "l1",
    "manhattan",
    "cityblock",
    "braycurtis",
    "canberra",
    "chebyshev",
    "correlation",
    "cosine",
    "dice",
    "hamming",
    "jaccard",
    "mahalanobis",
    # NOTE(review): "matching" is also appended conditionally below for
    # SciPy < 1.9, making that append a duplicate and accepting the name even
    # on SciPy >= 1.9 where it was removed — confirm whether this base entry
    # should exist.
    "matching",
    "minkowski",
    "rogerstanimoto",
    "russellrao",
    "seuclidean",
    "sokalsneath",
    "sqeuclidean",
    "yule",
    "wminkowski",
    "nan_euclidean",
    "haversine",
]
# Metrics removed from SciPy are only advertised for versions that still
# ship them.
if sp_base_version < parse_version("1.17"):  # pragma: no cover
    # Deprecated in SciPy 1.15 and removed in SciPy 1.17
    _VALID_METRICS += ["sokalmichener"]
if sp_base_version < parse_version("1.11"):  # pragma: no cover
    # Deprecated in SciPy 1.9 and removed in SciPy 1.11
    _VALID_METRICS += ["kulsinski"]
if sp_base_version < parse_version("1.9"):
    # Deprecated in SciPy 1.0 and removed in SciPy 1.9
    _VALID_METRICS += ["matching"]
# Metrics that tolerate NaN entries in their input.
_NAN_METRICS = ["nan_euclidean"]
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "Y": ["array-like", "sparse matrix"],
        "axis": [Options(Integral, {0, 1})],
        "metric": [
            StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())),
            callable,
        ],
        "metric_kwargs": [dict, None],
    },
    prefer_skip_nested_validation=False,  # metric is not validated yet
)
def pairwise_distances_argmin_min(
    X, Y, *, axis=1, metric="euclidean", metric_kwargs=None
):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.

    This is mostly equivalent to calling::

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Array containing points.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        Array containing points.

    axis : int, default=1
        Axis along which the argmin and distances are to be computed.

    metric : str or callable, default='euclidean'
        Metric to use for distance computation. Any metric from scikit-learn
        or :mod:`scipy.spatial.distance` can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan', 'nan_euclidean']

        - from :mod:`scipy.spatial.distance`: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']

        See the documentation for :mod:`scipy.spatial.distance` for details on these
        metrics.

        .. note::
           `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.

        .. note::
           `'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead).

    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : ndarray
        The array of minimum distances. `distances[i]` is the distance between
        the i-th row in X and the argmin[i]-th row in Y.

    See Also
    --------
    pairwise_distances : Distances between every pair of samples of X and Y.
    pairwise_distances_argmin : Same as `pairwise_distances_argmin_min` but only
        returns the argmins.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import pairwise_distances_argmin_min
    >>> X = [[0, 0, 0], [1, 1, 1]]
    >>> Y = [[1, 0, 0], [1, 1, 0]]
    >>> argmin, distances = pairwise_distances_argmin_min(X, Y)
    >>> argmin
    array([0, 1])
    >>> distances
    array([1., 1.])
    """
    ensure_all_finite = "allow-nan" if metric == "nan_euclidean" else True
    X, Y = check_pairwise_arrays(X, Y, ensure_all_finite=ensure_all_finite)
    if axis == 0:
        X, Y = Y, X
    metric_kwargs = metric_kwargs or {}
    if not ArgKmin.is_usable_for(X, Y, metric):
        # Joblib-based backend, which is used when user-defined callable
        # are passed for metric.
        # This won't be used in the future once PairwiseDistancesReductions support:
        # - DistanceMetrics which work on supposedly binary data
        # - CSR-dense and dense-CSR case if 'euclidean' in metric.
        # Turn off check for finiteness because this is costly and because arrays
        # have already been validated.
        with config_context(assume_finite=True):
            chunked = pairwise_distances_chunked(
                X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs
            )
            # Consume the generator inside the config context.
            indices, values = zip(*chunked)
        return np.concatenate(indices), np.concatenate(values)
    # This is an adaptor for one "sqeuclidean" specification.
    # For this backend, we can directly use "sqeuclidean".
    if metric == "euclidean" and metric_kwargs.get("squared", False):
        metric = "sqeuclidean"
        metric_kwargs = {}
    values, indices = ArgKmin.compute(
        X=X,
        Y=Y,
        k=1,
        metric=metric,
        metric_kwargs=metric_kwargs,
        strategy="auto",
        return_distance=True,
    )
    return indices.flatten(), values.flatten()
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix"],
"axis": [Options(Integral, {0, 1})],
"metric": [
StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())),
callable,
],
"metric_kwargs": [dict, None],
},
prefer_skip_nested_validation=False, # metric is not validated yet
)
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling::
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Array containing points.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Arrays containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or :mod:`scipy.spatial.distance` can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan', 'nan_euclidean']
- from :mod:`scipy.spatial.distance`: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for :mod:`scipy.spatial.distance` for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
.. note::
`'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead).
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
pairwise_distances : Distances between every pair of samples of X and Y.
pairwise_distances_argmin_min : Same as `pairwise_distances_argmin` but also
returns the distances.
Examples
--------
>>> from sklearn.metrics.pairwise import pairwise_distances_argmin
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> pairwise_distances_argmin(X, Y)
array([0, 1])
"""
ensure_all_finite = "allow-nan" if metric == "nan_euclidean" else True
X, Y = check_pairwise_arrays(X, Y, ensure_all_finite=ensure_all_finite)
if axis == 0:
X, Y = Y, X
if metric_kwargs is None:
metric_kwargs = {}
if ArgKmin.is_usable_for(X, Y, metric):
# This is an adaptor for one "sqeuclidean" specification.
# For this backend, we can directly use "sqeuclidean".
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_ranking.py | sklearn/metrics/_ranking.py | """Metrics to assess performance on classification task given scores.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from functools import partial
from numbers import Integral, Real
import numpy as np
from scipy.integrate import trapezoid
from scipy.sparse import csr_matrix, issparse
from scipy.stats import rankdata
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics._base import _average_binary_score, _average_multiclass_ovo_score
from sklearn.preprocessing import label_binarize
from sklearn.utils import (
assert_all_finite,
check_array,
check_consistent_length,
column_or_1d,
)
from sklearn.utils._array_api import (
_max_precision_float_dtype,
get_namespace_and_device,
size,
)
from sklearn.utils._encode import _encode, _unique
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.sparsefuncs import count_nonzero
from sklearn.utils.validation import _check_pos_label_consistency, _check_sample_weight
@validate_params(
    {"x": ["array-like"], "y": ["array-like"]},
    prefer_skip_nested_validation=True,
)
def auc(x, y):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general-purpose integrator for points on a curve. To compute
    the area under a ROC curve, see :func:`roc_auc_score`; for an
    alternative way to summarize a precision-recall curve, see
    :func:`average_precision_score`.

    Parameters
    ----------
    x : array-like of shape (n,)
        X coordinates. These must be either monotonic increasing or
        monotonic decreasing.

    y : array-like of shape (n,)
        Y coordinates.

    Returns
    -------
    auc : float
        Area Under the Curve.

    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    average_precision_score : Compute average precision from prediction scores.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y_true = np.array([1, 1, 2, 2])
    >>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError(
            "At least 2 points are needed to compute area under curve, but x.shape = %s"
            % x.shape
        )

    # Orientation of x: +1 when non-decreasing, -1 when non-increasing;
    # anything else means x is not monotonic and the area is undefined.
    steps = np.diff(x)
    if not np.any(steps < 0):
        sign = 1
    elif np.all(steps <= 0):
        sign = -1
    else:
        raise ValueError("x is neither increasing nor decreasing : {}.".format(x))

    area = sign * trapezoid(y, x)
    if isinstance(area, np.memmap):
        # Reductions such as .sum used inside trapezoid do not collapse to a
        # scalar for numpy.memmap inputs, so force the conversion here.
        area = area.dtype.type(area)
    return float(area)
@validate_params(
    {
        "y_true": ["array-like"],
        "y_score": ["array-like"],
        "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None],
        "pos_label": [Real, str, "boolean"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def average_precision_score(
    y_true, y_score, *, average="macro", pos_label=1, sample_weight=None
):
    """Compute average precision (AP) from prediction scores.

    AP summarizes the precision-recall curve as a weighted mean of the
    precision reached at each threshold, weighting each term by the recall
    gained since the previous threshold:

    .. math::
        \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n

    where :math:`P_n` and :math:`R_n` are the precision and recall at the
    nth threshold [1]_. This estimate is not interpolated, unlike the
    trapezoidal area under the precision-recall curve, whose linear
    interpolation can be too optimistic.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
        True binary labels, :term:`multi-label` indicators (as a
        :term:`multilabel indicator matrix`) or :term:`multi-class` labels.

    y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
        Target scores: probability estimates of the positive class,
        confidence values, or non-thresholded decisions (as returned by
        :term:`decision_function` on some classifiers). For
        :term:`decision_function` scores, values greater than or equal to
        zero should indicate the positive class.

    average : {'micro', 'samples', 'weighted', 'macro'} or None, \
            default='macro'
        Averaging strategy over labels; ``None`` returns the per-class
        scores. Ignored when ``y_true`` is binary.

        ``'micro'``:
            Compute the metric globally, over every element of the label
            indicator matrix.
        ``'macro'``:
            Unweighted mean of the per-label scores; ignores label
            imbalance.
        ``'weighted'``:
            Mean of the per-label scores weighted by support (the number of
            true instances per label).
        ``'samples'``:
            Mean of the per-instance scores.

    pos_label : int, float, bool or str, default=1
        The label of the positive class. Only applied to binary ``y_true``.
        For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    average_precision : float
        Average precision score.

    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.
    PrecisionRecallDisplay.from_estimator : Plot the precision recall curve
        using an estimator and data.
    PrecisionRecallDisplay.from_predictions : Plot the precision recall curve
        using true and predicted labels.

    Notes
    -----
    .. versionchanged:: 0.19
        Instead of linearly interpolating between operating points,
        precisions are weighted by the change in recall since the last
        operating point.

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <https://en.wikipedia.org/w/index.php?title=Information_retrieval&
           oldid=793358396#Average_precision>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)
    0.83
    """

    def _binary_ap(y_true, y_score, pos_label=1, sample_weight=None):
        # Uninterpolated step-function integral of the precision-recall
        # curve for a single binary problem.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
        )
        # precision_recall_curve guarantees the final precision entry is 1,
        # so dropping it is safe; clip to guard against a numerical `-0.0`.
        ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
        return float(max(0.0, ap))

    y_type = type_of_target(y_true, input_name="y_true")
    present_labels = np.unique(y_true)

    if y_type == "binary":
        if len(present_labels) == 2 and pos_label not in present_labels:
            raise ValueError(
                f"pos_label={pos_label} is not a valid label. It should be "
                f"one of {present_labels}"
            )
    elif y_type == "multilabel-indicator":
        if pos_label != 1:
            raise ValueError(
                "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. "
                "Do not set pos_label or set pos_label to 1."
            )
    elif y_type == "multiclass":
        if pos_label != 1:
            raise ValueError(
                "Parameter pos_label is fixed to 1 for multiclass y_true. "
                "Do not set pos_label or set pos_label to 1."
            )
        # Reduce the multiclass task to one-vs-rest binary columns.
        y_true = label_binarize(y_true, classes=present_labels)
        if y_score.shape != y_true.shape:
            raise ValueError(
                "`y_score` needs to be of shape `(n_samples, n_classes)`, since "
                "`y_true` contains multiple classes. Got "
                f"`y_score.shape={y_score.shape}`."
            )

    binary_metric = partial(_binary_ap, pos_label=pos_label)
    return _average_binary_score(
        binary_metric, y_true, y_score, average, sample_weight=sample_weight
    )
@validate_params(
    {
        "y_true": ["array-like"],
        "y_score": ["array-like"],
        "pos_label": [Real, str, "boolean", None],
        "sample_weight": ["array-like", None],
        "drop_intermediate": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def det_curve(
    y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=False
):
    """Compute Detection Error Tradeoff (DET) for different probability thresholds.

    Note: Support beyond :term:`binary` classification tasks, via one-vs-rest or
    one-vs-one, is not implemented.

    The DET curve is used for evaluation of ranking and error tradeoffs in binary
    classification tasks.

    Read more in the :ref:`User Guide <det_curve>`.

    .. versionadded:: 0.24

    .. versionchanged:: 1.7
        An arbitrary threshold at infinity is added to represent a classifier
        that always predicts the negative class, i.e. `fpr=0` and `fnr=1`, unless
        `fpr=0` is already reached at a finite threshold.

    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True binary labels. If labels are not either {-1, 1} or {0, 1}, then
        pos_label should be explicitly given.

    y_score : ndarray of shape of (n_samples,)
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
        For :term:`decision_function` scores, values greater than or equal to
        zero should indicate the positive class.

    pos_label : int, float, bool or str, default=None
        The label of the positive class.
        When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
        ``pos_label`` is set to 1, otherwise an error will be raised.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    drop_intermediate : bool, default=False
        Whether to drop thresholds where true positives (tp) do not change from
        the previous or subsequent threshold. All points with the same tp value
        have the same `fnr` and thus same y coordinate.

        .. versionadded:: 1.7

    Returns
    -------
    fpr : ndarray of shape (n_thresholds,)
        False positive rate (FPR) such that element i is the false positive
        rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false acceptance probability or fall-out.

    fnr : ndarray of shape (n_thresholds,)
        False negative rate (FNR) such that element i is the false negative
        rate of predictions with score >= thresholds[i]. This is occasionally
        referred to as false rejection or miss rate.

    thresholds : ndarray of shape (n_thresholds,)
        Decreasing thresholds on the decision function (either `predict_proba`
        or `decision_function`) used to compute FPR and FNR.

        .. versionchanged:: 1.7
            An arbitrary threshold at infinity is added for the case `fpr=0`
            and `fnr=1`.

    See Also
    --------
    DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
        some data.
    DetCurveDisplay.from_predictions : Plot DET curve given the true and
        predicted labels.
    DetCurveDisplay : DET curve visualization.
    roc_curve : Compute Receiver operating characteristic (ROC) curve.
    precision_recall_curve : Compute precision-recall curve.
    confusion_matrix_at_thresholds : For binary classification, compute true negative,
        false positive, false negative and true positive counts per threshold.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import det_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, fnr, thresholds = det_curve(y_true, y_scores)
    >>> fpr
    array([0.5, 0.5, 0. ])
    >>> fnr
    array([0. , 0.5, 0.5])
    >>> thresholds
    array([0.35, 0.4 , 0.8 ])
    """
    # Array-API dispatch: keep all computations on the namespace/device of
    # the inputs (NumPy, torch, etc.).
    xp, _, device = get_namespace_and_device(y_true, y_score)

    # Cumulative false/true positive counts at each decreasing threshold.
    _, fps, _, tps, thresholds = confusion_matrix_at_thresholds(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
    )

    # add a threshold at inf where the clf always predicts the negative class
    # i.e. tps = fps = 0
    tps = xp.concat((xp.asarray([0.0], device=device), tps))
    fps = xp.concat((xp.asarray([0.0], device=device), fps))
    # Cast thresholds to the widest float so the +inf sentinel is representable.
    thresholds = xp.astype(thresholds, _max_precision_float_dtype(xp, device))
    thresholds = xp.concat((xp.asarray([xp.inf], device=device), thresholds))

    if drop_intermediate and len(fps) > 2:
        # Drop thresholds where true positives (tp) do not change from the
        # previous or subsequent threshold. As tp + fn, is fixed for a dataset,
        # this means the false negative rate (fnr) remains constant while the
        # false positive rate (fpr) changes, producing horizontal line segments
        # in the transformed (normal deviate) scale. These intermediate points
        # can be dropped to create lighter DET curve plots.
        # The first and last points are always kept (the [True] sentinels).
        optimal_idxs = xp.where(
            xp.concat(
                [
                    xp.asarray([True], device=device),
                    xp.logical_or(xp.diff(tps[:-1]), xp.diff(tps[1:])),
                    xp.asarray([True], device=device),
                ]
            )
        )[0]
        fps = fps[optimal_idxs]
        tps = tps[optimal_idxs]
        thresholds = thresholds[optimal_idxs]

    # The DET curve is only defined for a genuinely binary target.
    if xp.unique_values(y_true).shape[0] != 2:
        raise ValueError(
            "Only one class is present in y_true. Detection error "
            "tradeoff curve is not defined in that case."
        )

    # tps[-1] / fps[-1] are the total positive / negative counts.
    fns = tps[-1] - tps
    p_count = tps[-1]
    n_count = fps[-1]

    # start with false positives zero, which may be at a finite threshold:
    # take the index of the last entry where fps equals its minimum value.
    first_ind = (
        xp.searchsorted(fps, fps[0], side="right") - 1
        if xp.searchsorted(fps, fps[0], side="right") > 0
        else None
    )
    # stop with false negatives zero
    last_ind = xp.searchsorted(tps, tps[-1]) + 1
    sl = slice(first_ind, last_ind)

    # reverse the output such that list of false positives is decreasing
    return (
        xp.flip(fps[sl]) / n_count,
        xp.flip(fns[sl]) / p_count,
        xp.flip(thresholds[sl]),
    )
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
    """ROC AUC for a single binary problem, optionally partial.

    Returns ``np.nan`` (with a warning) when only one class is present in
    `y_true`; the full AUC when `max_fpr` is None or 1; otherwise the
    McClish-standardized partial AUC over [0, max_fpr].
    """
    if len(np.unique(y_true)) != 2:
        warnings.warn(
            (
                "Only one class is present in y_true. ROC AUC score "
                "is not defined in that case."
            ),
            UndefinedMetricWarning,
        )
        return np.nan

    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
    if max_fpr is None or max_fpr == 1:
        return auc(fpr, tpr)
    if max_fpr <= 0 or max_fpr > 1:
        raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr)

    # Truncate the curve at max_fpr, adding a single point there by linear
    # interpolation between the two surrounding operating points.
    cutoff = np.searchsorted(fpr, max_fpr, "right")
    tpr_at_max = np.interp(
        max_fpr, [fpr[cutoff - 1], fpr[cutoff]], [tpr[cutoff - 1], tpr[cutoff]]
    )
    partial_auc = auc(
        np.append(fpr[:cutoff], max_fpr), np.append(tpr[:cutoff], tpr_at_max)
    )

    # McClish correction: standardize result to be 0.5 if non-discriminant
    # and 1 if maximal.
    min_area = 0.5 * max_fpr**2
    max_area = max_fpr
    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_score": ["array-like"],
        "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None],
        "sample_weight": ["array-like", None],
        "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None],
        "multi_class": [StrOptions({"raise", "ovr", "ovo"})],
        "labels": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def roc_auc_score(
    y_true,
    y_score,
    *,
    average="macro",
    sample_weight=None,
    max_fpr=None,
    multi_class="raise",
    labels=None,
):
    """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \
    from prediction scores.

    This implementation can be used with :term:`binary`, :term:`multiclass`
    and :term:`multilabel` classification, with some restrictions (see
    Parameters).

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
        True labels or :term:`label indicator matrix`. Binary and multiclass
        targets have shape (n_samples,); multilabel targets are a
        :term:`multilabel indicator matrix` of shape (n_samples, n_classes).

    y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
        Target scores.

        * Binary: shape `(n_samples,)`. Either probability estimates of the
          class with the greater label, i.e. `estimator.classes_[1]` and
          thus `estimator.predict_proba(X, y)[:, 1]`, or non-thresholded
          decision values from `estimator.decision_function(X, y)`. See the
          :ref:`User guide <roc_auc_binary>`.
        * Multiclass: shape `(n_samples, n_classes)` of probability
          estimates from `predict_proba`. The estimates **must** sum to 1
          across classes, and the column order must match ``labels`` if
          provided, or else the numerical or lexicographical order of the
          labels in ``y_true``. See the :ref:`User guide <roc_auc_multiclass>`.
        * Multilabel: shape `(n_samples, n_classes)`. Probability estimates
          of the class with the greater label for each output, or
          non-thresholded decision values. See the
          :ref:`User guide <roc_auc_multilabel>`.

    average : {'micro', 'macro', 'samples', 'weighted'} or None, \
            default='macro'
        Averaging strategy over labels; ``None`` returns per-class scores.
        Multiclass ROC AUC currently only handles the 'macro' and
        'weighted' averages; for multiclass targets, `average=None` and
        `average='micro'` are only implemented for `multi_class='ovr'`.
        Ignored when ``y_true`` is binary.

        ``'micro'``:
            Compute the metric globally over every element of the label
            indicator matrix.
        ``'macro'``:
            Unweighted mean of the per-label scores; ignores label
            imbalance.
        ``'weighted'``:
            Mean of the per-label scores weighted by support.
        ``'samples'``:
            Mean of the per-instance scores.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    max_fpr : float > 0 and <= 1, default=None
        If not ``None``, return the standardized partial AUC [2]_ over the
        range [0, max_fpr]. For multiclass targets, ``max_fpr`` must be
        ``None`` or ``1.0``, as partial AUC is not supported there.

    multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
        Only used for multiclass targets. The default raises an error, so
        ``'ovr'`` or ``'ovo'`` must be passed explicitly.

        ``'ovr'``:
            One-vs-rest: AUC of each class against the rest [3]_ [4]_,
            treating the multiclass case like the multilabel one. Sensitive
            to class imbalance even when ``average == 'macro'``.
        ``'ovo'``:
            One-vs-one: average AUC over all pairwise class combinations
            [5]_. Insensitive to class imbalance when
            ``average == 'macro'``.

    labels : array-like of shape (n_classes,), default=None
        Only used for multiclass targets. Labels indexing the columns of
        ``y_score``. If ``None``, the numerical or lexicographical order of
        the labels in ``y_true`` is used.

    Returns
    -------
    auc : float
        Area Under the Curve score.

    See Also
    --------
    average_precision_score : Area under the precision-recall curve.
    roc_curve : Compute Receiver operating characteristic (ROC) curve.
    RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
        (ROC) curve given an estimator and some data.
    RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
        (ROC) curve given the true and predicted values.

    Notes
    -----
    The Gini coefficient [6]_, a summary measure of the ranking ability of
    binary classifiers, relates to ROC AUC as ``G = 2 * AUC - 1``. This
    normalisation makes random guessing score 0 in expectation, with an
    upper bound of 1.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    .. [2] `Analyzing a portion of the ROC curve. McClish, 1989
            <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
    .. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
           probability estimation trees (Section 6.2), CeDER Working Paper
           #IS-00-04, Stern School of Business, New York University.
    .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
            Recognition Letters, 27(8), 861-874.
            <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
    .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
            Under the ROC Curve for Multiple Class Classification Problems.
            Machine Learning, 45(2), 171-186.
            <http://link.springer.com/article/10.1023/A:1010920819831>`_
    .. [6] `Wikipedia entry for the Gini coefficient
            <https://en.wikipedia.org/wiki/Gini_coefficient>`_

    Examples
    --------
    Binary case:

    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.metrics import roc_auc_score
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> clf = LogisticRegression(solver="newton-cholesky", random_state=0).fit(X, y)
    >>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
    0.99
    >>> roc_auc_score(y, clf.decision_function(X))
    0.99

    Multiclass case:

    >>> from sklearn.datasets import load_iris
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegression(solver="newton-cholesky").fit(X, y)
    >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
    0.99
    """
    y_type = type_of_target(y_true, input_name="y_true")
    y_true = check_array(y_true, ensure_2d=False, dtype=None)
    y_score = check_array(y_score, ensure_2d=False)

    # A nominally binary target with more than two score columns is treated
    # as a multiclass problem.
    is_multiclass = y_type == "multiclass" or (
        y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2
    )
    if is_multiclass:
        # do not support partial ROC computation for multiclass
        if max_fpr is not None and max_fpr != 1.0:
            raise ValueError(
                "Partial AUC computation not available in "
                "multiclass setting, 'max_fpr' must be"
                " set to `None`, received `max_fpr={0}` "
                "instead".format(max_fpr)
            )
        if multi_class == "raise":
            raise ValueError("multi_class must be in ('ovo', 'ovr')")
        return _multiclass_roc_auc_score(
            y_true, y_score, labels, multi_class, average, sample_weight
        )

    if y_type == "binary":
        # Reduce the binary target to a single 0/1 column before averaging.
        classes = np.unique(y_true)
        y_true = label_binarize(y_true, classes=classes)[:, 0]

    # Binary (after the binarization above) and multilabel-indicator targets
    # share the same per-column binary scorer.
    return _average_binary_score(
        partial(_binary_roc_auc_score, max_fpr=max_fpr),
        y_true,
        y_score,
        average,
        sample_weight=sample_weight,
    )
def _multiclass_roc_auc_score(
y_true, y_score, labels, multi_class, average, sample_weight
):
"""Multiclass roc auc score.
Parameters
----------
y_true : array-like of shape (n_samples,)
True multiclass labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores corresponding to probability estimates of a sample
belonging to a particular class
labels : array-like of shape (n_classes,) or None
List of labels to index ``y_score`` used for multiclass. If ``None``,
the lexical order of ``y_true`` is used to index ``y_score``.
multi_class : {'ovr', 'ovo'}
Determines the type of multiclass configuration to use.
``'ovr'``:
Calculate metrics for the multiclass case using the one-vs-rest
approach.
``'ovo'``:
Calculate metrics for the multiclass case using the one-vs-one
approach.
average : {'micro', 'macro', 'weighted'}
Determines the type of averaging performed on the pairwise binary
metric scores
``'micro'``:
Calculate metrics for the binarized-raveled classes. Only supported
for `multi_class='ovr'`.
.. versionadded:: 1.2
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account. Classes
are assumed to be uniformly distributed.
``'weighted'``:
Calculate metrics for each label, taking into account the
prevalence of the classes.
sample_weight : array-like of shape (n_samples,) or None
Sample weights.
"""
if not y_score.ndim == 2:
raise ValueError(
"`y_score` needs to be of shape `(n_samples, n_classes)`, since "
"`y_true` contains multiple classes. Got "
f"`y_score.shape={y_score.shape}`."
)
if not np.allclose(1, y_score.sum(axis=1)):
raise ValueError(
"Target scores need to be probabilities for multiclass "
"roc_auc, i.e. they should sum up to 1.0 over classes"
)
# validation for multiclass parameter specifications
average_options = ("macro", "weighted", None)
if multi_class == "ovr":
average_options = ("micro",) + average_options
if average not in average_options:
raise ValueError(
"average must be one of {0} for multiclass problems".format(average_options)
)
multiclass_options = ("ovo", "ovr")
if multi_class not in multiclass_options:
raise ValueError(
"multi_class='{0}' is not supported "
"for multiclass ROC AUC, multi_class must be "
"in {1}".format(multi_class, multiclass_options)
)
if average is None and multi_class == "ovo":
raise NotImplementedError(
"average=None is not implemented for multi_class='ovo'."
)
if labels is not None:
labels = column_or_1d(labels)
classes = _unique(labels)
if len(classes) != len(labels):
raise ValueError("Parameter 'labels' must be unique")
if not np.array_equal(classes, labels):
raise ValueError("Parameter 'labels' must be ordered")
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of given labels, {0}, not equal to the number "
"of columns in 'y_score', {1}".format(len(classes), y_score.shape[1])
)
if len(np.setdiff1d(y_true, classes)):
raise ValueError("'y_true' contains labels not in parameter 'labels'")
else:
classes = _unique(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of "
"columns in 'y_score'"
)
if multi_class == "ovo":
if sample_weight is not None:
raise ValueError(
"sample_weight is not supported "
"for multiclass one-vs-one ROC AUC, "
"'sample_weight' must be None in this case."
)
y_true_encoded = _encode(y_true, uniques=classes)
# Hand & Till (2001) implementation (ovo)
return _average_multiclass_ovo_score(
_binary_roc_auc_score, y_true_encoded, y_score, average=average
)
else:
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_regression.py | sklearn/metrics/_regression.py | """Metrics to assess performance on regression task.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Real
import numpy as np
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.utils._array_api import (
_average,
_find_matching_floating_dtype,
_median,
get_namespace,
get_namespace_and_device,
size,
)
from sklearn.utils._array_api import _xlogy as xlogy
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.stats import _weighted_percentile
from sklearn.utils.validation import (
_check_sample_weight,
_num_samples,
check_array,
check_consistent_length,
column_or_1d,
)
__ALL__ = [
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"mean_absolute_percentage_error",
"mean_pinball_loss",
"r2_score",
"root_mean_squared_log_error",
"root_mean_squared_error",
"explained_variance_score",
"mean_tweedie_deviance",
"mean_poisson_deviance",
"mean_gamma_deviance",
"d2_tweedie_score",
"d2_pinball_score",
"d2_absolute_error_score",
]
def _check_reg_targets(
    y_true, y_pred, sample_weight, multioutput, dtype="numeric", xp=None
):
    """Validate that y_true, y_pred and sample_weight describe one regression task.

    Prefer `_check_reg_targets_with_floating_dtype` when a floating-point
    dtype should be inferred automatically from the inputs.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights.

    multioutput : array-like or string in ['raw_values', uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights.

    multioutput : array-like of shape (n_outputs) or string in ['raw_values',
        uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    xp, _ = get_namespace(y_true, y_pred, multioutput, xp=xp)

    check_consistent_length(y_true, y_pred, sample_weight)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, y_true, dtype=dtype)

    # Promote 1D targets to column vectors so every later check can reason
    # purely in terms of 2D (n_samples, n_outputs) arrays.
    if y_true.ndim == 1:
        y_true = xp.reshape(y_true, (-1, 1))
    if y_pred.ndim == 1:
        y_pred = xp.reshape(y_pred, (-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output ({0}!={1})".format(
                y_true.shape[1], y_pred.shape[1]
            )
        )

    n_outputs = y_true.shape[1]
    valid_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in valid_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    valid_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        # Array-like custom weights: one weight per output is required.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != multioutput.shape[0]:
            raise ValueError(
                "There must be equally many custom weights "
                f"({multioutput.shape[0]}) as outputs ({n_outputs})."
            )

    if n_outputs == 1:
        y_type = "continuous"
    else:
        y_type = "continuous-multioutput"
    return y_type, y_true, y_pred, sample_weight, multioutput
def _check_reg_targets_with_floating_dtype(
    y_true, y_pred, sample_weight, multioutput, xp=None
):
    """Validate regression targets, coercing them to a common floating dtype.

    Thin wrapper around `_check_reg_targets` that first selects a suitable
    floating-point dtype for the inputs via `_find_matching_floating_dtype`.
    Use this private helper when converting inputs to array API-compatibles.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,)

    multioutput : array-like or string in ['raw_values', 'uniform_average', \
            'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : array-like of shape (n_outputs) or string in ['raw_values', \
            'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    float_dtype = _find_matching_floating_dtype(y_true, y_pred, sample_weight, xp=xp)
    return _check_reg_targets(
        y_true, y_pred, sample_weight, multioutput, dtype=float_dtype, xp=xp
    )
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute error regression loss.

    The mean absolute error is a non-negative floating point value, where best value
    is 0.0. Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or array of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    # Per-output mean of |error|, optionally weighted per sample.
    per_output_mae = _average(
        xp.abs(y_pred - y_true), weights=sample_weight, axis=0, xp=xp
    )

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return per_output_mae
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs (axis=None): `_average` yields a scalar array that
    # we convert to an eagerly evaluated Python float for a consistent return.
    return float(_average(per_output_mae, weights=multioutput, xp=xp))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_pinball_loss(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """Pinball loss for quantile regression.

    Read more in the :ref:`User Guide <pinball_loss>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, slope of the pinball loss, default=0.5,
        This loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`,
        `alpha=0.95` is minimized by estimators of the 95th percentile.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        The pinball loss output is a non-negative floating point. The best
        value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_pinball_loss
    >>> y_true = [1, 2, 3]
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
    0.03...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
    0.3...
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
    0.3...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
    0.03...
    >>> mean_pinball_loss(y_true, y_true, alpha=0.1)
    0.0
    >>> mean_pinball_loss(y_true, y_true, alpha=0.9)
    0.0
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    residual = y_true - y_pred
    # 1.0 where the prediction undershoots (residual >= 0), 0.0 otherwise.
    undershoot = xp.astype(residual >= 0, residual.dtype)
    pinball = alpha * undershoot * residual - (1 - alpha) * (1 - undershoot) * residual
    per_output_loss = _average(pinball, weights=sample_weight, axis=0, xp=xp)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return per_output_loss
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs (axis=None) and convert the resulting scalar
    # array to an eagerly evaluated Python float.
    return float(_average(per_output_loss, weights=multioutput, xp=xp))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_percentage_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute percentage error (MAPE) regression loss.

    Note that we are not using the common "percentage" definition: the percentage
    in the range [0, 100] is converted to a relative value in the range [0, 1]
    by dividing by 100. Thus, an error of 200% corresponds to a relative error of 2.

    Read more in the :ref:`User Guide <mean_absolute_percentage_error>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAPE output is non-negative floating point. The best value is 0.0.
        But note that bad predictions can lead to arbitrarily large
        MAPE values, especially if some `y_true` values are very close to zero.
        Note that we return a large value instead of `inf` when `y_true` is zero.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.3273...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.5515...
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.6198...
    >>> # the value when some element of the y_true is zero is arbitrarily high because
    >>> # of the division by epsilon
    >>> y_true = [1., 0., 2.4, 7.]
    >>> y_pred = [1.2, 0.1, 2.4, 8.]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    112589990684262.48
    """
    xp, _, device_ = get_namespace_and_device(
        y_true, y_pred, sample_weight, multioutput
    )
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    # Clamp |y_true| away from zero so the ratio stays finite: a true value of
    # exactly zero yields an arbitrarily large error rather than `inf`.
    eps = xp.asarray(xp.finfo(xp.float64).eps, dtype=y_true.dtype, device=device_)
    per_sample_ape = xp.abs(y_pred - y_true) / xp.maximum(xp.abs(y_true), eps)
    per_output_mape = _average(per_sample_ape, weights=sample_weight, axis=0, xp=xp)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return per_output_mape
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs (axis=None) and convert the resulting scalar
    # array to an eagerly evaluated Python float.
    return float(_average(per_output_mape, weights=multioutput, xp=xp))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_squared_error(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or array of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    array([0.41666667, 1.        ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.825...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    # Per-output mean of squared errors, optionally weighted per sample.
    squared_errors = (y_true - y_pred) ** 2
    per_output_mse = _average(squared_errors, axis=0, weights=sample_weight, xp=xp)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return per_output_mse
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs (axis=None) and convert the resulting scalar
    # array to an eagerly evaluated Python float.
    return float(_average(per_output_mse, weights=multioutput, xp=xp))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def root_mean_squared_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Root mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> root_mean_squared_error(y_true, y_pred)
    0.612...
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> root_mean_squared_error(y_true, y_pred)
    0.822...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)

    # Delegate per-output MSE computation, then take the square root so the
    # sample weighting happens *inside* the square root.
    per_output_mse = mean_squared_error(
        y_true, y_pred, sample_weight=sample_weight, multioutput="raw_values"
    )
    per_output_rmse = xp.sqrt(per_output_mse)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return per_output_rmse
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs (axis=None) and convert the resulting scalar
    # array to an eagerly evaluated Python float.
    return float(_average(per_output_rmse, weights=multioutput, xp=xp))
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_squared_log_error(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
):
    """Mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Raises
    ------
    ValueError
        If `y_true` or `y_pred` contain values less than or equal to -1, for
        which `log1p` is undefined.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> mean_squared_log_error(y_true, y_pred)
    0.039...
    >>> y_true = [[0.5, 1], [1, 2], [7, 6]]
    >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
    >>> mean_squared_log_error(y_true, y_pred)
    0.044...
    >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
    array([0.00462428, 0.08377444])
    >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.060...
    """
    # Inspect *all* array-like inputs when inferring the array namespace, for
    # consistency with the other regression metrics in this module (previously
    # only `y_true`/`y_pred` were considered, which could miss array inputs
    # passed as `sample_weight` or `multioutput`).
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    # log1p is undefined for values <= -1; reject them explicitly.
    if xp.any(y_true <= -1) or xp.any(y_pred <= -1):
        raise ValueError(
            "Mean Squared Logarithmic Error cannot be used when "
            "targets contain values less than or equal to -1."
        )

    # MSLE is the MSE computed on log1p-transformed targets.
    return mean_squared_error(
        xp.log1p(y_true),
        xp.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
    )
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def root_mean_squared_log_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Root mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Raises
    ------
    ValueError
        If `y_true` or `y_pred` contain values less than or equal to -1, for
        which `log1p` is undefined.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> root_mean_squared_log_error(y_true, y_pred)
    0.199...
    """
    # Inspect *all* array-like inputs when inferring the array namespace, for
    # consistency with the other regression metrics in this module (previously
    # only `y_true`/`y_pred` were considered, which could miss array inputs
    # passed as `sample_weight` or `multioutput`).
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    # log1p is undefined for values <= -1; reject them explicitly.
    if xp.any(y_true <= -1) or xp.any(y_pred <= -1):
        raise ValueError(
            "Root Mean Squared Logarithmic Error cannot be used when "
            "targets contain values less than or equal to -1."
        )

    # RMSLE is the RMSE computed on log1p-transformed targets.
    return root_mean_squared_error(
        xp.log1p(y_true),
        xp.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
    )
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def median_absolute_error(
    y_true, y_pred, *, multioutput="uniform_average", sample_weight=None
):
    """Median absolute error regression loss.

    Median absolute error output is non-negative floating point. The best value
    is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values. Array-like value defines
        weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.24

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> median_absolute_error(y_true, y_pred)
    0.75
    >>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85
    """
    xp, _ = get_namespace(y_true, y_pred, multioutput, sample_weight)
    _, y_true, y_pred, sample_weight, multioutput = _check_reg_targets(
        y_true, y_pred, sample_weight, multioutput
    )

    abs_errors = xp.abs(y_pred - y_true)
    if sample_weight is not None:
        # Weighted per-output median via the averaged weighted percentile.
        output_errors = _weighted_percentile(
            abs_errors, sample_weight=sample_weight, average=True
        )
    else:
        output_errors = _median(abs_errors, axis=0)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        if multioutput == "uniform_average":
            # `None` weights make `_average` compute a uniform mean.
            multioutput = None

    # Reduce across outputs and convert to an eagerly evaluated Python float.
    return float(_average(output_errors, weights=multioutput, xp=xp))
def _assemble_fraction_of_explained_deviance(
numerator, denominator, n_outputs, multioutput, force_finite, xp, device
):
"""Common part used by explained variance score and :math:`R^2` score."""
dtype = numerator.dtype
nonzero_denominator = denominator != 0
if not force_finite:
# Standard formula, that may lead to NaN or -Inf
output_scores = 1 - (numerator / denominator)
else:
nonzero_numerator = numerator != 0
# Default = Zero Numerator = perfect predictions. Set to 1.0
# (note: even if denominator is zero, thus avoiding NaN scores)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/roc_curve.py | sklearn/metrics/_plot/roc_curve.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.metrics._ranking import auc, roc_curve
from sklearn.utils import _safe_indexing
from sklearn.utils._plotting import (
_BinaryClassifierCurveDisplayMixin,
_check_param_lengths,
_convert_to_list_leaving_none,
_deprecate_estimator_name,
_deprecate_y_pred_parameter,
_despine,
_validate_style_kwargs,
)
from sklearn.utils._response import _get_response_values_binary
class RocCurveDisplay(_BinaryClassifierCurveDisplayMixin):
"""ROC Curve visualization.
It is recommended to use
:func:`~sklearn.metrics.RocCurveDisplay.from_estimator` or
:func:`~sklearn.metrics.RocCurveDisplay.from_predictions` or
:func:`~sklearn.metrics.RocCurveDisplay.from_cv_results` to create
a :class:`~sklearn.metrics.RocCurveDisplay`. All parameters are
stored as attributes.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the :ref:`Model
Evaluation Guide <roc_metrics>`.
Parameters
----------
fpr : ndarray or list of ndarrays
False positive rates. Each ndarray should contain values for a single curve.
If plotting multiple curves, list should be of same length as `tpr`.
.. versionchanged:: 1.7
Now accepts a list for plotting multiple curves.
tpr : ndarray or list of ndarrays
True positive rates. Each ndarray should contain values for a single curve.
If plotting multiple curves, list should be of same length as `fpr`.
.. versionchanged:: 1.7
Now accepts a list for plotting multiple curves.
roc_auc : float or list of floats, default=None
Area under ROC curve, used for labeling each curve in the legend.
If plotting multiple curves, should be a list of the same length as `fpr`
and `tpr`. If `None`, ROC AUC scores are not shown in the legend.
.. versionchanged:: 1.7
Now accepts a list for plotting multiple curves.
name : str or list of str, default=None
Name for labeling legend entries. The number of legend entries is determined
by the `curve_kwargs` passed to `plot`, and is not affected by `name`.
To label each curve, provide a list of strings. To avoid labeling
individual curves that have the same appearance, a list cannot be used in
conjunction with `curve_kwargs` being a dictionary or None. If a
string is provided, it will be used to either label the single legend entry
or if there are multiple legend entries, label each individual curve with
the same name. If `None`, no name is shown in the legend.
.. versionchanged:: 1.7
`estimator_name` was deprecated in favor of `name`.
pos_label : int, float, bool or str, default=None
The class considered the positive class when ROC AUC metrics computed.
If not `None`, this value is displayed in the x- and y-axes labels.
.. versionadded:: 0.24
estimator_name : str, default=None
Name of estimator. If None, the estimator name is not shown.
.. deprecated:: 1.7
`estimator_name` is deprecated and will be removed in 1.9. Use `name`
instead.
Attributes
----------
line_ : matplotlib Artist or list of matplotlib Artists
ROC Curves.
.. versionchanged:: 1.7
This attribute can now be a list of Artists, for when multiple curves
are plotted.
chance_level_ : matplotlib Artist or None
The chance level line. It is `None` if the chance level is not plotted.
.. versionadded:: 1.3
ax_ : matplotlib Axes
Axes with ROC Curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
roc_curve : Compute Receiver operating characteristic (ROC) curve.
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
RocCurveDisplay.from_cv_results : Plot multi-fold ROC curves given
cross-validation results.
roc_auc_score : Compute the area under the ROC curve.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sklearn import metrics
>>> y_true = np.array([0, 0, 1, 1])
>>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
>>> roc_auc = metrics.auc(fpr, tpr)
>>> display = metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc,
... name='example estimator')
>>> display.plot()
<...>
>>> plt.show()
"""
    def __init__(
        self,
        *,
        fpr,
        tpr,
        roc_auc=None,
        name=None,
        pos_label=None,
        estimator_name="deprecated",
    ):
        # Store inputs as-is; conversion to per-curve lists and length
        # validation are deferred to plot time (`_validate_plot_params`).
        self.fpr = fpr
        self.tpr = tpr
        self.roc_auc = roc_auc
        # `estimator_name` is a deprecated alias of `name`; the helper picks
        # the right value and warns when the old parameter was passed.
        self.name = _deprecate_estimator_name(estimator_name, name, "1.7")
        self.pos_label = pos_label
def _validate_plot_params(self, *, ax, name):
self.ax_, self.figure_, name = super()._validate_plot_params(ax=ax, name=name)
fpr = _convert_to_list_leaving_none(self.fpr)
tpr = _convert_to_list_leaving_none(self.tpr)
roc_auc = _convert_to_list_leaving_none(self.roc_auc)
name = _convert_to_list_leaving_none(name)
optional = {"self.roc_auc": roc_auc}
if isinstance(name, list) and len(name) != 1:
optional.update({"'name' (or self.name)": name})
_check_param_lengths(
required={"self.fpr": fpr, "self.tpr": tpr},
optional=optional,
class_name="RocCurveDisplay",
)
return fpr, tpr, roc_auc, name
    def plot(
        self,
        ax=None,
        *,
        name=None,
        curve_kwargs=None,
        plot_chance_level=False,
        chance_level_kw=None,
        despine=False,
        **kwargs,
    ):
        """Plot visualization.

        Parameters
        ----------
        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        name : str or list of str, default=None
            Name for labeling legend entries. The number of legend entries
            is determined by `curve_kwargs`, and is not affected by `name`.
            To label each curve, provide a list of strings. To avoid labeling
            individual curves that have the same appearance, a list cannot be used in
            conjunction with `curve_kwargs` being a dictionary or None. If a
            string is provided, it will be used to either label the single legend entry
            or if there are multiple legend entries, label each individual curve with
            the same name. If `None`, set to `name` provided at `RocCurveDisplay`
            initialization. If still `None`, no name is shown in the legend.

            .. versionadded:: 1.7

        curve_kwargs : dict or list of dict, default=None
            Keywords arguments to be passed to matplotlib's `plot` function
            to draw individual ROC curves. For single curve plotting, should be
            a dictionary. For multi-curve plotting, if a list is provided the
            parameters are applied to the ROC curves of each CV fold
            sequentially and a legend entry is added for each curve.
            If a single dictionary is provided, the same parameters are applied
            to all ROC curves and a single legend entry for all curves is added,
            labeled with the mean ROC AUC score.

            .. versionadded:: 1.7

        plot_chance_level : bool, default=False
            Whether to plot the chance level.

            .. versionadded:: 1.3

        chance_level_kw : dict, default=None
            Keyword arguments to be passed to matplotlib's `plot` for rendering
            the chance level line.

            .. versionadded:: 1.3

        despine : bool, default=False
            Whether to remove the top and right spines from the plot.

            .. versionadded:: 1.6

        **kwargs : dict
            Keyword arguments to be passed to matplotlib's `plot`.

            .. deprecated:: 1.7
                kwargs is deprecated and will be removed in 1.9. Pass matplotlib
                arguments to `curve_kwargs` as a dictionary instead.

        Returns
        -------
        display : :class:`~sklearn.metrics.RocCurveDisplay`
            Object that stores computed values.
        """
        fpr, tpr, roc_auc, name = self._validate_plot_params(ax=ax, name=name)
        n_curves = len(fpr)

        # With several curves and a non-list `curve_kwargs`, all curves share a
        # single legend entry, summarized by the mean (+/- std) AUC across the
        # curves. Otherwise every curve gets its own legend entry and AUC.
        if not isinstance(curve_kwargs, list) and n_curves > 1:
            if roc_auc:
                legend_metric = {"mean": np.mean(roc_auc), "std": np.std(roc_auc)}
            else:
                legend_metric = {"mean": None, "std": None}
        else:
            roc_auc = roc_auc if roc_auc is not None else [None] * n_curves
            legend_metric = {"metric": roc_auc}

        curve_kwargs = self._validate_curve_kwargs(
            n_curves,
            name,
            legend_metric,
            "AUC",
            curve_kwargs=curve_kwargs,
            default_multi_curve_kwargs={
                "alpha": 0.5,
                "linestyle": "--",
                "color": "blue",
            },
            **kwargs,
        )

        # User-provided chance-level kwargs take precedence over these defaults.
        default_chance_level_line_kw = {
            "label": "Chance level (AUC = 0.5)",
            "color": "k",
            "linestyle": "--",
        }
        if chance_level_kw is None:
            chance_level_kw = {}
        chance_level_kw = _validate_style_kwargs(
            default_chance_level_line_kw, chance_level_kw
        )

        self.line_ = []
        for fpr, tpr, line_kw in zip(fpr, tpr, curve_kwargs):
            self.line_.extend(self.ax_.plot(fpr, tpr, **line_kw))
        # Return single artist if only one curve is plotted
        if len(self.line_) == 1:
            self.line_ = self.line_[0]

        info_pos_label = (
            f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
        )
        xlabel = "False Positive Rate" + info_pos_label
        ylabel = "True Positive Rate" + info_pos_label
        self.ax_.set(
            xlabel=xlabel,
            xlim=(-0.01, 1.01),
            ylabel=ylabel,
            ylim=(-0.01, 1.01),
            aspect="equal",
        )

        if plot_chance_level:
            (self.chance_level_,) = self.ax_.plot((0, 1), (0, 1), **chance_level_kw)
        else:
            self.chance_level_ = None

        if despine:
            _despine(self.ax_)

        # Only draw a legend when at least one artist carries a label.
        if curve_kwargs[0].get("label") is not None or (
            plot_chance_level and chance_level_kw.get("label") is not None
        ):
            self.ax_.legend(loc="lower right")

        return self
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        sample_weight=None,
        drop_intermediate=True,
        response_method="auto",
        pos_label=None,
        name=None,
        ax=None,
        curve_kwargs=None,
        plot_chance_level=False,
        chance_level_kw=None,
        despine=False,
        **kwargs,
    ):
        """Create a ROC Curve display from an estimator.

        For general information regarding `scikit-learn` visualization tools,
        see the :ref:`Visualization Guide <visualizations>`.
        For guidance on interpreting these plots, refer to the :ref:`Model
        Evaluation Guide <roc_metrics>`.

        Parameters
        ----------
        estimator : estimator instance
            Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
            in which the last estimator is a classifier.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        drop_intermediate : bool, default=True
            Whether to drop thresholds where the resulting point is collinear
            with its neighbors in ROC space. This has no effect on the ROC AUC
            or visual shape of the curve, but reduces the number of plotted
            points.

        response_method : {'predict_proba', 'decision_function', 'auto'} \
                default='auto'
            Specifies whether to use :term:`predict_proba` or
            :term:`decision_function` as the target response. If set to 'auto',
            :term:`predict_proba` is tried first and if it does not exist
            :term:`decision_function` is tried next.

        pos_label : int, float, bool or str, default=None
            The class considered as the positive class when computing the ROC AUC.
            By default, `estimators.classes_[1]` is considered
            as the positive class.

        name : str, default=None
            Name of ROC Curve for labeling. If `None`, use the name of the
            estimator.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is created.

        curve_kwargs : dict, default=None
            Keywords arguments to be passed to matplotlib's `plot` function.

            .. versionadded:: 1.7

        plot_chance_level : bool, default=False
            Whether to plot the chance level.

            .. versionadded:: 1.3

        chance_level_kw : dict, default=None
            Keyword arguments to be passed to matplotlib's `plot` for rendering
            the chance level line.

            .. versionadded:: 1.3

        despine : bool, default=False
            Whether to remove the top and right spines from the plot.

            .. versionadded:: 1.6

        **kwargs : dict
            Keyword arguments to be passed to matplotlib's `plot`.

            .. deprecated:: 1.7
                kwargs is deprecated and will be removed in 1.9. Pass matplotlib
                arguments to `curve_kwargs` as a dictionary instead.

        Returns
        -------
        display : :class:`~sklearn.metrics.RocCurveDisplay`
            The ROC Curve display.

        See Also
        --------
        roc_curve : Compute Receiver operating characteristic (ROC) curve.
        RocCurveDisplay.from_predictions : ROC Curve visualization given the
            probabilities of scores of a classifier.
        RocCurveDisplay.from_cv_results : Plot multi-fold ROC curves given
            cross-validation results.
        roc_auc_score : Compute the area under the ROC curve.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.metrics import RocCurveDisplay
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.svm import SVC
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = SVC(random_state=0).fit(X_train, y_train)
        >>> RocCurveDisplay.from_estimator(
        ...     clf, X_test, y_test)
        <...>
        >>> plt.show()
        """
        # Compute the response values (scores) once, then delegate curve
        # computation and plotting to `from_predictions`.
        y_score, pos_label, name = cls._validate_and_get_response_values(
            estimator,
            X,
            y,
            response_method=response_method,
            pos_label=pos_label,
            name=name,
        )
        return cls.from_predictions(
            y_true=y,
            y_score=y_score,
            sample_weight=sample_weight,
            drop_intermediate=drop_intermediate,
            pos_label=pos_label,
            name=name,
            ax=ax,
            curve_kwargs=curve_kwargs,
            plot_chance_level=plot_chance_level,
            chance_level_kw=chance_level_kw,
            despine=despine,
            **kwargs,
        )
    @classmethod
    def from_predictions(
        cls,
        y_true,
        y_score=None,
        *,
        sample_weight=None,
        drop_intermediate=True,
        pos_label=None,
        name=None,
        ax=None,
        curve_kwargs=None,
        plot_chance_level=False,
        chance_level_kw=None,
        despine=False,
        y_pred="deprecated",
        **kwargs,
    ):
        """Plot ROC curve given the true and predicted values.

        For general information regarding `scikit-learn` visualization tools,
        see the :ref:`Visualization Guide <visualizations>`.
        For guidance on interpreting these plots, refer to the :ref:`Model
        Evaluation Guide <roc_metrics>`.

        .. versionadded:: 1.0

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            True labels.

        y_score : array-like of shape (n_samples,)
            Target scores, can either be probability estimates of the positive
            class, confidence values, or non-thresholded measure of decisions
            (as returned by “decision_function” on some classifiers).

            .. versionadded:: 1.7
                `y_pred` has been renamed to `y_score`.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        drop_intermediate : bool, default=True
            Whether to drop thresholds where the resulting point is collinear
            with its neighbors in ROC space. This has no effect on the ROC AUC
            or visual shape of the curve, but reduces the number of plotted
            points.

        pos_label : int, float, bool or str, default=None
            The label of the positive class when computing the ROC AUC.
            When `pos_label=None`, if `y_true` is in {-1, 1} or {0, 1}, `pos_label`
            is set to 1, otherwise an error will be raised.

        name : str, default=None
            Name of ROC curve for legend labeling. If `None`, name will be set to
            `"Classifier"`.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        curve_kwargs : dict, default=None
            Keywords arguments to be passed to matplotlib's `plot` function.

            .. versionadded:: 1.7

        plot_chance_level : bool, default=False
            Whether to plot the chance level.

            .. versionadded:: 1.3

        chance_level_kw : dict, default=None
            Keyword arguments to be passed to matplotlib's `plot` for rendering
            the chance level line.

            .. versionadded:: 1.3

        despine : bool, default=False
            Whether to remove the top and right spines from the plot.

            .. versionadded:: 1.6

        y_pred : array-like of shape (n_samples,)
            Target scores, can either be probability estimates of the positive
            class, confidence values, or non-thresholded measure of decisions
            (as returned by “decision_function” on some classifiers).

            .. deprecated:: 1.7
                `y_pred` is deprecated and will be removed in 1.9. Use
                `y_score` instead.

        **kwargs : dict
            Additional keywords arguments passed to matplotlib `plot` function.

            .. deprecated:: 1.7
                kwargs is deprecated and will be removed in 1.9. Pass matplotlib
                arguments to `curve_kwargs` as a dictionary instead.

        Returns
        -------
        display : :class:`~sklearn.metrics.RocCurveDisplay`
            Object that stores computed values.

        See Also
        --------
        roc_curve : Compute Receiver operating characteristic (ROC) curve.
        RocCurveDisplay.from_estimator : ROC Curve visualization given an
            estimator and some data.
        RocCurveDisplay.from_cv_results : Plot multi-fold ROC curves given
            cross-validation results.
        roc_auc_score : Compute the area under the ROC curve.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.metrics import RocCurveDisplay
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.svm import SVC
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = SVC(random_state=0).fit(X_train, y_train)
        >>> y_score = clf.decision_function(X_test)
        >>> RocCurveDisplay.from_predictions(y_test, y_score)
        <...>
        >>> plt.show()
        """
        # Resolve the deprecated `y_pred` alias into `y_score`.
        y_score = _deprecate_y_pred_parameter(y_score, y_pred, "1.7")
        pos_label_validated, name = cls._validate_from_predictions_params(
            y_true, y_score, sample_weight=sample_weight, pos_label=pos_label, name=name
        )

        # `roc_curve` performs its own handling of `pos_label=None`, so the
        # raw `pos_label` (not the validated one) is forwarded here; the
        # validated label is only used for axis labeling on the display.
        fpr, tpr, _ = roc_curve(
            y_true,
            y_score,
            pos_label=pos_label,
            sample_weight=sample_weight,
            drop_intermediate=drop_intermediate,
        )
        roc_auc = auc(fpr, tpr)

        viz = cls(
            fpr=fpr,
            tpr=tpr,
            roc_auc=roc_auc,
            name=name,
            pos_label=pos_label_validated,
        )
        return viz.plot(
            ax=ax,
            curve_kwargs=curve_kwargs,
            plot_chance_level=plot_chance_level,
            chance_level_kw=chance_level_kw,
            despine=despine,
            **kwargs,
        )
    @classmethod
    def from_cv_results(
        cls,
        cv_results,
        X,
        y,
        *,
        sample_weight=None,
        drop_intermediate=True,
        response_method="auto",
        pos_label=None,
        ax=None,
        name=None,
        curve_kwargs=None,
        plot_chance_level=False,
        chance_level_kwargs=None,
        despine=False,
    ):
        """Create a multi-fold ROC curve display given cross-validation results.

        .. versionadded:: 1.7

        Parameters
        ----------
        cv_results : dict
            Dictionary as returned by :func:`~sklearn.model_selection.cross_validate`
            using `return_estimator=True` and `return_indices=True` (i.e., dictionary
            should contain the keys "estimator" and "indices").

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        drop_intermediate : bool, default=True
            Whether to drop some suboptimal thresholds which would not appear
            on a plotted ROC curve. This is useful in order to create lighter
            ROC curves.

        response_method : {'predict_proba', 'decision_function', 'auto'} \
                default='auto'
            Specifies whether to use :term:`predict_proba` or
            :term:`decision_function` as the target response. If set to 'auto',
            :term:`predict_proba` is tried first and if it does not exist
            :term:`decision_function` is tried next.

        pos_label : int, float, bool or str, default=None
            The class considered as the positive class when computing the ROC AUC
            metrics. By default, `estimator.classes_[1]` (using `estimator` from
            `cv_results`) is considered as the positive class.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        name : str or list of str, default=None
            Name for labeling legend entries. The number of legend entries
            is determined by `curve_kwargs`, and is not affected by `name`.
            To label each curve, provide a list of strings. To avoid labeling
            individual curves that have the same appearance, a list cannot be used in
            conjunction with `curve_kwargs` being a dictionary or None. If a
            string is provided, it will be used to either label the single legend entry
            or if there are multiple legend entries, label each individual curve with
            the same name. If `None`, no name is shown in the legend.

        curve_kwargs : dict or list of dict, default=None
            Keywords arguments to be passed to matplotlib's `plot` function
            to draw individual ROC curves. If a list is provided the
            parameters are applied to the ROC curves of each CV fold
            sequentially and a legend entry is added for each curve.
            If a single dictionary is provided, the same parameters are applied
            to all ROC curves and a single legend entry for all curves is added,
            labeled with the mean ROC AUC score.

        plot_chance_level : bool, default=False
            Whether to plot the chance level.

        chance_level_kwargs : dict, default=None
            Keyword arguments to be passed to matplotlib's `plot` for rendering
            the chance level line.

        despine : bool, default=False
            Whether to remove the top and right spines from the plot.

        Returns
        -------
        display : :class:`~sklearn.metrics.RocCurveDisplay`
            The multi-fold ROC curve display.

        See Also
        --------
        roc_curve : Compute Receiver operating characteristic (ROC) curve.
        RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
            (ROC) curve given an estimator and some data.
        RocCurveDisplay.from_predictions : ROC Curve visualization given the
            probabilities of scores of a classifier.
        roc_auc_score : Compute the area under the ROC curve.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.metrics import RocCurveDisplay
        >>> from sklearn.model_selection import cross_validate
        >>> from sklearn.svm import SVC
        >>> X, y = make_classification(random_state=0)
        >>> clf = SVC(random_state=0)
        >>> cv_results = cross_validate(
        ...     clf, X, y, cv=3, return_estimator=True, return_indices=True)
        >>> RocCurveDisplay.from_cv_results(cv_results, X, y)
        <...>
        >>> plt.show()
        """
        cls._validate_from_cv_results_params(
            cv_results,
            X,
            y,
            sample_weight=sample_weight,
        )

        # Compute one ROC curve (and its AUC) per CV fold, each evaluated on
        # that fold's held-out test indices.
        fpr_folds, tpr_folds, auc_folds = [], [], []
        for estimator, test_indices in zip(
            cv_results["estimator"], cv_results["indices"]["test"]
        ):
            y_true = _safe_indexing(y, test_indices)
            y_pred, pos_label_ = _get_response_values_binary(
                estimator,
                _safe_indexing(X, test_indices),
                response_method=response_method,
                pos_label=pos_label,
            )
            sample_weight_fold = (
                None
                if sample_weight is None
                else _safe_indexing(sample_weight, test_indices)
            )
            fpr, tpr, _ = roc_curve(
                y_true,
                y_pred,
                pos_label=pos_label_,
                sample_weight=sample_weight_fold,
                drop_intermediate=drop_intermediate,
            )
            roc_auc = auc(fpr, tpr)

            fpr_folds.append(fpr)
            tpr_folds.append(tpr)
            auc_folds.append(roc_auc)

        # NOTE(review): `pos_label_` below is the value resolved in the LAST
        # loop iteration; this assumes `cv_results` contains at least one
        # estimator (otherwise NameError) and that the resolved positive class
        # is identical across folds — confirm upstream validation guarantees
        # both.
        viz = cls(
            fpr=fpr_folds,
            tpr=tpr_folds,
            roc_auc=auc_folds,
            name=name,
            pos_label=pos_label_,
        )
        return viz.plot(
            ax=ax,
            curve_kwargs=curve_kwargs,
            plot_chance_level=plot_chance_level,
            chance_level_kw=chance_level_kwargs,
            despine=despine,
        )
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import scipy as sp
from sklearn.metrics._ranking import det_curve
from sklearn.utils._plotting import (
_BinaryClassifierCurveDisplayMixin,
_deprecate_y_pred_parameter,
)
class DetCurveDisplay(_BinaryClassifierCurveDisplayMixin):
    """Detection Error Tradeoff (DET) curve visualization.

    It is recommended to use :func:`~sklearn.metrics.DetCurveDisplay.from_estimator`
    or :func:`~sklearn.metrics.DetCurveDisplay.from_predictions` to create a
    visualizer. All parameters are stored as attributes.

    For general information regarding `scikit-learn` visualization tools, see
    the :ref:`Visualization Guide <visualizations>`.
    For guidance on interpreting these plots, refer to the
    :ref:`Model Evaluation Guide <det_curve>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    fpr : ndarray
        False positive rate.

    fnr : ndarray
        False negative rate.

    estimator_name : str, default=None
        Name of estimator. If None, the estimator name is not shown.

    pos_label : int, float, bool or str, default=None
        The label of the positive class. If not `None`, this value is displayed in
        the x- and y-axes labels.

    Attributes
    ----------
    line_ : matplotlib Artist
        DET Curve.

    ax_ : matplotlib Axes
        Axes with DET Curve.

    figure_ : matplotlib Figure
        Figure containing the curve.

    See Also
    --------
    det_curve : Compute error rates for different probability thresholds.
    DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
        some data.
    DetCurveDisplay.from_predictions : Plot DET curve given the true and
        predicted labels.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.metrics import det_curve, DetCurveDisplay
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.svm import SVC
    >>> X, y = make_classification(n_samples=1000, random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.4, random_state=0)
    >>> clf = SVC(random_state=0).fit(X_train, y_train)
    >>> y_score = clf.decision_function(X_test)
    >>> fpr, fnr, _ = det_curve(y_test, y_score)
    >>> display = DetCurveDisplay(
    ...     fpr=fpr, fnr=fnr, estimator_name="SVC"
    ... )
    >>> display.plot()
    <...>
    >>> plt.show()
    """

    def __init__(self, *, fpr, fnr, estimator_name=None, pos_label=None):
        # Store inputs as-is; validation and rendering happen in `plot`.
        self.fpr = fpr
        self.fnr = fnr
        self.estimator_name = estimator_name
        self.pos_label = pos_label

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        sample_weight=None,
        drop_intermediate=True,
        response_method="auto",
        pos_label=None,
        name=None,
        ax=None,
        **kwargs,
    ):
        """Plot DET curve given an estimator and data.

        For general information regarding `scikit-learn` visualization tools, see
        the :ref:`Visualization Guide <visualizations>`.
        For guidance on interpreting these plots, refer to the
        :ref:`Model Evaluation Guide <det_curve>`.

        .. versionadded:: 1.0

        Parameters
        ----------
        estimator : estimator instance
            Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
            in which the last estimator is a classifier.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        drop_intermediate : bool, default=True
            Whether to drop thresholds where true positives (tp) do not change
            from the previous or subsequent threshold. All points with the same
            tp value have the same `fnr` and thus same y coordinate.

            .. versionadded:: 1.7

        response_method : {'predict_proba', 'decision_function', 'auto'} \
                default='auto'
            Specifies whether to use :term:`predict_proba` or
            :term:`decision_function` as the predicted target response. If set
            to 'auto', :term:`predict_proba` is tried first and if it does not
            exist :term:`decision_function` is tried next.

        pos_label : int, float, bool or str, default=None
            The label of the positive class. By default, `estimators.classes_[1]`
            is considered as the positive class.

        name : str, default=None
            Name of DET curve for labeling. If `None`, use the name of the
            estimator.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Additional keywords arguments passed to matplotlib `plot` function.

        Returns
        -------
        display : :class:`~sklearn.metrics.DetCurveDisplay`
            Object that stores computed values.

        See Also
        --------
        det_curve : Compute error rates for different probability thresholds.
        DetCurveDisplay.from_predictions : Plot DET curve given the true and
            predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.metrics import DetCurveDisplay
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.svm import SVC
        >>> X, y = make_classification(n_samples=1000, random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, test_size=0.4, random_state=0)
        >>> clf = SVC(random_state=0).fit(X_train, y_train)
        >>> DetCurveDisplay.from_estimator(
        ...     clf, X_test, y_test)
        <...>
        >>> plt.show()
        """
        # Compute the response values (scores) once, then delegate curve
        # computation and plotting to `from_predictions`.
        y_score, pos_label, name = cls._validate_and_get_response_values(
            estimator,
            X,
            y,
            response_method=response_method,
            pos_label=pos_label,
            name=name,
        )
        return cls.from_predictions(
            y_true=y,
            y_score=y_score,
            sample_weight=sample_weight,
            drop_intermediate=drop_intermediate,
            name=name,
            ax=ax,
            pos_label=pos_label,
            **kwargs,
        )

    @classmethod
    def from_predictions(
        cls,
        y_true,
        y_score=None,
        *,
        sample_weight=None,
        drop_intermediate=True,
        pos_label=None,
        name=None,
        ax=None,
        y_pred="deprecated",
        **kwargs,
    ):
        """Plot the DET curve given the true and predicted labels.

        For general information regarding `scikit-learn` visualization tools, see
        the :ref:`Visualization Guide <visualizations>`.
        For guidance on interpreting these plots, refer to the
        :ref:`Model Evaluation Guide <det_curve>`.

        .. versionadded:: 1.0

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            True labels.

        y_score : array-like of shape (n_samples,)
            Target scores, can either be probability estimates of the positive
            class, confidence values, or non-thresholded measure of decisions
            (as returned by `decision_function` on some classifiers).

            .. versionadded:: 1.8
                `y_pred` has been renamed to `y_score`.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        drop_intermediate : bool, default=True
            Whether to drop thresholds where true positives (tp) do not change
            from the previous or subsequent threshold. All points with the same
            tp value have the same `fnr` and thus same y coordinate.

            .. versionadded:: 1.7

        pos_label : int, float, bool or str, default=None
            The label of the positive class. When `pos_label=None`, if `y_true`
            is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an
            error will be raised.

        name : str, default=None
            Name of DET curve for labeling. If `None`, name will be set to
            `"Classifier"`.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        y_pred : array-like of shape (n_samples,)
            Target scores, can either be probability estimates of the positive
            class, confidence values, or non-thresholded measure of decisions
            (as returned by “decision_function” on some classifiers).

            .. deprecated:: 1.8
                `y_pred` is deprecated and will be removed in 1.10. Use
                `y_score` instead.

        **kwargs : dict
            Additional keywords arguments passed to matplotlib `plot` function.

        Returns
        -------
        display : :class:`~sklearn.metrics.DetCurveDisplay`
            Object that stores computed values.

        See Also
        --------
        det_curve : Compute error rates for different probability thresholds.
        DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
            some data.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.metrics import DetCurveDisplay
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.svm import SVC
        >>> X, y = make_classification(n_samples=1000, random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, test_size=0.4, random_state=0)
        >>> clf = SVC(random_state=0).fit(X_train, y_train)
        >>> y_score = clf.decision_function(X_test)
        >>> DetCurveDisplay.from_predictions(
        ...     y_test, y_score)
        <...>
        >>> plt.show()
        """
        # Resolve the deprecated `y_pred` alias into `y_score`.
        y_score = _deprecate_y_pred_parameter(y_score, y_pred, "1.8")
        pos_label_validated, name = cls._validate_from_predictions_params(
            y_true, y_score, sample_weight=sample_weight, pos_label=pos_label, name=name
        )

        # `det_curve` performs its own handling of `pos_label=None`, so the
        # raw `pos_label` (not the validated one) is forwarded here; the
        # validated label is only used for axis labeling on the display.
        fpr, fnr, _ = det_curve(
            y_true,
            y_score,
            pos_label=pos_label,
            sample_weight=sample_weight,
            drop_intermediate=drop_intermediate,
        )

        viz = cls(
            fpr=fpr,
            fnr=fnr,
            estimator_name=name,
            pos_label=pos_label_validated,
        )
        return viz.plot(ax=ax, name=name, **kwargs)

    def plot(self, ax=None, *, name=None, **kwargs):
        """Plot visualization.

        Parameters
        ----------
        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        name : str, default=None
            Name of DET curve for labeling. If `None`, use `estimator_name` if
            it is not `None`, otherwise no labeling is shown.

        **kwargs : dict
            Additional keywords arguments passed to matplotlib `plot` function.

        Returns
        -------
        display : :class:`~sklearn.metrics.DetCurveDisplay`
            Object that stores computed values.
        """
        self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)

        line_kwargs = {} if name is None else {"label": name}
        line_kwargs.update(**kwargs)

        # We have the following bounds:
        # sp.stats.norm.ppf(0.0) = -np.inf
        # sp.stats.norm.ppf(1.0) = np.inf
        # We therefore clip to eps and 1 - eps to not provide infinity to matplotlib.
        # NOTE: this overwrites `self.fpr` / `self.fnr` with the clipped arrays.
        eps = np.finfo(self.fpr.dtype).eps
        self.fpr = self.fpr.clip(eps, 1 - eps)
        self.fnr = self.fnr.clip(eps, 1 - eps)

        # DET curves are drawn on a normal-deviate (probit) scale on both axes.
        (self.line_,) = self.ax_.plot(
            sp.stats.norm.ppf(self.fpr),
            sp.stats.norm.ppf(self.fnr),
            **line_kwargs,
        )
        info_pos_label = (
            f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
        )
        xlabel = "False Positive Rate" + info_pos_label
        ylabel = "False Negative Rate" + info_pos_label
        self.ax_.set(xlabel=xlabel, ylabel=ylabel)

        if "label" in line_kwargs:
            self.ax_.legend(loc="lower right")

        # Tick marks at round probabilities, converted to probit coordinates.
        ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
        tick_locations = sp.stats.norm.ppf(ticks)
        tick_labels = [
            "{:.0%}".format(s) if (100 * s).is_integer() else "{:.1%}".format(s)
            for s in ticks
        ]
        self.ax_.set_xticks(tick_locations)
        self.ax_.set_xticklabels(tick_labels)
        self.ax_.set_xlim(-3, 3)
        self.ax_.set_yticks(tick_locations)
        self.ax_.set_yticklabels(tick_labels)
        self.ax_.set_ylim(-3, 3)

        return self
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import product
import numpy as np
from sklearn.base import is_classifier
from sklearn.metrics import confusion_matrix
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._plotting import _validate_style_kwargs
from sklearn.utils.multiclass import unique_labels
class ConfusionMatrixDisplay:
"""Confusion Matrix visualization.
It is recommended to use
:func:`~sklearn.metrics.ConfusionMatrixDisplay.from_estimator` or
:func:`~sklearn.metrics.ConfusionMatrixDisplay.from_predictions` to
create a :class:`ConfusionMatrixDisplay`. All parameters are stored as
attributes.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Model Evaluation Guide <confusion_matrix>`.
Parameters
----------
confusion_matrix : ndarray of shape (n_classes, n_classes)
Confusion matrix.
display_labels : ndarray of shape (n_classes,), default=None
Display labels for plot. If None, display labels are set from 0 to
`n_classes - 1`.
Attributes
----------
im_ : matplotlib AxesImage
Image representing the confusion matrix.
text_ : ndarray of shape (n_classes, n_classes), dtype=matplotlib Text, \
or None
Array of matplotlib axes. `None` if `include_values` is false.
ax_ : matplotlib Axes
Axes with confusion matrix.
figure_ : matplotlib Figure
Figure containing the confusion matrix.
See Also
--------
confusion_matrix : Compute Confusion Matrix to evaluate the accuracy of a
classification.
ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
given an estimator, the data, and the label.
ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
given the true and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> predictions = clf.predict(X_test)
>>> cm = confusion_matrix(y_test, predictions, labels=clf.classes_)
>>> disp = ConfusionMatrixDisplay(confusion_matrix=cm,
... display_labels=clf.classes_)
>>> disp.plot()
<...>
>>> plt.show()
"""
    def __init__(self, confusion_matrix, *, display_labels=None):
        # Store the precomputed (n_classes, n_classes) matrix; all rendering
        # is deferred to `plot`.
        self.confusion_matrix = confusion_matrix
        # Tick labels for the plot; per the class docstring, labels default to
        # 0..n_classes-1 when this is None.
        self.display_labels = display_labels
def plot(
    self,
    *,
    include_values=True,
    cmap="viridis",
    xticks_rotation="horizontal",
    values_format=None,
    ax=None,
    colorbar=True,
    im_kw=None,
    text_kw=None,
):
    """Plot visualization.

    Renders `self.confusion_matrix` as an image, optionally annotating each
    cell with its value, and stores all created matplotlib artists on the
    display instance (`im_`, `text_`, `ax_`, `figure_`).

    Parameters
    ----------
    include_values : bool, default=True
        Includes values in confusion matrix.

    cmap : str or matplotlib Colormap, default='viridis'
        Colormap recognized by matplotlib.

    xticks_rotation : {'vertical', 'horizontal'} or float, \
            default='horizontal'
        Rotation of xtick labels.

    values_format : str, default=None
        Format specification for values in confusion matrix. If `None`,
        the format specification is 'd' or '.2g' whichever is shorter.

    ax : matplotlib axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is
        created.

    colorbar : bool, default=True
        Whether or not to add a colorbar to the plot.

    im_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.imshow` call.

    text_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.text` call.

        .. versionadded:: 1.2

    Returns
    -------
    display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`
        Returns a :class:`~sklearn.metrics.ConfusionMatrixDisplay` instance
        that contains all the information to plot the confusion matrix.
    """
    check_matplotlib_support("ConfusionMatrixDisplay.plot")
    import matplotlib.pyplot as plt

    # Reuse the caller-supplied axes when given; otherwise open a new figure.
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure

    cm = self.confusion_matrix
    n_classes = cm.shape[0]

    # Merge user imshow kwargs over the defaults (user values win).
    default_im_kw = dict(interpolation="nearest", cmap=cmap)
    im_kw = im_kw or {}
    im_kw = _validate_style_kwargs(default_im_kw, im_kw)
    text_kw = text_kw or {}

    self.im_ = ax.imshow(cm, **im_kw)
    self.text_ = None
    # Extreme colors of the colormap, used below to pick a text color that
    # contrasts with each cell's background.
    cmap_min, cmap_max = self.im_.cmap(0), self.im_.cmap(1.0)

    if include_values:
        self.text_ = np.empty_like(cm, dtype=object)

        # print text with appropriate color depending on background
        thresh = (cm.max() + cm.min()) / 2.0

        for i, j in product(range(n_classes), range(n_classes)):
            # Dark background (value above midpoint) gets the light color.
            color = cmap_max if cm[i, j] < thresh else cmap_min

            if values_format is None:
                text_cm = format(cm[i, j], ".2g")
                if cm.dtype.kind != "f":
                    # Integer matrices: use plain 'd' when it is shorter.
                    text_d = format(cm[i, j], "d")
                    if len(text_d) < len(text_cm):
                        text_cm = text_d
            else:
                text_cm = format(cm[i, j], values_format)

            default_text_kwargs = dict(ha="center", va="center", color=color)
            text_kwargs = _validate_style_kwargs(default_text_kwargs, text_kw)

            self.text_[i, j] = ax.text(j, i, text_cm, **text_kwargs)

    # Fall back to integer tick labels 0..n_classes-1 when none were given.
    if self.display_labels is None:
        display_labels = np.arange(n_classes)
    else:
        display_labels = self.display_labels

    if colorbar:
        fig.colorbar(self.im_, ax=ax)

    ax.set(
        xticks=np.arange(n_classes),
        yticks=np.arange(n_classes),
        xticklabels=display_labels,
        yticklabels=display_labels,
        ylabel="True label",
        xlabel="Predicted label",
    )

    # Flip the y-axis so the first class appears at the top of the matrix.
    ax.set_ylim((n_classes - 0.5, -0.5))
    plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)

    self.figure_ = fig
    self.ax_ = ax
    return self
@classmethod
def from_estimator(
    cls,
    estimator,
    X,
    y,
    *,
    labels=None,
    sample_weight=None,
    normalize=None,
    display_labels=None,
    include_values=True,
    xticks_rotation="horizontal",
    values_format=None,
    cmap="viridis",
    ax=None,
    colorbar=True,
    im_kw=None,
    text_kw=None,
):
    """Plot Confusion Matrix given an estimator and some data.

    For general information regarding `scikit-learn` visualization tools, see
    the :ref:`Visualization Guide <visualizations>`.
    For guidance on interpreting these plots, refer to the
    :ref:`Model Evaluation Guide <confusion_matrix>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
        in which the last estimator is a classifier.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input values.

    y : array-like of shape (n_samples,)
        Target values.

    labels : array-like of shape (n_classes,), default=None
        List of labels to index the confusion matrix. This may be used to
        reorder or select a subset of labels. If `None` is given, those
        that appear at least once in `y_true` or `y_pred` are used in
        sorted order.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    normalize : {'true', 'pred', 'all'}, default=None
        Whether to normalize the counts displayed in the matrix:

        - if `'true'`, the confusion matrix is normalized over the true
          conditions (e.g. rows);
        - if `'pred'`, the confusion matrix is normalized over the
          predicted conditions (e.g. columns);
        - if `'all'`, the confusion matrix is normalized by the total
          number of samples;
        - if `None` (default), the confusion matrix will not be normalized.

    display_labels : array-like of shape (n_classes,), default=None
        Target names used for plotting. By default, `labels` will be used
        if it is defined, otherwise the unique labels of `y_true` and
        `y_pred` will be used.

    include_values : bool, default=True
        Includes values in confusion matrix.

    xticks_rotation : {'vertical', 'horizontal'} or float, \
            default='horizontal'
        Rotation of xtick labels.

    values_format : str, default=None
        Format specification for values in confusion matrix. If `None`, the
        format specification is 'd' or '.2g' whichever is shorter.

    cmap : str or matplotlib Colormap, default='viridis'
        Colormap recognized by matplotlib.

    ax : matplotlib Axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is
        created.

    colorbar : bool, default=True
        Whether or not to add a colorbar to the plot.

    im_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.imshow` call.

    text_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.text` call.

        .. versionadded:: 1.2

    Returns
    -------
    display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`

    See Also
    --------
    ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
        given the true and predicted labels.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.metrics import ConfusionMatrixDisplay
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.svm import SVC
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = SVC(random_state=0)
    >>> clf.fit(X_train, y_train)
    SVC(random_state=0)
    >>> ConfusionMatrixDisplay.from_estimator(
    ...     clf, X_test, y_test)
    <...>
    >>> plt.show()

    For a detailed example of using a confusion matrix to evaluate a
    Support Vector Classifier, please see
    :ref:`sphx_glr_auto_examples_model_selection_plot_confusion_matrix.py`
    """
    display_method = f"{cls.__name__}.from_estimator"
    check_matplotlib_support(display_method)

    # Confusion matrices only make sense for discrete class predictions.
    if not is_classifier(estimator):
        raise ValueError(f"{display_method} only supports classifiers")

    # Compute predictions once, then delegate all plotting concerns.
    return cls.from_predictions(
        y,
        estimator.predict(X),
        labels=labels,
        sample_weight=sample_weight,
        normalize=normalize,
        display_labels=display_labels,
        include_values=include_values,
        xticks_rotation=xticks_rotation,
        values_format=values_format,
        cmap=cmap,
        ax=ax,
        colorbar=colorbar,
        im_kw=im_kw,
        text_kw=text_kw,
    )
@classmethod
def from_predictions(
    cls,
    y_true,
    y_pred,
    *,
    labels=None,
    sample_weight=None,
    normalize=None,
    display_labels=None,
    include_values=True,
    xticks_rotation="horizontal",
    values_format=None,
    cmap="viridis",
    ax=None,
    colorbar=True,
    im_kw=None,
    text_kw=None,
):
    """Plot Confusion Matrix given true and predicted labels.

    For general information regarding `scikit-learn` visualization tools, see
    the :ref:`Visualization Guide <visualizations>`.
    For guidance on interpreting these plots, refer to the
    :ref:`Model Evaluation Guide <confusion_matrix>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True labels.

    y_pred : array-like of shape (n_samples,)
        The predicted labels given by the method `predict` of a
        classifier.

    labels : array-like of shape (n_classes,), default=None
        List of labels to index the confusion matrix. This may be used to
        reorder or select a subset of labels. If `None` is given, those
        that appear at least once in `y_true` or `y_pred` are used in
        sorted order.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    normalize : {'true', 'pred', 'all'}, default=None
        Whether to normalize the counts displayed in the matrix:

        - if `'true'`, the confusion matrix is normalized over the true
          conditions (e.g. rows);
        - if `'pred'`, the confusion matrix is normalized over the
          predicted conditions (e.g. columns);
        - if `'all'`, the confusion matrix is normalized by the total
          number of samples;
        - if `None` (default), the confusion matrix will not be normalized.

    display_labels : array-like of shape (n_classes,), default=None
        Target names used for plotting. By default, `labels` will be used
        if it is defined, otherwise the unique labels of `y_true` and
        `y_pred` will be used.

    include_values : bool, default=True
        Includes values in confusion matrix.

    xticks_rotation : {'vertical', 'horizontal'} or float, \
            default='horizontal'
        Rotation of xtick labels.

    values_format : str, default=None
        Format specification for values in confusion matrix. If `None`, the
        format specification is 'd' or '.2g' whichever is shorter.

    cmap : str or matplotlib Colormap, default='viridis'
        Colormap recognized by matplotlib.

    ax : matplotlib Axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is
        created.

    colorbar : bool, default=True
        Whether or not to add a colorbar to the plot.

    im_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.imshow` call.

    text_kw : dict, default=None
        Dict with keywords passed to `matplotlib.pyplot.text` call.

        .. versionadded:: 1.2

    Returns
    -------
    display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`

    See Also
    --------
    ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
        given an estimator, the data, and the label.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.metrics import ConfusionMatrixDisplay
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.svm import SVC
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = SVC(random_state=0)
    >>> clf.fit(X_train, y_train)
    SVC(random_state=0)
    >>> y_pred = clf.predict(X_test)
    >>> ConfusionMatrixDisplay.from_predictions(
    ...     y_test, y_pred)
    <...>
    >>> plt.show()
    """
    check_matplotlib_support(f"{cls.__name__}.from_predictions")

    # Resolve the tick labels: explicit display labels beat `labels`,
    # which beats the sorted unique labels found in the data.
    if display_labels is None:
        display_labels = (
            labels if labels is not None else unique_labels(y_true, y_pred)
        )

    computed_matrix = confusion_matrix(
        y_true,
        y_pred,
        sample_weight=sample_weight,
        labels=labels,
        normalize=normalize,
    )

    display = cls(confusion_matrix=computed_matrix, display_labels=display_labels)
    return display.plot(
        include_values=include_values,
        cmap=cmap,
        ax=ax,
        xticks_rotation=xticks_rotation,
        values_format=values_format,
        colorbar=colorbar,
        im_kw=im_kw,
        text_kw=text_kw,
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/regression.py | sklearn/metrics/_plot/regression.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import numpy as np
from sklearn.utils import _safe_indexing, check_random_state
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._plotting import _validate_style_kwargs
class PredictionErrorDisplay:
    """Visualization of the prediction error of a regression model.

    This tool can display "residuals vs predicted" or "actual vs predicted"
    using scatter plots to qualitatively assess the behavior of a regressor,
    preferably on held-out data points.

    See the details in the docstrings of
    :func:`~sklearn.metrics.PredictionErrorDisplay.from_estimator` or
    :func:`~sklearn.metrics.PredictionErrorDisplay.from_predictions` to
    create a visualizer. All parameters are stored as attributes.

    For general information regarding `scikit-learn` visualization tools, read
    more in the :ref:`Visualization Guide <visualizations>`.
    For details regarding interpreting these plots, refer to the
    :ref:`Model Evaluation Guide <visualization_regression_evaluation>`.

    .. versionadded:: 1.2

    Parameters
    ----------
    y_true : ndarray of shape (n_samples,)
        True values.

    y_pred : ndarray of shape (n_samples,)
        Prediction values.

    Attributes
    ----------
    line_ : matplotlib Artist
        Optimal line representing `y_true == y_pred`. Therefore, it is a
        diagonal line for `kind="predictions"` and a horizontal line for
        `kind="residuals"`.

    errors_lines_ : matplotlib Artist or None
        Residual lines. If `with_errors=False`, then it is set to `None`.

    scatter_ : matplotlib Artist
        Scatter data points.

    ax_ : matplotlib Axes
        Axes with the different matplotlib axis.

    figure_ : matplotlib Figure
        Figure containing the scatter and lines.

    See Also
    --------
    PredictionErrorDisplay.from_estimator : Prediction error visualization
        given an estimator and some data.
    PredictionErrorDisplay.from_predictions : Prediction error visualization
        given the true and predicted targets.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.linear_model import Ridge
    >>> from sklearn.metrics import PredictionErrorDisplay
    >>> X, y = load_diabetes(return_X_y=True)
    >>> ridge = Ridge().fit(X, y)
    >>> y_pred = ridge.predict(X)
    >>> display = PredictionErrorDisplay(y_true=y, y_pred=y_pred)
    >>> display.plot()
    <...>
    >>> plt.show()
    """

    def __init__(self, *, y_true, y_pred):
        self.y_true = y_true
        self.y_pred = y_pred

    def plot(
        self,
        ax=None,
        *,
        kind="residual_vs_predicted",
        scatter_kwargs=None,
        line_kwargs=None,
    ):
        """Plot visualization.

        Extra keyword arguments will be passed to matplotlib's ``plot``.

        Parameters
        ----------
        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
                default="residual_vs_predicted"
            The type of plot to draw:

            - "actual_vs_predicted" draws the observed values (y-axis) vs.
              the predicted values (x-axis).
            - "residual_vs_predicted" draws the residuals, i.e. difference
              between observed and predicted values, (y-axis) vs. the predicted
              values (x-axis).

        scatter_kwargs : dict, default=None
            Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
            call.

        line_kwargs : dict, default=None
            Dictionary with keyword passed to the `matplotlib.pyplot.plot`
            call to draw the optimal line.

        Returns
        -------
        display : :class:`~sklearn.metrics.PredictionErrorDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support(f"{self.__class__.__name__}.plot")

        expected_kind = ("actual_vs_predicted", "residual_vs_predicted")
        if kind not in expected_kind:
            raise ValueError(
                f"`kind` must be one of {', '.join(expected_kind)}. "
                f"Got {kind!r} instead."
            )

        import matplotlib.pyplot as plt

        if scatter_kwargs is None:
            scatter_kwargs = {}
        if line_kwargs is None:
            line_kwargs = {}

        default_scatter_kwargs = {"color": "tab:blue", "alpha": 0.8}
        default_line_kwargs = {"color": "black", "alpha": 0.7, "linestyle": "--"}

        # `_validate_style_kwargs` already merges the defaults with the
        # user-provided kwargs (user values take precedence), so the result
        # can be used directly. A previous revision redundantly re-merged
        # the defaults into these dicts a second time; that duplicate merge
        # has been removed as it was a no-op.
        scatter_kwargs = _validate_style_kwargs(default_scatter_kwargs, scatter_kwargs)
        line_kwargs = _validate_style_kwargs(default_line_kwargs, line_kwargs)

        if ax is None:
            _, ax = plt.subplots()

        if kind == "actual_vs_predicted":
            # Shared range so the identity line spans the full data extent.
            max_value = max(np.max(self.y_true), np.max(self.y_pred))
            min_value = min(np.min(self.y_true), np.min(self.y_pred))
            self.line_ = ax.plot(
                [min_value, max_value], [min_value, max_value], **line_kwargs
            )[0]
            x_data, y_data = self.y_pred, self.y_true
            xlabel, ylabel = "Predicted values", "Actual values"

            self.scatter_ = ax.scatter(x_data, y_data, **scatter_kwargs)

            # force to have a squared axis
            ax.set_aspect("equal", adjustable="datalim")
            ax.set_xticks(np.linspace(min_value, max_value, num=5))
            ax.set_yticks(np.linspace(min_value, max_value, num=5))
        else:  # kind == "residual_vs_predicted"
            # Perfect predictions sit on the horizontal zero-residual line.
            self.line_ = ax.plot(
                [np.min(self.y_pred), np.max(self.y_pred)],
                [0, 0],
                **line_kwargs,
            )[0]
            self.scatter_ = ax.scatter(
                self.y_pred, self.y_true - self.y_pred, **scatter_kwargs
            )
            xlabel, ylabel = "Predicted values", "Residuals (actual - predicted)"

        ax.set(xlabel=xlabel, ylabel=ylabel)

        self.ax_ = ax
        self.figure_ = ax.figure

        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        kind="residual_vs_predicted",
        subsample=1_000,
        random_state=None,
        ax=None,
        scatter_kwargs=None,
        line_kwargs=None,
    ):
        """Plot the prediction error given a regressor and some data.

        For general information regarding `scikit-learn` visualization tools,
        read more in the :ref:`Visualization Guide <visualizations>`.
        For details regarding interpreting these plots, refer to the
        :ref:`Model Evaluation Guide <visualization_regression_evaluation>`.

        .. versionadded:: 1.2

        Parameters
        ----------
        estimator : estimator instance
            Fitted regressor or a fitted :class:`~sklearn.pipeline.Pipeline`
            in which the last estimator is a regressor.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Target values.

        kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
                default="residual_vs_predicted"
            The type of plot to draw:

            - "actual_vs_predicted" draws the observed values (y-axis) vs.
              the predicted values (x-axis).
            - "residual_vs_predicted" draws the residuals, i.e. difference
              between observed and predicted values, (y-axis) vs. the predicted
              values (x-axis).

        subsample : float, int or None, default=1_000
            Sampling the samples to be shown on the scatter plot. If `float`,
            it should be between 0 and 1 and represents the proportion of the
            original dataset. If `int`, it represents the number of samples
            displayed on the scatter plot. If `None`, no subsampling will be
            applied. By default, 1000 samples or less will be displayed.

        random_state : int or RandomState, default=None
            Controls the randomness when `subsample` is not `None`.
            See :term:`Glossary <random_state>` for details.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        scatter_kwargs : dict, default=None
            Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
            call.

        line_kwargs : dict, default=None
            Dictionary with keyword passed to the `matplotlib.pyplot.plot`
            call to draw the optimal line.

        Returns
        -------
        display : :class:`~sklearn.metrics.PredictionErrorDisplay`
            Object that stores the computed values.

        See Also
        --------
        PredictionErrorDisplay : Prediction error visualization for regression.
        PredictionErrorDisplay.from_predictions : Prediction error visualization
            given the true and predicted targets.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_diabetes
        >>> from sklearn.linear_model import Ridge
        >>> from sklearn.metrics import PredictionErrorDisplay
        >>> X, y = load_diabetes(return_X_y=True)
        >>> ridge = Ridge().fit(X, y)
        >>> disp = PredictionErrorDisplay.from_estimator(ridge, X, y)
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")

        # Compute predictions once, then delegate to `from_predictions`.
        y_pred = estimator.predict(X)

        return cls.from_predictions(
            y_true=y,
            y_pred=y_pred,
            kind=kind,
            subsample=subsample,
            random_state=random_state,
            ax=ax,
            scatter_kwargs=scatter_kwargs,
            line_kwargs=line_kwargs,
        )

    @classmethod
    def from_predictions(
        cls,
        y_true,
        y_pred,
        *,
        kind="residual_vs_predicted",
        subsample=1_000,
        random_state=None,
        ax=None,
        scatter_kwargs=None,
        line_kwargs=None,
    ):
        """Plot the prediction error given the true and predicted targets.

        For general information regarding `scikit-learn` visualization tools,
        read more in the :ref:`Visualization Guide <visualizations>`.
        For details regarding interpreting these plots, refer to the
        :ref:`Model Evaluation Guide <visualization_regression_evaluation>`.

        .. versionadded:: 1.2

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            True target values.

        y_pred : array-like of shape (n_samples,)
            Predicted target values.

        kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
                default="residual_vs_predicted"
            The type of plot to draw:

            - "actual_vs_predicted" draws the observed values (y-axis) vs.
              the predicted values (x-axis).
            - "residual_vs_predicted" draws the residuals, i.e. difference
              between observed and predicted values, (y-axis) vs. the predicted
              values (x-axis).

        subsample : float, int or None, default=1_000
            Sampling the samples to be shown on the scatter plot. If `float`,
            it should be between 0 and 1 and represents the proportion of the
            original dataset. If `int`, it represents the number of samples
            displayed on the scatter plot. If `None`, no subsampling will be
            applied. By default, 1000 samples or less will be displayed.

        random_state : int or RandomState, default=None
            Controls the randomness when `subsample` is not `None`.
            See :term:`Glossary <random_state>` for details.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        scatter_kwargs : dict, default=None
            Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
            call.

        line_kwargs : dict, default=None
            Dictionary with keyword passed to the `matplotlib.pyplot.plot`
            call to draw the optimal line.

        Returns
        -------
        display : :class:`~sklearn.metrics.PredictionErrorDisplay`
            Object that stores the computed values.

        See Also
        --------
        PredictionErrorDisplay : Prediction error visualization for regression.
        PredictionErrorDisplay.from_estimator : Prediction error visualization
            given an estimator and some data.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_diabetes
        >>> from sklearn.linear_model import Ridge
        >>> from sklearn.metrics import PredictionErrorDisplay
        >>> X, y = load_diabetes(return_X_y=True)
        >>> ridge = Ridge().fit(X, y)
        >>> y_pred = ridge.predict(X)
        >>> disp = PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred)
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_predictions")

        random_state = check_random_state(random_state)

        n_samples = len(y_true)
        if isinstance(subsample, numbers.Integral):
            if subsample <= 0:
                raise ValueError(
                    f"When an integer, subsample={subsample} should be positive."
                )
        elif isinstance(subsample, numbers.Real):
            # A float subsample is a proportion of the dataset size.
            if subsample <= 0 or subsample >= 1:
                raise ValueError(
                    f"When a floating-point, subsample={subsample} should"
                    " be in the (0, 1) range."
                )
            subsample = int(n_samples * subsample)

        if subsample is not None and subsample < n_samples:
            # NOTE(review): `choice` without `replace=False` samples with
            # replacement, so the same point can appear more than once —
            # confirm this is intended.
            indices = random_state.choice(np.arange(n_samples), size=subsample)
            y_true = _safe_indexing(y_true, indices, axis=0)
            y_pred = _safe_indexing(y_pred, indices, axis=0)

        viz = cls(
            y_true=y_true,
            y_pred=y_pred,
        )

        return viz.plot(
            ax=ax,
            kind=kind,
            scatter_kwargs=scatter_kwargs,
            line_kwargs=line_kwargs,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/__init__.py | sklearn/metrics/_plot/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/precision_recall_curve.py | sklearn/metrics/_plot/precision_recall_curve.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from collections import Counter
from sklearn.metrics._ranking import average_precision_score, precision_recall_curve
from sklearn.utils._plotting import (
_BinaryClassifierCurveDisplayMixin,
_deprecate_estimator_name,
_deprecate_y_pred_parameter,
_despine,
_validate_style_kwargs,
)
class PrecisionRecallDisplay(_BinaryClassifierCurveDisplayMixin):
"""Precision Recall visualization.
It is recommended to use
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` or
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` to create
a :class:`~sklearn.metrics.PrecisionRecallDisplay`. All parameters are
stored as attributes.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the :ref:`Model
Evaluation Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
precision : ndarray
Precision values.
recall : ndarray
Recall values.
average_precision : float, default=None
Average precision. If None, the average precision is not shown.
name : str, default=None
Name of estimator. If None, then the estimator name is not shown.
.. versionchanged:: 1.8
`estimator_name` was deprecated in favor of `name`.
pos_label : int, float, bool or str, default=None
The class considered the positive class when precision and recall metrics
computed. If not `None`, this value is displayed in the x- and y-axes labels.
.. versionadded:: 0.24
prevalence_pos_label : float, default=None
The prevalence of the positive label. It is used for plotting the
chance level line. If None, the chance level line will not be plotted
even if `plot_chance_level` is set to True when plotting.
.. versionadded:: 1.3
estimator_name : str, default=None
Name of estimator. If None, the estimator name is not shown.
.. deprecated:: 1.8
`estimator_name` is deprecated and will be removed in 1.10. Use `name`
instead.
Attributes
----------
line_ : matplotlib Artist
Precision recall curve.
chance_level_ : matplotlib Artist or None
The chance level line. It is `None` if the chance level is not plotted.
.. versionadded:: 1.3
ax_ : matplotlib Axes
Axes with precision recall curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given
a binary classifier.
PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve
using predictions from a binary classifier.
Notes
-----
The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) in
scikit-learn is computed without any interpolation. To be consistent with
this metric, the precision-recall curve is plotted without any
interpolation as well (step-wise style).
You can change this style by passing the keyword argument
`drawstyle="default"` in :meth:`plot`, :meth:`from_estimator`, or
:meth:`from_predictions`. However, the curve will not be strictly
consistent with the reported average precision.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import (precision_recall_curve,
... PrecisionRecallDisplay)
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> predictions = clf.predict(X_test)
>>> precision, recall, _ = precision_recall_curve(y_test, predictions)
>>> disp = PrecisionRecallDisplay(precision=precision, recall=recall)
>>> disp.plot()
<...>
>>> plt.show()
"""
def __init__(
    self,
    precision,
    recall,
    *,
    average_precision=None,
    name=None,
    pos_label=None,
    prevalence_pos_label=None,
    estimator_name="deprecated",
):
    """Store curve values and metadata; resolve the deprecated name alias."""
    self.precision = precision
    self.recall = recall
    self.average_precision = average_precision
    self.pos_label = pos_label
    self.prevalence_pos_label = prevalence_pos_label
    # `estimator_name` is deprecated in favor of `name`; the helper handles
    # the deprecation and returns the effective value.
    self.name = _deprecate_estimator_name(estimator_name, name, "1.8")
def plot(
    self,
    ax=None,
    *,
    name=None,
    plot_chance_level=False,
    chance_level_kw=None,
    despine=False,
    **kwargs,
):
    """Plot visualization.

    Draws the stored precision/recall values as a step-wise curve and,
    optionally, the chance-level line at the positive-class prevalence.

    Extra keyword arguments will be passed to matplotlib's `plot`.

    Parameters
    ----------
    ax : Matplotlib Axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is
        created.

    name : str, default=None
        Name of precision recall curve for labeling. If `None`, use
        the display's `name` attribute if not `None`, otherwise no
        labeling is shown.

    plot_chance_level : bool, default=False
        Whether to plot the chance level. The chance level is the prevalence
        of the positive label computed from the data passed during
        :meth:`from_estimator` or :meth:`from_predictions` call.

        .. versionadded:: 1.3

    chance_level_kw : dict, default=None
        Keyword arguments to be passed to matplotlib's `plot` for rendering
        the chance level line.

        .. versionadded:: 1.3

    despine : bool, default=False
        Whether to remove the top and right spines from the plot.

        .. versionadded:: 1.6

    **kwargs : dict
        Keyword arguments to be passed to matplotlib's `plot`.

    Returns
    -------
    display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
        Object that stores computed values.

    Notes
    -----
    The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
    in scikit-learn is computed without any interpolation. To be consistent
    with this metric, the precision-recall curve is plotted without any
    interpolation as well (step-wise style).

    You can change this style by passing the keyword argument
    `drawstyle="default"`. However, the curve will not be strictly
    consistent with the reported average precision.
    """
    # Mixin helper: checks matplotlib availability, resolves axes/figure
    # and the effective curve name.
    self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)

    # "steps-post" keeps the curve consistent with the uninterpolated AP
    # metric (see Notes above).
    default_line_kwargs = {"drawstyle": "steps-post"}
    # Build the legend label from whichever of name / AP are available.
    if self.average_precision is not None and name is not None:
        default_line_kwargs["label"] = (
            f"{name} (AP = {self.average_precision:0.2f})"
        )
    elif self.average_precision is not None:
        default_line_kwargs["label"] = f"AP = {self.average_precision:0.2f}"
    elif name is not None:
        default_line_kwargs["label"] = name

    line_kwargs = _validate_style_kwargs(default_line_kwargs, kwargs)

    (self.line_,) = self.ax_.plot(self.recall, self.precision, **line_kwargs)

    # Append the positive label to both axis labels when it is known.
    info_pos_label = (
        f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
    )

    xlabel = "Recall" + info_pos_label
    ylabel = "Precision" + info_pos_label
    # Slightly padded [0, 1] limits and equal aspect for a square PR plot.
    self.ax_.set(
        xlabel=xlabel,
        xlim=(-0.01, 1.01),
        ylabel=ylabel,
        ylim=(-0.01, 1.01),
        aspect="equal",
    )

    if plot_chance_level:
        # The chance level is the positive-class prevalence, which is only
        # known when the display was built via from_estimator/from_predictions
        # (or when the caller supplied it explicitly).
        if self.prevalence_pos_label is None:
            raise ValueError(
                "You must provide prevalence_pos_label when constructing the "
                "PrecisionRecallDisplay object in order to plot the chance "
                "level line. Alternatively, you may use "
                "PrecisionRecallDisplay.from_estimator or "
                "PrecisionRecallDisplay.from_predictions "
                "to automatically set prevalence_pos_label"
            )

        default_chance_level_line_kw = {
            "label": f"Chance level (AP = {self.prevalence_pos_label:0.2f})",
            "color": "k",
            "linestyle": "--",
        }

        if chance_level_kw is None:
            chance_level_kw = {}

        chance_level_line_kw = _validate_style_kwargs(
            default_chance_level_line_kw, chance_level_kw
        )

        # A horizontal line at the prevalence: the precision of a random
        # classifier at any recall.
        (self.chance_level_,) = self.ax_.plot(
            (0, 1),
            (self.prevalence_pos_label, self.prevalence_pos_label),
            **chance_level_line_kw,
        )
    else:
        self.chance_level_ = None

    if despine:
        _despine(self.ax_)

    # Only show a legend when there is something to label.
    if "label" in line_kwargs or plot_chance_level:
        self.ax_.legend(loc="lower left")

    return self
@classmethod
def from_estimator(
    cls,
    estimator,
    X,
    y,
    *,
    sample_weight=None,
    drop_intermediate=False,
    response_method="auto",
    pos_label=None,
    name=None,
    ax=None,
    plot_chance_level=False,
    chance_level_kw=None,
    despine=False,
    **kwargs,
):
    """Plot precision-recall curve given an estimator and some data.

    For general information regarding `scikit-learn` visualization tools, see
    the :ref:`Visualization Guide <visualizations>`.

    For guidance on interpreting these plots, refer to the :ref:`Model
    Evaluation Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
        in which the last estimator is a classifier.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input values.

    y : array-like of shape (n_samples,)
        Target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    drop_intermediate : bool, default=False
        Whether to drop some suboptimal thresholds which would not appear
        on a plotted precision-recall curve. This is useful in order to
        create lighter precision-recall curves.

        .. versionadded:: 1.3

    response_method : {'predict_proba', 'decision_function', 'auto'}, \
            default='auto'
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. If set to 'auto',
        :term:`predict_proba` is tried first and if it does not exist
        :term:`decision_function` is tried next.

    pos_label : int, float, bool or str, default=None
        The class considered as the positive class when computing the
        precision and recall metrics. By default, `estimators.classes_[1]`
        is considered as the positive class.

    name : str, default=None
        Name for labeling curve. If `None`, no name is used.

    ax : matplotlib axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is created.

    plot_chance_level : bool, default=False
        Whether to plot the chance level. The chance level is the prevalence
        of the positive label computed from the data passed during
        :meth:`from_estimator` or :meth:`from_predictions` call.

        .. versionadded:: 1.3

    chance_level_kw : dict, default=None
        Keyword arguments to be passed to matplotlib's `plot` for rendering
        the chance level line.

        .. versionadded:: 1.3

    despine : bool, default=False
        Whether to remove the top and right spines from the plot.

        .. versionadded:: 1.6

    **kwargs : dict
        Keyword arguments to be passed to matplotlib's `plot`.

    Returns
    -------
    display : :class:`~sklearn.metrics.PrecisionRecallDisplay`

    See Also
    --------
    PrecisionRecallDisplay.from_predictions : Plot precision-recall curve
        using estimated probabilities or output of decision function.

    Notes
    -----
    The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
    in scikit-learn is computed without any interpolation. To be consistent
    with this metric, the precision-recall curve is plotted without any
    interpolation as well (step-wise style).

    You can change this style by passing the keyword argument
    `drawstyle="default"`. However, the curve will not be strictly
    consistent with the reported average precision.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.metrics import PrecisionRecallDisplay
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = LogisticRegression()
    >>> clf.fit(X_train, y_train)
    LogisticRegression()
    >>> PrecisionRecallDisplay.from_estimator(
    ...     clf, X_test, y_test)
    <...>
    >>> plt.show()
    """
    # Resolve the response values (probabilities or decision scores), the
    # effective positive label, and the display name from the estimator.
    y_score, pos_label, name = cls._validate_and_get_response_values(
        estimator,
        X,
        y,
        response_method=response_method,
        pos_label=pos_label,
        name=name,
    )

    # All the actual curve computation and plotting is shared with
    # `from_predictions`; forward everything there.
    shared_kwargs = dict(
        sample_weight=sample_weight,
        name=name,
        pos_label=pos_label,
        drop_intermediate=drop_intermediate,
        ax=ax,
        plot_chance_level=plot_chance_level,
        chance_level_kw=chance_level_kw,
        despine=despine,
    )
    return cls.from_predictions(y, y_score, **shared_kwargs, **kwargs)
@classmethod
def from_predictions(
    cls,
    y_true,
    y_score=None,
    *,
    sample_weight=None,
    drop_intermediate=False,
    pos_label=None,
    name=None,
    ax=None,
    plot_chance_level=False,
    chance_level_kw=None,
    despine=False,
    y_pred="deprecated",
    **kwargs,
):
    """Plot precision-recall curve given binary class predictions.

    For general information regarding `scikit-learn` visualization tools, see
    the :ref:`Visualization Guide <visualizations>`.

    For guidance on interpreting these plots, refer to the :ref:`Model
    Evaluation Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True binary labels.

    y_score : array-like of shape (n_samples,)
        Estimated probabilities or output of decision function.

        .. versionadded:: 1.8
            `y_pred` has been renamed to `y_score`.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    drop_intermediate : bool, default=False
        Whether to drop some suboptimal thresholds which would not appear
        on a plotted precision-recall curve. This is useful in order to
        create lighter precision-recall curves.

        .. versionadded:: 1.3

    pos_label : int, float, bool or str, default=None
        The class considered as the positive class when computing the
        precision and recall metrics. When `pos_label=None`, if `y_true` is
        in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an error
        will be raised.

    name : str, default=None
        Name for labeling curve. If `None`, name will be set to
        `"Classifier"`.

    ax : matplotlib axes, default=None
        Axes object to plot on. If `None`, a new figure and axes is created.

    plot_chance_level : bool, default=False
        Whether to plot the chance level. The chance level is the prevalence
        of the positive label computed from the data passed during
        :meth:`from_estimator` or :meth:`from_predictions` call.

        .. versionadded:: 1.3

    chance_level_kw : dict, default=None
        Keyword arguments to be passed to matplotlib's `plot` for rendering
        the chance level line.

        .. versionadded:: 1.3

    despine : bool, default=False
        Whether to remove the top and right spines from the plot.

        .. versionadded:: 1.6

    y_pred : array-like of shape (n_samples,)
        Estimated probabilities or output of decision function.

        .. deprecated:: 1.8
            `y_pred` is deprecated and will be removed in 1.10. Use
            `y_score` instead.

    **kwargs : dict
        Keyword arguments to be passed to matplotlib's `plot`.

    Returns
    -------
    display : :class:`~sklearn.metrics.PrecisionRecallDisplay`

    See Also
    --------
    PrecisionRecallDisplay.from_estimator : Plot precision-recall curve
        using an estimator.

    Notes
    -----
    The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
    in scikit-learn is computed without any interpolation. To be consistent
    with this metric, the precision-recall curve is plotted without any
    interpolation as well (step-wise style).

    You can change this style by passing the keyword argument
    `drawstyle="default"`. However, the curve will not be strictly
    consistent with the reported average precision.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.metrics import PrecisionRecallDisplay
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = LogisticRegression()
    >>> clf.fit(X_train, y_train)
    LogisticRegression()
    >>> y_score = clf.predict_proba(X_test)[:, 1]
    >>> PrecisionRecallDisplay.from_predictions(
    ...     y_test, y_score)
    <...>
    >>> plt.show()
    """
    # Handle the 1.8 -> 1.10 rename of `y_pred` to `y_score`.
    y_score = _deprecate_y_pred_parameter(y_score, y_pred, "1.8")
    pos_label, name = cls._validate_from_predictions_params(
        y_true, y_score, sample_weight=sample_weight, pos_label=pos_label, name=name
    )

    precision, recall, _ = precision_recall_curve(
        y_true,
        y_score,
        pos_label=pos_label,
        sample_weight=sample_weight,
        drop_intermediate=drop_intermediate,
    )
    average_precision = average_precision_score(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
    )

    # The chance level of a precision-recall plot is the prevalence of the
    # positive class. When `sample_weight` is provided, the prevalence must be
    # weighted to stay consistent with the weighted `average_precision`
    # computed above: a chance-level classifier has an AP equal to the
    # (weighted) prevalence. The previous unweighted `Counter`-based count
    # disagreed with the reported AP whenever sample weights were used.
    if sample_weight is None:
        class_count = Counter(y_true)
        prevalence_pos_label = class_count[pos_label] / sum(class_count.values())
    else:
        pos_weight = sum(
            weight
            for target, weight in zip(y_true, sample_weight)
            if target == pos_label
        )
        prevalence_pos_label = pos_weight / sum(sample_weight)

    viz = cls(
        precision=precision,
        recall=recall,
        average_precision=average_precision,
        name=name,
        pos_label=pos_label,
        prevalence_pos_label=prevalence_pos_label,
    )

    return viz.plot(
        ax=ax,
        name=name,
        plot_chance_level=plot_chance_level,
        chance_level_kw=chance_level_kw,
        despine=despine,
        **kwargs,
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_roc_curve_display.py | sklearn/metrics/_plot/tests/test_roc_curve_display.py | from collections.abc import Mapping
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.integrate import trapezoid
from sklearn import clone
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay, auc, roc_curve
from sklearn.model_selection import cross_validate, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import _safe_indexing, shuffle
from sklearn.utils._response import _get_response_values_binary
@pytest.fixture(scope="module")
def data_binary():
    """Module-wide binary classification problem with some label noise."""
    return make_classification(
        n_samples=200,
        n_features=20,
        n_informative=5,
        n_redundant=2,
        flip_y=0.1,
        class_sep=0.8,
        random_state=42,
    )
def _check_figure_axes_and_labels(display, pos_label):
    """Check mpl axes and figure defaults are correct."""
    import matplotlib as mpl

    assert isinstance(display.ax_, mpl.axes.Axes)
    assert isinstance(display.figure_, mpl.figure.Figure)
    assert display.ax_.get_adjustable() == "box"
    assert display.ax_.get_aspect() in ("equal", 1.0)
    assert display.ax_.get_xlim() == display.ax_.get_ylim() == (-0.01, 1.01)

    # When no positive label is given, the display defaults to 1.
    shown_label = 1 if pos_label is None else pos_label
    assert (
        display.ax_.get_ylabel()
        == f"True Positive Rate (Positive label: {shown_label})"
    )
    assert (
        display.ax_.get_xlabel()
        == f"False Positive Rate (Positive label: {shown_label})"
    )
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("drop_intermediate", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
@pytest.mark.parametrize(
    "constructor_name, default_name",
    [
        ("from_estimator", "LogisticRegression"),
        ("from_predictions", "Classifier"),
    ],
)
def test_roc_curve_display_plotting(
    pyplot,
    response_method,
    data_binary,
    with_sample_weight,
    drop_intermediate,
    with_strings,
    constructor_name,
    default_name,
):
    """Check the overall plotting behaviour for single curve."""
    X, y = data_binary

    pos_label = None
    if with_strings:
        # Relabel with strings so `pos_label` has to be passed explicitly.
        y = np.array(["c", "b"])[y]
        pos_label = "c"

    sample_weight = None
    if with_sample_weight:
        sample_weight = np.random.RandomState(42).randint(1, 4, size=(X.shape[0]))

    lr = LogisticRegression().fit(X, y)

    y_score = getattr(lr, response_method)(X)
    if y_score.ndim != 1:
        y_score = y_score[:, 1]

    shared_kwargs = dict(
        sample_weight=sample_weight,
        drop_intermediate=drop_intermediate,
        pos_label=pos_label,
        curve_kwargs={"alpha": 0.8},
    )
    if constructor_name == "from_estimator":
        display = RocCurveDisplay.from_estimator(lr, X, y, **shared_kwargs)
    else:
        display = RocCurveDisplay.from_predictions(y, y_score, **shared_kwargs)

    # Recompute the reference curve by hand and compare against the display.
    fpr, tpr, _ = roc_curve(
        y,
        y_score,
        sample_weight=sample_weight,
        drop_intermediate=drop_intermediate,
        pos_label=pos_label,
    )

    assert_allclose(display.roc_auc, auc(fpr, tpr))
    assert_allclose(display.fpr, fpr)
    assert_allclose(display.tpr, tpr)

    assert display.name == default_name

    import matplotlib as mpl

    _check_figure_axes_and_labels(display, pos_label)
    assert isinstance(display.line_, mpl.lines.Line2D)
    assert display.line_.get_alpha() == 0.8
    assert display.line_.get_label() == f"{default_name} (AUC = {display.roc_auc:.2f})"
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {
                "fpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "tpr": [np.array([0, 0.5, 1])],
                "roc_auc": None,
                "name": None,
            },
            "self.fpr and self.tpr from `RocCurveDisplay` initialization,",
        ),
        (
            {
                "fpr": [np.array([0, 0.5, 1])],
                "tpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "roc_auc": [0.8, 0.9],
                "name": None,
            },
            "self.fpr, self.tpr and self.roc_auc from `RocCurveDisplay`",
        ),
        (
            {
                "fpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "tpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "roc_auc": [0.8],
                "name": None,
            },
            "Got: self.fpr: 2, self.tpr: 2, self.roc_auc: 1",
        ),
        (
            {
                "fpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "tpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "roc_auc": [0.8, 0.9],
                "name": ["curve1", "curve2", "curve3"],
            },
            r"self.fpr, self.tpr, self.roc_auc and 'name' \(or self.name\)",
        ),
        (
            {
                "fpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "tpr": [np.array([0, 0.5, 1]), np.array([0, 0.5, 1])],
                "roc_auc": [0.8, 0.9],
                # List of length 1 is always allowed
                "name": ["curve1"],
            },
            None,
        ),
    ],
)
def test_roc_curve_plot_parameter_length_validation(pyplot, params, err_msg):
    """Check `plot` parameter length validation performed correctly."""
    display = RocCurveDisplay(**params)
    if err_msg is None:
        # Consistent lengths: plotting must succeed without raising.
        display.plot()
    else:
        with pytest.raises(ValueError, match=err_msg):
            display.plot()
def test_validate_plot_params(pyplot):
    """Check `_validate_plot_params` returns the correct variables."""
    # A bare array for `fpr` and a one-element list for `tpr` should both be
    # normalized to lists of length one.
    display = RocCurveDisplay(
        fpr=np.array([0, 0.5, 1]),
        tpr=[np.array([0, 0.5, 1])],
        roc_auc=None,
        name="test_curve",
        pos_label=None,
    )

    fpr_out, tpr_out, roc_auc_out, name_out = display._validate_plot_params(
        ax=None, name=None
    )

    assert isinstance(fpr_out, list) and len(fpr_out) == 1
    assert isinstance(tpr_out, list) and len(tpr_out) == 1
    assert roc_auc_out is None
    assert name_out == ["test_curve"]
def test_roc_curve_from_cv_results_param_validation(pyplot, data_binary):
    """Check parameter validation is correct."""
    X, y = data_binary

    # `cv_results` missing a required key. The first run lacks the fitted
    # estimators, the second lacks the train/test indices. (Previously both
    # were built with `return_estimator=True, return_indices=False`, so the
    # "missing estimator" branch was never actually exercised.)
    cv_results_no_est = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=False, return_indices=True
    )
    cv_results_no_indices = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=False
    )
    for cv_results in (cv_results_no_est, cv_results_no_indices):
        with pytest.raises(
            ValueError,
            match="`cv_results` does not contain one of the following required",
        ):
            RocCurveDisplay.from_cv_results(cv_results, X, y)

    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )

    # `X` wrong length
    with pytest.raises(ValueError, match="`X` does not contain the correct"):
        RocCurveDisplay.from_cv_results(cv_results, X[:10, :], y)

    # `y` not binary
    y_multi = y.copy()
    y_multi[0] = 2
    with pytest.raises(ValueError, match="The target `y` is not binary."):
        RocCurveDisplay.from_cv_results(cv_results, X, y_multi)

    # input inconsistent length
    with pytest.raises(ValueError, match="Found input variables with inconsistent"):
        RocCurveDisplay.from_cv_results(cv_results, X, y[:10])
    with pytest.raises(ValueError, match="Found input variables with inconsistent"):
        RocCurveDisplay.from_cv_results(cv_results, X, y, sample_weight=[1, 2])

    # `pos_label` inconsistency: the estimators were fit with {0, 1} labels
    # while `y` now only contains {0, 2}, so no positive samples are found.
    y_multi[y_multi == 1] = 2
    with pytest.warns(UndefinedMetricWarning, match="No positive samples in y_true"):
        RocCurveDisplay.from_cv_results(cv_results, X, y_multi)

    # `name` is list while `curve_kwargs` is None or dict
    for curve_kwargs in (None, {"alpha": 0.2}):
        with pytest.raises(ValueError, match="To avoid labeling individual curves"):
            RocCurveDisplay.from_cv_results(
                cv_results,
                X,
                y,
                name=["one", "two", "three"],
                curve_kwargs=curve_kwargs,
            )

    # `curve_kwargs` incorrect length
    with pytest.raises(ValueError, match="`curve_kwargs` must be None, a dictionary"):
        RocCurveDisplay.from_cv_results(cv_results, X, y, curve_kwargs=[{"alpha": 1}])

    # `curve_kwargs` both alias provided
    with pytest.raises(TypeError, match="Got both c and"):
        RocCurveDisplay.from_cv_results(
            cv_results, X, y, curve_kwargs={"c": "blue", "color": "red"}
        )
@pytest.mark.parametrize(
    "curve_kwargs",
    [None, {"alpha": 0.2}, [{"alpha": 0.2}, {"alpha": 0.3}, {"alpha": 0.4}]],
)
def test_roc_curve_display_from_cv_results_curve_kwargs(
    pyplot, data_binary, curve_kwargs
):
    """Check `curve_kwargs` correctly passed."""
    X, y = data_binary
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )
    display = RocCurveDisplay.from_cv_results(
        cv_results,
        X,
        y,
        curve_kwargs=curve_kwargs,
    )

    for idx, line in enumerate(display.line_):
        if curve_kwargs is None:
            expected_alpha = 0.5  # default `alpha`
        elif isinstance(curve_kwargs, Mapping):
            expected_alpha = 0.2  # shared dict applies to all curves
        else:
            expected_alpha = curve_kwargs[idx]["alpha"]  # per-curve value
        assert line.get_alpha() == expected_alpha
        # Non-overridden defaults should stay in place for every curve.
        assert line.get_linestyle() == "--"
        assert line.get_color() == "blue"
# TODO(1.9): Remove in 1.9
@pytest.mark.parametrize(
    "constructor_name", ["from_estimator", "from_predictions", "plot"]
)
def test_roc_curve_display_kwargs_deprecation(pyplot, data_binary, constructor_name):
    """Check **kwargs deprecated correctly in favour of `curve_kwargs`."""
    X, y = data_binary
    lr = LogisticRegression().fit(X, y)
    fpr = np.array([0, 0.5, 1])
    tpr = np.array([0, 0.5, 1])

    def invoke(**extra):
        # Dispatch the same keyword arguments to the parametrized entry point.
        if constructor_name == "from_estimator":
            RocCurveDisplay.from_estimator(lr, X, y, **extra)
        elif constructor_name == "from_predictions":
            RocCurveDisplay.from_predictions(y, y, **extra)
        else:
            RocCurveDisplay(fpr=fpr, tpr=tpr).plot(**extra)

    # Error when both `curve_kwargs` and `**kwargs` provided
    with pytest.raises(ValueError, match="Cannot provide both `curve_kwargs`"):
        invoke(curve_kwargs={"alpha": 1}, label="test")

    # Warning when only `**kwargs` provided
    with pytest.warns(FutureWarning, match=r"`\*\*kwargs` is deprecated and will be"):
        invoke(label="test")
@pytest.mark.parametrize(
    "curve_kwargs",
    [
        None,
        {"color": "blue"},
        [{"color": "blue"}, {"color": "green"}, {"color": "red"}],
    ],
)
@pytest.mark.parametrize("drop_intermediate", [True, False])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_roc_curve_display_plotting_from_cv_results(
    pyplot,
    data_binary,
    with_strings,
    with_sample_weight,
    response_method,
    drop_intermediate,
    curve_kwargs,
):
    """Check overall plotting of `from_cv_results`."""
    X, y = data_binary

    pos_label = None
    if with_strings:
        # String labels force an explicit `pos_label`.
        y = np.array(["c", "b"])[y]
        pos_label = "c"

    if with_sample_weight:
        rng = np.random.RandomState(42)
        sample_weight = rng.randint(1, 4, size=(X.shape[0]))
    else:
        sample_weight = None

    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )

    display = RocCurveDisplay.from_cv_results(
        cv_results,
        X,
        y,
        sample_weight=sample_weight,
        drop_intermediate=drop_intermediate,
        response_method=response_method,
        pos_label=pos_label,
        curve_kwargs=curve_kwargs,
    )

    # Recompute each fold's ROC curve by hand and compare to the display.
    for idx, (estimator, test_indices) in enumerate(
        zip(cv_results["estimator"], cv_results["indices"]["test"])
    ):
        y_true = _safe_indexing(y, test_indices)
        y_pred = _get_response_values_binary(
            estimator,
            _safe_indexing(X, test_indices),
            response_method=response_method,
            pos_label=pos_label,
        )[0]
        sample_weight_fold = (
            None
            if sample_weight is None
            else _safe_indexing(sample_weight, test_indices)
        )
        fpr, tpr, _ = roc_curve(
            y_true,
            y_pred,
            sample_weight=sample_weight_fold,
            drop_intermediate=drop_intermediate,
            pos_label=pos_label,
        )
        assert_allclose(display.roc_auc[idx], auc(fpr, tpr))
        assert_allclose(display.fpr[idx], fpr)
        assert_allclose(display.tpr[idx], tpr)

    assert display.name is None

    import matplotlib as mpl

    _check_figure_axes_and_labels(display, pos_label)
    if with_sample_weight:
        aggregate_expected_labels = ["AUC = 0.64 +/- 0.04", "_child1", "_child2"]
    else:
        aggregate_expected_labels = ["AUC = 0.61 +/- 0.05", "_child1", "_child2"]
    for idx, line in enumerate(display.line_):
        assert isinstance(line, mpl.lines.Line2D)
        # `from_cv_results` draws each fold with a default alpha of 0.5 (no
        # parametrized `curve_kwargs` overrides it). The original check was a
        # bare `==` comparison with no `assert`, i.e. it never asserted.
        assert line.get_alpha() == 0.5
        if isinstance(curve_kwargs, list):
            # Each individual curve labelled
            assert line.get_label() == f"AUC = {display.roc_auc[idx]:.2f}"
        else:
            # Single aggregate label
            assert line.get_label() == aggregate_expected_labels[idx]
@pytest.mark.parametrize("roc_auc", [[1.0, 1.0, 1.0], None])
@pytest.mark.parametrize(
    "curve_kwargs",
    [None, {"color": "red"}, [{"c": "red"}, {"c": "green"}, {"c": "yellow"}]],
)
@pytest.mark.parametrize("name", [None, "single", ["one", "two", "three"]])
def test_roc_curve_plot_legend_label(pyplot, data_binary, name, curve_kwargs, roc_auc):
    """Check legend label correct with all `curve_kwargs`, `name` combinations."""
    fpr = [np.array([0, 0.5, 1]), np.array([0, 0.5, 1]), np.array([0, 0.5, 1])]
    tpr = [np.array([0, 0.5, 1]), np.array([0, 0.5, 1]), np.array([0, 0.5, 1])]
    display = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)

    if isinstance(name, list) and not isinstance(curve_kwargs, list):
        # Multiple names without per-curve kwargs is rejected.
        with pytest.raises(ValueError, match="To avoid labeling individual curves"):
            display.plot(name=name, curve_kwargs=curve_kwargs)
        return

    display = display.plot(name=name, curve_kwargs=curve_kwargs)
    legend = display.ax_.get_legend()
    if legend is None:
        # A legend only appears when there is something to label.
        assert name is None
        assert roc_auc is None
        return

    legend_labels = [text.get_text() for text in legend.get_texts()]
    if isinstance(curve_kwargs, list):
        # One legend entry per curve.
        assert len(legend_labels) == 3
        for idx, label in enumerate(legend_labels):
            if name is None:
                assert label == ("AUC = 1.00" if roc_auc else None)
            elif isinstance(name, str):
                assert label == ("single (AUC = 1.00)" if roc_auc else "single")
            else:
                # `name` is a list of different strings
                assert label == (
                    f"{name[idx]} (AUC = 1.00)" if roc_auc else f"{name[idx]}"
                )
    else:
        # Single aggregate legend entry.
        assert len(legend_labels) == 1
        if name is None:
            assert legend_labels[0] == ("AUC = 1.00 +/- 0.00" if roc_auc else None)
        else:
            # name is a single string
            assert legend_labels[0] == (
                "single (AUC = 1.00 +/- 0.00)" if roc_auc else "single"
            )
@pytest.mark.parametrize(
    "curve_kwargs",
    [None, {"color": "red"}, [{"c": "red"}, {"c": "green"}, {"c": "yellow"}]],
)
@pytest.mark.parametrize("name", [None, "single", ["one", "two", "three"]])
def test_roc_curve_from_cv_results_legend_label(
    pyplot, data_binary, name, curve_kwargs
):
    """Check legend label correct with all `curve_kwargs`, `name` combinations."""
    X, y = data_binary
    n_cv = 3
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=n_cv, return_estimator=True, return_indices=True
    )
    if not isinstance(curve_kwargs, list) and isinstance(name, list):
        with pytest.raises(ValueError, match="To avoid labeling individual curves"):
            RocCurveDisplay.from_cv_results(
                cv_results, X, y, name=name, curve_kwargs=curve_kwargs
            )
    else:
        display = RocCurveDisplay.from_cv_results(
            cv_results, X, y, name=name, curve_kwargs=curve_kwargs
        )
        legend = display.ax_.get_legend()
        legend_labels = [text.get_text() for text in legend.get_texts()]
        if isinstance(curve_kwargs, list):
            # Multiple labels in legend, one per fold.
            assert len(legend_labels) == 3
            # Per-fold reference AUC values; named `expected_auc` to avoid
            # shadowing the `auc` function imported at module level.
            expected_auc = ["0.62", "0.66", "0.55"]
            for idx, label in enumerate(legend_labels):
                if name is None:
                    assert label == f"AUC = {expected_auc[idx]}"
                elif isinstance(name, str):
                    assert label == f"single (AUC = {expected_auc[idx]})"
                else:
                    # `name` is a list of different strings
                    assert label == f"{name[idx]} (AUC = {expected_auc[idx]})"
        else:
            # Single label in legend
            assert len(legend_labels) == 1
            if name is None:
                assert legend_labels[0] == "AUC = 0.61 +/- 0.05"
            else:
                # name is single string
                assert legend_labels[0] == "single (AUC = 0.61 +/- 0.05)"
@pytest.mark.parametrize(
    "curve_kwargs",
    [None, {"color": "red"}, [{"c": "red"}, {"c": "green"}, {"c": "yellow"}]],
)
def test_roc_curve_from_cv_results_curve_kwargs(pyplot, data_binary, curve_kwargs):
    """Check line kwargs passed correctly in `from_cv_results`."""
    X, y = data_binary
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )
    display = RocCurveDisplay.from_cv_results(
        cv_results, X, y, curve_kwargs=curve_kwargs
    )

    n_lines = len(display.line_)
    if curve_kwargs is None:
        expected_colors = ["blue"] * n_lines  # default color
    elif isinstance(curve_kwargs, Mapping):
        expected_colors = ["red"] * n_lines  # shared dict for all curves
    else:
        expected_colors = [kwargs["c"] for kwargs in curve_kwargs]  # per curve
    for line, expected_color in zip(display.line_, expected_colors):
        assert line.get_color() == expected_color
def test_roc_curve_from_cv_results_pos_label_inferred(pyplot, data_binary):
    """Check `pos_label` inferred correctly by `from_cv_results(pos_label=None)`."""
    X, y = data_binary
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )

    display = RocCurveDisplay.from_cv_results(cv_results, X, y, pos_label=None)
    # Falls back to `estimator.classes_[1]`, i.e. 1 for this dataset.
    assert display.pos_label == 1
def _check_chance_level(plot_chance_level, chance_level_kw, display):
    """Check chance level line and line styles correct."""
    import matplotlib as mpl

    if not plot_chance_level:
        assert display.chance_level_ is None
        return

    chance_line = display.chance_level_
    assert isinstance(chance_line, mpl.lines.Line2D)
    # The chance level is the diagonal from (0, 0) to (1, 1).
    assert tuple(chance_line.get_xdata()) == (0, 1)
    assert tuple(chance_line.get_ydata()) == (0, 1)

    if chance_level_kw is None:
        # Default styling: black dashed line with the default label.
        assert chance_line.get_color() == "k"
        assert chance_line.get_linestyle() == "--"
        assert chance_line.get_label() == "Chance level (AUC = 0.5)"
        return

    # Matplotlib accepts short and long kwarg aliases; check whichever was used.
    expected_color = (
        chance_level_kw["c"] if "c" in chance_level_kw else chance_level_kw["color"]
    )
    assert chance_line.get_color() == expected_color
    expected_linewidth = (
        chance_level_kw["lw"] if "lw" in chance_level_kw else chance_level_kw["linewidth"]
    )
    assert chance_line.get_linewidth() == expected_linewidth
    expected_linestyle = (
        chance_level_kw["ls"] if "ls" in chance_level_kw else chance_level_kw["linestyle"]
    )
    assert chance_line.get_linestyle() == expected_linestyle
@pytest.mark.parametrize("plot_chance_level", [True, False])
@pytest.mark.parametrize("label", [None, "Test Label"])
@pytest.mark.parametrize(
    "chance_level_kw",
    [
        None,
        {"linewidth": 1, "color": "red", "linestyle": "-", "label": "DummyEstimator"},
        {"lw": 1, "c": "red", "ls": "-", "label": "DummyEstimator"},
        {"lw": 1, "color": "blue", "ls": "-", "label": None},
    ],
)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_roc_curve_chance_level_line(
    pyplot,
    data_binary,
    plot_chance_level,
    chance_level_kw,
    label,
    constructor_name,
):
    """Check chance level plotting behavior of `from_predictions`, `from_estimator`."""
    X, y = data_binary

    lr = LogisticRegression()
    lr.fit(X, y)

    # `getattr(lr, "predict_proba")` with a constant string was a roundabout
    # spelling of a plain method call; call the method directly.
    y_score = lr.predict_proba(X)
    y_score = y_score if y_score.ndim == 1 else y_score[:, 1]

    if constructor_name == "from_estimator":
        display = RocCurveDisplay.from_estimator(
            lr,
            X,
            y,
            curve_kwargs={"alpha": 0.8, "label": label},
            plot_chance_level=plot_chance_level,
            chance_level_kw=chance_level_kw,
        )
    else:
        display = RocCurveDisplay.from_predictions(
            y,
            y_score,
            curve_kwargs={"alpha": 0.8, "label": label},
            plot_chance_level=plot_chance_level,
            chance_level_kw=chance_level_kw,
        )

    import matplotlib as mpl

    assert isinstance(display.line_, mpl.lines.Line2D)
    assert display.line_.get_alpha() == 0.8
    assert isinstance(display.ax_, mpl.axes.Axes)
    assert isinstance(display.figure_, mpl.figure.Figure)

    _check_chance_level(plot_chance_level, chance_level_kw, display)

    # Checking for legend behaviour
    if plot_chance_level and chance_level_kw is not None:
        if label is not None or chance_level_kw.get("label") is not None:
            legend = display.ax_.get_legend()
            assert legend is not None  # Legend should be present if any label is set
            legend_labels = [text.get_text() for text in legend.get_texts()]
            if label is not None:
                assert label in legend_labels
            if chance_level_kw.get("label") is not None:
                assert chance_level_kw["label"] in legend_labels
    else:
        assert display.ax_.get_legend() is None
@pytest.mark.parametrize("plot_chance_level", [True, False])
@pytest.mark.parametrize(
    "chance_level_kw",
    [
        None,
        {"linewidth": 1, "color": "red", "linestyle": "-", "label": "DummyEstimator"},
        {"lw": 1, "c": "red", "ls": "-", "label": "DummyEstimator"},
        {"lw": 1, "color": "blue", "ls": "-", "label": None},
    ],
)
@pytest.mark.parametrize("curve_kwargs", [None, {"alpha": 0.8}])
def test_roc_curve_chance_level_line_from_cv_results(
    pyplot,
    data_binary,
    plot_chance_level,
    chance_level_kw,
    curve_kwargs,
):
    """Check chance level plotting behavior with `from_cv_results`."""
    X, y = data_binary
    n_cv = 3
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=n_cv, return_estimator=True, return_indices=True
    )

    display = RocCurveDisplay.from_cv_results(
        cv_results,
        X,
        y,
        plot_chance_level=plot_chance_level,
        chance_level_kwargs=chance_level_kw,
        curve_kwargs=curve_kwargs,
    )

    import matplotlib as mpl

    assert all(isinstance(line, mpl.lines.Line2D) for line in display.line_)
    # Ensure the curve line kwargs are passed through correctly as well.
    if curve_kwargs:
        assert all(line.get_alpha() == 0.8 for line in display.line_)
    assert isinstance(display.ax_, mpl.axes.Axes)
    assert isinstance(display.figure_, mpl.figure.Figure)

    _check_chance_level(plot_chance_level, chance_level_kw, display)

    # A legend is always present, to indicate each 'Fold' curve.
    legend = display.ax_.get_legend()
    assert legend is not None
    legend_entries = [text.get_text() for text in legend.get_texts()]
    if plot_chance_level and chance_level_kw is not None:
        if chance_level_kw.get("label") is not None:
            assert chance_level_kw["label"] in legend_entries
        else:
            assert len(legend_entries) == 1
@pytest.mark.parametrize(
    "clf",
    [
        LogisticRegression(),
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
        ),
    ],
)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_roc_curve_display_complex_pipeline(pyplot, data_binary, clf, constructor_name):
    """Check the behaviour with complex pipeline."""
    X, y = data_binary
    clf = clone(clf)

    uses_estimator = constructor_name == "from_estimator"
    if uses_estimator:
        # An unfitted estimator must be rejected upfront.
        with pytest.raises(NotFittedError):
            RocCurveDisplay.from_estimator(clf, X, y)

    clf.fit(X, y)

    if uses_estimator:
        display = RocCurveDisplay.from_estimator(clf, X, y)
        expected_name = clf.__class__.__name__
    else:
        display = RocCurveDisplay.from_predictions(y, y)
        expected_name = "Classifier"

    assert expected_name in display.line_.get_label()
    assert display.name == expected_name
# Each case: (roc_auc, name, curve_kwargs, expected per-curve legend labels).
@pytest.mark.parametrize(
    "roc_auc, name, curve_kwargs, expected_labels",
    [
        # Aggregate label (mean +/- std); subsequent curves get matplotlib's
        # underscore-prefixed labels, which are excluded from the legend.
        ([0.9, 0.8], None, None, ["AUC = 0.85 +/- 0.05", "_child1"]),
        ([0.9, 0.8], "Est name", None, ["Est name (AUC = 0.85 +/- 0.05)", "_child1"]),
        # Per-fold names and kwargs yield one legend entry per curve.
        (
            [0.8, 0.7],
            ["fold1", "fold2"],
            [{"c": "blue"}, {"c": "red"}],
            ["fold1 (AUC = 0.80)", "fold2 (AUC = 0.70)"],
        ),
        (None, ["fold1", "fold2"], [{"c": "blue"}, {"c": "red"}], ["fold1", "fold2"]),
    ],
)
def test_roc_curve_display_default_labels(
    pyplot, roc_auc, name, curve_kwargs, expected_labels
):
    """Check the default labels used in the display."""
    # Two small synthetic curves; only the legend labels are under test.
    fpr = [np.array([0, 0.5, 1]), np.array([0, 0.3, 1])]
    tpr = [np.array([0, 0.5, 1]), np.array([0, 0.3, 1])]
    disp = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, name=name).plot(
        curve_kwargs=curve_kwargs
    )
    for idx, expected_label in enumerate(expected_labels):
        assert disp.line_[idx].get_label() == expected_label
def _check_auc(display, constructor_name):
    """Assert that the display's AUC value(s) match the expected references."""
    expected_single = 0.95679
    expected_per_fold = [0.97007, 0.985915, 0.980952]
    if constructor_name == "from_cv_results":
        # One ROC AUC per cross-validation fold.
        for idx, computed in enumerate(display.roc_auc):
            assert computed == pytest.approx(expected_per_fold[idx])
    else:
        assert display.roc_auc == pytest.approx(expected_single)
        # The stored curve must integrate to the same area.
        assert trapezoid(display.tpr, display.fpr) == pytest.approx(expected_single)
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize(
    "constructor_name", ["from_estimator", "from_predictions", "from_cv_results"]
)
def test_plot_roc_curve_pos_label(pyplot, response_method, constructor_name):
    """Check that `pos_label` can be provided and the proper statistics shown."""
    # check that we can provide the positive label and display the proper
    # statistics
    X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced version of the breast cancer dataset
    idx_positive = np.flatnonzero(y == 1)
    idx_negative = np.flatnonzero(y == 0)
    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
    X, y = X[idx_selected], y[idx_selected]
    X, y = shuffle(X, y, random_state=42)
    # only use 2 features to make the problem even harder
    X = X[:, :2]
    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        stratify=y,
        random_state=0,
    )
    classifier = LogisticRegression()
    classifier.fit(X_train, y_train)
    cv_results = cross_validate(
        LogisticRegression(), X, y, cv=3, return_estimator=True, return_indices=True
    )
    # Sanity check to be sure the positive class is `classes_[0]`
    # Class imbalance ensures a large difference in prediction values between classes,
    # allowing us to catch errors when we switch `pos_label`
    assert classifier.classes_.tolist() == ["cancer", "not cancer"]
    y_score = getattr(classifier, response_method)(X_test)
    # we select the corresponding probability columns or reverse the decision
    # function otherwise
    y_score_cancer = -1 * y_score if y_score.ndim == 1 else y_score[:, 0]
    y_score_not_cancer = y_score if y_score.ndim == 1 else y_score[:, 1]
    # First pass: treat "cancer" as the positive class.
    pos_label = "cancer"
    y_score = y_score_cancer
    if constructor_name == "from_estimator":
        display = RocCurveDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            pos_label=pos_label,
            response_method=response_method,
        )
    elif constructor_name == "from_predictions":
        display = RocCurveDisplay.from_predictions(
            y_test,
            y_score,
            pos_label=pos_label,
        )
    else:
        display = RocCurveDisplay.from_cv_results(
            cv_results,
            X,
            y,
            response_method=response_method,
            pos_label=pos_label,
        )
    _check_auc(display, constructor_name)
    # Second pass: flip the positive class and use the matching scores.
    pos_label = "not cancer"
    y_score = y_score_not_cancer
    if constructor_name == "from_estimator":
        display = RocCurveDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            response_method=response_method,
            pos_label=pos_label,
        )
    elif constructor_name == "from_predictions":
        display = RocCurveDisplay.from_predictions(
            y_test,
            y_score,
            pos_label=pos_label,
        )
    else:
        display = RocCurveDisplay.from_cv_results(
            cv_results,
            X,
            y,
            response_method=response_method,
            pos_label=pos_label,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_confusion_matrix_display.py | sklearn/metrics/_plot/tests/test_confusion_matrix_display.py | import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, SVR
def test_confusion_matrix_display_validation(pyplot):
    """Check that we raise the proper error when validating parameters."""
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=5, random_state=0
    )
    # Unfitted estimators must be rejected before anything is plotted.
    with pytest.raises(NotFittedError):
        ConfusionMatrixDisplay.from_estimator(SVC(), X, y)
    regressor = SVR().fit(X, y)
    y_pred_regressor = regressor.predict(X)
    y_pred_classifier = SVC().fit(X, y).predict(X)
    # Regressors are not supported by `from_estimator`.
    with pytest.raises(
        ValueError,
        match="ConfusionMatrixDisplay.from_estimator only supports classifiers",
    ):
        ConfusionMatrixDisplay.from_estimator(regressor, X, y)
    mixed_type_msg = "Mix type of y not allowed, got types"
    with pytest.raises(ValueError, match=mixed_type_msg):
        # Force `y_true` to be seen as a regression problem
        ConfusionMatrixDisplay.from_predictions(y + 0.5, y_pred_classifier)
    with pytest.raises(ValueError, match=mixed_type_msg):
        ConfusionMatrixDisplay.from_predictions(y, y_pred_regressor)
    # `y_true` and `y_pred` must have the same number of samples.
    with pytest.raises(
        ValueError, match="Found input variables with inconsistent numbers of samples"
    ):
        ConfusionMatrixDisplay.from_predictions(y, y_pred_classifier[::2])
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("with_labels", [True, False])
@pytest.mark.parametrize("with_display_labels", [True, False])
def test_confusion_matrix_display_custom_labels(
    pyplot, constructor_name, with_labels, with_display_labels
):
    """Check the resulting plot when labels are given."""
    n_classes = 5
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
    )
    classifier = SVC().fit(X, y)
    y_pred = classifier.predict(X)
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    ax = pyplot.gca()
    labels = [2, 1, 0, 3, 4] if with_labels else None
    display_labels = ["b", "d", "a", "e", "f"] if with_display_labels else None
    cm = confusion_matrix(y, y_pred, labels=labels)
    common_kwargs = {
        "ax": ax,
        "display_labels": display_labels,
        "labels": labels,
    }
    if constructor_name == "from_estimator":
        disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
    else:
        disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
    assert_allclose(disp.confusion_matrix, cm)
    # Tick labels fall back from `display_labels` to `labels` to the class
    # indices, in that order of priority.
    if with_display_labels:
        expected_display_labels = display_labels
    elif with_labels:
        expected_display_labels = labels
    else:
        expected_display_labels = list(range(n_classes))
    expected_display_labels_str = [str(name) for name in expected_display_labels]
    x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
    y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
    assert_array_equal(disp.display_labels, expected_display_labels)
    assert_array_equal(x_ticks, expected_display_labels_str)
    assert_array_equal(y_ticks, expected_display_labels_str)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("include_values", [True, False])
def test_confusion_matrix_display_plotting(
    pyplot,
    constructor_name,
    normalize,
    include_values,
):
    """Check the overall plotting rendering."""
    n_classes = 5
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
    )
    classifier = SVC().fit(X, y)
    y_pred = classifier.predict(X)
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    ax = pyplot.gca()
    cmap = "plasma"
    cm = confusion_matrix(y, y_pred)
    common_kwargs = {
        "normalize": normalize,
        "cmap": cmap,
        "ax": ax,
        "include_values": include_values,
    }
    if constructor_name == "from_estimator":
        disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
    else:
        disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
    assert disp.ax_ == ax
    # Recompute the expected matrix: normalized by row ("true"), by column
    # ("pred"), by the grand total ("all"), or left as raw counts (None).
    if normalize == "true":
        cm = cm / cm.sum(axis=1, keepdims=True)
    elif normalize == "pred":
        cm = cm / cm.sum(axis=0, keepdims=True)
    elif normalize == "all":
        cm = cm / cm.sum()
    assert_allclose(disp.confusion_matrix, cm)
    import matplotlib as mpl
    assert isinstance(disp.im_, mpl.image.AxesImage)
    assert disp.im_.get_cmap().name == cmap
    assert isinstance(disp.ax_, pyplot.Axes)
    assert isinstance(disp.figure_, pyplot.Figure)
    assert disp.ax_.get_ylabel() == "True label"
    assert disp.ax_.get_xlabel() == "Predicted label"
    x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
    y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
    expected_display_labels = list(range(n_classes))
    expected_display_labels_str = [str(name) for name in expected_display_labels]
    assert_array_equal(disp.display_labels, expected_display_labels)
    assert_array_equal(x_ticks, expected_display_labels_str)
    assert_array_equal(y_ticks, expected_display_labels_str)
    image_data = disp.im_.get_array().data
    assert_allclose(image_data, cm)
    if include_values:
        assert disp.text_.shape == (n_classes, n_classes)
        # ".2g" is the cell-value format expected from the display.
        fmt = ".2g"
        expected_text = np.array([format(v, fmt) for v in cm.ravel(order="C")])
        text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")])
        assert_array_equal(expected_text, text_text)
    else:
        assert disp.text_ is None
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_confusion_matrix_display(pyplot, constructor_name):
    """Check the behaviour of the default constructor without using the class
    methods."""
    n_classes = 5
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
    )
    classifier = SVC().fit(X, y)
    y_pred = classifier.predict(X)
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    cm = confusion_matrix(y, y_pred)
    common_kwargs = {
        "normalize": None,
        "include_values": True,
        "cmap": "viridis",
        "xticks_rotation": 45.0,
    }
    if constructor_name == "from_estimator":
        disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
    else:
        disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
    assert_allclose(disp.confusion_matrix, cm)
    assert disp.text_.shape == (n_classes, n_classes)
    rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
    assert_allclose(rotations, 45.0)
    image_data = disp.im_.get_array().data
    assert_allclose(image_data, cm)
    # Subsequent `plot` calls can override individual rendering options.
    disp.plot(cmap="plasma")
    assert disp.im_.get_cmap().name == "plasma"
    disp.plot(include_values=False)
    assert disp.text_ is None
    disp.plot(xticks_rotation=90.0)
    rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
    assert_allclose(rotations, 90.0)
    disp.plot(values_format="e")
    expected_text = np.array([format(v, "e") for v in cm.ravel(order="C")])
    text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")])
    assert_array_equal(expected_text, text_text)
def test_confusion_matrix_contrast(pyplot):
    """Check that the text color is appropriate depending on background."""
    cm = np.eye(2) / 2
    disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
    disp.plot(cmap=pyplot.cm.gray)
    # diagonal text is black
    assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
    assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
    # off-diagonal text is white
    assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
    assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
    disp.plot(cmap=pyplot.cm.gray_r)
    # off-diagonal text is black (reversed map puts zeros on a light background)
    assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
    assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
    # diagonal text is white (reversed map puts the maxima on a dark background)
    assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
    assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
    # Regression test for #15920
    cm = np.array([[19, 34], [32, 58]])
    disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
    disp.plot(cmap=pyplot.cm.Blues)
    min_color = pyplot.cm.Blues(0)
    max_color = pyplot.cm.Blues(255)
    assert_allclose(disp.text_[0, 0].get_color(), max_color)
    assert_allclose(disp.text_[0, 1].get_color(), max_color)
    assert_allclose(disp.text_[1, 0].get_color(), max_color)
    assert_allclose(disp.text_[1, 1].get_color(), min_color)
@pytest.mark.parametrize(
    "clf",
    [
        LogisticRegression(),
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])),
            LogisticRegression(),
        ),
    ],
    ids=["clf", "pipeline-clf", "pipeline-column_transformer-clf"],
)
def test_confusion_matrix_pipeline(pyplot, clf):
    """Check the behaviour of the plotting with more complex pipeline."""
    n_classes = 5
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
    )
    # An unfitted pipeline/estimator must be rejected.
    with pytest.raises(NotFittedError):
        ConfusionMatrixDisplay.from_estimator(clf, X, y)
    clf.fit(X, y)
    display = ConfusionMatrixDisplay.from_estimator(clf, X, y)
    expected_cm = confusion_matrix(y, clf.predict(X))
    assert_allclose(display.confusion_matrix, expected_cm)
    assert display.text_.shape == (n_classes, n_classes)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_confusion_matrix_with_unknown_labels(pyplot, constructor_name):
    """Check that when labels=None, the unique values in `y_pred` and `y_true`
    will be used.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/18405
    """
    n_classes = 5
    X, y = make_classification(
        n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
    )
    classifier = SVC().fit(X, y)
    y_pred = classifier.predict(X)
    # create unseen labels in `y_true` not seen during fitting and not present
    # in 'classifier.classes_'
    y = y + 1
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    common_kwargs = {"labels": None}
    if constructor_name == "from_estimator":
        disp = ConfusionMatrixDisplay.from_estimator(classifier, X, y, **common_kwargs)
    else:
        disp = ConfusionMatrixDisplay.from_predictions(y, y_pred, **common_kwargs)
    display_labels = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
    # Shifting `y` yields classes 1..5 while predictions stay in 0..4, so the
    # union 0..5 (n_classes + 1 labels) must appear on the axis.
    expected_labels = [str(i) for i in range(n_classes + 1)]
    assert_array_equal(expected_labels, display_labels)
def test_colormap_max(pyplot):
    """Check that the max color is used for the color of the text."""
    cmap = pyplot.get_cmap("gray", 1024)
    cm = np.array([[1.0, 0.0], [0.0, 1.0]])
    display = ConfusionMatrixDisplay(cm)
    display.plot(cmap=cmap)
    # The zero cell sits on the darkest background, so its text is white.
    assert_allclose(display.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
def test_im_kw_adjust_vmin_vmax(pyplot):
    """Check that im_kw passes kwargs to imshow"""
    cm = np.array([[0.48, 0.04], [0.08, 0.4]])
    display = ConfusionMatrixDisplay(cm)
    display.plot(im_kw=dict(vmin=0.0, vmax=0.8))
    # The color limits of the rendered image must match the requested ones.
    lower, upper = display.im_.get_clim()
    assert lower == pytest.approx(0.0)
    assert upper == pytest.approx(0.8)
def test_confusion_matrix_text_kw(pyplot):
    """Check that text_kw is passed to the text call."""

    def assert_font_size(display, expected_size):
        # Every cell annotation must carry the requested font size.
        for annotation in display.text_.ravel():
            assert annotation.get_fontsize() == expected_size

    font_size = 15.0
    X, y = make_classification(random_state=0)
    classifier = SVC().fit(X, y)
    # `from_estimator` forwards `text_kw` to the text calls.
    disp = ConfusionMatrixDisplay.from_estimator(
        classifier, X, y, text_kw={"fontsize": font_size}
    )
    assert_font_size(disp, font_size)
    # A subsequent `plot` call can override the font size.
    disp.plot(text_kw={"fontsize": 20.0})
    assert_font_size(disp, 20.0)
    # `from_predictions` forwards `text_kw` as well.
    disp = ConfusionMatrixDisplay.from_predictions(
        y, classifier.predict(X), text_kw={"fontsize": font_size}
    )
    assert_font_size(disp, font_size)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_common_curve_display.py | sklearn/metrics/_plot/tests/test_common_curve_display.py | import numpy as np
import pytest
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.calibration import CalibrationDisplay
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_iris
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
ConfusionMatrixDisplay,
DetCurveDisplay,
PrecisionRecallDisplay,
PredictionErrorDisplay,
RocCurveDisplay,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
@pytest.fixture(scope="module")
def data():
    """Iris features and target, loaded once per module."""
    X, y = load_iris(return_X_y=True)
    return X, y
@pytest.fixture(scope="module")
def data_binary(data):
    """Restrict the iris dataset to its first two classes (binary problem)."""
    X, y = data
    mask = y < 2
    return X[mask], y[mask]
@pytest.mark.parametrize(
    "Display",
    [CalibrationDisplay, DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay],
)
def test_display_curve_error_classifier(pyplot, data, data_binary, Display):
    """Check that a proper error is raised when only binary classification is
    supported."""
    X, y = data
    X_binary, y_binary = data_binary
    clf = DecisionTreeClassifier().fit(X, y)
    # Case 1: multiclass classifier with multiclass target
    msg = "Expected 'estimator' to be a binary classifier. Got 3 classes instead."
    with pytest.raises(ValueError, match=msg):
        Display.from_estimator(clf, X, y)
    # Case 2: multiclass classifier with binary target
    with pytest.raises(ValueError, match=msg):
        Display.from_estimator(clf, X_binary, y_binary)
    # Case 3: binary classifier with multiclass target
    clf = DecisionTreeClassifier().fit(X_binary, y_binary)
    msg = "The target y is not binary. Got multiclass type of target."
    with pytest.raises(ValueError, match=msg):
        Display.from_estimator(clf, X, y)
@pytest.mark.parametrize(
    "Display",
    [CalibrationDisplay, DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay],
)
def test_display_curve_error_regression(pyplot, data_binary, Display):
    """Check that we raise an error with regressor."""
    # Case 1: regressor
    X, y = data_binary
    regressor = DecisionTreeRegressor().fit(X, y)
    msg = "Expected 'estimator' to be a binary classifier. Got DecisionTreeRegressor"
    with pytest.raises(ValueError, match=msg):
        Display.from_estimator(regressor, X, y)
    # Case 2: regression target
    classifier = DecisionTreeClassifier().fit(X, y)
    # Force `y_true` to be seen as a regression problem
    y = y + 0.5
    msg = "The target y is not binary. Got continuous type of target."
    with pytest.raises(ValueError, match=msg):
        Display.from_estimator(classifier, X, y)
    # `from_predictions` must reject the continuous target as well.
    with pytest.raises(ValueError, match=msg):
        Display.from_predictions(y, regressor.fit(X, y).predict(X))
# Each case pairs a requested response method with the expected error message.
@pytest.mark.parametrize(
    "response_method, msg",
    [
        (
            "predict_proba",
            "MyClassifier has none of the following attributes: predict_proba.",
        ),
        (
            "decision_function",
            "MyClassifier has none of the following attributes: decision_function.",
        ),
        (
            "auto",
            (
                "MyClassifier has none of the following attributes: predict_proba,"
                " decision_function."
            ),
        ),
        (
            "bad_method",
            "MyClassifier has none of the following attributes: bad_method.",
        ),
    ],
)
@pytest.mark.parametrize(
    "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay]
)
def test_display_curve_error_no_response(
    pyplot,
    data_binary,
    response_method,
    msg,
    Display,
):
    """Check that a proper error is raised when the response method requested
    is not defined for the given trained classifier."""
    X, y = data_binary
    # Minimal classifier exposing neither `predict_proba` nor
    # `decision_function`.
    class MyClassifier(ClassifierMixin, BaseEstimator):
        def fit(self, X, y):
            self.classes_ = [0, 1]
            return self
    clf = MyClassifier().fit(X, y)
    with pytest.raises(AttributeError, match=msg):
        Display.from_estimator(clf, X, y, response_method=response_method)
@pytest.mark.parametrize(
    "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay]
)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_display_curve_estimator_name_multiple_calls(
    pyplot,
    data_binary,
    Display,
    constructor_name,
):
    """Check that passing `name` when calling `plot` will overwrite the original name
    in the legend."""
    X, y = data_binary
    clf_name = "my hand-crafted name"
    clf = LogisticRegression().fit(X, y)
    y_pred = clf.predict_proba(X)[:, 1]
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    if constructor_name == "from_estimator":
        disp = Display.from_estimator(clf, X, y, name=clf_name)
    else:
        disp = Display.from_predictions(y, y_pred, name=clf_name)
    # TODO: Clean-up once `estimator_name` deprecated in all displays
    if Display in (PrecisionRecallDisplay, RocCurveDisplay):
        assert disp.name == clf_name
    else:
        assert disp.estimator_name == clf_name
    pyplot.close("all")
    # Re-plotting without `name` keeps the constructor-time name.
    disp.plot()
    assert clf_name in disp.line_.get_label()
    pyplot.close("all")
    # Passing a new `name` to `plot` overrides the previous one.
    clf_name = "another_name"
    disp.plot(name=clf_name)
    assert clf_name in disp.line_.get_label()
@pytest.mark.parametrize(
    "clf",
    [
        LogisticRegression(),
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
        ),
    ],
)
@pytest.mark.parametrize(
    "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay]
)
def test_display_curve_not_fitted_errors_old_name(pyplot, data_binary, clf, Display):
    """Check that a proper error is raised when the classifier is not
    fitted."""
    X, y = data_binary
    # clone since we parametrize the test and the classifier will be fitted
    # when testing the second and subsequent plotting function
    model = clone(clf)
    with pytest.raises(NotFittedError):
        Display.from_estimator(model, X, y)
    model.fit(X, y)
    disp = Display.from_estimator(model, X, y)
    # The class name of the fitted estimator becomes the display name.
    assert model.__class__.__name__ in disp.line_.get_label()
    # TODO: Clean-up once `estimator_name` deprecated in all displays
    if Display in (PrecisionRecallDisplay, RocCurveDisplay):
        assert disp.name == model.__class__.__name__
    else:
        assert disp.estimator_name == model.__class__.__name__
@pytest.mark.parametrize(
    "clf",
    [
        LogisticRegression(),
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
        ),
    ],
)
@pytest.mark.parametrize("Display", [RocCurveDisplay])
def test_display_curve_not_fitted_errors(pyplot, data_binary, clf, Display):
    """Check that a proper error is raised when the classifier is not fitted."""
    X, y = data_binary
    # Work on a clone: parametrized estimator instances are shared between
    # test invocations and must not be fitted in place.
    estimator = clone(clf)
    with pytest.raises(NotFittedError):
        Display.from_estimator(estimator, X, y)
    estimator.fit(X, y)
    display = Display.from_estimator(estimator, X, y)
    expected_name = estimator.__class__.__name__
    assert expected_name in display.line_.get_label()
    assert display.name == expected_name
@pytest.mark.parametrize(
    "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay]
)
def test_display_curve_n_samples_consistency(pyplot, data_binary, Display):
    """Check the error raised when `y_pred` or `sample_weight` have inconsistent
    length."""
    X, y = data_binary
    classifier = DecisionTreeClassifier().fit(X, y)
    expected_msg = "Found input variables with inconsistent numbers of samples"
    # Truncate each input in turn; every mismatch must be reported.
    for args in [(classifier, X[:-2], y), (classifier, X, y[:-2])]:
        with pytest.raises(ValueError, match=expected_msg):
            Display.from_estimator(*args)
    with pytest.raises(ValueError, match=expected_msg):
        Display.from_estimator(classifier, X, y, sample_weight=np.ones(X.shape[0] - 2))
@pytest.mark.parametrize(
    "Display", [DetCurveDisplay, PrecisionRecallDisplay, RocCurveDisplay]
)
def test_display_curve_error_pos_label(pyplot, data_binary, Display):
    """Check consistence of error message when `pos_label` should be specified."""
    X, y = data_binary
    # Shift the labels so neither class is the conventional {0, 1}/{-1, 1}.
    y = y + 10
    classifier = DecisionTreeClassifier().fit(X, y)
    y_score = classifier.predict_proba(X)[:, -1]
    expected_msg = r"y_true takes value in {10, 11} and pos_label is not specified"
    with pytest.raises(ValueError, match=expected_msg):
        Display.from_predictions(y, y_score)
@pytest.mark.parametrize(
    "Display",
    [
        CalibrationDisplay,
        DetCurveDisplay,
        PrecisionRecallDisplay,
        RocCurveDisplay,
        PredictionErrorDisplay,
        ConfusionMatrixDisplay,
    ],
)
@pytest.mark.parametrize(
    "constructor",
    ["from_predictions", "from_estimator"],
)
def test_classifier_display_curve_named_constructor_return_type(
    pyplot, data_binary, Display, constructor
):
    """Check that named constructors return the correct type when subclassed.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/27675
    """
    X, y = data_binary
    # This can be anything - we just need to check the named constructor return
    # type so the only requirement here is instantiating the class without error
    y_pred = y
    classifier = LogisticRegression().fit(X, y)
    # A trivial subclass: the named constructors must return instances of it,
    # not of the parent display class.
    class SubclassOfDisplay(Display):
        pass
    if constructor == "from_predictions":
        curve = SubclassOfDisplay.from_predictions(y, y_pred)
    else:  # constructor == "from_estimator"
        curve = SubclassOfDisplay.from_estimator(classifier, X, y)
    assert isinstance(curve, SubclassOfDisplay)
# TODO(1.10): Remove once deprecated in all Displays
@pytest.mark.parametrize(
    "Display, display_kwargs",
    [
        # TODO(1.10): Remove
        (
            PrecisionRecallDisplay,
            {"precision": np.array([1, 0.5, 0]), "recall": np.array([0, 0.5, 1])},
        ),
        # TODO(1.9): Remove
        (RocCurveDisplay, {"fpr": np.array([0, 0.5, 1]), "tpr": np.array([0, 0.5, 1])}),
    ],
)
def test_display_estimator_name_deprecation(pyplot, Display, display_kwargs):
    """Check deprecation of `estimator_name`."""
    # Passing the legacy keyword must emit a FutureWarning.
    with pytest.warns(FutureWarning, match="`estimator_name` is deprecated in"):
        Display(**display_kwargs, estimator_name="test")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_precision_recall_display.py | sklearn/metrics/_plot/tests/test_precision_recall_display.py | from collections import Counter
import numpy as np
import pytest
from scipy.integrate import trapezoid
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_breast_cancer, make_classification
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
PrecisionRecallDisplay,
average_precision_score,
precision_recall_curve,
)
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("drop_intermediate", [True, False])
def test_precision_recall_display_plotting(
    pyplot, constructor_name, response_method, drop_intermediate
):
    """Check the overall plotting rendering."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    pos_label = 1
    # A single fit is sufficient (a redundant second `fit` call was removed).
    classifier = LogisticRegression().fit(X, y)
    y_score = getattr(classifier, response_method)(X)
    # `predict_proba` returns one column per class; keep the positive one.
    y_score = y_score if y_score.ndim == 1 else y_score[:, pos_label]
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier,
            X,
            y,
            response_method=response_method,
            drop_intermediate=drop_intermediate,
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y, y_score, pos_label=pos_label, drop_intermediate=drop_intermediate
        )
    # The stored curve must match a direct metric computation.
    precision, recall, _ = precision_recall_curve(
        y, y_score, pos_label=pos_label, drop_intermediate=drop_intermediate
    )
    average_precision = average_precision_score(y, y_score, pos_label=pos_label)
    np.testing.assert_allclose(display.precision, precision)
    np.testing.assert_allclose(display.recall, recall)
    assert display.average_precision == pytest.approx(average_precision)
    import matplotlib as mpl
    assert isinstance(display.line_, mpl.lines.Line2D)
    assert isinstance(display.ax_, mpl.axes.Axes)
    assert isinstance(display.figure_, mpl.figure.Figure)
    assert display.ax_.get_xlabel() == "Recall (Positive label: 1)"
    assert display.ax_.get_ylabel() == "Precision (Positive label: 1)"
    assert display.ax_.get_adjustable() == "box"
    assert display.ax_.get_aspect() in ("equal", 1.0)
    assert display.ax_.get_xlim() == display.ax_.get_ylim() == (-0.01, 1.01)
    # plotting passing some new parameters
    display.plot(alpha=0.8, name="MySpecialEstimator")
    expected_label = f"MySpecialEstimator (AP = {average_precision:0.2f})"
    assert display.line_.get_label() == expected_label
    assert display.line_.get_alpha() == pytest.approx(0.8)
    # Check that the chance level line is not plotted by default
    assert display.chance_level_ is None
@pytest.mark.parametrize("chance_level_kw", [None, {"color": "r"}, {"c": "r"}])
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_precision_recall_chance_level_line(
    pyplot,
    chance_level_kw,
    constructor_name,
):
    """Check the chance level line plotting behavior."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    pos_prevalence = Counter(y)[1] / len(y)
    lr = LogisticRegression()
    y_score = lr.fit(X, y).predict_proba(X)[:, 1]
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            lr,
            X,
            y,
            plot_chance_level=True,
            chance_level_kw=chance_level_kw,
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y,
            y_score,
            plot_chance_level=True,
            chance_level_kw=chance_level_kw,
        )
    import matplotlib as mpl
    assert isinstance(display.chance_level_, mpl.lines.Line2D)
    # The chance level is a horizontal line at the positive-class prevalence.
    assert tuple(display.chance_level_.get_xdata()) == (0, 1)
    assert tuple(display.chance_level_.get_ydata()) == (pos_prevalence, pos_prevalence)
    # Checking for chance level line styles
    if chance_level_kw is None:
        assert display.chance_level_.get_color() == "k"
    else:
        assert display.chance_level_.get_color() == "r"
@pytest.mark.parametrize(
    "constructor_name, default_label",
    [
        ("from_estimator", "LogisticRegression (AP = {:.2f})"),
        ("from_predictions", "Classifier (AP = {:.2f})"),
    ],
)
def test_precision_recall_display_name(pyplot, constructor_name, default_label):
    """Check the behaviour of the name parameters"""
    X, y = make_classification(n_classes=2, n_samples=100, random_state=0)
    pos_label = 1
    # A single fit is sufficient (a redundant second `fit` call was removed).
    classifier = LogisticRegression().fit(X, y)
    y_score = classifier.predict_proba(X)[:, pos_label]
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(classifier, X, y)
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y, y_score, pos_label=pos_label
        )
    average_precision = average_precision_score(y, y_score, pos_label=pos_label)
    # check that the default name is used
    assert display.line_.get_label() == default_label.format(average_precision)
    # check that the name can be set
    display.plot(name="MySpecialEstimator")
    assert (
        display.line_.get_label()
        == f"MySpecialEstimator (AP = {average_precision:.2f})"
    )
@pytest.mark.parametrize(
    "clf",
    [
        make_pipeline(StandardScaler(), LogisticRegression()),
        make_pipeline(
            make_column_transformer((StandardScaler(), [0, 1])), LogisticRegression()
        ),
    ],
)
def test_precision_recall_display_pipeline(pyplot, clf):
    """Check the display works with pipelines and rejects unfitted ones."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    # An unfitted pipeline is rejected before plotting.
    with pytest.raises(NotFittedError):
        PrecisionRecallDisplay.from_estimator(clf, X, y)
    clf.fit(X, y)
    disp = PrecisionRecallDisplay.from_estimator(clf, X, y)
    assert disp.name == clf.__class__.__name__
def test_precision_recall_display_string_labels(pyplot):
    """Non-regression test for issue #15738: support string class labels."""
    dataset = load_breast_cancer()
    X = dataset.data
    y = dataset.target_names[dataset.target]
    model = make_pipeline(StandardScaler(), LogisticRegression())
    model.fit(X, y)
    # Both string labels must be known to the fitted classifier.
    assert all(label in model.classes_ for label in dataset.target_names)
    display = PrecisionRecallDisplay.from_estimator(model, X, y)
    proba_pos = model.predict_proba(X)[:, 1]
    expected_ap = average_precision_score(y, proba_pos, pos_label=model.classes_[1])
    assert display.average_precision == pytest.approx(expected_ap)
    assert display.name == model.__class__.__name__
    # Without an explicit pos_label, string targets are ambiguous and raise.
    err_msg = r"y_true takes value in {'benign', 'malignant'}"
    with pytest.raises(ValueError, match=err_msg):
        PrecisionRecallDisplay.from_predictions(y, proba_pos)
    display = PrecisionRecallDisplay.from_predictions(
        y, proba_pos, pos_label=model.classes_[1]
    )
    assert display.average_precision == pytest.approx(expected_ap)
@pytest.mark.parametrize(
    "average_precision, name, expected_label",
    [
        (0.9, None, "AP = 0.90"),
        (None, "my_est", "my_est"),
        (0.8, "my_est2", "my_est2 (AP = 0.80)"),
    ],
)
def test_default_labels(pyplot, average_precision, name, expected_label):
    """Check the legend label composed from `name` and `average_precision`."""
    precision_values = np.array([1, 0.5, 0])
    recall_values = np.array([0, 0.5, 1])
    display = PrecisionRecallDisplay(
        precision_values,
        recall_values,
        average_precision=average_precision,
        name=name,
    )
    display.plot()
    assert display.line_.get_label() == expected_label
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_plot_precision_recall_pos_label(pyplot, constructor_name, response_method):
    """Check that `pos_label` selects which class' statistics are displayed.

    On a heavily imbalanced problem, the minority ("cancer") class must get a
    low average precision while the majority ("not cancer") class gets a high
    one.
    """
    X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced version of the breast cancer dataset
    idx_positive = np.flatnonzero(y == 1)
    idx_negative = np.flatnonzero(y == 0)
    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
    X, y = X[idx_selected], y[idx_selected]
    X, y = shuffle(X, y, random_state=42)
    # only use 2 features to make the problem even harder
    X = X[:, :2]
    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        stratify=y,
        random_state=0,
    )
    classifier = LogisticRegression()
    classifier.fit(X_train, y_train)
    # sanity check: the positive class must be classes_[0], so that the class
    # imbalance penalizes the "cancer" statistics below
    assert classifier.classes_.tolist() == ["cancer", "not cancer"]
    y_score = getattr(classifier, response_method)(X_test)
    # we select the corresponding probability columns or reverse the decision
    # function otherwise
    y_score_cancer = -1 * y_score if y_score.ndim == 1 else y_score[:, 0]
    y_score_not_cancer = y_score if y_score.ndim == 1 else y_score[:, 1]
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            pos_label="cancer",
            response_method=response_method,
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y_test,
            y_score_cancer,
            pos_label="cancer",
        )
    # we should obtain the statistics of the "cancer" class
    avg_prec_limit = 0.65
    assert display.average_precision < avg_prec_limit
    # the (negated) area under the PR curve must agree with the AP bound
    assert -trapezoid(display.precision, display.recall) < avg_prec_limit
    # otherwise we should obtain the statistics of the "not cancer" class
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            classifier,
            X_test,
            y_test,
            response_method=response_method,
            pos_label="not cancer",
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y_test,
            y_score_not_cancer,
            pos_label="not cancer",
        )
    avg_prec_limit = 0.95
    assert display.average_precision > avg_prec_limit
    assert -trapezoid(display.precision, display.recall) > avg_prec_limit
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_precision_recall_prevalence_pos_label_reusable(pyplot, constructor_name):
    """Building the display with plot_chance_level=False must not prevent a
    later `plot(plot_chance_level=True)` call from drawing the line."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    estimator = LogisticRegression().fit(X, y)
    scores = estimator.predict_proba(X)[:, 1]
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(
            estimator, X, y, plot_chance_level=False
        )
    else:
        display = PrecisionRecallDisplay.from_predictions(
            y, scores, plot_chance_level=False
        )
    # No chance-level artist was requested at construction time.
    assert display.chance_level_ is None
    import matplotlib as mpl
    # Both constructors record prevalence_pos_label, so re-plotting with
    # plot_chance_level=True must now draw the chance-level line.
    display.plot(plot_chance_level=True)
    assert isinstance(display.chance_level_, mpl.lines.Line2D)
def test_precision_recall_raise_no_prevalence(pyplot):
    """Plotting the chance level without `prevalence_pos_label` must raise."""
    display = PrecisionRecallDisplay(np.array([1, 0.5, 0]), np.array([0, 0.5, 1]))
    msg = (
        "You must provide prevalence_pos_label when constructing the "
        "PrecisionRecallDisplay object in order to plot the chance "
        "level line. Alternatively, you may use "
        "PrecisionRecallDisplay.from_estimator or "
        "PrecisionRecallDisplay.from_predictions "
        "to automatically set prevalence_pos_label"
    )
    with pytest.raises(ValueError, match=msg):
        display.plot(plot_chance_level=True)
@pytest.mark.parametrize("despine", [True, False])
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_plot_precision_recall_despine(pyplot, despine, constructor_name):
    """Check that `despine` hides the top/right spines and bounds the
    bottom/left ones to the unit interval."""
    X, y = make_classification(n_classes=2, n_samples=50, random_state=0)
    # Fit once: the redundant second `clf.fit(X, y)` call was removed since
    # `fit` is already chained on the constructor.
    clf = LogisticRegression().fit(X, y)
    y_score = clf.decision_function(X)
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    if constructor_name == "from_estimator":
        display = PrecisionRecallDisplay.from_estimator(clf, X, y, despine=despine)
    else:
        display = PrecisionRecallDisplay.from_predictions(y, y_score, despine=despine)
    # Top/right spines are hidden iff despine=True.
    for s in ["top", "right"]:
        assert display.ax_.spines[s].get_visible() is not despine
    if despine:
        # Remaining spines are truncated to the [0, 1] data range.
        for s in ["bottom", "left"]:
            assert display.ax_.spines[s].get_bounds() == (0, 1)
# TODO(1.10): remove
def test_y_score_and_y_pred_specified_error(pyplot):
    """`y_pred` is deprecated: passing both `y_score` and `y_pred` raises a
    ValueError, and passing `y_pred` alone warns about the 1.8 deprecation."""
    y_true = np.array([0, 1, 1, 0])
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    other_scores = np.array([0.2, 0.3, 0.5, 0.1])
    with pytest.raises(
        ValueError, match="`y_pred` and `y_score` cannot be both specified"
    ):
        PrecisionRecallDisplay.from_predictions(
            y_true, y_score=scores, y_pred=other_scores
        )
    with pytest.warns(FutureWarning, match="y_pred was deprecated in 1.8"):
        PrecisionRecallDisplay.from_predictions(y_true, y_pred=scores)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/__init__.py | sklearn/metrics/_plot/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_predict_error_display.py | sklearn/metrics/_plot/tests/test_predict_error_display.py | import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import load_diabetes
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import Ridge
from sklearn.metrics import PredictionErrorDisplay
# Module-level regression dataset shared by all tests below.
X, y = load_diabetes(return_X_y=True)
@pytest.fixture
def regressor_fitted():
    """A Ridge regressor fitted on the module-level diabetes data."""
    regressor = Ridge()
    return regressor.fit(X, y)
@pytest.mark.parametrize(
    "regressor, params, err_type, err_msg",
    [
        (
            Ridge().fit(X, y),
            {"subsample": -1},
            ValueError,
            "When an integer, subsample=-1 should be",
        ),
        (
            Ridge().fit(X, y),
            {"subsample": 20.0},
            ValueError,
            "When a floating-point, subsample=20.0 should be",
        ),
        (
            Ridge().fit(X, y),
            {"subsample": -20.0},
            ValueError,
            "When a floating-point, subsample=-20.0 should be",
        ),
        (
            Ridge().fit(X, y),
            {"kind": "xxx"},
            ValueError,
            "`kind` must be one of",
        ),
    ],
)
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"])
def test_prediction_error_display_raise_error(
    pyplot, class_method, regressor, params, err_type, err_msg
):
    """Check that we raise the proper error when validating the parameters."""
    with pytest.raises(err_type, match=err_msg):
        if class_method == "from_estimator":
            PredictionErrorDisplay.from_estimator(regressor, X, y, **params)
        else:
            y_pred = regressor.predict(X)
            PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred, **params)
def test_from_estimator_not_fitted(pyplot):
    """`from_estimator` must reject a regressor that was never fitted."""
    unfitted = Ridge()
    with pytest.raises(NotFittedError, match="is not fitted yet."):
        PredictionErrorDisplay.from_estimator(unfitted, X, y)
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("kind", ["actual_vs_predicted", "residual_vs_predicted"])
def test_prediction_error_display(pyplot, regressor_fitted, class_method, kind):
    """Check the default behaviour of the display."""
    if class_method == "from_estimator":
        display = PredictionErrorDisplay.from_estimator(
            regressor_fitted, X, y, kind=kind
        )
    else:
        predictions = regressor_fitted.predict(X)
        display = PredictionErrorDisplay.from_predictions(
            y_true=y, y_pred=predictions, kind=kind
        )
    # Both kinds draw a reference line and share the same x-axis label.
    assert display.line_ is not None
    assert display.ax_.get_xlabel() == "Predicted values"
    if kind == "actual_vs_predicted":
        # The reference is the identity line (perfect predictions).
        assert_allclose(display.line_.get_xdata(), display.line_.get_ydata())
        assert display.ax_.get_ylabel() == "Actual values"
    else:
        assert display.ax_.get_ylabel() == "Residuals (actual - predicted)"
    # No legend is added by default.
    assert display.ax_.get_legend() is None
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize(
    "subsample, expected_size",
    [(5, 5), (0.1, int(X.shape[0] * 0.1)), (None, X.shape[0])],
)
def test_plot_prediction_error_subsample(
    pyplot, regressor_fitted, class_method, subsample, expected_size
):
    """`subsample` controls how many points end up in the scatter plot."""
    if class_method == "from_estimator":
        display = PredictionErrorDisplay.from_estimator(
            regressor_fitted, X, y, subsample=subsample
        )
    else:
        display = PredictionErrorDisplay.from_predictions(
            y_true=y, y_pred=regressor_fitted.predict(X), subsample=subsample
        )
    assert len(display.scatter_.get_offsets()) == expected_size
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"])
def test_plot_prediction_error_ax(pyplot, regressor_fitted, class_method):
    """A user-provided matplotlib axis must be reused by the display."""
    _, existing_ax = pyplot.subplots()
    if class_method == "from_estimator":
        display = PredictionErrorDisplay.from_estimator(
            regressor_fitted, X, y, ax=existing_ax
        )
    else:
        display = PredictionErrorDisplay.from_predictions(
            y_true=y, y_pred=regressor_fitted.predict(X), ax=existing_ax
        )
    assert display.ax_ is existing_ax
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize(
    "scatter_kwargs",
    [None, {"color": "blue", "alpha": 0.9}, {"c": "blue", "alpha": 0.9}],
)
@pytest.mark.parametrize(
    "line_kwargs", [None, {"color": "red", "linestyle": "-"}, {"c": "red", "ls": "-"}]
)
def test_prediction_error_custom_artist(
    pyplot, regressor_fitted, class_method, scatter_kwargs, line_kwargs
):
    """Check that we can tune the style of the line and the scatter."""
    extra_params = {
        "kind": "actual_vs_predicted",
        "scatter_kwargs": scatter_kwargs,
        "line_kwargs": line_kwargs,
    }
    if class_method == "from_estimator":
        display = PredictionErrorDisplay.from_estimator(
            regressor_fitted, X, y, **extra_params
        )
    else:
        y_pred = regressor_fitted.predict(X)
        display = PredictionErrorDisplay.from_predictions(
            y_true=y, y_pred=y_pred, **extra_params
        )
    # Custom kwargs must be forwarded to the matplotlib artists, whether the
    # long ("color"/"linestyle") or the short ("c"/"ls") aliases are used.
    if line_kwargs is not None:
        assert display.line_.get_linestyle() == "-"
        assert display.line_.get_color() == "red"
    else:
        # Default identity-line style.
        assert display.line_.get_linestyle() == "--"
        assert display.line_.get_color() == "black"
        assert display.line_.get_alpha() == 0.7
    if scatter_kwargs is not None:
        assert_allclose(display.scatter_.get_facecolor(), [[0.0, 0.0, 1.0, 0.9]])
        assert_allclose(display.scatter_.get_edgecolor(), [[0.0, 0.0, 1.0, 0.9]])
    else:
        # Default scatter transparency.
        assert display.scatter_.get_alpha() == 0.8
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_plot/tests/test_det_curve_display.py | sklearn/metrics/_plot/tests/test_det_curve_display.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import DetCurveDisplay, det_curve
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("drop_intermediate", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_det_curve_display(
    pyplot,
    constructor_name,
    response_method,
    with_sample_weight,
    drop_intermediate,
    with_strings,
):
    """Check that the stored DET curve matches `det_curve` and that the plot
    attributes (line, axes, labels) are properly set."""
    X, y = load_iris(return_X_y=True)
    # Binarize the data with only the two first classes
    X, y = X[y < 2], y[y < 2]
    pos_label = None
    if with_strings:
        y = np.array(["c", "b"])[y]
        pos_label = "c"
    if with_sample_weight:
        rng = np.random.RandomState(42)
        sample_weight = rng.randint(1, 4, size=(X.shape[0]))
    else:
        sample_weight = None
    lr = LogisticRegression()
    lr.fit(X, y)
    y_score = getattr(lr, response_method)(X)
    if y_score.ndim == 2:
        # keep only the score of the positive class
        y_score = y_score[:, 1]
    # safe guard for the binary if/else construction
    assert constructor_name in ("from_estimator", "from_predictions")
    common_kwargs = {
        "name": lr.__class__.__name__,
        "alpha": 0.8,
        "sample_weight": sample_weight,
        "drop_intermediate": drop_intermediate,
        "pos_label": pos_label,
    }
    if constructor_name == "from_estimator":
        disp = DetCurveDisplay.from_estimator(lr, X, y, **common_kwargs)
    else:
        disp = DetCurveDisplay.from_predictions(y, y_score, **common_kwargs)
    # The stored curve must match the reference `det_curve` computation.
    fpr, fnr, _ = det_curve(
        y,
        y_score,
        sample_weight=sample_weight,
        drop_intermediate=drop_intermediate,
        pos_label=pos_label,
    )
    assert_allclose(disp.fpr, fpr, atol=1e-7)
    assert_allclose(disp.fnr, fnr, atol=1e-7)
    assert disp.estimator_name == "LogisticRegression"
    # cannot fail thanks to pyplot fixture
    import matplotlib as mpl
    assert isinstance(disp.line_, mpl.lines.Line2D)
    assert disp.line_.get_alpha() == 0.8
    assert isinstance(disp.ax_, mpl.axes.Axes)
    assert isinstance(disp.figure_, mpl.figure.Figure)
    assert disp.line_.get_label() == "LogisticRegression"
    # Axis labels mention the positive label (default 1 for integer targets).
    expected_pos_label = 1 if pos_label is None else pos_label
    expected_ylabel = f"False Negative Rate (Positive label: {expected_pos_label})"
    expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
    assert disp.ax_.get_ylabel() == expected_ylabel
    assert disp.ax_.get_xlabel() == expected_xlabel
@pytest.mark.parametrize(
    "constructor_name, expected_clf_name",
    [
        ("from_estimator", "LogisticRegression"),
        ("from_predictions", "Classifier"),
    ],
)
def test_det_curve_display_default_name(
    pyplot,
    constructor_name,
    expected_clf_name,
):
    """Default curve name: the estimator class name for `from_estimator`,
    the generic "Classifier" for `from_predictions`."""
    X, y = load_iris(return_X_y=True)
    # Keep only the first two classes to obtain a binary problem.
    X, y = X[y < 2], y[y < 2]
    model = LogisticRegression().fit(X, y)
    scores = model.predict_proba(X)[:, 1]
    if constructor_name == "from_estimator":
        disp = DetCurveDisplay.from_estimator(model, X, y)
    else:
        disp = DetCurveDisplay.from_predictions(y, scores)
    assert disp.estimator_name == expected_clf_name
    assert disp.line_.get_label() == expected_clf_name
# TODO(1.10): remove
def test_y_score_and_y_pred_specified_error(pyplot):
    """`y_pred` is deprecated: passing both `y_score` and `y_pred` raises a
    ValueError, and passing `y_pred` alone warns about the 1.8 deprecation."""
    y_true = np.array([0, 0, 1, 1])
    scores = np.array([0.1, 0.4, 0.35, 0.8])
    other_scores = np.array([0.2, 0.3, 0.5, 0.1])
    with pytest.raises(
        ValueError, match="`y_pred` and `y_score` cannot be both specified"
    ):
        DetCurveDisplay.from_predictions(y_true, y_score=scores, y_pred=other_scores)
    with pytest.warns(FutureWarning, match="y_pred was deprecated in 1.8"):
        DetCurveDisplay.from_predictions(y_true, y_pred=scores)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_pairwise_distances_reduction.py | sklearn/metrics/tests/test_pairwise_distances_reduction.py | import itertools
import re
import warnings
from functools import partial
import numpy as np
import pytest
from scipy.spatial.distance import cdist
from sklearn.metrics import euclidean_distances, pairwise_distances
from sklearn.metrics._pairwise_distances_reduction import (
ArgKmin,
ArgKminClassMode,
BaseDistancesReductionDispatcher,
RadiusNeighbors,
RadiusNeighborsClassMode,
sqeuclidean_row_norms,
)
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
create_memmap_backed_data,
)
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.parallel import _get_threadpool_controller
# Metrics supported by both scipy.spatial.distance.cdist and
# BaseDistancesReductionDispatcher. Restricting tests to this common subset
# lets us cross-check the results of the concrete dispatchers against
# reference implementations from scipy and numpy.
CDIST_PAIRWISE_DISTANCES_REDUCTION_COMMON_METRICS = [
    "braycurtis",
    "canberra",
    "chebyshev",
    "cityblock",
    "euclidean",
    "minkowski",
    "seuclidean",
]
def _get_metric_params_list(metric: str, n_features: int, seed: int = 1):
"""Return list of dummy DistanceMetric kwargs for tests."""
# Distinguishing on cases not to compute unneeded datastructures.
rng = np.random.RandomState(seed)
if metric == "minkowski":
minkowski_kwargs = [
dict(p=1.5),
dict(p=2),
dict(p=3),
dict(p=np.inf),
dict(p=3, w=rng.rand(n_features)),
]
return minkowski_kwargs
if metric == "seuclidean":
return [dict(V=rng.rand(n_features))]
# Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric.
# In those cases, no kwargs is needed.
return [{}]
def assert_same_distances_for_common_neighbors(
    query_idx,
    dist_row_a,
    dist_row_b,
    indices_row_a,
    indices_row_b,
    rtol,
    atol,
):
    """Check that the distances of common neighbors are equal up to tolerance.

    This does not check if there are missing neighbors in either result set.
    Missingness is handled by assert_no_missing_neighbors.
    """
    # Map each neighbor index to its distance in both result sets, then
    # compare the distances for the indices present in both.
    dist_by_index_a = dict(zip(indices_row_a, dist_row_a))
    dist_by_index_b = dict(zip(indices_row_b, dist_row_b))
    for idx in set(indices_row_a) & set(indices_row_b):
        dist_a = dist_by_index_a[idx]
        dist_b = dist_by_index_b[idx]
        try:
            assert_allclose(dist_a, dist_b, rtol=rtol, atol=atol)
        except AssertionError as e:
            # Re-raise with extra context (query and neighbor indices) while
            # chaining the original exception that carries the computed
            # absolute and relative differences.
            raise AssertionError(
                f"Query vector with index {query_idx} lead to different distances"
                f" for common neighbor with index {idx}:"
                f" dist_a={dist_a} vs dist_b={dist_b} (with atol={atol} and"
                f" rtol={rtol})"
            ) from e
def assert_no_missing_neighbors(
    query_idx,
    dist_row_a,
    dist_row_b,
    indices_row_a,
    indices_row_b,
    threshold,
):
    """Compare the indices of neighbors in two results sets.

    Any neighbor index with a distance below the precision threshold should
    match one in the other result set. We ignore the last few neighbors beyond
    the threshold as those can typically be missing due to rounding errors.

    For radius queries, the threshold is just the radius minus the expected
    precision level.

    For k-NN queries, it is the maximum distance to the k-th neighbor minus the
    expected precision level.
    """
    # Indices whose distance is confidently below the threshold must appear
    # in both result sets; indices beyond it may legitimately differ.
    confident_a = indices_row_a[dist_row_a < threshold]
    confident_b = indices_row_b[dist_row_b < threshold]
    missing_from_b = np.setdiff1d(confident_a, indices_row_b)
    missing_from_a = np.setdiff1d(confident_b, indices_row_a)
    if missing_from_a.size or missing_from_b.size:
        raise AssertionError(
            f"Query vector with index {query_idx} lead to mismatched result indices:\n"
            f"neighbors in b missing from a: {missing_from_a}\n"
            f"neighbors in a missing from b: {missing_from_b}\n"
            f"dist_row_a={dist_row_a}\n"
            f"dist_row_b={dist_row_b}\n"
            f"indices_row_a={indices_row_a}\n"
            f"indices_row_b={indices_row_b}\n"
        )
def assert_compatible_argkmin_results(
    neighbors_dists_a,
    neighbors_dists_b,
    neighbors_indices_a,
    neighbors_indices_b,
    rtol=1e-5,
    atol=1e-6,
):
    """Assert that argkmin results are valid up to rounding errors.

    This function asserts that the results of argkmin queries are valid up to:
      - rounding error tolerance on distance values;
      - permutations of indices for distances values that differ up to the
        expected precision level.

    Furthermore, the distances must be sorted.

    To be used for testing neighbors queries on float32 datasets: we accept
    neighbors rank swaps only if they are caused by small rounding errors on
    the distance computations.
    """
    # A row is valid only if its distances are monotonically non-decreasing.
    is_sorted = lambda a: np.all(a[:-1] <= a[1:])
    assert (
        neighbors_dists_a.shape
        == neighbors_dists_b.shape
        == neighbors_indices_a.shape
        == neighbors_indices_b.shape
    ), "Arrays of results have incompatible shapes."
    n_queries, _ = neighbors_dists_a.shape
    # Asserting equality results one row at a time
    for query_idx in range(n_queries):
        dist_row_a = neighbors_dists_a[query_idx]
        dist_row_b = neighbors_dists_b[query_idx]
        indices_row_a = neighbors_indices_a[query_idx]
        indices_row_b = neighbors_indices_b[query_idx]
        assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
        assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
        assert_same_distances_for_common_neighbors(
            query_idx,
            dist_row_a,
            dist_row_b,
            indices_row_a,
            indices_row_b,
            rtol,
            atol,
        )
        # Check that any neighbor with distances below the rounding error
        # threshold have matching indices. The threshold is the distance to the
        # k-th neighbors minus the expected precision level:
        #
        # (1 - rtol) * dist_k - atol
        #
        # Where dist_k is defined as the maximum distance to the kth-neighbor
        # among the two result sets. This way of defining the threshold is
        # stricter than taking the minimum of the two.
        threshold = (1 - rtol) * np.maximum(
            np.max(dist_row_a), np.max(dist_row_b)
        ) - atol
        assert_no_missing_neighbors(
            query_idx,
            dist_row_a,
            dist_row_b,
            indices_row_a,
            indices_row_b,
            threshold,
        )
def _non_trivial_radius(
*,
X=None,
Y=None,
metric=None,
precomputed_dists=None,
expected_n_neighbors=10,
n_subsampled_queries=10,
**metric_kwargs,
):
# Find a non-trivial radius using a small subsample of the pairwise
# distances between X and Y: we want to return around expected_n_neighbors
# on average. Yielding too many results would make the test slow (because
# checking the results is expensive for large result sets), yielding 0 most
# of the time would make the test useless.
assert precomputed_dists is not None or metric is not None, (
"Either metric or precomputed_dists must be provided."
)
if precomputed_dists is None:
assert X is not None
assert Y is not None
sampled_dists = pairwise_distances(X, Y, metric=metric, **metric_kwargs)
else:
sampled_dists = precomputed_dists[:n_subsampled_queries].copy()
sampled_dists.sort(axis=1)
return sampled_dists[:, expected_n_neighbors].mean()
def assert_compatible_radius_results(
    neighbors_dists_a,
    neighbors_dists_b,
    neighbors_indices_a,
    neighbors_indices_b,
    radius,
    check_sorted=True,
    rtol=1e-5,
    atol=1e-6,
):
    """Assert that radius neighborhood results are valid up to:

      - relative and absolute tolerance on computed distance values
      - permutations of indices for distances values that differ up to
        a precision level
      - missing or extra last elements if their distance is
        close to the radius

    To be used for testing neighbors queries on float32 datasets: we
    accept neighbors rank swaps only if they are caused by small
    rounding errors on the distance computations.

    Input arrays must be sorted w.r.t distances.
    """
    # A row is valid only if its distances are monotonically non-decreasing.
    is_sorted = lambda a: np.all(a[:-1] <= a[1:])
    assert (
        len(neighbors_dists_a)
        == len(neighbors_dists_b)
        == len(neighbors_indices_a)
        == len(neighbors_indices_b)
    )
    n_queries = len(neighbors_dists_a)
    # Asserting equality of results one vector at a time
    for query_idx in range(n_queries):
        dist_row_a = neighbors_dists_a[query_idx]
        dist_row_b = neighbors_dists_b[query_idx]
        indices_row_a = neighbors_indices_a[query_idx]
        indices_row_b = neighbors_indices_b[query_idx]
        if check_sorted:
            assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
            assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
        assert len(dist_row_a) == len(indices_row_a)
        assert len(dist_row_b) == len(indices_row_b)
        # Check that all distances are within the requested radius
        if len(dist_row_a) > 0:
            max_dist_a = np.max(dist_row_a)
            assert max_dist_a <= radius, (
                f"Largest returned distance {max_dist_a} not within requested"
                f" radius {radius} on row {query_idx}"
            )
        if len(dist_row_b) > 0:
            max_dist_b = np.max(dist_row_b)
            assert max_dist_b <= radius, (
                f"Largest returned distance {max_dist_b} not within requested"
                f" radius {radius} on row {query_idx}"
            )
        assert_same_distances_for_common_neighbors(
            query_idx,
            dist_row_a,
            dist_row_b,
            indices_row_a,
            indices_row_b,
            rtol,
            atol,
        )
        # Any neighbor confidently inside the radius (beyond rounding error)
        # must be present in both result sets.
        threshold = (1 - rtol) * radius - atol
        assert_no_missing_neighbors(
            query_idx,
            dist_row_a,
            dist_row_b,
            indices_row_a,
            indices_row_b,
            threshold,
        )
# Tolerances used when comparing neighbor distances computed in float32:
# looser bounds to accommodate the reduced precision.
FLOAT32_TOLS = {
    "atol": 1e-7,
    "rtol": 1e-5,
}
# Tighter tolerances for float64 computations.
FLOAT64_TOLS = {
    "atol": 1e-9,
    "rtol": 1e-7,
}
# Dispatch table: (reduction dispatcher class, dtype) -> comparison helper
# pre-bound (via functools.partial) with the matching tolerances.
ASSERT_RESULT = {
    (ArgKmin, np.float64): partial(assert_compatible_argkmin_results, **FLOAT64_TOLS),
    (ArgKmin, np.float32): partial(assert_compatible_argkmin_results, **FLOAT32_TOLS),
    (
        RadiusNeighbors,
        np.float64,
    ): partial(assert_compatible_radius_results, **FLOAT64_TOLS),
    (
        RadiusNeighbors,
        np.float32,
    ): partial(assert_compatible_radius_results, **FLOAT32_TOLS),
}
def test_assert_compatible_argkmin_results():
    """Meta-test for `assert_compatible_argkmin_results`: it must accept
    rounding-induced rank swaps and near-threshold index mismatches, and
    reject genuine distance or index discrepancies."""
    atol = 1e-7
    rtol = 0.0
    tols = dict(atol=atol, rtol=rtol)

    # Perturbations smaller than atol that must be tolerated.
    eps = atol / 3
    _1m = 1.0 - eps
    _1p = 1.0 + eps
    _6_1m = 6.1 - eps
    _6_1p = 6.1 + eps

    ref_dist = np.array(
        [
            [1.2, 2.5, _6_1m, 6.1, _6_1p],
            [_1m, _1m, 1, _1p, _1p],
        ]
    )
    ref_indices = np.array(
        [
            [1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
        ]
    )

    # Sanity check: compare the reference results to themselves.
    # Bug fix: `rtol` used to be passed positionally here, which bound it to
    # the `rtol` parameter but silently left `atol` at its default (1e-6)
    # instead of the stricter value defined above; use `**tols` like every
    # other call in this test.
    assert_compatible_argkmin_results(
        ref_dist, ref_dist, ref_indices, ref_indices, **tols
    )

    # Apply valid permutation on indices: the last 3 points are all very close
    # to one another so we accept any permutation on their rankings.
    assert_compatible_argkmin_results(
        np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
        np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
        np.array([[1, 2, 3, 4, 5]]),
        np.array([[1, 2, 5, 4, 3]]),
        **tols,
    )

    # The last few indices do not necessarily have to match because of the rounding
    # errors on the distances: there could be tied results at the boundary.
    assert_compatible_argkmin_results(
        np.array([[1.2, 2.5, 3.0, 6.1, _6_1p]]),
        np.array([[1.2, 2.5, 3.0, _6_1m, 6.1]]),
        np.array([[1, 2, 3, 4, 5]]),
        np.array([[1, 2, 3, 6, 7]]),
        **tols,
    )

    # All points have close distances so any ranking permutation
    # is valid for this query result.
    assert_compatible_argkmin_results(
        np.array([[_1m, 1, _1p, _1p, _1p]]),
        np.array([[1, 1, 1, 1, _1p]]),
        np.array([[7, 6, 8, 10, 9]]),
        np.array([[6, 9, 7, 8, 10]]),
        **tols,
    )

    # They could also be nearly truncation of very large nearly tied result
    # sets hence all indices can also be distinct in this case:
    assert_compatible_argkmin_results(
        np.array([[_1m, 1, _1p, _1p, _1p]]),
        np.array([[_1m, 1, 1, 1, _1p]]),
        np.array([[34, 30, 8, 12, 24]]),
        np.array([[42, 1, 21, 13, 3]]),
        **tols,
    )

    # Apply invalid permutation on indices: permuting the ranks of the 2
    # nearest neighbors is invalid because the distance values are too
    # different.
    msg = re.escape(
        "Query vector with index 0 lead to different distances for common neighbor with"
        " index 1: dist_a=1.2 vs dist_b=2.5"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_argkmin_results(
            np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
            np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
            np.array([[1, 2, 3, 4, 5]]),
            np.array([[2, 1, 3, 4, 5]]),
            **tols,
        )

    # Detect missing indices within the expected precision level, even when the
    # distances match exactly.
    msg = re.escape(
        "neighbors in b missing from a: [12]\nneighbors in a missing from b: [1]"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_argkmin_results(
            np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
            np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
            np.array([[1, 2, 3, 4, 5]]),
            np.array([[12, 2, 4, 11, 3]]),
            **tols,
        )

    # Detect missing indices outside the expected precision level.
    msg = re.escape(
        "neighbors in b missing from a: []\nneighbors in a missing from b: [3]"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_argkmin_results(
            np.array([[_1m, 1.0, _6_1m, 6.1, _6_1p]]),
            np.array([[1.0, 1.0, _6_1m, 6.1, 7]]),
            np.array([[1, 2, 3, 4, 5]]),
            np.array([[2, 1, 4, 5, 12]]),
            **tols,
        )

    # Detect missing indices outside the expected precision level, in the other
    # direction:
    msg = re.escape(
        "neighbors in b missing from a: [5]\nneighbors in a missing from b: []"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_argkmin_results(
            np.array([[_1m, 1.0, _6_1m, 6.1, 7]]),
            np.array([[1.0, 1.0, _6_1m, 6.1, _6_1p]]),
            np.array([[1, 2, 3, 4, 12]]),
            np.array([[2, 1, 5, 3, 4]]),
            **tols,
        )

    # Distances aren't properly sorted
    msg = "Distances aren't sorted on row 0"
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_argkmin_results(
            np.array([[1.2, 2.5, _6_1m, 6.1, _6_1p]]),
            np.array([[2.5, 1.2, _6_1m, 6.1, _6_1p]]),
            np.array([[1, 2, 3, 4, 5]]),
            np.array([[2, 1, 4, 5, 3]]),
            **tols,
        )
@pytest.mark.parametrize("check_sorted", [True, False])
def test_assert_compatible_radius_results(check_sorted):
    """Check `assert_compatible_radius_results`, the helper used to compare two
    radius-neighbors result sets.

    The helper must accept reorderings of neighbors whose distances are tied
    within the rounding-error tolerance, and reject genuine mismatches:
    different distances for a shared index, missing indices beyond the
    tolerated range near the radius, distances exceeding the radius, and
    (when `check_sorted=True`) unsorted distances.
    """
    atol = 1e-7
    rtol = 0.0
    tols = dict(atol=atol, rtol=rtol)
    # eps is strictly smaller than atol, so values differing by eps must be
    # treated as ties by the helper.
    eps = atol / 3
    _1m = 1.0 - eps
    _1p = 1.0 + eps
    _6_1m = 6.1 - eps
    _6_1p = 6.1 + eps
    # Ragged reference results: one distance/index array per query vector.
    ref_dist = [
        np.array([1.2, 2.5, _6_1m, 6.1, _6_1p]),
        np.array([_1m, 1, _1p, _1p]),
    ]
    ref_indices = [
        np.array([1, 2, 3, 4, 5]),
        np.array([6, 7, 8, 9]),
    ]
    # Sanity check: compare the reference results to themselves.
    assert_compatible_radius_results(
        ref_dist,
        ref_dist,
        ref_indices,
        ref_indices,
        radius=7.0,
        check_sorted=check_sorted,
        **tols,
    )
    # Apply valid permutation on indices: neighbors with distances tied
    # within eps may appear in any relative order.
    assert_compatible_radius_results(
        np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
        np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
        np.array([np.array([1, 2, 3, 4, 5])]),
        np.array([np.array([1, 2, 4, 5, 3])]),
        radius=7.0,
        check_sorted=check_sorted,
        **tols,
    )
    assert_compatible_radius_results(
        np.array([np.array([_1m, _1m, 1, _1p, _1p])]),
        np.array([np.array([_1m, _1m, 1, _1p, _1p])]),
        np.array([np.array([6, 7, 8, 9, 10])]),
        np.array([np.array([6, 9, 7, 8, 10])]),
        radius=7.0,
        check_sorted=check_sorted,
        **tols,
    )
    # Apply invalid permutation on indices: indices 1 and 2 have clearly
    # different distances (1.2 vs 2.5), so swapping them must be rejected.
    msg = re.escape(
        "Query vector with index 0 lead to different distances for common neighbor with"
        " index 1: dist_a=1.2 vs dist_b=2.5"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([1, 2, 3, 4, 5])]),
            np.array([np.array([2, 1, 3, 4, 5])]),
            radius=7.0,
            check_sorted=check_sorted,
            **tols,
        )
    # Having extra last or missing elements is valid if they are in the
    # tolerated rounding error range: [(1 - rtol) * radius - atol, radius]
    assert_compatible_radius_results(
        np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p, _6_1p])]),
        np.array([np.array([1.2, 2.5, _6_1m, 6.1])]),
        np.array([np.array([1, 2, 3, 4, 5, 7])]),
        np.array([np.array([1, 2, 3, 6])]),
        radius=_6_1p,
        check_sorted=check_sorted,
        **tols,
    )
    # Any discrepancy outside the tolerated rounding error range is invalid and
    # indicates a missing neighbor in one of the result sets.
    msg = re.escape(
        "Query vector with index 0 lead to mismatched result indices:\nneighbors in b"
        " missing from a: []\nneighbors in a missing from b: [3]"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.5, 6])]),
            np.array([np.array([1.2, 2.5])]),
            np.array([np.array([1, 2, 3])]),
            np.array([np.array([1, 2])]),
            radius=6.1,
            check_sorted=check_sorted,
            **tols,
        )
    msg = re.escape(
        "Query vector with index 0 lead to mismatched result indices:\nneighbors in b"
        " missing from a: [4]\nneighbors in a missing from b: [2]"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.1, 2.5])]),
            np.array([np.array([1.2, 2, 2.5])]),
            np.array([np.array([1, 2, 3])]),
            np.array([np.array([1, 4, 3])]),
            radius=6.1,
            check_sorted=check_sorted,
            **tols,
        )
    # Radius upper bound is strictly checked: a distance above the radius
    # (here _6_1p > 6.1) must be rejected regardless of which side it is on.
    msg = re.escape(
        "Largest returned distance 6.100000033333333 not within requested radius 6.1 on"
        " row 0"
    )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, 6.1])]),
            np.array([np.array([1, 2, 3, 4, 5])]),
            np.array([np.array([2, 1, 4, 5, 3])]),
            radius=6.1,
            check_sorted=check_sorted,
            **tols,
        )
    with pytest.raises(AssertionError, match=msg):
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, 6.1])]),
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([1, 2, 3, 4, 5])]),
            np.array([np.array([2, 1, 4, 5, 3])]),
            radius=6.1,
            check_sorted=check_sorted,
            **tols,
        )
    if check_sorted:
        # Distances aren't properly sorted: rejected only when sorting is
        # requested.
        msg = "Distances aren't sorted on row 0"
        with pytest.raises(AssertionError, match=msg):
            assert_compatible_radius_results(
                np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
                np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])]),
                np.array([np.array([1, 2, 3, 4, 5])]),
                np.array([np.array([2, 1, 4, 5, 3])]),
                radius=_6_1p,
                check_sorted=True,
                **tols,
            )
    else:
        # The same unsorted input is accepted when check_sorted=False.
        assert_compatible_radius_results(
            np.array([np.array([1.2, 2.5, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([2.5, 1.2, _6_1m, 6.1, _6_1p])]),
            np.array([np.array([1, 2, 3, 4, 5])]),
            np.array([np.array([2, 1, 4, 5, 3])]),
            radius=_6_1p,
            check_sorted=False,
            **tols,
        )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pairwise_distances_reduction_is_usable_for(csr_container):
    """Check which input combinations `BaseDistancesReductionDispatcher`
    reports as supported via `is_usable_for`."""
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    Y = rng.rand(100, 10)
    X_csr = csr_container(X)
    Y_csr = csr_container(Y)
    metric = "manhattan"
    # Must be usable for all possible pair of {dense, sparse} datasets
    assert BaseDistancesReductionDispatcher.is_usable_for(X, Y, metric)
    assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y_csr, metric)
    assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y, metric)
    assert BaseDistancesReductionDispatcher.is_usable_for(X, Y_csr, metric)
    # Both float64 and float32 pairs are supported, but only when X and Y
    # share the same floating dtype.
    assert BaseDistancesReductionDispatcher.is_usable_for(
        X.astype(np.float64), Y.astype(np.float64), metric
    )
    assert BaseDistancesReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y.astype(np.float32), metric
    )
    # Integer dtypes, unknown metrics and mixed dtypes are not supported.
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        X.astype(np.int64), Y.astype(np.int64), metric
    )
    assert not BaseDistancesReductionDispatcher.is_usable_for(X, Y, metric="pyfunc")
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        X.astype(np.float32), Y, metric
    )
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        X, Y.astype(np.int32), metric
    )
    # F-ordered arrays are not supported
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        np.asfortranarray(X), Y, metric
    )
    assert BaseDistancesReductionDispatcher.is_usable_for(X_csr, Y, metric="euclidean")
    assert BaseDistancesReductionDispatcher.is_usable_for(
        X, Y_csr, metric="sqeuclidean"
    )
    # FIXME: the current Cython implementation is too slow for a large number of
    # features. We temporarily disable it to fallback on SciPy's implementation.
    # See: https://github.com/scikit-learn/scikit-learn/issues/28191
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        X_csr, Y_csr, metric="sqeuclidean"
    )
    assert not BaseDistancesReductionDispatcher.is_usable_for(
        X_csr, Y_csr, metric="euclidean"
    )
    # CSR matrices without non-zeros elements aren't currently supported
    # TODO: support CSR matrices without non-zeros elements
    X_csr_0_nnz = csr_container(X * 0)
    assert not BaseDistancesReductionDispatcher.is_usable_for(X_csr_0_nnz, Y, metric)
    # CSR matrices with int64 indices and indptr (e.g. large nnz, or large n_features)
    # aren't supported as of now.
    # See: https://github.com/scikit-learn/scikit-learn/issues/23653
    # TODO: support CSR matrices with int64 indices and indptr
    X_csr_int64 = csr_container(X)
    X_csr_int64.indices = X_csr_int64.indices.astype(np.int64)
    assert not BaseDistancesReductionDispatcher.is_usable_for(X_csr_int64, Y, metric)
def test_argkmin_factory_method_wrong_usages():
    """Check that `ArgKmin.compute` rejects invalid inputs with informative
    errors and warns about unused `metric_kwargs`."""
    rng = np.random.RandomState(1)
    X = rng.rand(100, 10)
    Y = rng.rand(100, 10)
    k = 5
    metric = "euclidean"
    # Mismatched dtypes between X and Y are rejected in both directions.
    msg = (
        "Only float64 or float32 datasets pairs are supported at this time, "
        "got: X.dtype=float32 and Y.dtype=float64"
    )
    with pytest.raises(ValueError, match=msg):
        ArgKmin.compute(X=X.astype(np.float32), Y=Y, k=k, metric=metric)
    msg = (
        "Only float64 or float32 datasets pairs are supported at this time, "
        "got: X.dtype=float64 and Y.dtype=int32"
    )
    with pytest.raises(ValueError, match=msg):
        ArgKmin.compute(X=X, Y=Y.astype(np.int32), k=k, metric=metric)
    # k must be a strictly positive integer.
    with pytest.raises(ValueError, match="k == -1, must be >= 1."):
        ArgKmin.compute(X=X, Y=Y, k=-1, metric=metric)
    with pytest.raises(ValueError, match="k == 0, must be >= 1."):
        ArgKmin.compute(X=X, Y=Y, k=0, metric=metric)
    with pytest.raises(ValueError, match="Unrecognized metric"):
        ArgKmin.compute(X=X, Y=Y, k=k, metric="wrong metric")
    # Inputs must be 2D and C-contiguous.
    with pytest.raises(
        ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)"
    ):
        ArgKmin.compute(X=np.array([1.0, 2.0]), Y=Y, k=k, metric=metric)
    with pytest.raises(ValueError, match="ndarray is not C-contiguous"):
        ArgKmin.compute(X=np.asfortranarray(X), Y=Y, k=k, metric=metric)
    # A UserWarning must be raised in this case.
    unused_metric_kwargs = {"p": 3}
    message = r"Some metric_kwargs have been passed \({'p': 3}\) but"
    with pytest.warns(UserWarning, match=message):
        ArgKmin.compute(
            X=X, Y=Y, k=k, metric=metric, metric_kwargs=unused_metric_kwargs
        )
    # A UserWarning must be raised in this case: 'p' is still unused even
    # though 'Y_norm_squared' is a recognized kwarg.
    metric_kwargs = {
        "p": 3,  # unused
        "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2),
    }
    message = r"Some metric_kwargs have been passed \({'p': 3, 'Y_norm_squared'"
    with pytest.warns(UserWarning, match=message):
        ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs)
    # No user warning must be raised in this case.
    metric_kwargs = {
        "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2),
    }
    with warnings.catch_warnings():
        # why: escalate any UserWarning to an error to prove none is emitted
        warnings.simplefilter("error", category=UserWarning)
        ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs)
    # No user warning must be raised in this case.
    metric_kwargs = {
        "X_norm_squared": sqeuclidean_row_norms(X, num_threads=2),
        "Y_norm_squared": sqeuclidean_row_norms(Y, num_threads=2),
    }
    with warnings.catch_warnings():
        warnings.simplefilter("error", category=UserWarning)
        ArgKmin.compute(X=X, Y=Y, k=k, metric=metric, metric_kwargs=metric_kwargs)
def test_argkmin_classmode_factory_method_wrong_usages():
    """Check that `ArgKminClassMode.compute` rejects invalid inputs with
    informative error messages."""
    rng = np.random.RandomState(1)
    X = rng.rand(100, 10)
    Y = rng.rand(100, 10)
    k = 5
    metric = "manhattan"
    weights = "uniform"
    Y_labels = rng.randint(low=0, high=10, size=100)
    unique_Y_labels = np.unique(Y_labels)
    # Keyword arguments shared by every call below; each check overrides
    # only the single argument under test.
    valid_kwargs = dict(
        X=X,
        Y=Y,
        k=k,
        metric=metric,
        weights=weights,
        Y_labels=Y_labels,
        unique_Y_labels=unique_Y_labels,
    )

    def compute_with(**overrides):
        # Helper keeping each invalid-input check to a single readable call.
        return ArgKminClassMode.compute(**{**valid_kwargs, **overrides})

    # Mismatched dtypes between X and Y are rejected in both directions.
    msg = (
        "Only float64 or float32 datasets pairs are supported at this time, "
        "got: X.dtype=float32 and Y.dtype=float64"
    )
    with pytest.raises(ValueError, match=msg):
        compute_with(X=X.astype(np.float32))
    msg = (
        "Only float64 or float32 datasets pairs are supported at this time, "
        "got: X.dtype=float64 and Y.dtype=int32"
    )
    with pytest.raises(ValueError, match=msg):
        compute_with(Y=Y.astype(np.int32))
    # k must be a strictly positive integer.
    with pytest.raises(ValueError, match="k == -1, must be >= 1."):
        compute_with(k=-1)
    with pytest.raises(ValueError, match="k == 0, must be >= 1."):
        compute_with(k=0)
    with pytest.raises(ValueError, match="Unrecognized metric"):
        compute_with(metric="wrong metric")
    # Inputs must be 2D and C-contiguous.
    with pytest.raises(
        ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)"
    ):
        compute_with(X=np.array([1.0, 2.0]))
    with pytest.raises(ValueError, match="ndarray is not C-contiguous"):
        compute_with(X=np.asfortranarray(X))
    # Only the documented weighting strategies are accepted.
    non_existent_weights_strategy = "non_existent_weights_strategy"
    message = (
        "Only the 'uniform' or 'distance' weights options are supported at this time. "
        f"Got: weights='{non_existent_weights_strategy}'."
    )
    with pytest.raises(ValueError, match=message):
        compute_with(weights=non_existent_weights_strategy)
    # TODO: introduce assertions on UserWarnings once the Euclidean specialisation
    # of ArgKminClassMode is supported.
def test_radius_neighbors_factory_method_wrong_usages():
rng = np.random.RandomState(1)
X = rng.rand(100, 10)
Y = rng.rand(100, 10)
radius = 5
metric = "euclidean"
msg = (
"Only float64 or float32 datasets pairs are supported at this time, "
"got: X.dtype=float32 and Y.dtype=float64"
)
with pytest.raises(
ValueError,
match=msg,
):
RadiusNeighbors.compute(
X=X.astype(np.float32), Y=Y, radius=radius, metric=metric
)
msg = (
"Only float64 or float32 datasets pairs are supported at this time, "
"got: X.dtype=float64 and Y.dtype=int32"
)
with pytest.raises(
ValueError,
match=msg,
):
RadiusNeighbors.compute(X=X, Y=Y.astype(np.int32), radius=radius, metric=metric)
with pytest.raises(ValueError, match="radius == -1.0, must be >= 0."):
RadiusNeighbors.compute(X=X, Y=Y, radius=-1, metric=metric)
with pytest.raises(ValueError, match="Unrecognized metric"):
RadiusNeighbors.compute(X=X, Y=Y, radius=radius, metric="wrong metric")
with pytest.raises(
ValueError, match=r"Buffer has wrong number of dimensions \(expected 2, got 1\)"
):
RadiusNeighbors.compute(
X=np.array([1.0, 2.0]), Y=Y, radius=radius, metric=metric
)
with pytest.raises(ValueError, match="ndarray is not C-contiguous"):
RadiusNeighbors.compute(
X=np.asfortranarray(X), Y=Y, radius=radius, metric=metric
)
unused_metric_kwargs = {"p": 3}
# A UserWarning must be raised in this case.
message = r"Some metric_kwargs have been passed \({'p': 3}\) but"
with pytest.warns(UserWarning, match=message):
RadiusNeighbors.compute(
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_classification.py | sklearn/metrics/tests/test_classification.py | import re
import warnings
from functools import partial
from itertools import chain, permutations, product
import numpy as np
import pytest
from scipy import linalg, sparse
from scipy.spatial.distance import hamming as sp_hamming
from scipy.stats import bernoulli
from sklearn import datasets, svm
from sklearn.base import config_context
from sklearn.datasets import make_multilabel_classification
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import (
accuracy_score,
average_precision_score,
balanced_accuracy_score,
brier_score_loss,
class_likelihood_ratios,
classification_report,
cohen_kappa_score,
confusion_matrix,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_score,
log_loss,
make_scorer,
matthews_corrcoef,
multilabel_confusion_matrix,
precision_recall_fscore_support,
precision_score,
recall_score,
zero_one_loss,
)
from sklearn.metrics._classification import (
_check_targets,
d2_brier_score,
d2_log_loss_score,
)
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelBinarizer, label_binarize
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._array_api import (
_get_namespace_device_dtype_ids,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import (
device as array_api_device,
)
from sklearn.utils._mocking import MockDataFrame
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.extmath import _nanaverage
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
from sklearn.utils.validation import check_random_state
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Fit an SVC on half of a toy dataset and predict on the other half.

    If `binary` is True, the problem is restricted to the two first classes so
    that it becomes a binary classification problem instead of a multiclass
    one. Returns `(y_true, y_pred, y_pred_proba)` for the held-out half.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X, y = dataset.data, dataset.target
    if binary:
        # restrict to a binary classification task
        keep = y < 2
        X, y = X[keep], y[keep]
    n_samples, n_features = X.shape
    # shuffle the samples deterministically before the train/test split
    permutation = np.arange(n_samples)
    check_random_state(37).shuffle(permutation)
    X, y = X[permutation], y[permutation]
    half = n_samples // 2
    # add noisy features to make the problem harder and avoid perfect results
    noise_rng = np.random.RandomState(0)
    X = np.c_[X, noise_rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel="linear", probability=True, random_state=0)
    clf.fit(X[:half], y[:half])
    y_pred_proba = clf.predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        y_pred_proba = y_pred_proba[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, y_pred_proba
###############################################################################
# Tests
def test_classification_report_dictionary_output():
    """Test the classification report with `output_dict=True`."""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # Expected report keyed by class names, plus the averaged rows and the
    # scalar "accuracy" entry.
    expected_report = {
        "setosa": {
            "precision": 0.82608695652173914,
            "recall": 0.79166666666666663,
            "f1-score": 0.8085106382978724,
            "support": 24,
        },
        "versicolor": {
            "precision": 0.33333333333333331,
            "recall": 0.096774193548387094,
            "f1-score": 0.15000000000000002,
            "support": 31,
        },
        "virginica": {
            "precision": 0.41860465116279072,
            "recall": 0.90000000000000002,
            "f1-score": 0.57142857142857151,
            "support": 20,
        },
        "macro avg": {
            "f1-score": 0.5099797365754813,
            "precision": 0.5260083136726211,
            "recall": 0.596146953405018,
            "support": 75,
        },
        "accuracy": 0.5333333333333333,
        "weighted avg": {
            "f1-score": 0.47310435663627154,
            "precision": 0.5137535108414785,
            "recall": 0.5333333333333333,
            "support": 75,
        },
    }
    report = classification_report(
        y_true,
        y_pred,
        labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names,
        output_dict=True,
    )
    # assert the 2 dicts are equal.
    assert report.keys() == expected_report.keys()
    for key in expected_report:
        if key == "accuracy":
            # "accuracy" maps to a scalar, not a nested metrics dict.
            assert isinstance(report[key], float)
            assert report[key] == expected_report[key]
        else:
            assert report[key].keys() == expected_report[key].keys()
            for metric in expected_report[key]:
                assert_almost_equal(expected_report[key][metric], report[key][metric])
    # Sanity checks on the types of the hard-coded expected values.
    assert isinstance(expected_report["setosa"]["precision"], float)
    assert isinstance(expected_report["macro avg"]["precision"], float)
    assert isinstance(expected_report["setosa"]["support"], int)
    assert isinstance(expected_report["macro avg"]["support"], int)
@pytest.mark.parametrize("zero_division", ["warn", 0, 1, np.nan])
def test_classification_report_zero_division_warning(zero_division):
    """Check that `classification_report` warns about undefined metrics only
    when `zero_division="warn"`."""
    y_true, y_pred = ["a", "b", "c"], ["a", "b", "d"]
    with warnings.catch_warnings(record=True) as caught:
        # We need "always" instead of "once" for free-threaded with
        # pytest-run-parallel to capture all the warnings in the
        # zero_division="warn" case.
        warnings.filterwarnings("always", message=".+Use `zero_division`")
        classification_report(
            y_true, y_pred, zero_division=zero_division, output_dict=True
        )
    if zero_division != "warn":
        # Explicit zero_division values silence the warnings entirely.
        assert not caught
    else:
        assert len(caught) > 1
        expected = "Use `zero_division` parameter to control this behavior."
        assert all(expected in str(w.message) for w in caught)
@pytest.mark.parametrize(
    "labels, show_micro_avg", [([0], True), ([0, 1], False), ([0, 1, 2], False)]
)
def test_classification_report_labels_subset_superset(labels, show_micro_avg):
    """Check the behaviour of passing `labels` as a superset or subset of the labels.

    When `labels` is a strict subset of the labels present in `y_true`/`y_pred`,
    we expect the report to show the "micro avg" row; when it is the exact set
    or a superset, the "accuracy" row should be shown instead.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27927
    """
    y_true, y_pred = [0, 1], [0, 1]
    report = classification_report(y_true, y_pred, labels=labels, output_dict=True)
    if show_micro_avg:
        assert "micro avg" in report
        assert "accuracy" not in report
    else:  # accuracy should be shown
        assert "accuracy" in report
        assert "micro avg" not in report
def test_multilabel_accuracy_score_subset_accuracy():
    """Subset (exact-match) accuracy on dense multilabel indicator matrices."""
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # Exactly one of the two rows matches -> half of the samples are correct.
    assert accuracy_score(y1, y2) == 0.5
    for y in (y1, y2):
        # Perfect agreement with itself...
        assert accuracy_score(y, y) == 1
        # ...and no row matches its complement or the all-zeros matrix.
        assert accuracy_score(y, np.logical_not(y)) == 0
        assert accuracy_score(y, np.zeros(y.shape)) == 0
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)
    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])
    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    for kwargs in [{}, {"average": "binary"}]:
        with warnings.catch_warnings():
            # why: turn any unexpected warning into an error for these calls
            warnings.simplefilter("error")
            ps = precision_score(y_true, y_pred, **kwargs)
            assert_array_almost_equal(ps, 0.85, 2)
            rs = recall_score(y_true, y_pred, **kwargs)
            assert_array_almost_equal(rs, 0.68, 2)
            fs = f1_score(y_true, y_pred, **kwargs)
            assert_array_almost_equal(fs, 0.76, 2)
            # F-beta (beta=2) must match its closed-form definition in terms
            # of precision and recall.
            assert_almost_equal(
                fbeta_score(y_true, y_pred, beta=2, **kwargs),
                (1 + 2**2) * ps * rs / (2**2 * ps + rs),
                2,
            )
@pytest.mark.filterwarnings(r"ignore::sklearn.exceptions.UndefinedMetricWarning")
def test_precision_recall_f_binary_single_class():
    """Precision, recall and F-scores when only one class is present.

    Such a case may occur with non-stratified cross-validation.
    """
    # Only the positive class: every score is perfect.
    all_positive = ([1, 1], [1, 1])
    assert precision_score(*all_positive) == 1.0
    assert recall_score(*all_positive) == 1.0
    assert f1_score(*all_positive) == 1.0
    assert fbeta_score(*all_positive, beta=0) == 1.0
    # Only the negative class: every score degenerates to 0.
    all_negative = ([-1, -1], [-1, -1])
    assert precision_score(*all_negative) == 0.0
    assert recall_score(*all_negative) == 0.0
    assert f1_score(*all_negative) == 0.0
    assert fbeta_score(*all_negative, beta=float("inf")) == 0.0
    # beta=inf must behave as the limit of very large finite beta values.
    assert fbeta_score(*all_negative, beta=float("inf")) == pytest.approx(
        fbeta_score(*all_negative, beta=1e5)
    )
@pytest.mark.filterwarnings(r"ignore::sklearn.exceptions.UndefinedMetricWarning")
def test_precision_recall_f_extra_labels():
    # Test handling of explicit additional (not in input) labels to PRF
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred), (y_true_bin, y_pred_bin)]
    # NOTE: the loop rebinds y_true/y_pred to the multiclass (i == 0) then
    # multilabel-indicator (i == 1) representation of the same data.
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=None)
        assert_array_almost_equal([0.0, 1.0, 1.0, 0.5, 0.0], actual)
        # Macro average is changed
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average="macro")
        assert_array_almost_equal(np.mean([0.0, 1.0, 1.0, 0.5, 0.0]), actual)
        # No effect otherwise
        for average in ["micro", "weighted", "samples"]:
            # "samples" averaging is only defined for the multilabel case.
            if average == "samples" and i == 0:
                continue
            assert_almost_equal(
                recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4], average=average),
                recall_score(y_true, y_pred, labels=None, average=average),
            )
    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, "macro", "micro", "samples"]:
        with pytest.raises(ValueError):
            recall_score(y_true_bin, y_pred_bin, labels=np.arange(6), average=average)
        with pytest.raises(ValueError):
            recall_score(
                y_true_bin, y_pred_bin, labels=np.arange(-1, 4), average=average
            )
    # tests non-regression on issue #10307
    y_true = np.array([[0, 1, 1], [1, 0, 0]])
    y_pred = np.array([[1, 1, 1], [1, 0, 1]])
    p, r, f, _ = precision_recall_fscore_support(
        y_true, y_pred, average="samples", labels=[0, 1]
    )
    assert_almost_equal(np.array([p, r, f]), np.array([3 / 4, 1, 5 / 6]))
@pytest.mark.filterwarnings(r"ignore::sklearn.exceptions.UndefinedMetricWarning")
def test_precision_recall_f_ignored_labels():
    # Test a subset of labels may be requested for PRF
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred), (y_true_bin, y_pred_bin)]
    # Exercise both the multiclass and multilabel-indicator representations.
    for i, (y_true, y_pred) in enumerate(data):
        # recall restricted to labels {1, 3} vs recall over all labels
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)
        assert_array_almost_equal([0.5, 1.0], recall_13(average=None))
        assert_almost_equal((0.5 + 1.0) / 2, recall_13(average="macro"))
        assert_almost_equal((0.5 * 2 + 1.0 * 1) / 3, recall_13(average="weighted"))
        assert_almost_equal(2.0 / 3, recall_13(average="micro"))
        # ensure the above were meaningful tests:
        for average in ["macro", "weighted", "micro"]:
            assert recall_13(average=average) != recall_all(average=average)
def test_average_precision_score_non_binary_class():
    """Test that `average_precision_score` rejects multiclass-multioutput input."""
    y_true = np.array(
        [
            [2, 2, 1],
            [1, 2, 0],
            [0, 1, 2],
            [1, 2, 1],
            [2, 0, 1],
            [1, 2, 1],
        ]
    )
    y_score = np.array(
        [
            [0.7, 0.2, 0.1],
            [0.4, 0.3, 0.3],
            [0.1, 0.8, 0.1],
            [0.2, 0.3, 0.5],
            [0.4, 0.4, 0.2],
            [0.1, 0.2, 0.7],
        ]
    )
    err_msg = "multiclass-multioutput format is not supported"
    with pytest.raises(ValueError, match=err_msg):
        average_precision_score(y_true, y_score, pos_label=2)
@pytest.mark.parametrize(
    "y_true, y_score",
    [
        (
            [0, 0, 1, 2],
            np.array(
                [
                    [0.7, 0.2, 0.1],
                    [0.4, 0.3, 0.3],
                    [0.1, 0.8, 0.1],
                    [0.2, 0.3, 0.5],
                ]
            ),
        ),
        (
            [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
            [0, 0.1, 0.1, 0.4, 0.5, 0.6, 0.6, 0.9, 0.9, 1, 1],
        ),
    ],
)
def test_average_precision_score_duplicate_values(y_true, y_score):
    """
    Duplicate score values with precision-recall require a different
    processing than when computing the AUC of a ROC, because the
    precision-recall curve is a decreasing curve.

    The following situations correspond to a perfect test statistic, so the
    average_precision_score should be 1.
    """
    assert average_precision_score(y_true, y_score) == 1
@pytest.mark.parametrize(
    "y_true, y_score",
    [
        (
            [2, 2, 1, 1, 0],
            np.array(
                [
                    [0.2, 0.3, 0.5],
                    [0.2, 0.3, 0.5],
                    [0.4, 0.5, 0.3],
                    [0.4, 0.5, 0.3],
                    [0.8, 0.5, 0.3],
                ]
            ),
        ),
        (
            [0, 1, 1],
            [0.5, 0.5, 0.6],
        ),
    ],
)
def test_average_precision_score_tied_values(y_true, y_score):
    """Tied scores across classes must yield an imperfect (< 1) score."""
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
    # values have the same score (0.5) and so the first two values
    # could be swapped around, creating an imperfect sorting. This
    # imperfection should come through in the end score, making it less
    # than one.
    assert average_precision_score(y_true, y_score) != 1.0
def test_precision_recall_f_unused_pos_label():
    """Warn that a non-default `pos_label` is ignored when
    `average != 'binary'`, even on binary data."""
    expected_msg = (
        r"Note that pos_label \(set to 2\) is "
        r"ignored when average != 'binary' \(got 'macro'\). You "
        r"may use labels=\[pos_label\] to specify a single "
        "positive class."
    )
    with pytest.warns(UserWarning, match=expected_msg):
        precision_recall_fscore_support(
            [1, 2, 1], [1, 2, 2], pos_label=2, average="macro"
        )
def test_confusion_matrix_binary():
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)
    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])
        # Recompute the Matthews correlation coefficient from the confusion
        # matrix entries and check it agrees with `matthews_corrcoef`.
        tp, fp, fn, tn = cm.flatten()
        num = tp * tn - fp * fn
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)
    # Exercise both integer and string labels.
    test(y_true, y_pred)
    test([str(y) for y in y_true], [str(y) for y in y_pred])
def test_multilabel_confusion_matrix_binary():
    """Multilabel confusion matrix on a binary problem, with int and str labels."""
    y_true, y_pred, _ = make_prediction(binary=True)
    expected = [[[17, 8], [3, 22]], [[22, 3], [8, 17]]]
    # The result must be identical for integer and string label encodings.
    for yt, yp in (
        (y_true, y_pred),
        ([str(v) for v in y_true], [str(v) for v in y_pred]),
    ):
        assert_array_equal(multilabel_confusion_matrix(yt, yp), expected)
def test_multilabel_confusion_matrix_multiclass():
    # Test multilabel confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)
    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = multilabel_confusion_matrix(y_true, y_pred)
        assert_array_equal(
            cm, [[[47, 4], [5, 19]], [[38, 6], [28, 3]], [[30, 25], [2, 18]]]
        )
        # compute confusion matrix with explicit label ordering
        labels = ["0", "2", "1"] if string_type else [0, 2, 1]
        cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
        assert_array_equal(
            cm, [[[47, 4], [5, 19]], [[30, 25], [2, 18]], [[38, 6], [28, 3]]]
        )
        # compute confusion matrix with super set of present labels; the
        # absent label yields an all-true-negative 2x2 block.
        labels = ["0", "2", "1", "3"] if string_type else [0, 2, 1, 3]
        cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
        assert_array_equal(
            cm,
            [
                [[47, 4], [5, 19]],
                [[30, 25], [2, 18]],
                [[38, 6], [28, 3]],
                [[75, 0], [0, 0]],
            ],
        )
    # Exercise both integer and string labels.
    test(y_true, y_pred)
    test([str(y) for y in y_true], [str(y) for y in y_pred], string_type=True)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_multilabel_confusion_matrix_multilabel(csc_container, csr_container):
    # Test multilabel confusion matrix - multilabel-indicator case
    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
    y_true_csr = csr_container(y_true)
    y_pred_csr = csr_container(y_pred)
    y_true_csc = csc_container(y_true)
    y_pred_csc = csc_container(y_pred)
    # cross test every combination of dense/CSR/CSC inputs
    sample_weight = np.array([2, 1, 3])
    real_cm = [[[1, 0], [1, 1]], [[1, 0], [1, 1]], [[0, 2], [1, 0]]]
    trues = [y_true, y_true_csr, y_true_csc]
    preds = [y_pred, y_pred_csr, y_pred_csc]
    for y_true_tmp in trues:
        for y_pred_tmp in preds:
            cm = multilabel_confusion_matrix(y_true_tmp, y_pred_tmp)
            assert_array_equal(cm, real_cm)
    # test support for samplewise
    cm = multilabel_confusion_matrix(y_true, y_pred, samplewise=True)
    assert_array_equal(cm, [[[1, 0], [1, 1]], [[1, 1], [0, 1]], [[0, 1], [2, 0]]])
    # test support for labels
    cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0])
    assert_array_equal(cm, [[[0, 2], [1, 0]], [[1, 0], [1, 1]]])
    # test support for labels with samplewise
    cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0], samplewise=True)
    assert_array_equal(cm, [[[0, 0], [1, 1]], [[1, 1], [0, 0]], [[0, 1], [1, 0]]])
    # test support for sample_weight with samplewise
    cm = multilabel_confusion_matrix(
        y_true, y_pred, sample_weight=sample_weight, samplewise=True
    )
    assert_array_equal(cm, [[[2, 0], [2, 2]], [[1, 1], [0, 1]], [[0, 3], [6, 0]]])
def test_multilabel_confusion_matrix_errors():
    """Check the error messages raised by `multilabel_confusion_matrix` for
    invalid sample weights, labels and target types."""
    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
    # Bad sample_weight
    with pytest.raises(ValueError, match="inconsistent numbers of samples"):
        multilabel_confusion_matrix(y_true, y_pred, sample_weight=[1, 2])
    with pytest.raises(ValueError, match="Sample weights must be 1D array or scalar"):
        multilabel_confusion_matrix(
            y_true, y_pred, sample_weight=[[1, 2, 3], [2, 3, 4], [3, 4, 5]]
        )
    # Bad labels: out of the [0, n_labels) range on either side
    err_msg = r"All labels must be in \[0, n labels\)"
    with pytest.raises(ValueError, match=err_msg):
        multilabel_confusion_matrix(y_true, y_pred, labels=[-1])
    err_msg = r"All labels must be in \[0, n labels\)"
    with pytest.raises(ValueError, match=err_msg):
        multilabel_confusion_matrix(y_true, y_pred, labels=[3])
    # Using samplewise outside multilabel
    with pytest.raises(ValueError, match="Samplewise metrics"):
        multilabel_confusion_matrix([0, 1, 2], [1, 2, 0], samplewise=True)
    # Bad y_type
    err_msg = "multiclass-multioutput is not supported"
    with pytest.raises(ValueError, match=err_msg):
        multilabel_confusion_matrix([[0, 1, 2], [2, 1, 0]], [[1, 2, 0], [1, 0, 2]])
@pytest.mark.parametrize(
    "normalize, cm_dtype, expected_results",
    [
        ("true", "f", 0.333333333),
        ("pred", "f", 0.333333333),
        ("all", "f", 0.1111111111),
        (None, "i", 2),
    ],
)
def test_confusion_matrix_normalize(normalize, cm_dtype, expected_results):
    """Each `normalize` mode yields the expected constant matrix and dtype."""
    labels_true = [0, 1, 2] * 6
    # Concatenating all 6 permutations of (0, 1, 2) against a repeating
    # (0, 1, 2) truth gives a uniform confusion matrix (every cell == 2).
    labels_pred = list(chain(*permutations([0, 1, 2])))
    result = confusion_matrix(labels_true, labels_pred, normalize=normalize)
    assert_allclose(result, expected_results)
    assert result.dtype.kind == cm_dtype
def test_confusion_matrix_normalize_single_class():
    """Normalization must not warn nor produce NaN when a class is never predicted."""
    y_true = [0] * 4 + [1] * 4
    y_pred = [0] * 8
    cm_true = confusion_matrix(y_true, y_pred, normalize="true")
    assert cm_true.sum() == pytest.approx(2.0)
    # Normalizing over a zero column must not emit a division-by-zero
    # RuntimeWarning; escalate any such warning to an error.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        cm_pred = confusion_matrix(y_true, y_pred, normalize="pred")
        assert cm_pred.sum() == pytest.approx(1.0)
    # Same check with the roles of the arrays swapped.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        confusion_matrix(y_pred, y_true, normalize="true")
def test_confusion_matrix_single_label():
    """`confusion_matrix` warns when the inputs contain only one label."""
    y_true = [0] * 4
    y_pred = [0] * 4
    with pytest.warns(UserWarning, match="A single label was found in"):
        confusion_matrix(y_pred, y_true)
@pytest.mark.parametrize(
    "params, warn_msg",
    [
        # When `fp == 0` and `tp != 0`, LR+ is undefined
        (
            {
                "y_true": np.array([1, 1, 1, 0, 0, 0]),
                "y_pred": np.array([1, 1, 1, 0, 0, 0]),
            },
            "`positive_likelihood_ratio` is ill-defined and set to `np.nan`.",
        ),
        # When `fp == 0` and `tp == 0`, LR+ is undefined
        (
            {
                "y_true": np.array([1, 1, 1, 0, 0, 0]),
                "y_pred": np.array([0, 0, 0, 0, 0, 0]),
            },
            (
                "No samples were predicted for the positive class and "
                "`positive_likelihood_ratio` is set to `np.nan`."
            ),
        ),
        # When `tn == 0`, LR- is undefined
        (
            {
                "y_true": np.array([1, 1, 1, 0, 0, 0]),
                "y_pred": np.array([0, 0, 0, 1, 1, 1]),
            },
            "`negative_likelihood_ratio` is ill-defined and set to `np.nan`.",
        ),
        # When `tp + fn == 0` both ratios are undefined
        (
            {
                "y_true": np.array([0, 0, 0, 0, 0, 0]),
                "y_pred": np.array([1, 1, 1, 0, 0, 0]),
            },
            "No samples of the positive class are present in `y_true`.",
        ),
    ],
)
def test_likelihood_ratios_warnings(params, warn_msg):
    """Each ill-defined likelihood-ratio case must emit its matching UserWarning."""
    # likelihood_ratios must raise warnings when at
    # least one of the ratios is ill-defined.
    with pytest.warns(UserWarning, match=warn_msg):
        class_likelihood_ratios(**params)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        # Multiclass targets (label 2 appears in y_pred) are rejected outright.
        (
            {
                "y_true": np.array([0, 1, 0, 1, 0]),
                "y_pred": np.array([1, 1, 0, 0, 2]),
            },
            (
                "class_likelihood_ratios only supports binary classification "
                "problems, got targets of type: multiclass"
            ),
        ),
    ],
)
def test_likelihood_ratios_errors(params, err_msg):
    """Non-binary targets must raise a ValueError with an explanatory message."""
    # likelihood_ratios must raise error when attempting
    # non-binary classes to avoid Simpson's paradox
    with pytest.raises(ValueError, match=err_msg):
        class_likelihood_ratios(**params)
def test_likelihood_ratios():
    """Check LR+/LR- against hand-computed values, the perfect-prediction limit,
    and sample_weight support."""
    # Build confusion matrix with tn=9, fp=8, fn=1, tp=2,
    # sensitivity=2/3, specificity=9/17, prevalence=3/20,
    # LR+=34/24, LR-=17/27
    y_true = np.array([1] * 3 + [0] * 17)
    y_pred = np.array([1] * 2 + [0] * 10 + [1] * 8)
    pos, neg = class_likelihood_ratios(y_true, y_pred)
    assert_allclose(pos, 34 / 24)
    assert_allclose(neg, 17 / 27)
    # Build limit case with y_pred = y_true
    pos, neg = class_likelihood_ratios(y_true, y_true)
    # NOTE(review): `np.nan * 2` is simply nan, and assert_array_equal treats
    # nan as equal to nan — this effectively asserts that `pos` is nan.
    assert_array_equal(pos, np.nan * 2)
    # NOTE(review): scalar `neg` is broadcast against a length-2 zero vector;
    # this effectively asserts neg == 0.
    assert_allclose(neg, np.zeros(2), rtol=1e-12)
    # Ignore last 5 samples to get tn=9, fp=3, fn=1, tp=2,
    # sensitivity=2/3, specificity=9/12, prevalence=3/20,
    # LR+=24/9, LR-=12/27
    sample_weight = np.array([1.0] * 15 + [0.0] * 5)
    pos, neg = class_likelihood_ratios(y_true, y_pred, sample_weight=sample_weight)
    assert_allclose(pos, 24 / 9)
    assert_allclose(neg, 12 / 27)
# TODO(1.9): remove test
@pytest.mark.parametrize("raise_warning", [True, False])
def test_likelihood_ratios_raise_warning_deprecation(raise_warning):
    """Passing `raise_warning` (with either value) must emit a FutureWarning."""
    y_true = np.array([1, 0])
    y_pred = np.array([1, 0])
    expected_msg = (
        "`raise_warning` was deprecated in version 1.7 and will be removed in 1.9."
    )
    with pytest.warns(FutureWarning, match=expected_msg):
        class_likelihood_ratios(y_true, y_pred, raise_warning=raise_warning)
def test_likelihood_ratios_replace_undefined_by_worst():
    """With `replace_undefined_by=1`, undefined LR+ and LR- both become the
    worst possible score, 1.0."""
    # fp == 0 in the confusion matrix makes LR+ a division by zero.
    lr_pos, _ = class_likelihood_ratios(
        np.array([1, 1, 0]), np.array([1, 0, 0]), replace_undefined_by=1
    )
    assert lr_pos == pytest.approx(1.0)
    # tn == 0 in the confusion matrix makes LR- a division by zero.
    _, lr_neg = class_likelihood_ratios(
        np.array([1, 0, 0]), np.array([1, 1, 1]), replace_undefined_by=1
    )
    assert lr_neg == pytest.approx(1.0)
@pytest.mark.parametrize(
    "replace_undefined_by",
    [
        # NOTE(review): each case appears to be invalid for a different reason —
        # missing "LR-"/"LR+" keys, negative values, non-numeric strings, or
        # values outside the admissible LR ranges; confirm against the
        # class_likelihood_ratios docstring.
        {"LR+": 0.0},
        {"LR-": 0.0},
        {"LR+": -5.0, "LR-": 0.0},
        {"LR+": 1.0, "LR-": "nan"},
        {"LR+": 0.0, "LR-": 0.0},
        {"LR+": 1.0, "LR-": 2.0},
    ],
)
def test_likelihood_ratios_wrong_dict_replace_undefined_by(replace_undefined_by):
    """Test that class_likelihood_ratios raises a `ValueError` if the input dict for
    `replace_undefined_by` is in the wrong format or contains impossible values."""
    y_true = np.array([1, 0])
    y_pred = np.array([1, 0])
    msg = "The dictionary passed as `replace_undefined_by` needs to be in the form"
    with pytest.raises(ValueError, match=msg):
        class_likelihood_ratios(
            y_true, y_pred, replace_undefined_by=replace_undefined_by
        )
@pytest.mark.parametrize(
    "replace_undefined_by, expected",
    [
        ({"LR+": 1.0, "LR-": 1.0}, 1.0),
        ({"LR+": np.inf, "LR-": 0.0}, np.inf),
        ({"LR+": 2.0, "LR-": 0.0}, 2.0),
        ({"LR+": np.nan, "LR-": np.nan}, np.nan),
        (np.nan, np.nan),
    ],
)
def test_likelihood_ratios_replace_undefined_by_0_fp(replace_undefined_by, expected):
    """Test that the `replace_undefined_by` param returns the right value for the
    positive_likelihood_ratio as defined by the user."""
    # This data causes fp=0 (0 false positives) in the confusion_matrix and a division
    # by zero that affects the positive_likelihood_ratio:
    y_true = np.array([1, 1, 0])
    y_pred = np.array([1, 0, 0])
    positive_likelihood_ratio, _ = class_likelihood_ratios(
        y_true, y_pred, replace_undefined_by=replace_undefined_by
    )
    # nan cannot be compared with pytest.approx, so branch on the expectation.
    if np.isnan(expected):
        assert np.isnan(positive_likelihood_ratio)
    else:
        assert positive_likelihood_ratio == pytest.approx(expected)
@pytest.mark.parametrize(
    "replace_undefined_by, expected",
    [
        ({"LR+": 1.0, "LR-": 1.0}, 1.0),
        ({"LR+": np.inf, "LR-": 0.0}, 0.0),
        ({"LR+": np.inf, "LR-": 0.5}, 0.5),
        ({"LR+": np.nan, "LR-": np.nan}, np.nan),
        (np.nan, np.nan),
    ],
)
def test_likelihood_ratios_replace_undefined_by_0_tn(replace_undefined_by, expected):
    """Test that the `replace_undefined_by` param returns the right value for the
    negative_likelihood_ratio as defined by the user."""
    # This data causes tn=0 (0 true negatives) in the confusion_matrix and a division
    # by zero that affects the negative_likelihood_ratio:
    y_true = np.array([1, 0, 0])
    y_pred = np.array([1, 1, 1])
    _, negative_likelihood_ratio = class_likelihood_ratios(
        y_true, y_pred, replace_undefined_by=replace_undefined_by
    )
    # nan cannot be compared with pytest.approx, so branch on the expectation.
    if np.isnan(expected):
        assert np.isnan(negative_likelihood_ratio)
    else:
        assert negative_likelihood_ratio == pytest.approx(expected)
def test_cohen_kappa():
    """Check cohen_kappa_score against published reference values, its symmetry,
    the `labels` filter, and the linear/quadratic weighting schemes."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, 0.348, decimal=3)
    # Kappa is symmetric in its two raters.
    assert kappa == cohen_kappa_score(y2, y1)
    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert cohen_kappa_score(y1, y2, labels=[0, 1]) == kappa
    # Perfect agreement with itself gives kappa == 1.
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.0)
    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), 0.8013, decimal=4)
    # Weighting example: none, linear, quadratic.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
    assert_almost_equal(cohen_kappa_score(y1, y2), 0.9315, decimal=4)
    assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), 0.9412, decimal=4)
    assert_almost_equal(
        cohen_kappa_score(y1, y2, weights="quadratic"), 0.9541, decimal=4
    )
def test_cohen_kappa_score_error_wrong_label():
    """Labels that are absent from `y1` must raise a ValueError."""
    y1 = np.array(["a"] * 5 + ["b"] * 5)
    y2 = np.array(["b"] * 10)
    expected_msg = "At least one label in `labels` must be present in `y1`"
    with pytest.raises(ValueError, match=expected_msg):
        cohen_kappa_score(y1, y2, labels=[1, 2])
@pytest.mark.parametrize("zero_division", [0, 1, np.nan])
@pytest.mark.parametrize("y_true, y_pred", [([0], [0])])
@pytest.mark.parametrize(
"metric",
[
f1_score,
partial(fbeta_score, beta=1),
precision_score,
recall_score,
],
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_common.py | sklearn/metrics/tests/test_common.py | import math
import re
from functools import partial
from inspect import signature
from itertools import chain, permutations, product
import numpy as np
import pytest
from sklearn._config import config_context
from sklearn.datasets import make_multilabel_classification
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import (
accuracy_score,
average_precision_score,
balanced_accuracy_score,
brier_score_loss,
classification_report,
cohen_kappa_score,
confusion_matrix,
confusion_matrix_at_thresholds,
coverage_error,
d2_absolute_error_score,
d2_brier_score,
d2_log_loss_score,
d2_pinball_score,
d2_tweedie_score,
dcg_score,
det_curve,
explained_variance_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_score,
label_ranking_average_precision_score,
label_ranking_loss,
log_loss,
matthews_corrcoef,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_gamma_deviance,
mean_pinball_loss,
mean_poisson_deviance,
mean_squared_error,
mean_squared_log_error,
mean_tweedie_deviance,
median_absolute_error,
multilabel_confusion_matrix,
ndcg_score,
precision_recall_curve,
precision_score,
r2_score,
recall_score,
roc_auc_score,
roc_curve,
root_mean_squared_error,
root_mean_squared_log_error,
top_k_accuracy_score,
zero_one_loss,
)
from sklearn.metrics._base import _average_binary_score
from sklearn.metrics.pairwise import (
additive_chi2_kernel,
chi2_kernel,
cosine_distances,
cosine_similarity,
euclidean_distances,
laplacian_kernel,
linear_kernel,
manhattan_distances,
paired_cosine_distances,
paired_euclidean_distances,
pairwise_distances,
pairwise_kernels,
polynomial_kernel,
rbf_kernel,
sigmoid_kernel,
)
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle
from sklearn.utils._array_api import (
_atol_for_type,
_convert_to_numpy,
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_array_less,
ignore_warnings,
)
from sklearn.utils.fixes import COO_CONTAINERS, parse_version, sp_version
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples, check_random_state
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - CONTINUOUS_CLASSIFICATION_METRICS: all classification metrics which
# compare a ground truth and a continuous score, e.g. estimated
# probabilities or decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
# All regression metrics under the canonical name used throughout these tests;
# `partial` pins non-default parameters (tweedie power, pinball alpha, ...).
REGRESSION_METRICS = {
    "max_error": max_error,
    "mean_absolute_error": mean_absolute_error,
    "mean_squared_error": mean_squared_error,
    "mean_squared_log_error": mean_squared_log_error,
    "mean_pinball_loss": mean_pinball_loss,
    "median_absolute_error": median_absolute_error,
    "mean_absolute_percentage_error": mean_absolute_percentage_error,
    "explained_variance_score": explained_variance_score,
    "r2_score": partial(r2_score, multioutput="variance_weighted"),
    "root_mean_squared_error": root_mean_squared_error,
    "root_mean_squared_log_error": root_mean_squared_log_error,
    "mean_normal_deviance": partial(mean_tweedie_deviance, power=0),
    "mean_poisson_deviance": mean_poisson_deviance,
    "mean_gamma_deviance": mean_gamma_deviance,
    "mean_compound_poisson_deviance": partial(mean_tweedie_deviance, power=1.4),
    "d2_tweedie_score": partial(d2_tweedie_score, power=1.4),
    "d2_pinball_score": d2_pinball_score,
    # The default `alpha=0.5` (median) masks differences between quantile methods,
    # so we also test `alpha=0.1` and `alpha=0.9` to ensure correctness
    # for non-median quantiles.
    "d2_pinball_score_01": partial(d2_pinball_score, alpha=0.1),
    "d2_pinball_score_09": partial(d2_pinball_score, alpha=0.9),
    "d2_absolute_error_score": d2_absolute_error_score,
}
# Classification metrics that compare hard label predictions; dictionary keys
# encode the averaging / normalization variant being exercised.
CLASSIFICATION_METRICS = {
    "accuracy_score": accuracy_score,
    "balanced_accuracy_score": balanced_accuracy_score,
    "adjusted_balanced_accuracy_score": partial(balanced_accuracy_score, adjusted=True),
    "unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    "confusion_matrix": confusion_matrix,
    # Row-normalized confusion matrix (each row divided by its sum).
    "normalized_confusion_matrix": lambda *args, **kwargs: (
        confusion_matrix(*args, **kwargs).astype("float")
        / confusion_matrix(*args, **kwargs).sum(axis=1)[:, np.newaxis]
    ),
    "multilabel_confusion_matrix": multilabel_confusion_matrix,
    "multilabel_confusion_matrix_sample": partial(
        multilabel_confusion_matrix, samplewise=True
    ),
    "hamming_loss": hamming_loss,
    "zero_one_loss": zero_one_loss,
    "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
    # These are needed to test averaging
    "jaccard_score": jaccard_score,
    "precision_score": precision_score,
    "recall_score": recall_score,
    "f1_score": f1_score,
    "f2_score": partial(fbeta_score, beta=2),
    "f0.5_score": partial(fbeta_score, beta=0.5),
    "matthews_corrcoef_score": matthews_corrcoef,
    "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
    "weighted_f1_score": partial(f1_score, average="weighted"),
    "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
    "weighted_precision_score": partial(precision_score, average="weighted"),
    "weighted_recall_score": partial(recall_score, average="weighted"),
    "weighted_jaccard_score": partial(jaccard_score, average="weighted"),
    "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
    "micro_f1_score": partial(f1_score, average="micro"),
    "micro_f2_score": partial(fbeta_score, average="micro", beta=2),
    "micro_precision_score": partial(precision_score, average="micro"),
    "micro_recall_score": partial(recall_score, average="micro"),
    "micro_jaccard_score": partial(jaccard_score, average="micro"),
    "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
    "macro_f1_score": partial(f1_score, average="macro"),
    "macro_f2_score": partial(fbeta_score, average="macro", beta=2),
    "macro_precision_score": partial(precision_score, average="macro"),
    "macro_recall_score": partial(recall_score, average="macro"),
    "macro_jaccard_score": partial(jaccard_score, average="macro"),
    "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
    "samples_f1_score": partial(f1_score, average="samples"),
    "samples_f2_score": partial(fbeta_score, average="samples", beta=2),
    "samples_precision_score": partial(precision_score, average="samples"),
    "samples_recall_score": partial(recall_score, average="samples"),
    "samples_jaccard_score": partial(jaccard_score, average="samples"),
    "cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
    """Call `precision_recall_curve` and NaN-pad the thresholds array.

    `precision_recall_curve` returns one fewer threshold than precision/recall
    entries, so stacking the three arrays directly would produce a ragged
    dtype('object') array and break `assert_array_equal`.  Padding the
    thresholds with NaN up to the common length yields a regular float array.
    """
    precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
    n_missing = len(precision) - len(thresholds)
    padded_thresholds = np.pad(
        thresholds.astype(np.float64),
        pad_width=(0, n_missing),
        mode="constant",
        constant_values=[np.nan],
    )
    return np.array([precision, recall, padded_thresholds])
# Curve metrics return tuples/arrays of values rather than a single scalar.
CURVE_METRICS = {
    "confusion_matrix_at_thresholds": confusion_matrix_at_thresholds,
    "roc_curve": roc_curve,
    "precision_recall_curve": precision_recall_curve_padded_thresholds,
    "det_curve": det_curve,
}
# Metrics that consume continuous scores (probabilities or decision values)
# instead of hard label predictions.
CONTINUOUS_CLASSIFICATION_METRICS = {
    "coverage_error": coverage_error,
    "label_ranking_loss": label_ranking_loss,
    "log_loss": log_loss,
    "unnormalized_log_loss": partial(log_loss, normalize=False),
    "hinge_loss": hinge_loss,
    "brier_score_loss": brier_score_loss,
    "roc_auc_score": roc_auc_score,  # default: average="macro"
    "weighted_roc_auc": partial(roc_auc_score, average="weighted"),
    "samples_roc_auc": partial(roc_auc_score, average="samples"),
    "micro_roc_auc": partial(roc_auc_score, average="micro"),
    "ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovr"),
    "weighted_ovr_roc_auc": partial(
        roc_auc_score, average="weighted", multi_class="ovr"
    ),
    "ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class="ovo"),
    "weighted_ovo_roc_auc": partial(
        roc_auc_score, average="weighted", multi_class="ovo"
    ),
    "partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
    "average_precision_score": average_precision_score,  # default: average="macro"
    "weighted_average_precision_score": partial(
        average_precision_score, average="weighted"
    ),
    "samples_average_precision_score": partial(
        average_precision_score, average="samples"
    ),
    "micro_average_precision_score": partial(average_precision_score, average="micro"),
    "label_ranking_average_precision_score": label_ranking_average_precision_score,
    "ndcg_score": ndcg_score,
    "dcg_score": dcg_score,
    "top_k_accuracy_score": top_k_accuracy_score,
    "d2_brier_score": d2_brier_score,
    "d2_log_loss_score": d2_log_loss_score,
}
# Union of every metric dictionary above; used by the generic invariance tests.
ALL_METRICS = dict()
ALL_METRICS.update(CONTINUOUS_CLASSIFICATION_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
ALL_METRICS.update(CURVE_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = {
    "samples_f0.5_score",
    "samples_f1_score",
    "samples_f2_score",
    "samples_precision_score",
    "samples_recall_score",
    "samples_jaccard_score",
    "coverage_error",
    "multilabel_confusion_matrix_sample",
    "label_ranking_loss",
    "label_ranking_average_precision_score",
    "dcg_score",
    "ndcg_score",
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
    "micro_roc_auc",
    "samples_roc_auc",
    "partial_roc_auc",
    "roc_auc_score",
    "weighted_roc_auc",
    "jaccard_score",
    # with default average='binary', multiclass is prohibited
    "precision_score",
    "recall_score",
    "f1_score",
    "f2_score",
    "f0.5_score",
    # curves
    "confusion_matrix_at_thresholds",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
    METRIC_UNDEFINED_MULTICLASS
)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
    "precision_score",
    "recall_score",
    "f1_score",
    "f2_score",
    "f0.5_score",
    "jaccard_score",
}
# Threshold-based metrics with an "average" argument
# NOTE(review): "CONTINOUS" is a long-standing typo in this module-level name;
# kept as-is since other code may reference it.
CONTINOUS_CLASSIFICATION_METRICS_WITH_AVERAGING = {
    "roc_auc_score",
    "average_precision_score",
    "partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
    "confusion_matrix_at_thresholds",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "brier_score_loss",
    "d2_brier_score",
    "precision_score",
    "recall_score",
    "f1_score",
    "f2_score",
    "f0.5_score",
    "jaccard_score",
    "average_precision_score",
    "weighted_average_precision_score",
    "micro_average_precision_score",
    "samples_average_precision_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = {
    "confusion_matrix",
    "normalized_confusion_matrix",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "precision_score",
    "recall_score",
    "f1_score",
    "f2_score",
    "f0.5_score",
    "jaccard_score",
    "weighted_f0.5_score",
    "weighted_f1_score",
    "weighted_f2_score",
    "weighted_precision_score",
    "weighted_recall_score",
    "weighted_jaccard_score",
    "micro_f0.5_score",
    "micro_f1_score",
    "micro_f2_score",
    "micro_precision_score",
    "micro_recall_score",
    "micro_jaccard_score",
    "macro_f0.5_score",
    "macro_f1_score",
    "macro_f2_score",
    "macro_precision_score",
    "macro_recall_score",
    "macro_jaccard_score",
    "multilabel_confusion_matrix",
    "multilabel_confusion_matrix_sample",
    "cohen_kappa_score",
    "log_loss",
    "d2_log_loss_score",
    "brier_score_loss",
    "d2_brier_score",
}
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = {
    "accuracy_score",
    "top_k_accuracy_score",
    "zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
CONTINUOUS_MULTILABEL_METRICS = {
    "log_loss",
    "unnormalized_log_loss",
    "brier_score_loss",
    "roc_auc_score",
    "weighted_roc_auc",
    "samples_roc_auc",
    "micro_roc_auc",
    "partial_roc_auc",
    "average_precision_score",
    "weighted_average_precision_score",
    "samples_average_precision_score",
    "micro_average_precision_score",
    "coverage_error",
    "label_ranking_loss",
    "ndcg_score",
    "dcg_score",
    "label_ranking_average_precision_score",
    "d2_log_loss_score",
    "d2_brier_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
    "accuracy_score",
    "unnormalized_accuracy_score",
    "hamming_loss",
    "zero_one_loss",
    "unnormalized_zero_one_loss",
    "weighted_f0.5_score",
    "weighted_f1_score",
    "weighted_f2_score",
    "weighted_precision_score",
    "weighted_recall_score",
    "weighted_jaccard_score",
    "macro_f0.5_score",
    "macro_f1_score",
    "macro_f2_score",
    "macro_precision_score",
    "macro_recall_score",
    "macro_jaccard_score",
    "micro_f0.5_score",
    "micro_f1_score",
    "micro_f2_score",
    "micro_precision_score",
    "micro_recall_score",
    "micro_jaccard_score",
    "multilabel_confusion_matrix",
    "samples_f0.5_score",
    "samples_f1_score",
    "samples_f2_score",
    "samples_precision_score",
    "samples_recall_score",
    "samples_jaccard_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
    "mean_absolute_error",
    "median_absolute_error",
    "mean_squared_error",
    "mean_squared_log_error",
    "r2_score",
    "root_mean_squared_error",
    "root_mean_squared_log_error",
    "explained_variance_score",
    "mean_absolute_percentage_error",
    "mean_pinball_loss",
    "d2_pinball_score",
    "d2_pinball_score_01",
    "d2_pinball_score_09",
    "d2_absolute_error_score",
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
# NOTE: "micro_f1_score" used to be listed twice (harmless in a set literal);
# the duplicate entry was removed.
SYMMETRIC_METRICS = {
    "accuracy_score",
    "unnormalized_accuracy_score",
    "hamming_loss",
    "zero_one_loss",
    "unnormalized_zero_one_loss",
    "micro_jaccard_score",
    "macro_jaccard_score",
    "jaccard_score",
    "samples_jaccard_score",
    "f1_score",
    "macro_f1_score",
    "weighted_recall_score",
    "mean_squared_log_error",
    "root_mean_squared_error",
    "root_mean_squared_log_error",
    # P = R = F = accuracy in multiclass case
    "micro_f0.5_score",
    "micro_f1_score",
    "micro_f2_score",
    "micro_precision_score",
    "micro_recall_score",
    "matthews_corrcoef_score",
    "mean_absolute_error",
    "mean_squared_error",
    "median_absolute_error",
    "max_error",
    # Pinball loss is only symmetric for alpha=0.5 which is the default.
    "mean_pinball_loss",
    "cohen_kappa_score",
    "mean_normal_deviance",
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
    "balanced_accuracy_score",
    "adjusted_balanced_accuracy_score",
    "explained_variance_score",
    "r2_score",
    "confusion_matrix",
    "normalized_confusion_matrix",
    "confusion_matrix_at_thresholds",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "precision_score",
    "recall_score",
    "f2_score",
    "f0.5_score",
    "weighted_f0.5_score",
    "weighted_f1_score",
    "weighted_f2_score",
    "weighted_precision_score",
    "weighted_jaccard_score",
    "multilabel_confusion_matrix",
    "macro_f0.5_score",
    "macro_f2_score",
    "macro_precision_score",
    "macro_recall_score",
    "hinge_loss",
    "mean_gamma_deviance",
    "mean_poisson_deviance",
    "mean_compound_poisson_deviance",
    "d2_tweedie_score",
    "d2_pinball_score",
    "d2_pinball_score_01",
    "d2_pinball_score_09",
    "d2_absolute_error_score",
    "mean_absolute_percentage_error",
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
    "max_error",
    "ovo_roc_auc",
    "weighted_ovo_roc_auc",
}
# Metrics whose value scales with the magnitude of sample_weight (absolute
# counts rather than normalized scores).
WEIGHT_SCALE_DEPENDENT_METRICS = {
    # 'confusion_matrix' metrics returns absolute `tps`, `fps` etc values, which
    # are scaled by weights, so will vary e.g., scaling by 3 will result in 3 * `tps`
    "confusion_matrix",
    "confusion_matrix_at_thresholds",
    "multilabel_confusion_matrix",
    "multilabel_confusion_matrix_sample",
    # Metrics where we set `normalize=False`
    "unnormalized_accuracy_score",
    "unnormalized_zero_one_loss",
    "unnormalized_log_loss",
}
# Metrics defined only for strictly positive targets (deviance-based).
METRICS_REQUIRE_POSITIVE_Y = {
    "mean_poisson_deviance",
    "mean_gamma_deviance",
    "mean_compound_poisson_deviance",
    "d2_tweedie_score",
}
# Metrics involving y = log(1+x)
METRICS_WITH_LOG1P_Y = {
    "mean_squared_log_error",
    "root_mean_squared_log_error",
}
def _require_positive_targets(y1, y2):
"""Make targets strictly positive"""
offset = abs(min(y1.min(), y2.min())) + 1
y1 += offset
y2 += offset
return y1, y2
def _require_log1p_targets(y1, y2):
"""Make targets strictly larger than -1"""
offset = abs(min(y1.min(), y2.min())) - 0.99
y1 = y1.astype(np.float64)
y2 = y2.astype(np.float64)
y1 += offset
y2 += offset
return y1, y2
def test_symmetry_consistency():
    """Every metric must be classified exactly once: symmetric, not symmetric,
    continuous-score-based, or undefined for binary/multiclass input."""
    covered = (
        SYMMETRIC_METRICS
        | NOT_SYMMETRIC_METRICS
        | set(CONTINUOUS_CLASSIFICATION_METRICS)
        | METRIC_UNDEFINED_BINARY_MULTICLASS
    )
    assert covered == set(ALL_METRICS)
    # A metric cannot be both symmetric and not symmetric.
    assert (SYMMETRIC_METRICS & NOT_SYMMETRIC_METRICS) == set()
@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS))
def test_symmetric_metric(name):
    """Check metric(y_true, y_pred) == metric(y_pred, y_true) for every metric
    declared symmetric, on both binary and multilabel random data."""
    # Test the symmetry of score and loss functions
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(20,))
    y_pred = random_state.randint(0, 2, size=(20,))
    # Some metrics need targets shifted into their valid domain first.
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y_true, y_pred = _require_positive_targets(y_true, y_pred)
    elif name in METRICS_WITH_LOG1P_Y:
        y_true, y_pred = _require_log1p_targets(y_true, y_pred)
    y_true_bin = random_state.randint(0, 2, size=(20, 25))
    y_pred_bin = random_state.randint(0, 2, size=(20, 25))
    metric = ALL_METRICS[name]
    if name in METRIC_UNDEFINED_BINARY:
        # Metrics that reject binary input are checked on multilabel data only.
        if name in MULTILABELS_METRICS:
            assert_allclose(
                metric(y_true_bin, y_pred_bin),
                metric(y_pred_bin, y_true_bin),
                err_msg="%s is not symmetric" % name,
            )
        else:
            # Guard: a symmetric metric that supports neither binary nor
            # multilabel input has no test path here.
            assert False, "This case is currently unhandled"
    else:
        assert_allclose(
            metric(y_true, y_pred),
            metric(y_pred, y_true),
            err_msg="%s is not symmetric" % name,
        )
@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS))
def test_not_symmetric_metric(name):
    """Check that swapping y_true / y_pred changes the result for at least one
    of several random draws."""
    random_state = check_random_state(0)
    metric = ALL_METRICS[name]
    # A single random draw can be accidentally symmetric, so try several
    # draws and require at least one asymmetric outcome.
    for _ in range(5):
        y_true = random_state.randint(0, 2, size=(20,))
        y_pred = random_state.randint(0, 2, size=(20,))
        if name in METRICS_REQUIRE_POSITIVE_Y:
            y_true, y_pred = _require_positive_targets(y_true, y_pred)
        if not np.allclose(metric(y_true, y_pred), metric(y_pred, y_true)):
            break
    else:
        # Every draw came out symmetric: the metric is misclassified.
        raise ValueError(f"{name} seems to be symmetric")
def test_symmetry_tests():
    """Meta-test: the two symmetry checks above accept and reject the right
    kinds of metrics."""
    symmetric_name = "accuracy_score"
    asymmetric_name = "recall_score"
    # test_symmetric_metric accepts a symmetric metric but fails on an
    # asymmetric one.
    test_symmetric_metric(symmetric_name)
    with pytest.raises(AssertionError, match=f"{asymmetric_name} is not symmetric"):
        test_symmetric_metric(asymmetric_name)
    # test_not_symmetric_metric accepts an asymmetric metric but fails on a
    # symmetric one.
    test_not_symmetric_metric(asymmetric_name)
    with pytest.raises(ValueError, match=f"{symmetric_name} seems to be symmetric"):
        test_not_symmetric_metric(symmetric_name)
@pytest.mark.parametrize(
    "name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_sample_order_invariance(name):
    """Shuffling (y_true, y_pred) in lockstep must not change the metric."""
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(20,))
    y_pred = random_state.randint(0, 2, size=(20,))
    # Some metrics need targets shifted into their valid domain first.
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y_true, y_pred = _require_positive_targets(y_true, y_pred)
    elif name in METRICS_WITH_LOG1P_Y:
        y_true, y_pred = _require_log1p_targets(y_true, y_pred)
    # Shuffle both arrays with the same permutation.
    y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
    with ignore_warnings():
        metric = ALL_METRICS[name]
        assert_allclose(
            metric(y_true, y_pred),
            metric(y_true_shuffle, y_pred_shuffle),
            err_msg="%s is not sample order invariant" % name,
        )
def test_sample_order_invariance_multilabel_and_multioutput():
    """Shuffling samples in lockstep must not change any multilabel,
    continuous-multilabel, or multioutput metric."""
    random_state = check_random_state(0)
    # Generate some data
    y_true = random_state.randint(0, 2, size=(20, 25))
    y_pred = random_state.randint(0, 2, size=(20, 25))
    y_score = random_state.uniform(size=y_true.shape)
    # Some metrics (e.g. log_loss) require y_score to be probabilities (sum to 1)
    y_score /= y_score.sum(axis=1, keepdims=True)
    # Shuffle all three arrays with the same permutation.
    y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(
        y_true, y_pred, y_score, random_state=0
    )
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        assert_allclose(
            metric(y_true, y_pred),
            metric(y_true_shuffle, y_pred_shuffle),
            err_msg="%s is not sample order invariant" % name,
        )
    for name in CONTINUOUS_MULTILABEL_METRICS:
        metric = ALL_METRICS[name]
        assert_allclose(
            metric(y_true, y_score),
            metric(y_true_shuffle, y_score_shuffle),
            err_msg="%s is not sample order invariant" % name,
        )
    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        # Multioutput regression metrics are checked both on continuous
        # scores and on the integer indicator matrix.
        assert_allclose(
            metric(y_true, y_score),
            metric(y_true_shuffle, y_score_shuffle),
            err_msg="%s is not sample order invariant" % name,
        )
        assert_allclose(
            metric(y_true, y_pred),
            metric(y_true_shuffle, y_pred_shuffle),
            err_msg="%s is not sample order invariant" % name,
        )
@pytest.mark.parametrize(
    "name", sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_format_invariance_with_1d_vectors(name):
    """Metrics must be invariant to list / 1d-array / column-vector representations,
    and must reject ambiguous mixes involving row vectors."""
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20,))
    y2 = random_state.randint(0, 2, size=(20,))

    # Some metrics constrain the admissible range of the targets.
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y1, y2 = _require_positive_targets(y1, y2)
    elif name in METRICS_WITH_LOG1P_Y:
        y1, y2 = _require_log1p_targets(y1, y2)

    y1_list = list(y1)
    y2_list = list(y2)

    y1_1d, y2_1d = np.array(y1), np.array(y2)
    assert_array_equal(y1_1d.ndim, 1)
    assert_array_equal(y2_1d.ndim, 1)
    y1_column = np.reshape(y1_1d, (-1, 1))
    y2_column = np.reshape(y2_1d, (-1, 1))
    y1_row = np.reshape(y1_1d, (1, -1))
    y2_row = np.reshape(y2_1d, (1, -1))

    with ignore_warnings():
        metric = ALL_METRICS[name]

        # Reference value computed on the raw targets.
        measure = metric(y1, y2)

        assert_allclose(
            metric(y1_list, y2_list),
            measure,
            err_msg="%s is not representation invariant with list" % name,
        )

        assert_allclose(
            metric(y1_1d, y2_1d),
            measure,
            err_msg="%s is not representation invariant with np-array-1d" % name,
        )

        assert_allclose(
            metric(y1_column, y2_column),
            measure,
            err_msg="%s is not representation invariant with np-array-column" % name,
        )

        # Mix format support
        assert_allclose(
            metric(y1_1d, y2_list),
            measure,
            err_msg="%s is not representation invariant with mix np-array-1d and list"
            % name,
        )

        assert_allclose(
            metric(y1_list, y2_1d),
            measure,
            err_msg="%s is not representation invariant with mix np-array-1d and list"
            % name,
        )

        assert_allclose(
            metric(y1_1d, y2_column),
            measure,
            err_msg=(
                "%s is not representation invariant with mix "
                "np-array-1d and np-array-column"
            )
            % name,
        )

        assert_allclose(
            metric(y1_column, y2_1d),
            measure,
            err_msg=(
                "%s is not representation invariant with mix "
                "np-array-1d and np-array-column"
            )
            % name,
        )

        assert_allclose(
            metric(y1_list, y2_column),
            measure,
            err_msg=(
                "%s is not representation invariant with mix list and np-array-column"
            )
            % name,
        )

        assert_allclose(
            metric(y1_column, y2_list),
            measure,
            err_msg=(
                "%s is not representation invariant with mix list and np-array-column"
            )
            % name,
        )

        # These mix representations aren't allowed
        with pytest.raises(ValueError):
            metric(y1_1d, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_1d)
        with pytest.raises(ValueError):
            metric(y1_list, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_list)
        with pytest.raises(ValueError):
            metric(y1_column, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_column)

        # NB: We do not test for y1_row, y2_row as these may be
        # interpreted as multilabel or multioutput data.
        if name not in (
            MULTIOUTPUT_METRICS | CONTINUOUS_MULTILABEL_METRICS | MULTILABELS_METRICS
        ):
            if "roc_auc" in name:
                # for consistency between `roc_curve` and `roc_auc_score`
                # np.nan is returned and an `UndefinedMetricWarning` is raised
                with pytest.warns(UndefinedMetricWarning):
                    assert math.isnan(metric(y1_row, y2_row))
            else:
                with pytest.raises(ValueError):
                    metric(y1_row, y2_row)
# All classification metrics plus classification_report, which shares the same
# input-validation requirements but is not part of CLASSIFICATION_METRICS.
CLASSIFICATION_METRICS_REPORT = {
    **CLASSIFICATION_METRICS,
    "classification_report": classification_report,
}
@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS_REPORT.values())
def test_classification_metrics_raise_on_empty_input(metric):
    """Classification metrics must reject empty target arrays with a clear error."""
    expected_msg = (
        "Found empty input array (e.g., `y_true` or `y_pred`) while a minimum of 1"
    )
    empty = np.array([])
    with pytest.raises(ValueError, match=re.escape(expected_msg)):
        metric(empty, empty)
@pytest.mark.parametrize("metric", CLASSIFICATION_METRICS.values())
def test_classification_with_invalid_sample_weight(metric):
    # Check invalid `sample_weight` raises correct error
    random_state = check_random_state(0)
    n_samples = 20
    y1 = random_state.randint(0, 2, size=(n_samples,))
    y2 = random_state.randint(0, 2, size=(n_samples,))

    # Length mismatch with the targets.
    sample_weight = random_state.random_sample(size=(n_samples - 1,))
    with pytest.raises(ValueError, match="Found input variables with inconsistent"):
        metric(y1, y2, sample_weight=sample_weight)

    # Non-finite weights (inf, then NaN) must be rejected.
    sample_weight = random_state.random_sample(size=(n_samples,))
    sample_weight[0] = np.inf
    with pytest.raises(ValueError, match="Input sample_weight contains infinity"):
        metric(y1, y2, sample_weight=sample_weight)

    sample_weight[0] = np.nan
    with pytest.raises(ValueError, match="Input sample_weight contains NaN"):
        metric(y1, y2, sample_weight=sample_weight)

    # Complex-valued weights are unsupported.
    sample_weight = np.array([1 + 2j, 3 + 4j, 5 + 7j])
    with pytest.raises(ValueError, match="Complex data not supported"):
        metric(y1[:3], y2[:3], sample_weight=sample_weight)

    # Weights must be 1D (or a scalar), not a 2D array.
    sample_weight = random_state.random_sample(size=(n_samples * 2,)).reshape(
        (n_samples, 2)
    )
    with pytest.raises(ValueError, match="Sample weights must be 1D array or scalar"):
        metric(y1, y2, sample_weight=sample_weight)
@pytest.mark.parametrize(
    "name", sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS)
)
def test_classification_invariance_string_vs_numbers_labels(name):
    # Ensure that classification metrics with string labels are invariant
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20,))
    y2 = random_state.randint(0, 2, size=(20,))

    # Map 0 -> "eggs", 1 -> "spam" to build string-labelled copies.
    y1_str = np.array(["eggs", "spam"])[y1]
    y2_str = np.array(["eggs", "spam"])[y2]

    pos_label_str = "spam"
    labels_str = ["eggs", "spam"]

    with ignore_warnings():
        metric = CLASSIFICATION_METRICS[name]
        measure_with_number = metric(y1, y2)

        # Ugly, but handle case with a pos_label and label
        metric_str = metric
        if name in METRICS_WITH_POS_LABEL:
            metric_str = partial(metric_str, pos_label=pos_label_str)

        measure_with_str = metric_str(y1_str, y2_str)

        assert_array_equal(
            measure_with_number,
            measure_with_str,
            err_msg="{0} failed string vs number invariance test".format(name),
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_score_objects.py | sklearn/metrics/tests/test_score_objects.py | import numbers
import pickle
import re
from copy import deepcopy
from functools import partial
import joblib
import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn import config_context
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.cluster import KMeans
from sklearn.datasets import (
load_diabetes,
make_blobs,
make_classification,
make_multilabel_classification,
make_regression,
)
from sklearn.linear_model import LogisticRegression, Perceptron, Ridge
from sklearn.metrics import (
accuracy_score,
average_precision_score,
balanced_accuracy_score,
brier_score_loss,
check_scoring,
f1_score,
fbeta_score,
get_scorer,
get_scorer_names,
jaccard_score,
log_loss,
make_scorer,
matthews_corrcoef,
precision_score,
r2_score,
recall_score,
roc_auc_score,
top_k_accuracy_score,
)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics._scorer import (
_check_multimetric_scoring,
_CurveScorer,
_MultimetricScorer,
_PassthroughScorer,
_Scorer,
)
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tests.metadata_routing_common import (
assert_request_is_empty,
)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.metadata_routing import MetadataRouter, MethodMapping
# Scorer names whose underlying metric is a regression metric.
REGRESSION_SCORERS = [
    "d2_absolute_error_score",
    "explained_variance",
    "r2",
    "neg_mean_absolute_error",
    "neg_mean_squared_error",
    "neg_mean_absolute_percentage_error",
    "neg_mean_squared_log_error",
    "neg_median_absolute_error",
    "neg_root_mean_squared_error",
    "neg_root_mean_squared_log_error",
    "mean_absolute_error",
    "mean_absolute_percentage_error",
    "mean_squared_error",
    "median_absolute_error",
    "neg_max_error",
    "neg_mean_poisson_deviance",
    "neg_mean_gamma_deviance",
]

# Scorer names applicable to binary/multiclass classifiers.
CLF_SCORERS = [
    "accuracy",
    "balanced_accuracy",
    "d2_brier_score",
    "d2_log_loss_score",
    "top_k_accuracy",
    "f1",
    "f1_weighted",
    "f1_macro",
    "f1_micro",
    "roc_auc",
    "average_precision",
    "precision",
    "precision_weighted",
    "precision_macro",
    "precision_micro",
    "recall",
    "recall_weighted",
    "recall_macro",
    "recall_micro",
    "neg_log_loss",
    "neg_brier_score",
    "jaccard",
    "jaccard_weighted",
    "jaccard_macro",
    "jaccard_micro",
    "roc_auc_ovr",
    "roc_auc_ovo",
    "roc_auc_ovr_weighted",
    "roc_auc_ovo_weighted",
    "matthews_corrcoef",
    "positive_likelihood_ratio",
    "neg_negative_likelihood_ratio",
]

# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = [
    "adjusted_rand_score",
    "rand_score",
    "homogeneity_score",
    "completeness_score",
    "v_measure_score",
    "mutual_info_score",
    "adjusted_mutual_info_score",
    "normalized_mutual_info_score",
    "fowlkes_mallows_score",
]

# Scorers that only make sense with multilabel-indicator targets.
MULTILABEL_ONLY_SCORERS = [
    "precision_samples",
    "recall_samples",
    "f1_samples",
    "jaccard_samples",
]

# Scorers whose metric requires strictly positive target values.
REQUIRE_POSITIVE_Y_SCORERS = ["neg_mean_poisson_deviance", "neg_mean_gamma_deviance"]
def _require_positive_y(y):
"""Make targets strictly positive"""
offset = abs(y.min()) + 1
y = y + offset
return y
def _make_estimators(X_train, y_train, y_ml_train):
    """Fit one sensible estimator per scorer name and return a name -> estimator map."""
    regressor = DecisionTreeRegressor(random_state=0)
    # Some of the regression scorers require strictly positive targets.
    regressor.fit(X_train, _require_positive_y(y_train))

    classifier = DecisionTreeClassifier(random_state=0)
    classifier.fit(X_train, y_train)

    multilabel_classifier = DecisionTreeClassifier(random_state=0)
    multilabel_classifier.fit(X_train, y_ml_train)

    mapping = {name: regressor for name in REGRESSION_SCORERS}
    mapping.update({name: classifier for name in CLF_SCORERS})
    mapping.update({name: classifier for name in CLUSTER_SCORERS})
    mapping.update({name: multilabel_classifier for name in MULTILABEL_ONLY_SCORERS})
    return mapping
@pytest.fixture(scope="module")
def memmap_data_and_estimators(tmp_path_factory):
    """Module-scoped fixture yielding memmap-backed data and fitted estimators.

    The data is dumped with joblib and reloaded with ``mmap_mode="r"`` so that
    tests receive read-only memory-mapped arrays (see test_scorer_memmap_input).
    """
    temp_folder = tmp_path_factory.mktemp("sklearn_test_score_objects")
    X, y = make_classification(n_samples=30, n_features=5, random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0], random_state=0)
    filename = temp_folder / "test_data.pkl"
    joblib.dump((X, y, y_ml), filename)
    X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode="r")
    estimators = _make_estimators(X_mm, y_mm, y_ml_mm)
    yield X_mm, y_mm, y_ml_mm, estimators
class EstimatorWithFit(BaseEstimator):
    """Dummy estimator exposing only ``fit``; used to test scoring validators."""

    def fit(self, X, y):
        # No-op fit; returns self per the scikit-learn estimator contract.
        return self
class EstimatorWithFitAndScore(BaseEstimator):
    """Dummy estimator exposing ``fit`` and a constant ``score``."""

    def fit(self, X, y):
        return self

    def score(self, X, y):
        # Constant score lets tests verify the passthrough-scoring path.
        return 1.0
class EstimatorWithFitAndPredict(BaseEstimator):
    """Dummy estimator that memorizes ``y`` in fit and echoes it from predict."""

    def fit(self, X, y):
        self.y = y
        return self

    def predict(self, X):
        # Returns the stored training targets regardless of X.
        return self.y
class DummyScorer:
    """Callable scorer that ignores its inputs and always returns 1."""

    def __call__(self, estimator, X, y):
        # Arguments are intentionally unused.
        return 1
def test_all_scorers_repr():
    """Smoke test: every registered scorer must expose a working repr."""
    for scorer_name in get_scorer_names():
        repr(get_scorer(scorer_name))
def test_repr_partial():
    """The repr of a scorer wrapping functools.partial must display the partial."""
    scorer = make_scorer(partial(precision_score, pos_label=1))
    expected = (
        "functools\\.partial\\(<function\\ precision_score\\ at\\ .*>,\\ pos_label=1\\)"
    )
    assert re.search(expected, repr(scorer))
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
    """Exercise all single-metric branches of a scoring validator.

    Covers: estimator with its own ``score`` method (passthrough scorer),
    estimator without ``score`` (TypeError), explicit string scoring, and the
    ``allow_none`` escape hatch specific to ``check_scoring``.
    """
    # Test all branches of single metric usecases
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = scoring_validator(estimator)
    assert isinstance(scorer, _PassthroughScorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (
        r"If no scoring is specified, the estimator passed should have"
        r" a 'score' method\. The estimator .* does not\."
    )
    with pytest.raises(TypeError, match=pattern):
        scoring_validator(estimator)

    scorer = scoring_validator(estimator, scoring="accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    estimator = EstimatorWithFit()
    scorer = scoring_validator(estimator, scoring="accuracy")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"

    # Test the allow_none parameter for check_scoring alone
    if scoring_validator is check_scoring:
        estimator = EstimatorWithFit()
        scorer = scoring_validator(estimator, allow_none=True)
        assert scorer is None
@pytest.mark.parametrize(
    "scoring",
    (
        ("accuracy",),
        ["precision"],
        {"acc": "accuracy", "precision": "precision"},
        ("accuracy", "precision"),
        ["precision", "accuracy"],
        {
            "accuracy": make_scorer(accuracy_score),
            "precision": make_scorer(precision_score),
        },
    ),
    ids=[
        "single_tuple",
        "single_list",
        "dict_str",
        "multi_tuple",
        "multi_list",
        "dict_callable",
    ],
)
def test_check_scoring_and_check_multimetric_scoring(scoring):
    """_check_multimetric_scoring must produce a dict of valid predict-based scorers."""
    check_scoring_validator_for_single_metric_usecases(check_scoring)
    # To make sure the check_scoring is correctly applied to the constituent
    # scorers

    estimator = LinearSVC(random_state=0)
    estimator.fit([[1], [2], [3]], [1, 1, 0])

    scorers = _check_multimetric_scoring(estimator, scoring)
    assert isinstance(scorers, dict)
    assert sorted(scorers.keys()) == sorted(list(scoring))
    assert all([isinstance(scorer, _Scorer) for scorer in list(scorers.values())])
    assert all(scorer._response_method == "predict" for scorer in scorers.values())

    # Spot-check scorer outputs against known accuracy/precision values.
    if "acc" in scoring:
        assert_almost_equal(
            scorers["acc"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0
        )
    if "accuracy" in scoring:
        assert_almost_equal(
            scorers["accuracy"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0
        )
    if "precision" in scoring:
        assert_almost_equal(
            scorers["precision"](estimator, [[1], [2], [3]], [1, 0, 0]), 0.5
        )
@pytest.mark.parametrize(
    "scoring, msg",
    [
        (
            (make_scorer(precision_score), make_scorer(accuracy_score)),
            "One or more of the elements were callables",
        ),
        ([5], "Non-string types were found"),
        ((make_scorer(precision_score),), "One or more of the elements were callables"),
        ((), "Empty list was given"),
        (("f1", "f1"), "Duplicate elements were found"),
        ({4: "accuracy"}, "Non-string types were found in the keys"),
        ({}, "An empty dict was passed"),
    ],
    ids=[
        "tuple of callables",
        "list of int",
        "tuple of one callable",
        "empty tuple",
        "non-unique str",
        "non-string key dict",
        "empty dict",
    ],
)
def test_check_scoring_and_check_multimetric_scoring_errors(scoring, msg):
    """Invalid multimetric `scoring` specifications must raise a descriptive error."""
    # Make sure it raises errors when scoring parameter is not valid.
    # More weird corner cases are tested at test_validation.py
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])

    with pytest.raises(ValueError, match=msg):
        _check_multimetric_scoring(estimator, scoring=scoring)
def test_check_scoring_gridsearchcv():
    """check_scoring must work with meta-estimators (GridSearchCV, Pipeline)."""
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.

    grid = GridSearchCV(LinearSVC(), param_grid={"C": [0.1, 1]}, cv=3)
    scorer = check_scoring(grid, scoring="f1")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"

    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, scoring="f1")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"

    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(
        EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1], scoring=DummyScorer(), cv=3
    )
    assert_array_equal(scores, 1)
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("f1", f1_score),
        ("f1_weighted", partial(f1_score, average="weighted")),
        ("f1_macro", partial(f1_score, average="macro")),
        ("f1_micro", partial(f1_score, average="micro")),
        ("precision", precision_score),
        ("precision_weighted", partial(precision_score, average="weighted")),
        ("precision_macro", partial(precision_score, average="macro")),
        ("precision_micro", partial(precision_score, average="micro")),
        ("recall", recall_score),
        ("recall_weighted", partial(recall_score, average="weighted")),
        ("recall_macro", partial(recall_score, average="macro")),
        ("recall_micro", partial(recall_score, average="micro")),
        ("jaccard", jaccard_score),
        ("jaccard_weighted", partial(jaccard_score, average="weighted")),
        ("jaccard_macro", partial(jaccard_score, average="macro")),
        ("jaccard_micro", partial(jaccard_score, average="micro")),
        ("top_k_accuracy", top_k_accuracy_score),
        ("matthews_corrcoef", matthews_corrcoef),
    ],
)
def test_classification_binary_scores(scorer_name, metric):
    """Named scorers must match calling the metric directly (binary case)."""
    # check consistency between score and scorer for scores supporting
    # binary classification.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    score = get_scorer(scorer_name)(clf, X_test, y_test)
    expected_score = metric(y_test, clf.predict(X_test))
    assert_almost_equal(score, expected_score)
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("accuracy", accuracy_score),
        ("balanced_accuracy", balanced_accuracy_score),
        ("f1_weighted", partial(f1_score, average="weighted")),
        ("f1_macro", partial(f1_score, average="macro")),
        ("f1_micro", partial(f1_score, average="micro")),
        ("precision_weighted", partial(precision_score, average="weighted")),
        ("precision_macro", partial(precision_score, average="macro")),
        ("precision_micro", partial(precision_score, average="micro")),
        ("recall_weighted", partial(recall_score, average="weighted")),
        ("recall_macro", partial(recall_score, average="macro")),
        ("recall_micro", partial(recall_score, average="micro")),
        ("jaccard_weighted", partial(jaccard_score, average="weighted")),
        ("jaccard_macro", partial(jaccard_score, average="macro")),
        ("jaccard_micro", partial(jaccard_score, average="micro")),
    ],
)
def test_classification_multiclass_scores(scorer_name, metric):
    """Named scorers must match calling the metric directly (multiclass case)."""
    # check consistency between score and scorer for scores supporting
    # multiclass classification.
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=30, random_state=0
    )

    # use `stratify` = y to ensure train and test sets capture all classes
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=0, stratify=y
    )

    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X_train, y_train)
    score = get_scorer(scorer_name)(clf, X_test, y_test)
    expected_score = metric(y_test, clf.predict(X_test))
    assert score == pytest.approx(expected_score)
def test_custom_scorer_pickling():
    """A make_scorer-built scorer must survive a pickle round-trip unchanged."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    model = LinearSVC(random_state=0).fit(X_train, y_train)

    scorer = make_scorer(fbeta_score, beta=2)
    score_before = scorer(model, X_test, y_test)
    restored_scorer = pickle.loads(pickle.dumps(scorer))
    score_after = restored_scorer(model, X_test, y_test)
    assert score_before == pytest.approx(score_after)

    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    """The named "r2" scorer must agree with calling r2_score directly."""
    dataset = load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, random_state=0
    )
    model = Ridge()
    model.fit(X_train, y_train)
    scorer_value = get_scorer("r2")(model, X_test, y_test)
    direct_value = r2_score(y_test, model.predict(X_test))
    assert_almost_equal(scorer_value, direct_value)
def test_thresholded_scorers():
    """Threshold-based scorers must use decision_function/predict_proba correctly
    and raise clear errors for unsupported estimators or degenerate targets."""
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    logscore = get_scorer("neg_log_loss")(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    err_msg = "DecisionTreeRegressor has none of the following attributes"
    with pytest.raises(AttributeError, match=err_msg):
        get_scorer("roc_auc")(reg, X_test, y_test)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    with pytest.raises(ValueError, match="multi_class must be in \\('ovo', 'ovr'\\)"):
        get_scorer("roc_auc")(clf, X_test, y_test)

    # test error is raised with a single class present in model
    # (predict_proba shape is not suitable for binary auc)
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = DecisionTreeClassifier()
    clf.fit(X_train, np.zeros_like(y_train))
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer("roc_auc")(clf, X_test, y_test)

    # for proba scorers
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer("neg_log_loss")(clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    """Threshold-based scorers must handle multilabel-indicator targets."""
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    # Per-output positive-class probabilities are stacked into one matrix.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
    """Each supervised clustering scorer must match its metric counterpart."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3, n_init="auto").fit(X_train)
    predicted_labels = km.predict(X_test)
    for scorer_name in CLUSTER_SCORERS:
        via_scorer = get_scorer(scorer_name)(km, X_test, y_test)
        via_metric = getattr(cluster_module, scorer_name)(y_test, predicted_labels)
        assert_almost_equal(via_scorer, via_metric)
def test_raises_on_score_list():
    """A scorer that returns an array of per-class scores must raise, not aggregate."""
    # Test that when a list of scores is returned, we raise proper errors.
    X, y = make_blobs(random_state=0)
    # average=None makes f1_score return one score per class.
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    with pytest.raises(ValueError):
        cross_val_score(clf, X, y, scoring=f1_scorer_no_average)

    grid_search = GridSearchCV(
        clf, scoring=f1_scorer_no_average, param_grid={"max_depth": [1, 2]}
    )
    with pytest.raises(ValueError):
        grid_search.fit(X, y)
def test_classification_scorer_sample_weight():
    """Classification scorers must honor sample_weight (or raise helpfully).

    Weighting a sample with 0 must give the same score as dropping it, and
    weighted vs unweighted scores must differ.
    """
    # Test that classification scorers support sample_weight or raise sensible
    # errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0], random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    # First ten samples get weight 0; they should be effectively ignored.
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    estimator = _make_estimators(X_train, y_train, y_ml_train)

    for name in get_scorer_names():
        scorer = get_scorer(name)
        if name in REGRESSION_SCORERS:
            # skip the regression scores
            continue
        if name == "top_k_accuracy":
            # in the binary case k > 1 will always lead to a perfect score
            scorer._kwargs = {"k": 1}
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(
                estimator[name], X_test, target, sample_weight=sample_weight
            )
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            # this should not raise. sample_weight should be ignored if None.
            _ = scorer(estimator[name], X_test[:10], target[:10], sample_weight=None)
            assert weighted != unweighted, (
                f"scorer {name} behaves identically when called with "
                f"sample weights: {weighted} vs {unweighted}"
            )
            assert_almost_equal(
                weighted,
                ignored,
                err_msg=(
                    f"scorer {name} behaves differently "
                    "when ignoring samples and setting "
                    f"sample_weight to 0: {weighted} vs {ignored}"
                ),
            )

        except TypeError as e:
            # Scorers without sample_weight support must at least say so.
            assert "sample_weight" in str(e), (
                f"scorer {name} raises unhelpful exception when called "
                f"with sample weights: {e}"
            )
def test_regression_scorer_sample_weight():
    """Regression scorers must honor sample_weight (or raise helpfully)."""
    # Test that regression scorers support sample_weight or raise sensible
    # errors

    # Odd number of test samples req for neg_median_absolute_error
    X, y = make_regression(n_samples=101, n_features=20, random_state=0)
    y = _require_positive_y(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    sample_weight = np.ones_like(y_test)
    # Odd number req for neg_median_absolute_error
    sample_weight[:11] = 0

    reg = DecisionTreeRegressor(random_state=0)
    reg.fit(X_train, y_train)

    for name in get_scorer_names():
        scorer = get_scorer(name)
        if name not in REGRESSION_SCORERS:
            # skip classification scorers
            continue
        try:
            # Zero-weighting samples must equal dropping them entirely.
            weighted = scorer(reg, X_test, y_test, sample_weight=sample_weight)
            ignored = scorer(reg, X_test[11:], y_test[11:])
            unweighted = scorer(reg, X_test, y_test)
            assert weighted != unweighted, (
                f"scorer {name} behaves identically when called with "
                f"sample weights: {weighted} vs {unweighted}"
            )
            assert_almost_equal(
                weighted,
                ignored,
                err_msg=(
                    f"scorer {name} behaves differently "
                    "when ignoring samples and setting "
                    f"sample_weight to 0: {weighted} vs {ignored}"
                ),
            )

        except TypeError as e:
            assert "sample_weight" in str(e), (
                f"scorer {name} raises unhelpful exception when called "
                f"with sample weights: {e}"
            )
@pytest.mark.parametrize("name", get_scorer_names())
def test_scorer_memmap_input(name, memmap_data_and_estimators):
    """Scorers must return plain scalars on memory-mapped input data."""
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    X_mm, y_mm, y_ml_mm, estimators = memmap_data_and_estimators

    if name in REQUIRE_POSITIVE_Y_SCORERS:
        y_mm_1 = _require_positive_y(y_mm)
        y_ml_mm_1 = _require_positive_y(y_ml_mm)
    else:
        y_mm_1, y_ml_mm_1 = y_mm, y_ml_mm

    # UndefinedMetricWarning for P / R scores
    with ignore_warnings():
        scorer, estimator = get_scorer(name), estimators[name]
        if name in MULTILABEL_ONLY_SCORERS:
            score = scorer(estimator, X_mm, y_ml_mm_1)
        else:
            score = scorer(estimator, X_mm, y_mm_1)
        assert isinstance(score, numbers.Number), name
def test_scoring_is_not_metric():
    """Passing a raw metric function as ``scoring`` must point users to make_scorer."""
    cases = [
        (LogisticRegression(), f1_score),
        (LogisticRegression(), roc_auc_score),
        (Ridge(), r2_score),
        (KMeans(), cluster_module.adjusted_rand_score),
        (KMeans(), cluster_module.rand_score),
    ]
    for estimator, metric in cases:
        with pytest.raises(ValueError, match="make_scorer"):
            check_scoring(estimator, scoring=metric)
@pytest.mark.parametrize(
    (
        "scorers,expected_predict_count,"
        "expected_predict_proba_count,expected_decision_func_count"
    ),
    [
        (
            {
                "a1": "accuracy",
                "a2": "accuracy",
                "ll1": "neg_log_loss",
                "ll2": "neg_log_loss",
                "ra1": "roc_auc",
                "ra2": "roc_auc",
            },
            1,
            1,
            1,
        ),
        (["roc_auc", "accuracy"], 1, 0, 1),
        (["neg_log_loss", "accuracy"], 1, 1, 0),
    ],
)
def test_multimetric_scorer_calls_method_once(
    scorers,
    expected_predict_count,
    expected_predict_proba_count,
    expected_decision_func_count,
):
    """_MultimetricScorer must cache responses: each prediction method is called
    at most once even when several scorers need the same response."""
    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    pos_proba = np.random.rand(X.shape[0])
    proba = np.c_[1 - pos_proba, pos_proba]

    class MyClassifier(ClassifierMixin, BaseEstimator):
        """Mock classifier that counts calls to each prediction method."""

        def __init__(self):
            self._expected_predict_count = 0
            self._expected_predict_proba_count = 0
            self._expected_decision_function_count = 0

        def fit(self, X, y):
            self.classes_ = np.unique(y)
            return self

        def predict(self, X):
            self._expected_predict_count += 1
            return y

        def predict_proba(self, X):
            self._expected_predict_proba_count += 1
            return proba

        def decision_function(self, X):
            self._expected_decision_function_count += 1
            return pos_proba

    mock_est = MyClassifier().fit(X, y)

    scorer_dict = _check_multimetric_scoring(LogisticRegression(), scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    results = multi_scorer(mock_est, X, y)

    assert set(scorers) == set(results)  # compare dict keys

    assert mock_est._expected_predict_count == expected_predict_count
    assert mock_est._expected_predict_proba_count == expected_predict_proba_count
    assert mock_est._expected_decision_function_count == expected_decision_func_count
@pytest.mark.parametrize(
    "scorers",
    [
        (["roc_auc", "neg_log_loss"]),
        (
            {
                "roc_auc": make_scorer(
                    roc_auc_score,
                    response_method=["predict_proba", "decision_function"],
                ),
                "neg_log_loss": make_scorer(log_loss, response_method="predict_proba"),
            }
        ),
    ],
)
def test_multimetric_scorer_calls_method_once_classifier_no_decision(scorers):
    """When the classifier lacks decision_function, predict_proba is still cached
    and called only once across all scorers."""
    predict_proba_call_cnt = 0

    class MockKNeighborsClassifier(KNeighborsClassifier):
        def predict_proba(self, X):
            nonlocal predict_proba_call_cnt
            predict_proba_call_cnt += 1
            return super().predict_proba(X)

    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    # no decision function
    clf = MockKNeighborsClassifier(n_neighbors=1)
    clf.fit(X, y)

    scorer_dict = _check_multimetric_scoring(clf, scorers)
    scorer = _MultimetricScorer(scorers=scorer_dict)
    scorer(clf, X, y)

    assert predict_proba_call_cnt == 1
def test_multimetric_scorer_calls_method_once_regressor_threshold():
    """For regressors, predict is cached and called only once across scorers."""
    predict_called_cnt = 0

    class MockDecisionTreeRegressor(DecisionTreeRegressor):
        def predict(self, X):
            nonlocal predict_called_cnt
            predict_called_cnt += 1
            return super().predict(X)

    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])

    # no decision function
    clf = MockDecisionTreeRegressor()
    clf.fit(X, y)

    scorers = {"neg_mse": "neg_mean_squared_error", "r2": "r2"}
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    scorer = _MultimetricScorer(scorers=scorer_dict)
    scorer(clf, X, y)

    assert predict_called_cnt == 1
def test_multimetric_scorer_sanity_check():
    """_MultimetricScorer results must equal calling each scorer independently."""
    # scoring dictionary returned is the same as calling each scorer separately
    scorers = {
        "a1": "accuracy",
        "a2": "accuracy",
        "ll1": "neg_log_loss",
        "ll2": "neg_log_loss",
        "ra1": "roc_auc",
        "ra2": "roc_auc",
    }

    X, y = make_classification(random_state=0)

    clf = DecisionTreeClassifier()
    clf.fit(X, y)

    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)

    result = multi_scorer(clf, X, y)

    separate_scores = {
        name: get_scorer(name)(clf, X, y)
        for name in ["accuracy", "neg_log_loss", "roc_auc"]
    }

    for key, value in result.items():
        score_name = scorers[key]
        assert_allclose(value, separate_scores[score_name])
@pytest.mark.parametrize("raise_exc", [True, False])
def test_multimetric_scorer_exception_handling(raise_exc):
    """Check that the calling of the `_MultimetricScorer` returns
    exception messages in the result dict for the failing scorers
    in case of `raise_exc` is `False` and if `raise_exc` is `True`,
    then the proper exception is raised.
    """
    scorers = {
        "failing_1": "neg_mean_squared_log_error",
        "non_failing": "neg_median_absolute_error",
        "failing_2": "neg_mean_squared_log_error",
    }

    X, y = make_classification(
        n_samples=50, n_features=2, n_redundant=0, random_state=0
    )
    # neg_mean_squared_log_error fails if y contains values less than or equal to -1
    y *= -1

    clf = DecisionTreeClassifier().fit(X, y)

    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict, raise_exc=raise_exc)

    error_msg = (
        "Mean Squared Logarithmic Error cannot be used when "
        "targets contain values less than or equal to -1."
    )

    if raise_exc:
        with pytest.raises(ValueError, match=error_msg):
            multi_scorer(clf, X, y)
    else:
        result = multi_scorer(clf, X, y)

        exception_message_1 = result["failing_1"]
        score = result["non_failing"]
        exception_message_2 = result["failing_2"]

        # Failing scorers yield their error message; the working one a float.
        assert isinstance(exception_message_1, str) and error_msg in exception_message_1
        assert isinstance(score, float)
        assert isinstance(exception_message_2, str) and error_msg in exception_message_2
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("roc_auc_ovr", partial(roc_auc_score, multi_class="ovr")),
        ("roc_auc_ovo", partial(roc_auc_score, multi_class="ovo")),
        (
            "roc_auc_ovr_weighted",
            partial(roc_auc_score, multi_class="ovr", average="weighted"),
        ),
        (
            "roc_auc_ovo_weighted",
            partial(roc_auc_score, multi_class="ovo", average="weighted"),
        ),
    ],
)
def test_multiclass_roc_proba_scorer(scorer_name, metric):
    """Named multiclass ROC AUC scorers agree with calling the metric directly."""
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    model = LogisticRegression().fit(X, y)
    expected = metric(y, model.predict_proba(X))
    assert get_scorer(scorer_name)(model, X, y) == pytest.approx(expected)
def test_multiclass_roc_proba_scorer_label():
scorer = make_scorer(
roc_auc_score,
multi_class="ovo",
labels=[0, 1, 2],
response_method="predict_proba",
)
X, y = make_classification(
n_classes=3, n_informative=3, n_samples=20, random_state=0
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_dist_metrics.py | sklearn/metrics/tests/test_dist_metrics.py | import copy
import itertools
import pickle
import numpy as np
import pytest
from scipy.spatial.distance import cdist
from sklearn.metrics import DistanceMetric
from sklearn.metrics._dist_metrics import (
BOOL_METRICS,
DEPRECATED_METRICS,
DistanceMetric32,
DistanceMetric64,
)
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_allclose,
create_memmap_backed_data,
ignore_warnings,
)
from sklearn.utils.fixes import CSR_CONTAINERS
def dist_func(x1, x2, p):
    """Reference Minkowski-style distance of order ``p`` between two vectors."""
    diff = x1 - x2
    return np.sum(diff**p) ** (1.0 / p)
# Shared module-level fixtures: random dense/boolean data in both precisions,
# plus memory-mapped copies, used by most tests in this module.
rng = check_random_state(0)
d = 4  # number of features
n1 = 20  # rows in X
n2 = 25  # rows in Y
X64 = rng.random_sample((n1, d))
Y64 = rng.random_sample((n2, d))
X32 = X64.astype("float32")
Y32 = Y64.astype("float32")
# Memory-mapped copies exercise the read-only memmap code paths.
[X_mmap, Y_mmap] = create_memmap_backed_data([X64, Y64])
# make boolean arrays: ones and zeros
X_bool = (X64 < 0.3).astype(np.float64)  # quite sparse
Y_bool = (Y64 < 0.7).astype(np.float64)  # not too sparse
[X_bool_mmap, Y_bool_mmap] = create_memmap_backed_data([X_bool, Y_bool])
V = rng.random_sample((d, d))
# V @ V.T is symmetric positive semi-definite, as mahalanobis requires.
VI = np.dot(V, V.T)
# (metric name, grid of extra keyword arguments) pairs; each kwarg maps to a
# tuple of candidate values, expanded with itertools.product in the tests.
METRICS_DEFAULT_PARAMS = [
    ("euclidean", {}),
    ("cityblock", {}),
    ("minkowski", dict(p=(0.5, 1, 1.5, 2, 3))),
    ("chebyshev", {}),
    ("seuclidean", dict(V=(rng.random_sample(d),))),
    ("mahalanobis", dict(VI=(VI,))),
    ("hamming", {}),
    ("canberra", {}),
    ("braycurtis", {}),
    ("minkowski", dict(p=(0.5, 1, 1.5, 3), w=(rng.random_sample(d),))),
]
@pytest.mark.parametrize(
    "metric_param_grid", METRICS_DEFAULT_PARAMS, ids=lambda params: params[0]
)
@pytest.mark.parametrize("X, Y", [(X64, Y64), (X32, Y32), (X_mmap, Y_mmap)])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_cdist(metric_param_grid, X, Y, csr_container):
    """Check DistanceMetric.pairwise(X, Y) against scipy's cdist for every
    metric/parameter combination and every {dense, sparse} input pairing."""
    metric, param_grid = metric_param_grid
    keys = param_grid.keys()
    X_csr, Y_csr = csr_container(X), csr_container(Y)
    # Expand the per-metric parameter grid (cartesian product of all values).
    for vals in itertools.product(*param_grid.values()):
        kwargs = dict(zip(keys, vals))
        rtol_dict = {}
        if metric == "mahalanobis" and X.dtype == np.float32:
            # Computation of mahalanobis differs between
            # the scipy and scikit-learn implementation.
            # Hence, we increase the relative tolerance.
            # TODO: Inspect slight numerical discrepancy
            # with scipy
            rtol_dict = {"rtol": 1e-6}
        D_scipy_cdist = cdist(X, Y, metric, **kwargs)
        dm = DistanceMetric.get_metric(metric, X.dtype, **kwargs)
        # DistanceMetric.pairwise must be consistent for all
        # combinations of formats in {sparse, dense}.
        D_sklearn = dm.pairwise(X, Y)
        assert D_sklearn.flags.c_contiguous
        assert_allclose(D_sklearn, D_scipy_cdist, **rtol_dict)
        D_sklearn = dm.pairwise(X_csr, Y_csr)
        assert D_sklearn.flags.c_contiguous
        assert_allclose(D_sklearn, D_scipy_cdist, **rtol_dict)
        D_sklearn = dm.pairwise(X_csr, Y)
        assert D_sklearn.flags.c_contiguous
        assert_allclose(D_sklearn, D_scipy_cdist, **rtol_dict)
        D_sklearn = dm.pairwise(X, Y_csr)
        assert D_sklearn.flags.c_contiguous
        assert_allclose(D_sklearn, D_scipy_cdist, **rtol_dict)
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize(
    "X_bool, Y_bool", [(X_bool, Y_bool), (X_bool_mmap, Y_bool_mmap)]
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_cdist_bool_metric(metric, X_bool, Y_bool, csr_container):
    """Check boolean DistanceMetric.pairwise(X, Y) against scipy's cdist for
    dense, memory-mapped and sparse inputs."""
    if metric in DEPRECATED_METRICS:
        with ignore_warnings(category=DeprecationWarning):
            # Some metrics can be deprecated depending on the scipy version.
            # But if they are present, we still want to test whether
            # scikit-learn gives the same result, whether or not they are
            # deprecated.
            D_scipy_cdist = cdist(X_bool, Y_bool, metric)
    else:
        D_scipy_cdist = cdist(X_bool, Y_bool, metric)
    dm = DistanceMetric.get_metric(metric)
    D_sklearn = dm.pairwise(X_bool, Y_bool)
    assert_allclose(D_sklearn, D_scipy_cdist)
    # DistanceMetric.pairwise must be consistent
    # on all combinations of format in {sparse, dense}².
    X_bool_csr, Y_bool_csr = csr_container(X_bool), csr_container(Y_bool)
    D_sklearn = dm.pairwise(X_bool, Y_bool)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_scipy_cdist)
    D_sklearn = dm.pairwise(X_bool_csr, Y_bool_csr)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_scipy_cdist)
    D_sklearn = dm.pairwise(X_bool, Y_bool_csr)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_scipy_cdist)
    D_sklearn = dm.pairwise(X_bool_csr, Y_bool)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_scipy_cdist)
@pytest.mark.parametrize(
    "metric_param_grid", METRICS_DEFAULT_PARAMS, ids=lambda params: params[0]
)
@pytest.mark.parametrize("X", [X64, X32, X_mmap])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pdist(metric_param_grid, X, csr_container):
    """Check DistanceMetric.pairwise(X) (self-distances) against scipy, for
    dense and sparse inputs, over each metric's parameter grid."""
    metric, param_grid = metric_param_grid
    keys = param_grid.keys()
    X_csr = csr_container(X)
    for vals in itertools.product(*param_grid.values()):
        kwargs = dict(zip(keys, vals))
        rtol_dict = {}
        if metric == "mahalanobis" and X.dtype == np.float32:
            # Computation of mahalanobis differs between
            # the scipy and scikit-learn implementation.
            # Hence, we increase the relative tolerance.
            # TODO: Inspect slight numerical discrepancy
            # with scipy
            rtol_dict = {"rtol": 1e-6}
        D_scipy_pdist = cdist(X, X, metric, **kwargs)
        dm = DistanceMetric.get_metric(metric, X.dtype, **kwargs)
        D_sklearn = dm.pairwise(X)
        assert D_sklearn.flags.c_contiguous
        assert_allclose(D_sklearn, D_scipy_pdist, **rtol_dict)
        D_sklearn_csr = dm.pairwise(X_csr)
        # Check the contiguity of the result actually computed from the sparse
        # input (previously these asserts re-checked the dense result).
        assert D_sklearn_csr.flags.c_contiguous
        assert_allclose(D_sklearn_csr, D_scipy_pdist, **rtol_dict)
        D_sklearn_csr = dm.pairwise(X_csr, X_csr)
        assert D_sklearn_csr.flags.c_contiguous
        assert_allclose(D_sklearn_csr, D_scipy_pdist, **rtol_dict)
@pytest.mark.parametrize(
    "metric_param_grid", METRICS_DEFAULT_PARAMS, ids=lambda params: params[0]
)
def test_distance_metrics_dtype_consistency(metric_param_grid):
    """The float32 and float64 specializations must return close distances."""
    metric, param_grid = metric_param_grid

    # Choose rtol to make sure that this test is robust to changes in the random
    # seed in the module-level test data generation code.
    rtol = 1e-5

    for combo in itertools.product(*param_grid.values()):
        kwargs = dict(zip(param_grid.keys(), combo))
        dm64 = DistanceMetric.get_metric(metric, np.float64, **kwargs)
        dm32 = DistanceMetric.get_metric(metric, np.float32, **kwargs)

        D64 = dm64.pairwise(X64)
        D32 = dm32.pairwise(X32)
        assert D64.dtype == np.float64
        assert D32.dtype == np.float32

        # assert_allclose introspects the dtype of the input arrays to decide
        # which rtol value to use by default but in this case we know that D32
        # is not computed with the same precision so we set rtol manually.
        assert_allclose(D64, D32, rtol=rtol)

        assert_allclose(dm64.pairwise(X64, Y64), dm32.pairwise(X32, Y32), rtol=rtol)
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize("X_bool", [X_bool, X_bool_mmap])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pdist_bool_metrics(metric, X_bool, csr_container):
    """Boolean metrics agree with scipy for dense and sparse self-distances."""
    if metric in DEPRECATED_METRICS:
        # Some metrics can be deprecated depending on the scipy version.
        # But if they are present, we still want to test whether
        # scikit-learn gives the same result, whether or not they are
        # deprecated.
        with ignore_warnings(category=DeprecationWarning):
            D_scipy_pdist = cdist(X_bool, X_bool, metric)
    else:
        D_scipy_pdist = cdist(X_bool, X_bool, metric)

    dm = DistanceMetric.get_metric(metric)
    assert_allclose(dm.pairwise(X_bool), D_scipy_pdist)
    assert_allclose(dm.pairwise(csr_container(X_bool)), D_scipy_pdist)
@pytest.mark.parametrize("writable_kwargs", [True, False])
@pytest.mark.parametrize(
    "metric_param_grid", METRICS_DEFAULT_PARAMS, ids=lambda params: params[0]
)
@pytest.mark.parametrize("X", [X64, X32])
def test_pickle(writable_kwargs, metric_param_grid, X):
    """A pickled/unpickled DistanceMetric computes identical distances."""
    metric, param_grid = metric_param_grid
    keys = param_grid.keys()
    for vals in itertools.product(*param_grid.values()):
        if any(isinstance(val, np.ndarray) for val in vals):
            # Exercise both read-only and writable array kwargs.
            vals = copy.deepcopy(vals)
            for val in vals:
                if isinstance(val, np.ndarray):
                    val.setflags(write=writable_kwargs)
        kwargs = dict(zip(keys, vals))
        dm = DistanceMetric.get_metric(metric, X.dtype, **kwargs)
        roundtripped = pickle.loads(pickle.dumps(dm))
        assert_allclose(dm.pairwise(X), roundtripped.pairwise(X))
@pytest.mark.parametrize("metric", BOOL_METRICS)
@pytest.mark.parametrize("X_bool", [X_bool, X_bool_mmap])
def test_pickle_bool_metrics(metric, X_bool):
    """A pickling round-trip preserves boolean-metric distances."""
    dm = DistanceMetric.get_metric(metric)
    clone = pickle.loads(pickle.dumps(dm))
    assert_allclose(dm.pairwise(X_bool), clone.pairwise(X_bool))
@pytest.mark.parametrize("X, Y", [(X64, Y64), (X32, Y32), (X_mmap, Y_mmap)])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_haversine_metric(X, Y, csr_container):
    """Check the haversine metric against a slow numpy reference, for dense,
    sparse and mixed input formats."""
    # The Haversine DistanceMetric only works on 2 features.
    X = np.asarray(X[:, :2])
    Y = np.asarray(Y[:, :2])
    X_csr, Y_csr = csr_container(X), csr_container(Y)
    # Haversine is not supported by scipy.special.distance.{cdist,pdist}
    # So we reimplement it to have a reference.
    def haversine_slow(x1, x2):
        # Great-circle distance between two (lat, lon) points in radians.
        return 2 * np.arcsin(
            np.sqrt(
                np.sin(0.5 * (x1[0] - x2[0])) ** 2
                + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2
            )
        )
    D_reference = np.zeros((X_csr.shape[0], Y_csr.shape[0]))
    for i, xi in enumerate(X):
        for j, yj in enumerate(Y):
            D_reference[i, j] = haversine_slow(xi, yj)
    haversine = DistanceMetric.get_metric("haversine", X.dtype)
    D_sklearn = haversine.pairwise(X, Y)
    # The rank-preserving "reduced" distance is sin^2(d / 2).
    assert_allclose(
        haversine.dist_to_rdist(D_sklearn), np.sin(0.5 * D_reference) ** 2, rtol=1e-6
    )
    assert_allclose(D_sklearn, D_reference)
    D_sklearn = haversine.pairwise(X_csr, Y_csr)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_reference)
    D_sklearn = haversine.pairwise(X_csr, Y)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_reference)
    D_sklearn = haversine.pairwise(X, Y_csr)
    assert D_sklearn.flags.c_contiguous
    assert_allclose(D_sklearn, D_reference)
def test_pyfunc_metric():
    """A user-supplied callable metric matches the built-in euclidean and
    survives pickling."""
    X = np.random.random((10, 3))

    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)

    # Check if both callable metric and predefined metric initialized
    # DistanceMetric object is picklable
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))

    assert_allclose(euclidean.pairwise(X), pyfunc.pairwise(X))
    assert_allclose(euclidean_pkl.pairwise(X), pyfunc_pkl.pairwise(X))
def test_input_data_size():
    """Regression test for #6288: a metric requiring a particular input
    dimension used to fail."""

    def squared_euclidean(x, y):
        assert x.shape[0] == 3
        return np.sum((x - y) ** 2)

    data = check_random_state(0).rand(10, 3)

    D_pyfunc = DistanceMetric.get_metric("pyfunc", func=squared_euclidean).pairwise(data)
    D_eucl = DistanceMetric.get_metric("euclidean").pairwise(data)
    assert_allclose(D_pyfunc, D_eucl**2)
def test_readonly_kwargs():
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/21685
    rng = check_random_state(0)

    weights = rng.rand(100)
    VI = rng.rand(10, 10)
    for arr in (weights, VI):
        arr.setflags(write=False)

    # Those distances metrics have to support readonly buffers.
    DistanceMetric.get_metric("seuclidean", V=weights)
    DistanceMetric.get_metric("mahalanobis", VI=VI)
@pytest.mark.parametrize(
    "w, err_type, err_msg",
    [
        (np.array([1, 1.5, -13]), ValueError, "w cannot contain negative weights"),
        (np.array([1, 1.5, np.nan]), ValueError, "w contains NaN"),
        # Sparse weights are rejected, for every supported CSR container.
        *[
            (
                csr_container([[1, 1.5, 1]]),
                TypeError,
                "Sparse data was passed for w, but dense data is required",
            )
            for csr_container in CSR_CONTAINERS
        ],
        (np.array(["a", "b", "c"]), ValueError, "could not convert string to float"),
        (np.array([]), ValueError, "a minimum of 1 is required"),
    ],
)
def test_minkowski_metric_validate_weights_values(w, err_type, err_msg):
    """Invalid Minkowski weight vectors must raise the documented errors."""
    with pytest.raises(err_type, match=err_msg):
        DistanceMetric.get_metric("minkowski", p=3, w=w)
def test_minkowski_metric_validate_weights_size():
    """pairwise must reject a weight vector whose length differs from the
    number of features."""
    bad_w = rng.random_sample(d + 1)
    dm = DistanceMetric.get_metric("minkowski", p=3, w=bad_w)
    msg = (
        "MinkowskiDistance: the size of w must match "
        f"the number of features \\({X64.shape[1]}\\). "
        f"Currently len\\(w\\)={bad_w.shape[0]}."
    )
    with pytest.raises(ValueError, match=msg):
        dm.pairwise(X64, Y64)
@pytest.mark.parametrize("metric, metric_kwargs", METRICS_DEFAULT_PARAMS)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_get_metric_dtype(metric, metric_kwargs, dtype):
    """DistanceMetric.get_metric dispatches to the dtype-specialized class."""
    expected_cls = DistanceMetric32 if dtype is np.float32 else DistanceMetric64

    # We don't need the entire grid, just one for a sanity check
    kwargs = {key: values[0] for key, values in metric_kwargs.items()}

    via_generic = type(DistanceMetric.get_metric(metric, dtype, **kwargs))
    via_specialized = type(expected_cls.get_metric(metric, **kwargs))
    assert via_generic is via_specialized
def test_get_metric_bad_dtype():
    """Requesting a non-float dtype must raise a ValueError."""
    msg = r"Unexpected dtype .* provided. Please select a dtype from"
    with pytest.raises(ValueError, match=msg):
        DistanceMetric.get_metric("manhattan", np.int32)
def test_minkowski_metric_validate_bad_p_parameter():
    """Minkowski requires a strictly positive ``p``."""
    with pytest.raises(ValueError, match="p must be greater than 0"):
        DistanceMetric.get_metric("minkowski", p=0)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_ranking.py | sklearn/metrics/tests/test_ranking.py | import math
import re
import numpy as np
import pytest
from scipy import stats
from sklearn import datasets
from sklearn.datasets import make_multilabel_classification
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
accuracy_score,
auc,
average_precision_score,
confusion_matrix_at_thresholds,
coverage_error,
dcg_score,
det_curve,
label_ranking_average_precision_score,
label_ranking_loss,
ndcg_score,
precision_recall_curve,
roc_auc_score,
roc_curve,
top_k_accuracy_score,
)
from sklearn.metrics._ranking import _dcg_sample_scores, _ndcg_sample_scores
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.random_projection import _sparse_random_matrix
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.extmath import softmax
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.validation import (
check_array,
check_consistent_length,
check_random_state,
)
###############################################################################
# Utilities for testing
# Curve-producing metrics sharing the (y_true, y_score) call signature; tests
# parametrized over all of them use this list.
CURVE_FUNCS = [
    confusion_matrix_at_thresholds,
    det_curve,
    precision_recall_curve,
    roc_curve,
]
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a
    LogisticRegression classifier.

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem.

    Returns ``(y_true, y_pred, y_score)`` for the held-out second half of the
    shuffled data; ``y_score`` is a 1d array of positive-class probabilities
    when ``binary=True``, otherwise the full probability matrix.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # Deterministic shuffle before the half/half train-test split.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = LogisticRegression(random_state=0)
    y_score = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        y_score = y_score[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, y_score
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`.
Note that this implementation fails on some edge cases.
For example, for constant predictions e.g. [0.5, 0.5, 0.5],
y_true = [1, 0, 0] returns an average precision of 0.33...
but y_true = [0, 0, 1] returns 1.0.
"""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= i + 1.0
score += prec
return score / n_pos
def _average_precision_slow(y_true, y_score):
    """A second alternative implementation of average precision that closely
    follows the Wikipedia article's definition (see References). This should
    give identical results as `average_precision_score` for all inputs.

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
       <https://en.wikipedia.org/wiki/Average_precision>`_
    """
    precision, recall, threshold = precision_recall_curve(y_true, y_score)
    # precision_recall_curve returns the curve from low to high recall once
    # reversed; integrate precision over the recall increments.
    precision = list(reversed(precision))
    recall = list(reversed(recall))
    average_precision = 0
    for idx in range(1, len(precision)):
        average_precision += precision[idx] * (recall[idx] - recall[idx - 1])
    return average_precision
def _partial_roc_auc_score(y_true, y_predict, max_fpr):
    """Alternative implementation to check for correctness of `roc_auc_score`
    with `max_fpr` set.
    """
    def _partial_roc(y_true, y_predict, max_fpr):
        # Truncate the ROC curve at max_fpr, linearly interpolating the TPR
        # at the cut point from the two surrounding curve points.
        fpr, tpr, _ = roc_curve(y_true, y_predict)
        new_fpr = fpr[fpr <= max_fpr]
        new_fpr = np.append(new_fpr, max_fpr)
        new_tpr = tpr[fpr <= max_fpr]
        # First index past the cut; the point before it is still inside.
        idx_out = np.argmax(fpr > max_fpr)
        idx_in = idx_out - 1
        x_interp = [fpr[idx_in], fpr[idx_out]]
        y_interp = [tpr[idx_in], tpr[idx_out]]
        new_tpr = np.append(new_tpr, np.interp(max_fpr, x_interp, y_interp))
        return (new_fpr, new_tpr)
    new_fpr, new_tpr = _partial_roc(y_true, y_predict, max_fpr)
    partial_auc = auc(new_fpr, new_tpr)
    # Formula (5) from McClish 1989: standardize the partial AUC so that a
    # random classifier maps to 0.5 and a perfect one to 1.
    fpr1 = 0
    fpr2 = max_fpr
    min_area = 0.5 * (fpr2 - fpr1) * (fpr2 + fpr1)
    max_area = fpr2 - fpr1
    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
def test_confusion_matrix_at_thresholds(global_random_seed):
    """Smoke test for confusion_matrix_at_thresholds."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    y_true = rng.randint(0, 2, size=100)
    y_score = rng.uniform(size=100)
    n_pos = np.sum(y_true)
    n_neg = n_samples - n_pos

    tns, fps, fns, tps, thresholds = confusion_matrix_at_thresholds(y_true, y_score)

    # All five arrays have one entry per threshold.
    assert len({len(arr) for arr in (tns, fps, fns, tps, thresholds)}) == 1
    # At every threshold the counts add up to the class totals.
    assert_allclose(tps + fns, n_pos)
    assert_allclose(tns + fps, n_neg)
    assert_allclose(tns + fps + fns + tps, n_samples)
@pytest.mark.parametrize("drop", [True, False])
def test_roc_curve(drop):
    # Test Area under Receiver Operating Characteristic (ROC) curve
    y_true, _, y_score = make_prediction(binary=True)
    expected_auc = _auc(y_true, y_score)

    fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=drop)
    area = auc(fpr, tpr)
    assert_array_almost_equal(area, expected_auc, decimal=2)
    assert_almost_equal(area, roc_auc_score(y_true, y_score))
    assert fpr.shape == tpr.shape == thresholds.shape
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve start at 0 and ending and
    # 1 even in corner cases
    rng = np.random.RandomState(0)
    labels = np.array([0] * 50 + [1] * 50)
    scores = rng.randint(3, size=100)

    fpr, tpr, thr = roc_curve(labels, scores, drop_intermediate=True)

    assert fpr[0] == 0
    assert fpr[-1] == 1
    assert fpr.shape == tpr.shape == thr.shape
def test_roc_returns_consistency():
    # Test whether the returned threshold matches up with tpr
    # make small toy dataset
    y_true, _, y_score = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, y_score)

    # Recompute the TPR implied by each returned threshold.
    n_pos = np.sum(y_true)
    tpr_expected = [
        1.0 * np.sum((y_score >= t) & y_true) / n_pos for t in thresholds
    ]

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_expected, decimal=2)
    assert fpr.shape == tpr.shape == thresholds.shape
def test_roc_curve_multi():
    """roc_curve is not applicable to multiclass problems and must raise."""
    y_true, _, y_score = make_prediction(binary=False)
    with pytest.raises(ValueError):
        roc_curve(y_true, y_score)
def test_roc_curve_confidence():
    """roc_curve accepts arbitrary confidence scores, not just probabilities."""
    y_true, _, y_score = make_prediction(binary=True)

    # Shifting the scores must not change the ranking, hence the curve.
    fpr, tpr, thresholds = roc_curve(y_true, y_score - 0.5)
    assert_array_almost_equal(auc(fpr, tpr), 0.90, decimal=2)
    assert fpr.shape == tpr.shape == thresholds.shape
def test_roc_curve_hard():
    """roc_curve on hard (discrete) decisions yields the expected AUC values."""
    y_true, pred, y_score = make_prediction(binary=True)

    # Constant predictions carry no ranking information -> AUC 0.5.
    for trivial_pred in (np.ones(y_true.shape), np.zeros(y_true.shape)):
        fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
        assert_array_almost_equal(auc(fpr, tpr), 0.50, decimal=2)
        assert fpr.shape == tpr.shape == thresholds.shape

    # hard decisions
    fpr, tpr, thresholds = roc_curve(y_true, pred)
    assert_array_almost_equal(auc(fpr, tpr), 0.78, decimal=2)
    assert fpr.shape == tpr.shape == thresholds.shape
def test_roc_curve_one_label():
    """When y_true contains a single class, the undefined rate is all-NaN and
    an UndefinedMetricWarning is raised."""
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    expected_message = (
        "No negative samples in y_true, false positive value should be meaningless"
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        fpr, tpr, thresholds = roc_curve(y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr, np.full(len(thresholds), np.nan))
    assert fpr.shape == tpr.shape
    assert fpr.shape == thresholds.shape
    # assert there are warnings
    expected_message = (
        "No positive samples in y_true, true positive value should be meaningless"
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        # Flip the labels so that y_true becomes all zeros.
        fpr, tpr, thresholds = roc_curve([1 - x for x in y_true], y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr, np.full(len(thresholds), np.nan))
    assert fpr.shape == tpr.shape
    assert fpr.shape == thresholds.shape
def test_roc_curve_toydata():
    """Exhaustive checks of roc_curve / roc_auc_score on tiny toy problems.

    ``roc_curve`` returns ``(fpr, tpr, thresholds)``. The previous version of
    this test unpacked the first two values in the wrong order (with the
    assertions swapped to compensate); the names are now consistent with the
    actual return order. The local ``auc`` variable also used to shadow the
    imported ``auc`` function and has been renamed.
    """
    # Binary classification
    y_true = [0, 1]
    y_score = [0, 1]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 0, 1])
    assert_array_almost_equal(tpr, [0, 1, 1])
    assert_almost_equal(roc_auc, 1.0)

    y_true = [0, 1]
    y_score = [1, 0]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1, 1])
    assert_array_almost_equal(tpr, [0, 0, 1])
    assert_almost_equal(roc_auc, 0.0)

    y_true = [1, 0]
    y_score = [1, 1]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    y_true = [1, 0]
    y_score = [1, 0]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 0, 1])
    assert_array_almost_equal(tpr, [0, 1, 1])
    assert_almost_equal(roc_auc, 1.0)

    y_true = [1, 0]
    y_score = [0.5, 0.5]
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(fpr, [0, 1])
    assert_array_almost_equal(tpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    # case with no positive samples
    y_true = [0, 0]
    y_score = [0.25, 0.75]
    # assert UndefinedMetricWarning because of no positive sample in y_true
    expected_message = (
        "No positive samples in y_true, true positive value should be meaningless"
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        fpr, tpr, _ = roc_curve(y_true, y_score)
    # FPR is well defined; TPR is undefined (NaN) without positives.
    assert_array_almost_equal(fpr, [0.0, 0.5, 1.0])
    assert_array_almost_equal(tpr, [np.nan, np.nan, np.nan])

    expected_message = (
        "Only one class is present in y_true. "
        "ROC AUC score is not defined in that case."
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc = roc_auc_score(y_true, y_score)
    assert math.isnan(roc_auc)

    # case with no negative samples
    y_true = [1, 1]
    y_score = [0.25, 0.75]
    # assert UndefinedMetricWarning because of no negative sample in y_true
    expected_message = (
        "No negative samples in y_true, false positive value should be meaningless"
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        fpr, tpr, _ = roc_curve(y_true, y_score)
    # TPR is well defined; FPR is undefined (NaN) without negatives.
    assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
    assert_array_almost_equal(tpr, [0.0, 0.5, 1.0])

    expected_message = (
        "Only one class is present in y_true. "
        "ROC AUC score is not defined in that case."
    )
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc = roc_auc_score(y_true, y_score)
    assert math.isnan(roc_auc)

    # Multi-label classification task
    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [0, 1]])
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc_score(y_true, y_score, average="macro")
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc_score(y_true, y_score, average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.0)

    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc_score(y_true, y_score, average="macro")
    with pytest.warns(UndefinedMetricWarning, match=expected_message):
        roc_auc_score(y_true, y_score, average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
def test_roc_curve_drop_intermediate():
    # Test that drop_intermediate drops the correct thresholds
    y_true = [0, 0, 0, 0, 1, 1]
    y_score = [0.0, 0.2, 0.5, 0.6, 0.7, 1.0]
    # roc_curve returns (fpr, tpr, thresholds); the previous unpacking had
    # fpr/tpr swapped (harmless here since only thresholds are asserted).
    fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds, [np.inf, 1.0, 0.7, 0.0])

    # Test dropping thresholds with repeating scores
    y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    y_score = [0.0, 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
    fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
    assert_array_almost_equal(thresholds, [np.inf, 1.0, 0.9, 0.7, 0.6, 0.0])
def test_roc_curve_fpr_tpr_increasing():
    # Ensure that fpr and tpr returned by roc_curve are increasing.
    # Construct an edge case with float y_score and sample_weight
    # when some adjacent values of fpr and tpr are actually the same.
    y_true = [0, 0, 1, 1, 1]
    y_score = [0.1, 0.7, 0.3, 0.4, 0.5]
    weights = np.full(5, 0.2)

    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=weights)

    assert np.all(np.diff(fpr) >= 0)
    assert np.all(np.diff(tpr) >= 0)
def test_auc():
    """auc computes the trapezoidal area for simple hand-checked curves."""
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([1, 0, 0], [0, 1, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected in cases:
        assert_array_almost_equal(auc(x, y), expected)
def test_auc_errors():
    """auc rejects mismatched shapes, too-few points and unordered x."""
    # Incompatible shapes
    with pytest.raises(ValueError):
        auc([0.0, 0.5, 1.0], [0.1, 0.2])

    # Too few x values
    with pytest.raises(ValueError):
        auc([0.0], [0.1])

    # x is not in order
    x = [2, 1, 3, 4]
    y = [5, 6, 7, 8]
    error_message = f"x is neither increasing nor decreasing : {np.array(x)}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        auc(x, y)
@pytest.mark.parametrize(
    "y_true, labels",
    [
        (np.array([0, 1, 0, 2]), [0, 1, 2]),
        (np.array([0, 1, 0, 2]), None),
        (["a", "b", "a", "c"], ["a", "b", "c"]),
        (["a", "b", "a", "c"], None),
    ],
)
def test_multiclass_ovo_roc_auc_toydata(y_true, labels):
    # Tests the one-vs-one multiclass ROC AUC algorithm
    # on a small example, representative of an expected use case.
    y_scores = np.array(
        [[0.1, 0.8, 0.1], [0.3, 0.4, 0.3], [0.35, 0.5, 0.15], [0, 0.2, 0.8]]
    )
    # Used to compute the expected output.
    # For each label pair the binary AUC is computed in both directions
    # (each label taking the role of positive class) and averaged.
    # Consider labels 0 and 1:
    # positive label is 0, negative label is 1
    score_01 = roc_auc_score([1, 0, 1], [0.1, 0.3, 0.35])
    # positive label is 1, negative label is 0
    score_10 = roc_auc_score([0, 1, 0], [0.8, 0.4, 0.5])
    average_score_01 = (score_01 + score_10) / 2
    # Consider labels 0 and 2:
    score_02 = roc_auc_score([1, 1, 0], [0.1, 0.35, 0])
    score_20 = roc_auc_score([0, 0, 1], [0.1, 0.15, 0.8])
    average_score_02 = (score_02 + score_20) / 2
    # Consider labels 1 and 2:
    score_12 = roc_auc_score([1, 0], [0.4, 0.2])
    score_21 = roc_auc_score([0, 1], [0.3, 0.8])
    average_score_12 = (score_12 + score_21) / 2
    # Unweighted, one-vs-one multiclass ROC AUC algorithm
    ovo_unweighted_score = (average_score_01 + average_score_02 + average_score_12) / 3
    assert_almost_equal(
        roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo"),
        ovo_unweighted_score,
    )
    # Weighted, one-vs-one multiclass ROC AUC algorithm
    # Each term is weighted by the prevalence for the positive label.
    pair_scores = [average_score_01, average_score_02, average_score_12]
    prevalence = [0.75, 0.75, 0.50]
    ovo_weighted_score = np.average(pair_scores, weights=prevalence)
    assert_almost_equal(
        roc_auc_score(
            y_true, y_scores, labels=labels, multi_class="ovo", average="weighted"
        ),
        ovo_weighted_score,
    )
    # Check that average=None raises NotImplemented error
    error_message = "average=None is not implemented for multi_class='ovo'."
    with pytest.raises(NotImplementedError, match=error_message):
        roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo", average=None)
@pytest.mark.parametrize(
    "y_true, labels",
    [
        (np.array([0, 2, 0, 2]), [0, 1, 2]),
        (np.array(["a", "d", "a", "d"]), ["a", "b", "d"]),
    ],
)
def test_multiclass_ovo_roc_auc_toydata_binary(y_true, labels):
    """Check one-vs-one multiclass ROC AUC when only two classes are present."""
    # Tests the one-vs-one multiclass ROC AUC algorithm for binary y_true
    #
    # on a small example, representative of an expected use case.
    y_scores = np.array(
        [[0.2, 0.0, 0.8], [0.6, 0.0, 0.4], [0.55, 0.0, 0.45], [0.4, 0.0, 0.6]]
    )
    # Used to compute the expected output.
    # Consider the two present classes (score columns 0 and 2):
    # positive label is the first present class, negative is the second
    score_01 = roc_auc_score([1, 0, 1, 0], [0.2, 0.6, 0.55, 0.4])
    # positive label is the second present class, negative is the first
    score_10 = roc_auc_score([0, 1, 0, 1], [0.8, 0.4, 0.45, 0.6])
    ovo_score = (score_01 + score_10) / 2
    assert_almost_equal(
        roc_auc_score(y_true, y_scores, labels=labels, multi_class="ovo"), ovo_score
    )
    # Weighted, one-vs-one multiclass ROC AUC algorithm: with a single class
    # pair, weighting cannot change the result.
    assert_almost_equal(
        roc_auc_score(
            y_true, y_scores, labels=labels, multi_class="ovo", average="weighted"
        ),
        ovo_score,
    )
@pytest.mark.parametrize(
    "y_true, labels",
    [
        (np.array([0, 1, 2, 2]), None),
        (["a", "b", "c", "c"], None),
        ([0, 1, 2, 2], [0, 1, 2]),
        (["a", "b", "c", "c"], ["a", "b", "c"]),
    ],
)
def test_multiclass_ovr_roc_auc_toydata(y_true, labels):
    """Check one-vs-rest multiclass ROC AUC against per-class binary AUCs."""
    # Tests the unweighted, one-vs-rest multiclass ROC AUC algorithm
    # on a small example, representative of an expected use case.
    y_scores = np.array(
        [[1.0, 0.0, 0.0], [0.1, 0.5, 0.4], [0.1, 0.1, 0.8], [0.3, 0.3, 0.4]]
    )
    # Compute the expected result by individually computing the 'one-vs-rest'
    # ROC AUC scores for classes 0, 1, and 2.
    out_0 = roc_auc_score([1, 0, 0, 0], y_scores[:, 0])
    out_1 = roc_auc_score([0, 1, 0, 0], y_scores[:, 1])
    out_2 = roc_auc_score([0, 0, 1, 1], y_scores[:, 2])
    # average=None returns the raw per-class scores.
    assert_almost_equal(
        roc_auc_score(y_true, y_scores, multi_class="ovr", labels=labels, average=None),
        [out_0, out_1, out_2],
    )
    # Compute unweighted results (default behaviour is average="macro")
    result_unweighted = (out_0 + out_1 + out_2) / 3.0
    assert_almost_equal(
        roc_auc_score(y_true, y_scores, multi_class="ovr", labels=labels),
        result_unweighted,
    )
    # Tests the weighted, one-vs-rest multiclass ROC AUC algorithm
    # on the same input (Provost & Domingos, 2000); weights are the class
    # prevalences 1/4, 1/4 and 2/4.
    result_weighted = out_0 * 0.25 + out_1 * 0.25 + out_2 * 0.5
    assert_almost_equal(
        roc_auc_score(
            y_true, y_scores, multi_class="ovr", labels=labels, average="weighted"
        ),
        result_weighted,
    )
@pytest.mark.parametrize(
    "multi_class, average",
    [
        ("ovr", "macro"),
        ("ovr", "micro"),
        ("ovo", "macro"),
    ],
)
def test_perfect_imperfect_chance_multiclass_roc_auc(multi_class, average):
    """Check multiclass ROC AUC at its three reference points: 1.0, <1.0, 0.5."""
    y_true = np.array([3, 1, 2, 0])
    # Perfect classifier (from a ranking point of view) has roc_auc_score = 1.0
    y_perfect = [
        [0.0, 0.0, 0.0, 1.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.75, 0.05, 0.05, 0.15],
    ]
    assert_almost_equal(
        roc_auc_score(y_true, y_perfect, multi_class=multi_class, average=average),
        1.0,
    )
    # Imperfect classifier has roc_auc_score < 1.0 (last sample is ranked as
    # class 3 instead of class 0)
    y_imperfect = [
        [0.0, 0.0, 0.0, 1.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
    assert (
        roc_auc_score(y_true, y_imperfect, multi_class=multi_class, average=average)
        < 1.0
    )
    # Chance level classifier (uniform scores) has roc_auc_score = 0.5
    y_chance = 0.25 * np.ones((4, 4))
    assert roc_auc_score(
        y_true, y_chance, multi_class=multi_class, average=average
    ) == pytest.approx(0.5)
def test_micro_averaged_ovr_roc_auc(global_random_seed):
    """Check micro-averaged OvR ROC AUC against a flattened-curve computation."""
    seed = global_random_seed
    # Let's generate a set of random predictions and matching true labels such
    # that the predictions are not perfect. To make the problem more interesting,
    # we use an imbalanced class distribution (by using different parameters
    # in the Dirichlet prior, the conjugate prior of the multinomial
    # distribution).
    y_pred = stats.dirichlet.rvs([2.0, 1.0, 0.5], size=1000, random_state=seed)
    y_true = np.asarray(
        [
            stats.multinomial.rvs(n=1, p=y_pred_i, random_state=seed).argmax()
            for y_pred_i in y_pred
        ]
    )
    # Micro-averaging is equivalent to binarizing the labels, flattening both
    # arrays, and computing a single binary ROC curve.
    y_onehot = label_binarize(y_true, classes=[0, 1, 2])
    fpr, tpr, _ = roc_curve(y_onehot.ravel(), y_pred.ravel())
    roc_auc_by_hand = auc(fpr, tpr)
    roc_auc_auto = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro")
    assert roc_auc_by_hand == pytest.approx(roc_auc_auto)
# Each tuple below is (expected error message, y_true, labels); y_scores is
# fixed at 4 samples x 3 columns in the test body, so the cases exercise
# duplicated labels, mismatched class/column counts, unordered labels and
# labels missing from the `labels` parameter.
@pytest.mark.parametrize(
    "msg, y_true, labels",
    [
        ("Parameter 'labels' must be unique", np.array([0, 1, 2, 2]), [0, 2, 0]),
        (
            "Parameter 'labels' must be unique",
            np.array(["a", "b", "c", "c"]),
            ["a", "a", "b"],
        ),
        (
            (
                "Number of classes in y_true not equal to the number of columns "
                "in 'y_score'"
            ),
            np.array([0, 2, 0, 2]),
            None,
        ),
        (
            "Parameter 'labels' must be ordered",
            np.array(["a", "b", "c", "c"]),
            ["a", "c", "b"],
        ),
        (
            (
                "Number of given labels, 2, not equal to the number of columns in "
                "'y_score', 3"
            ),
            np.array([0, 1, 2, 2]),
            [0, 1],
        ),
        (
            (
                "Number of given labels, 2, not equal to the number of columns in "
                "'y_score', 3"
            ),
            np.array(["a", "b", "c", "c"]),
            ["a", "b"],
        ),
        (
            (
                "Number of given labels, 4, not equal to the number of columns in "
                "'y_score', 3"
            ),
            np.array([0, 1, 2, 2]),
            [0, 1, 2, 3],
        ),
        (
            (
                "Number of given labels, 4, not equal to the number of columns in "
                "'y_score', 3"
            ),
            np.array(["a", "b", "c", "c"]),
            ["a", "b", "c", "d"],
        ),
        (
            "'y_true' contains labels not in parameter 'labels'",
            np.array(["a", "b", "c", "e"]),
            ["a", "b", "c"],
        ),
        (
            "'y_true' contains labels not in parameter 'labels'",
            np.array(["a", "b", "c", "d"]),
            ["a", "b", "c"],
        ),
        (
            "'y_true' contains labels not in parameter 'labels'",
            np.array([0, 1, 2, 3]),
            [0, 1, 2],
        ),
    ],
)
@pytest.mark.parametrize("multi_class", ["ovo", "ovr"])
def test_roc_auc_score_multiclass_labels_error(msg, y_true, labels, multi_class):
    """Check that inconsistent `labels`/`y_true` inputs raise a ValueError."""
    y_scores = np.array(
        [[0.1, 0.8, 0.1], [0.3, 0.4, 0.3], [0.35, 0.5, 0.15], [0, 0.2, 0.8]]
    )
    with pytest.raises(ValueError, match=msg):
        roc_auc_score(y_true, y_scores, labels=labels, multi_class=multi_class)
# Each tuple is (expected error message regex, keyword arguments): invalid
# `average` values per scheme, `sample_weight` with OvO, `max_fpr` in the
# multiclass setting, and a missing `multi_class`.
@pytest.mark.parametrize(
    "msg, kwargs",
    [
        (
            (
                r"average must be one of \('macro', 'weighted', None\) for "
                r"multiclass problems"
            ),
            {"average": "samples", "multi_class": "ovo"},
        ),
        (
            (
                r"average must be one of \('micro', 'macro', 'weighted', None\) for "
                r"multiclass problems"
            ),
            {"average": "samples", "multi_class": "ovr"},
        ),
        (
            (
                r"sample_weight is not supported for multiclass one-vs-one "
                r"ROC AUC, 'sample_weight' must be None in this case"
            ),
            {"multi_class": "ovo", "sample_weight": []},
        ),
        (
            (
                r"Partial AUC computation not available in multiclass setting, "
                r"'max_fpr' must be set to `None`, received `max_fpr=0.5` "
                r"instead"
            ),
            {"multi_class": "ovo", "max_fpr": 0.5},
        ),
        (r"multi_class must be in \('ovo', 'ovr'\)", {}),
    ],
)
def test_roc_auc_score_multiclass_error(msg, kwargs):
    """Check unsupported parameter combinations in the multiclass setting."""
    # Test that roc_auc_score function returns an error when trying
    # to compute multiclass AUC for parameters where an output
    # is not defined.
    rng = check_random_state(404)
    y_score = rng.rand(20, 3)
    y_prob = softmax(y_score)
    y_true = rng.randint(0, 3, size=20)
    with pytest.raises(ValueError, match=msg):
        roc_auc_score(y_true, y_prob, **kwargs)
def test_auc_score_non_binary_class():
    """Check that roc_auc_score warns when y_true contains a single class."""
    rng = check_random_state(404)
    scores = rng.rand(10)
    warn_message = (
        "Only one class is present in y_true. "
        "ROC AUC score is not defined in that case."
    )
    # Each candidate y_true holds one class value only: all 0, all 1, all -1.
    for constant_label in (0, 1, -1):
        degenerate_y = np.full(10, constant_label, dtype="int")
        with pytest.warns(UndefinedMetricWarning, match=warn_message):
            roc_auc_score(degenerate_y, scores)
@pytest.mark.parametrize("curve_func", CURVE_FUNCS)
def test_confusion_matrix_at_thresholds_multiclass_error(curve_func):
    """Check that the binary curve functions reject multiclass targets."""
    rng = check_random_state(404)
    labels = rng.randint(0, 3, size=10)
    scores = rng.rand(10)
    with pytest.raises(ValueError, match="multiclass format is not supported"):
        curve_func(labels, scores)
@pytest.mark.parametrize("curve_func", CURVE_FUNCS)
def test_confusion_matrix_at_thresholds_implicit_pos_label(curve_func):
    """Check pos_label inference: strings require an explicit pos_label,
    while float 0.0/1.0 labels behave like their integer counterparts."""
    # Check that using string class labels raises an informative
    # error for any supported string dtype:
    msg = (
        "y_true takes value in {'a', 'b'} and pos_label is "
        "not specified: either make y_true take "
        "value in {0, 1} or {-1, 1} or pass pos_label "
        "explicitly."
    )
    with pytest.raises(ValueError, match=msg):
        curve_func(np.array(["a", "b"], dtype="<U1"), [0.0, 1.0])
    with pytest.raises(ValueError, match=msg):
        curve_func(np.array(["a", "b"], dtype=object), [0.0, 1.0])
    # Check that it is possible to use floating point class labels
    # that are interpreted similarly to integer class labels:
    y_pred = [0.0, 1.0, 0.2, 0.42]
    int_curve = curve_func([0, 1, 1, 0], y_pred)
    float_curve = curve_func([0.0, 1.0, 1.0, 0.0], y_pred)
    # The returned tuples (e.g. fpr/tpr/thresholds) must match element-wise.
    for int_curve_part, float_curve_part in zip(int_curve, float_curve):
        np.testing.assert_allclose(int_curve_part, float_curve_part)
@pytest.mark.filterwarnings("ignore:Support for labels represented as bytes")
@pytest.mark.parametrize("curve_func", [precision_recall_curve, roc_curve])
@pytest.mark.parametrize("labels_type", ["list", "array"])
def test_confusion_matrix_at_thresholds_implicit_bytes_pos_label(
    curve_func, labels_type
):
    """Check that bytes class labels raise an informative TypeError."""
    bytes_labels = _convert_container([b"a", b"b"], labels_type)
    expected_msg = "Support for labels represented as bytes is not supported"
    with pytest.raises(TypeError, match=expected_msg):
        curve_func(bytes_labels, [0.0, 1.0])
@pytest.mark.parametrize("curve_func", CURVE_FUNCS)
def test_confusion_matrix_at_thresholds_zero_sample_weight(curve_func):
    """Check that a zero-weight sample does not influence the curve."""
    y_true = [0, 0, 1, 1, 1]
    y_score = [0.1, 0.2, 0.3, 0.4, 0.5]
    sample_weight = [1, 1, 1, 0.5, 0]
    # The last sample has weight 0; dropping it must produce identical output.
    with_zero_weight = curve_func(y_true, y_score, sample_weight=sample_weight)
    without_last = curve_func(
        y_true[:-1], y_score[:-1], sample_weight=sample_weight[:-1]
    )
    for part_a, part_b in zip(with_zero_weight, without_last):
        assert_allclose(part_a, part_b)
@pytest.mark.parametrize("drop", [True, False])
def test_precision_recall_curve(drop):
y_true, _, y_score = make_prediction(binary=True)
_test_precision_recall_curve(y_true, y_score, drop)
# Make sure the first point of the Precision-Recall on the right is:
# (p=1.0, r=class balance) on a non-balanced dataset [1:]
p, r, t = precision_recall_curve(y_true[1:], y_score[1:], drop_intermediate=drop)
assert r[0] == 1.0
assert p[0] == y_true[1:].mean()
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/__init__.py | sklearn/metrics/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_regression.py | sklearn/metrics/tests/test_regression.py | from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import optimize
from scipy.special import factorial, xlogy
from sklearn.dummy import DummyRegressor
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import (
d2_absolute_error_score,
d2_pinball_score,
d2_tweedie_score,
explained_variance_score,
make_scorer,
max_error,
mean_absolute_error,
mean_absolute_percentage_error,
mean_pinball_loss,
mean_squared_error,
mean_squared_log_error,
mean_tweedie_deviance,
median_absolute_error,
r2_score,
root_mean_squared_error,
root_mean_squared_log_error,
)
from sklearn.metrics._regression import _check_reg_targets
from sklearn.model_selection import GridSearchCV
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
def test_regression_metrics(n_samples=50):
    """Check all basic regression metrics on y_pred = y_true +/- 1 and on a
    doubled target for the Tweedie deviance family.

    The closed-form expectations in the second half were evaluated with
    sympy (see original comment below).
    """
    y_true = np.arange(n_samples)
    y_pred = y_true + 1
    y_pred_2 = y_true - 1
    # Constant offset of 1 gives unit squared/absolute/median/max error.
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.0)
    assert_almost_equal(
        mean_squared_log_error(y_true, y_pred),
        mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred)),
    )
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.0)
    # Pinball loss at alpha=0.5 is half the absolute error; at alpha=0.4 the
    # over/under-prediction sides get weights 0.6 and 0.4 respectively.
    assert_almost_equal(mean_pinball_loss(y_true, y_pred), 0.5)
    assert_almost_equal(mean_pinball_loss(y_true, y_pred_2), 0.5)
    assert_almost_equal(mean_pinball_loss(y_true, y_pred, alpha=0.4), 0.6)
    assert_almost_equal(mean_pinball_loss(y_true, y_pred_2, alpha=0.4), 0.4)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.0)
    # MAPE explodes because y_true contains 0 but remains finite (epsilon).
    mape = mean_absolute_percentage_error(y_true, y_pred)
    assert np.isfinite(mape)
    assert mape > 1e6
    assert_almost_equal(max_error(y_true, y_pred), 1.0)
    assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
    assert_almost_equal(r2_score(y_true, y_pred, force_finite=False), 0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.0)
    assert_almost_equal(
        explained_variance_score(y_true, y_pred, force_finite=False), 1.0
    )
    # power=0 Tweedie deviance reduces to the squared error.
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=0),
        mean_squared_error(y_true, y_pred),
    )
    assert_almost_equal(
        d2_tweedie_score(y_true, y_pred, power=0), r2_score(y_true, y_pred)
    )
    # D2 absolute error compares against the median baseline.
    dev_median = np.abs(y_true - np.median(y_true)).sum()
    assert_array_almost_equal(
        d2_absolute_error_score(y_true, y_pred),
        1 - np.abs(y_true - y_pred).sum() / dev_median,
    )
    # D2 pinball compares against the corresponding quantile baseline.
    alpha = 0.2
    pinball_loss = lambda y_true, y_pred, alpha: alpha * np.maximum(
        y_true - y_pred, 0
    ) + (1 - alpha) * np.maximum(y_pred - y_true, 0)
    y_quantile = np.percentile(y_true, q=alpha * 100)
    assert_almost_equal(
        d2_pinball_score(y_true, y_pred, alpha=alpha),
        1
        - pinball_loss(y_true, y_pred, alpha).sum()
        / pinball_loss(y_true, y_quantile, alpha).sum(),
    )
    # At alpha=0.5 the D2 pinball score coincides with D2 absolute error.
    assert_almost_equal(
        d2_absolute_error_score(y_true, y_pred),
        d2_pinball_score(y_true, y_pred, alpha=0.5),
    )
    # Tweedie deviance needs positive y_pred, except for p=0,
    # p>=2 needs positive y_true
    # results evaluated by sympy
    y_true = np.arange(1, 1 + n_samples)
    y_pred = 2 * y_true
    n = n_samples
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=-1),
        5 / 12 * n * (n**2 + 2 * n + 1),
    )
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=1), (n + 1) * (1 - np.log(2))
    )
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=2), 2 * np.log(2) - 1
    )
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=3 / 2),
        ((6 * np.sqrt(2) - 8) / n) * np.sqrt(y_true).sum(),
    )
    assert_almost_equal(
        mean_tweedie_deviance(y_true, y_pred, power=3), np.sum(1 / y_true) / (4 * n)
    )
    # D2 Tweedie score: deviance of y_pred relative to the deviance of the
    # mean-prediction baseline.
    dev_mean = 2 * np.mean(xlogy(y_true, 2 * y_true / (n + 1)))
    assert_almost_equal(
        d2_tweedie_score(y_true, y_pred, power=1),
        1 - (n + 1) * (1 - np.log(2)) / dev_mean,
    )
    dev_mean = 2 * np.log((n + 1) / 2) - 2 / n * np.log(factorial(n))
    assert_almost_equal(
        d2_tweedie_score(y_true, y_pred, power=2), 1 - (2 * np.log(2) - 1) / dev_mean
    )
def test_root_mean_squared_error_multioutput_raw_value():
    """RMSE with raw_values equals the square root of the raw MSE.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/pull/16323
    """
    mse_raw = mean_squared_error([[1]], [[10]], multioutput="raw_values")
    rmse_raw = root_mean_squared_error([[1]], [[10]], multioutput="raw_values")
    assert rmse_raw == pytest.approx(np.sqrt(mse_raw))
def test_multioutput_regression():
    """Check regression metrics on a small 3x4 binary multioutput problem,
    including the `force_finite` behavior of r2_score when an output of
    y_true is constant.
    """
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
    # Per-output squared errors are 3/3, 2/3, 0, 0 -> averaged over 4 outputs.
    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1.0 / 3 + 2.0 / 3 + 2.0 / 3) / 4.0)
    error = root_mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, 0.454, decimal=2)
    error = mean_squared_log_error(y_true, y_pred)
    assert_almost_equal(error, 0.200, decimal=2)
    error = root_mean_squared_log_error(y_true, y_pred)
    assert_almost_equal(error, 0.315, decimal=2)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1.0 + 2.0 / 3) / 4.0)
    error = mean_pinball_loss(y_true, y_pred)
    assert_almost_equal(error, (1.0 + 2.0 / 3) / 8.0)
    # MAPE blows up (finite but huge) because y_true contains zeros.
    error = np.around(mean_absolute_percentage_error(y_true, y_pred), decimals=2)
    assert np.isfinite(error)
    assert error > 1e6
    error = median_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1.0 + 1.0) / 4.0)
    error = r2_score(y_true, y_pred, multioutput="variance_weighted")
    assert_almost_equal(error, 1.0 - 5.0 / 2)
    error = r2_score(y_true, y_pred, multioutput="uniform_average")
    assert_almost_equal(error, -0.875)
    score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
    raw_expected_score = [
        1
        - np.abs(y_true[:, i] - y_pred[:, i]).sum()
        / np.abs(y_true[:, i] - np.median(y_true[:, i])).sum()
        for i in range(y_true.shape[1])
    ]
    # in the last case, the denominator vanishes and hence we get nan,
    # but since the numerator vanishes as well the expected score is 1.0
    raw_expected_score = np.where(np.isnan(raw_expected_score), 1, raw_expected_score)
    assert_array_almost_equal(score, raw_expected_score)
    score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="uniform_average")
    assert_almost_equal(score, raw_expected_score.mean())
    # constant `y_true` with force_finite=True leads to 1. or 0.
    yc = [5.0, 5.0]
    error = r2_score(yc, [5.0, 5.0], multioutput="variance_weighted")
    assert_almost_equal(error, 1.0)
    error = r2_score(yc, [5.0, 5.1], multioutput="variance_weighted")
    assert_almost_equal(error, 0.0)
    # Setting force_finite=False results in the nan for 4th output propagating
    # (the 4th column of y_true is constant).
    error = r2_score(
        y_true, y_pred, multioutput="variance_weighted", force_finite=False
    )
    assert_almost_equal(error, np.nan)
    error = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False)
    assert_almost_equal(error, np.nan)
    # Dropping the 4th output to check `force_finite=False` for nominal
    # (non-degenerate) targets: both settings must then agree.
    y_true = y_true[:, :-1]
    y_pred = y_pred[:, :-1]
    error = r2_score(y_true, y_pred, multioutput="variance_weighted")
    error2 = r2_score(
        y_true, y_pred, multioutput="variance_weighted", force_finite=False
    )
    assert_almost_equal(error, error2)
    error = r2_score(y_true, y_pred, multioutput="uniform_average")
    error2 = r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False)
    assert_almost_equal(error, error2)
    # constant `y_true` with force_finite=False leads to NaN or -Inf.
    error = r2_score(
        yc, [5.0, 5.0], multioutput="variance_weighted", force_finite=False
    )
    assert_almost_equal(error, np.nan)
    error = r2_score(
        yc, [5.0, 6.0], multioutput="variance_weighted", force_finite=False
    )
    assert_almost_equal(error, -np.inf)
def test_regression_metrics_at_limits():
    """Check regression metrics on degenerate and limit inputs.

    Covers single-sample inputs, perfect predictions, the non-finite
    behavior of R^2 / explained variance, the input-domain errors of the
    (root) mean squared log error, and the validity domains of the Tweedie
    deviance for representative ``power`` values.
    """
    # Single-sample case
    # Note: for r2 and d2_tweedie see also test_regression_single_sample
    assert_almost_equal(mean_squared_error([0.0], [0.0]), 0.0)
    assert_almost_equal(root_mean_squared_error([0.0], [0.0]), 0.0)
    assert_almost_equal(mean_squared_log_error([0.0], [0.0]), 0.0)
    assert_almost_equal(mean_absolute_error([0.0], [0.0]), 0.0)
    assert_almost_equal(mean_pinball_loss([0.0], [0.0]), 0.0)
    assert_almost_equal(mean_absolute_percentage_error([0.0], [0.0]), 0.0)
    assert_almost_equal(median_absolute_error([0.0], [0.0]), 0.0)
    assert_almost_equal(max_error([0.0], [0.0]), 0.0)
    assert_almost_equal(explained_variance_score([0.0], [0.0]), 1.0)
    # Perfect cases
    assert_almost_equal(r2_score([0.0, 1], [0.0, 1]), 1.0)
    assert_almost_equal(d2_pinball_score([0.0, 1], [0.0, 1]), 1.0)
    # Non-finite cases
    # R² and explained variance have a fix by default for non-finite cases
    for s in (r2_score, explained_variance_score):
        assert_almost_equal(s([0, 0], [1, -1]), 0.0)
        assert_almost_equal(s([0, 0], [1, -1], force_finite=False), -np.inf)
        assert_almost_equal(s([1, 1], [1, 1]), 1.0)
        assert_almost_equal(s([1, 1], [1, 1], force_finite=False), np.nan)
    # MSLE rejects targets <= -1 (log1p undefined there); the same message is
    # used for all three invalid-input variants, so assign it only once.
    msg = (
        "Mean Squared Logarithmic Error cannot be used when "
        "targets contain values less than or equal to -1."
    )
    with pytest.raises(ValueError, match=msg):
        mean_squared_log_error([-1.0], [-1.0])
    with pytest.raises(ValueError, match=msg):
        mean_squared_log_error([1.0, 2.0, 3.0], [1.0, -2.0, 3.0])
    with pytest.raises(ValueError, match=msg):
        mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0])
    # root_mean_squared_log_error raises its own, distinct message: check it
    # against the "Root ..." text. (Previously the RMSLE call was matched
    # against the MSLE message and the Root message was assigned but unused.)
    msg = (
        "Root Mean Squared Logarithmic Error cannot be used when "
        "targets contain values less than or equal to -1."
    )
    with pytest.raises(ValueError, match=msg):
        root_mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0])
    # Tweedie deviance error: each power has its own validity domain for
    # y_true (y) and y_pred.
    power = -1.2
    assert_allclose(
        mean_tweedie_deviance([0], [1.0], power=power), 2 / (2 - power), rtol=1e-3
    )
    msg = "can only be used on strictly positive y_pred."
    with pytest.raises(ValueError, match=msg):
        mean_tweedie_deviance([0.0], [0.0], power=power)
    with pytest.raises(ValueError, match=msg):
        d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
    assert_almost_equal(mean_tweedie_deviance([0.0], [0.0], power=0), 0.0, 2)
    power = 1.0
    msg = "only be used on non-negative y and strictly positive y_pred."
    with pytest.raises(ValueError, match=msg):
        mean_tweedie_deviance([0.0], [0.0], power=power)
    with pytest.raises(ValueError, match=msg):
        d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
    power = 1.5
    assert_allclose(mean_tweedie_deviance([0.0], [1.0], power=power), 2 / (2 - power))
    msg = "only be used on non-negative y and strictly positive y_pred."
    with pytest.raises(ValueError, match=msg):
        mean_tweedie_deviance([0.0], [0.0], power=power)
    with pytest.raises(ValueError, match=msg):
        d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
    power = 2.0
    assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
    msg = "can only be used on strictly positive y and y_pred."
    with pytest.raises(ValueError, match=msg):
        mean_tweedie_deviance([0.0], [0.0], power=power)
    with pytest.raises(ValueError, match=msg):
        d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
    power = 3.0
    assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
    msg = "can only be used on strictly positive y and y_pred."
    with pytest.raises(ValueError, match=msg):
        mean_tweedie_deviance([0.0], [0.0], power=power)
    with pytest.raises(ValueError, match=msg):
        d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
def test__check_reg_targets():
    """Check that _check_reg_targets accepts consistent (type, n_outputs)
    pairs, reshapes 1d continuous targets to column vectors, and raises a
    ValueError for any mismatched pair.
    """
    # All of length 3; each entry is (expected y_type, y, n_outputs).
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]
    # Test every ordered pair of examples, including an example with itself.
    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, _, _ = _check_reg_targets(
                y1, y2, sample_weight=None, multioutput=None
            )
            assert type1 == y_type
            if type1 == "continuous":
                # 1d inputs are reshaped to (n_samples, 1).
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            with pytest.raises(ValueError):
                _check_reg_targets(y1, y2, sample_weight=None, multioutput=None)
def test__check_reg_targets_exception():
    """Check that an invalid ``multioutput`` string raises a ValueError."""
    bad_multioutput = "this_value_is_not_valid"
    pattern = (
        "Allowed 'multioutput' string values are.+"
        "You provided multioutput={!r}".format(bad_multioutput)
    )
    with pytest.raises(ValueError, match=pattern):
        _check_reg_targets([1, 2, 3], [[1], [2], [3]], None, bad_multioutput)
def test_regression_multioutput_array():
    """Check metrics with multioutput="raw_values": per-output results,
    consistency with "uniform_average", and nan/-inf propagation with
    force_finite=False.
    """
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
    mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
    pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
    mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values")
    r = r2_score(y_true, y_pred, multioutput="raw_values")
    evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
    d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
    evs2 = explained_variance_score(
        y_true, y_pred, multioutput="raw_values", force_finite=False
    )
    # One value per output column.
    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(pbl, [0.25 / 2, 0.625 / 2], decimal=2)
    assert_array_almost_equal(mape, [0.0778, 0.2262], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(d2ps, [0.833, 0.722], decimal=2)
    assert_array_almost_equal(evs2, [0.95, 0.93], decimal=2)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]] * 4
    y_pred = [[1, 1]] * 4
    mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
    mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
    pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
    r = r2_score(y_true, y_pred, multioutput="raw_values")
    d2ps = d2_pinball_score(y_true, y_pred, multioutput="raw_values")
    assert_array_almost_equal(mse, [1.0, 1.0], decimal=2)
    assert_array_almost_equal(mae, [1.0, 1.0], decimal=2)
    assert_array_almost_equal(pbl, [0.5, 0.5], decimal=2)
    assert_array_almost_equal(r, [0.0, 0.0], decimal=2)
    assert_array_almost_equal(d2ps, [0.0, 0.0], decimal=2)
    # "uniform_average" must equal the mean of the raw per-output values.
    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values")
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert np.mean(r) == r2_score(
        [[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="uniform_average"
    )
    evs = explained_variance_score(
        [[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values"
    )
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    evs2 = explained_variance_score(
        [[0, -1], [0, 1]],
        [[2, 2], [1, 1]],
        multioutput="raw_values",
        force_finite=False,
    )
    assert_array_almost_equal(evs2, [-np.inf, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [1, 2]]
    y_pred = [[1, 4], [1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput="raw_values")
    assert_array_almost_equal(r2, [1.0, -3.0], decimal=2)
    assert np.mean(r2) == r2_score(y_true, y_pred, multioutput="uniform_average")
    # With force_finite=False the degenerate 0/0 output becomes nan.
    r22 = r2_score(y_true, y_pred, multioutput="raw_values", force_finite=False)
    assert_array_almost_equal(r22, [np.nan, -3.0], decimal=2)
    assert_almost_equal(
        np.mean(r22),
        r2_score(y_true, y_pred, multioutput="uniform_average", force_finite=False),
    )
    evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
    assert_array_almost_equal(evs, [1.0, -3.0], decimal=2)
    assert np.mean(evs) == explained_variance_score(y_true, y_pred)
    d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
    assert_array_almost_equal(d2ps, [1.0, -1.0], decimal=2)
    evs2 = explained_variance_score(
        y_true, y_pred, multioutput="raw_values", force_finite=False
    )
    assert_array_almost_equal(evs2, [np.nan, -3.0], decimal=2)
    assert_almost_equal(
        np.mean(evs2), explained_variance_score(y_true, y_pred, force_finite=False)
    )
    # Handling msle separately as it does not accept negative inputs.
    y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
    y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
    msle = mean_squared_log_error(y_true, y_pred, multioutput="raw_values")
    msle2 = mean_squared_error(
        np.log(1 + y_true), np.log(1 + y_pred), multioutput="raw_values"
    )
    assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
    """Check regression metrics with explicit per-output weights
    (multioutput=[0.4, 0.6] or [0.3, 0.7])."""
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rmsew = root_mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    mapew = mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
    d2psw = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput=[0.4, 0.6])
    evsw2 = explained_variance_score(
        y_true, y_pred, multioutput=[0.4, 0.6], force_finite=False
    )
    # Expected values are the weighted averages of the per-output scores.
    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(rmsew, 0.59, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(mapew, 0.1668, decimal=2)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)
    assert_almost_equal(d2psw, 0.766, decimal=2)
    assert_almost_equal(evsw2, 0.94, decimal=2)
    # Handling msle separately as it does not accept negative inputs.
    y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
    y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
    msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    msle2 = mean_squared_error(
        np.log(1 + y_true), np.log(1 + y_pred), multioutput=[0.3, 0.7]
    )
    assert_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score, d2_pinball_score])
def test_regression_single_sample(metric):
    """Check that R2/D2 scores warn and return NaN for a single sample."""
    warning_msg = "not well-defined with less than two samples."
    # The metric must emit an UndefinedMetricWarning and yield NaN.
    with pytest.warns(UndefinedMetricWarning, match=warning_msg):
        result = metric([0], [1])
        assert np.isnan(result)
def test_tweedie_deviance_continuity(global_random_seed):
    """Check that the Tweedie deviance is continuous in ``power`` at the
    boundaries (0, 1, 2) between its definition regions."""
    n_samples = 100
    rng = np.random.RandomState(global_random_seed)
    # Strictly positive targets so that all power values are valid.
    y_true = rng.rand(n_samples) + 0.1
    y_pred = rng.rand(n_samples) + 0.1
    assert_allclose(
        mean_tweedie_deviance(y_true, y_pred, power=0 - 1e-10),
        mean_tweedie_deviance(y_true, y_pred, power=0),
    )
    # As we get closer to the limit, with 1e-12 difference the
    # tolerance to pass the below check increases. There are likely
    # numerical precision issues on the edges of different definition
    # regions.
    assert_allclose(
        mean_tweedie_deviance(y_true, y_pred, power=1 + 1e-10),
        mean_tweedie_deviance(y_true, y_pred, power=1),
        rtol=1e-5,
    )
    assert_allclose(
        mean_tweedie_deviance(y_true, y_pred, power=2 - 1e-10),
        mean_tweedie_deviance(y_true, y_pred, power=2),
        rtol=1e-5,
    )
    assert_allclose(
        mean_tweedie_deviance(y_true, y_pred, power=2 + 1e-10),
        mean_tweedie_deviance(y_true, y_pred, power=2),
        rtol=1e-5,
    )
def test_mean_absolute_percentage_error(global_random_seed):
random_number_generator = np.random.RandomState(global_random_seed)
y_true = random_number_generator.exponential(size=100)
y_pred = 1.2 * y_true
assert mean_absolute_percentage_error(y_true, y_pred) == pytest.approx(0.2)
@pytest.mark.parametrize(
"distribution", ["normal", "lognormal", "exponential", "uniform"]
)
@pytest.mark.parametrize("target_quantile", [0.05, 0.5, 0.75])
def test_mean_pinball_loss_on_constant_predictions(
distribution, target_quantile, global_random_seed
):
if not hasattr(np, "quantile"):
pytest.skip(
"This test requires a more recent version of numpy "
"with support for np.quantile."
)
# Check that the pinball loss is minimized by the empirical quantile.
n_samples = 3000
rng = np.random.RandomState(global_random_seed)
data = getattr(rng, distribution)(size=n_samples)
# Compute the best possible pinball loss for any constant predictor:
best_pred = np.quantile(data, target_quantile)
best_constant_pred = np.full(n_samples, fill_value=best_pred)
best_pbl = mean_pinball_loss(data, best_constant_pred, alpha=target_quantile)
# Evaluate the loss on a grid of quantiles
candidate_predictions = np.quantile(data, np.linspace(0, 1, 100))
for pred in candidate_predictions:
# Compute the pinball loss of a constant predictor:
constant_pred = np.full(n_samples, fill_value=pred)
pbl = mean_pinball_loss(data, constant_pred, alpha=target_quantile)
# Check that the loss of this constant predictor is greater or equal
# than the loss of using the optimal quantile (up to machine
# precision):
assert pbl >= best_pbl - np.finfo(np.float64).eps
# Check that the value of the pinball loss matches the analytical
# formula.
expected_pbl = (pred - data[data < pred]).sum() * (1 - target_quantile) + (
data[data >= pred] - pred
).sum() * target_quantile
expected_pbl /= n_samples
assert_almost_equal(expected_pbl, pbl)
# Check that we can actually recover the target_quantile by minimizing the
# pinball loss w.r.t. the constant prediction quantile.
def objective_func(x):
constant_pred = np.full(n_samples, fill_value=x)
return mean_pinball_loss(data, constant_pred, alpha=target_quantile)
result = optimize.minimize(objective_func, data.mean())
assert result.success
# The minimum is not unique with limited data, hence the large tolerance.
# For the normal distribution and the 0.5 quantile, the expected result is close to
# 0, hence the additional use of absolute tolerance.
assert_allclose(result.x, best_pred, rtol=1e-1, atol=1e-3)
assert result.fun == pytest.approx(best_pbl)
def test_dummy_quantile_parameter_tuning(global_random_seed):
# Integration test to check that it is possible to use the pinball loss to
# tune the hyperparameter of a quantile regressor. This is conceptually
# similar to the previous test but using the scikit-learn estimator and
# scoring API instead.
n_samples = 1000
rng = np.random.RandomState(global_random_seed)
X = rng.normal(size=(n_samples, 5)) # Ignored
y = rng.exponential(size=n_samples)
all_quantiles = [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]
for alpha in all_quantiles:
neg_mean_pinball_loss = make_scorer(
mean_pinball_loss,
alpha=alpha,
greater_is_better=False,
)
regressor = DummyRegressor(strategy="quantile", quantile=0.25)
grid_search = GridSearchCV(
regressor,
param_grid=dict(quantile=all_quantiles),
scoring=neg_mean_pinball_loss,
).fit(X, y)
assert grid_search.best_params_["quantile"] == pytest.approx(alpha)
def test_pinball_loss_relation_with_mae(global_random_seed):
# Test that mean_pinball loss with alpha=0.5 if half of mean absolute error
rng = np.random.RandomState(global_random_seed)
n = 100
y_true = rng.normal(size=n)
y_pred = y_true.copy() + rng.uniform(n)
assert (
mean_absolute_error(y_true, y_pred)
== mean_pinball_loss(y_true, y_pred, alpha=0.5) * 2
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/tests/test_pairwise.py | sklearn/metrics/tests/test_pairwise.py | import warnings
from types import GeneratorType
import numpy as np
import pytest
from numpy import linalg
from scipy.sparse import issparse
from scipy.spatial.distance import (
cdist,
cityblock,
cosine,
minkowski,
pdist,
squareform,
)
from sklearn import config_context
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.pairwise import (
PAIRED_DISTANCES,
PAIRWISE_BOOLEAN_FUNCTIONS,
PAIRWISE_DISTANCE_FUNCTIONS,
PAIRWISE_KERNEL_FUNCTIONS,
_euclidean_distances_upcast,
additive_chi2_kernel,
check_paired_arrays,
check_pairwise_arrays,
chi2_kernel,
cosine_distances,
cosine_similarity,
euclidean_distances,
haversine_distances,
laplacian_kernel,
linear_kernel,
manhattan_distances,
nan_euclidean_distances,
paired_cosine_distances,
paired_distances,
paired_euclidean_distances,
paired_manhattan_distances,
pairwise_distances,
pairwise_distances_argmin,
pairwise_distances_argmin_min,
pairwise_distances_chunked,
pairwise_kernels,
polynomial_kernel,
rbf_kernel,
sigmoid_kernel,
)
from sklearn.preprocessing import normalize
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
get_namespace,
xpx,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.fixes import (
BSR_CONTAINERS,
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
)
from sklearn.utils.parallel import Parallel, delayed
def test_pairwise_distances_for_dense_data(global_dtype):
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4)).astype(global_dtype, copy=False)
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_allclose(S, S2)
assert S.dtype == S2.dtype == global_dtype
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4)).astype(global_dtype, copy=False)
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_allclose(S, S2)
assert S.dtype == S2.dtype == global_dtype
# Check to ensure NaNs work with pairwise_distances.
X_masked = rng.random_sample((5, 4)).astype(global_dtype, copy=False)
Y_masked = rng.random_sample((2, 4)).astype(global_dtype, copy=False)
X_masked[0, 0] = np.nan
Y_masked[0, 0] = np.nan
S_masked = pairwise_distances(X_masked, Y_masked, metric="nan_euclidean")
S2_masked = nan_euclidean_distances(X_masked, Y_masked)
assert_allclose(S_masked, S2_masked)
assert S_masked.dtype == S2_masked.dtype == global_dtype
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_allclose(S, S2)
assert S.dtype == S2.dtype == global_dtype
# Test haversine distance
# The data should be valid latitude and longitude
# haversine converts to float64 currently so we don't check dtypes.
X = rng.random_sample((5, 2)).astype(global_dtype, copy=False)
X[:, 0] = (X[:, 0] - 0.5) * 2 * np.pi / 2
X[:, 1] = (X[:, 1] - 0.5) * 2 * np.pi
S = pairwise_distances(X, metric="haversine")
S2 = haversine_distances(X)
assert_allclose(S, S2)
# Test haversine distance, with Y != X
Y = rng.random_sample((2, 2)).astype(global_dtype, copy=False)
Y[:, 0] = (Y[:, 0] - 0.5) * 2 * np.pi / 2
Y[:, 1] = (Y[:, 1] - 0.5) * 2 * np.pi
S = pairwise_distances(X, Y, metric="haversine")
S2 = haversine_distances(X, Y)
assert_allclose(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
# The metric functions from scipy converts to float64 so we don't check the dtypes.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert S.shape[0] == S.shape[1]
assert S.shape[0] == X.shape[0]
assert_allclose(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert S.shape[0] == X.shape[0]
assert S.shape[1] == Y.shape[0]
assert_allclose(S, S2)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert S.shape[0] == X.shape[0]
assert S.shape[1] == Y.shape[0]
assert_allclose(S, S2)
@pytest.mark.parametrize(
"array_namespace, device, dtype_name",
yield_namespace_device_dtype_combinations(),
ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("metric", ["cosine", "euclidean", "manhattan"])
def test_pairwise_distances_array_api(array_namespace, device, dtype_name, metric):
# Test array API support in pairwise_distances.
xp = _array_api_for_tests(array_namespace, device)
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X_np = rng.random_sample((5, 4)).astype(dtype_name, copy=False)
Y_np = rng.random_sample((5, 4)).astype(dtype_name, copy=False)
X_xp = xp.asarray(X_np, device=device)
Y_xp = xp.asarray(Y_np, device=device)
with config_context(array_api_dispatch=True):
# Test with Y=None
D_xp = pairwise_distances(X_xp, metric=metric)
D_xp_np = _convert_to_numpy(D_xp, xp=xp)
assert get_namespace(D_xp)[0].__name__ == xp.__name__
assert D_xp.device == X_xp.device
assert D_xp.dtype == X_xp.dtype
D_np = pairwise_distances(X_np, metric=metric)
assert_allclose(D_xp_np, D_np)
# Test with Y=Y_np/Y_xp
D_xp = pairwise_distances(X_xp, Y=Y_xp, metric=metric)
D_xp_np = _convert_to_numpy(D_xp, xp=xp)
assert get_namespace(D_xp)[0].__name__ == xp.__name__
assert D_xp.device == X_xp.device
assert D_xp.dtype == X_xp.dtype
D_np = pairwise_distances(X_np, Y=Y_np, metric=metric)
assert_allclose(D_xp_np, D_np)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
@pytest.mark.parametrize("bsr_container", BSR_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pairwise_distances_for_sparse_data(
coo_container, csc_container, bsr_container, csr_container, global_dtype
):
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4)).astype(global_dtype, copy=False)
Y = rng.random_sample((2, 4)).astype(global_dtype, copy=False)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_container(X)
Y_sparse = csr_container(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_allclose(S, S2)
assert S.dtype == S2.dtype == global_dtype
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_allclose(S, S2)
assert S.dtype == S2.dtype == global_dtype
S = pairwise_distances(X_sparse, csc_container(Y), metric="manhattan")
S2 = manhattan_distances(bsr_container(X), coo_container(Y))
assert_allclose(S, S2)
if global_dtype == np.float64:
assert S.dtype == S2.dtype == global_dtype
else:
# TODO Fix manhattan_distances to preserve dtype.
# currently pairwise_distances uses manhattan_distances but converts the result
# back to the input dtype
with pytest.raises(AssertionError):
assert S.dtype == S2.dtype == global_dtype
S2 = manhattan_distances(X, Y)
assert_allclose(S, S2)
if global_dtype == np.float64:
assert S.dtype == S2.dtype == global_dtype
else:
# TODO Fix manhattan_distances to preserve dtype.
# currently pairwise_distances uses manhattan_distances but converts the result
# back to the input dtype
with pytest.raises(AssertionError):
assert S.dtype == S2.dtype == global_dtype
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_allclose(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_allclose(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
with pytest.raises(TypeError):
pairwise_distances(X_sparse, metric="minkowski")
with pytest.raises(TypeError):
pairwise_distances(X, Y_sparse, metric="minkowski")
# Some scipy metrics are deprecated (depending on the scipy version) but we
# still want to test them.
@ignore_warnings(category=DeprecationWarning)
@pytest.mark.parametrize("metric", PAIRWISE_BOOLEAN_FUNCTIONS)
def test_pairwise_boolean_distance(metric):
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
# ignore conversion to boolean in pairwise_distances
with ignore_warnings(category=DataConversionWarning):
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
xpx.nan_to_num(res, fill_value=0)
assert np.sum(res != 0) == 0
# non-boolean arrays are converted to boolean for boolean
# distance metrics with a data conversion warning
msg = "Data was converted to boolean for metric %s" % metric
with pytest.warns(DataConversionWarning, match=msg):
pairwise_distances(X, metric=metric)
# Check that the warning is raised if X is boolean by Y is not boolean:
with pytest.warns(DataConversionWarning, match=msg):
pairwise_distances(X.astype(bool), Y=Y, metric=metric)
# Check that no warning is raised if X is already boolean and Y is None:
with warnings.catch_warnings():
warnings.simplefilter("error", DataConversionWarning)
pairwise_distances(X.astype(bool), metric=metric)
def test_no_data_conversion_warning():
# No warnings issued if metric is not a boolean distance function
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
with warnings.catch_warnings():
warnings.simplefilter("error", DataConversionWarning)
pairwise_distances(X, metric="minkowski")
@pytest.mark.parametrize("func", [pairwise_distances, pairwise_kernels])
def test_pairwise_precomputed(func):
# Test correct shape
with pytest.raises(ValueError, match=".* shape .*"):
func(np.zeros((5, 3)), metric="precomputed")
# with two args
with pytest.raises(ValueError, match=".* shape .*"):
func(np.zeros((5, 3)), np.zeros((4, 4)), metric="precomputed")
# even if shape[1] agrees (although thus second arg is spurious)
with pytest.raises(ValueError, match=".* shape .*"):
func(np.zeros((5, 3)), np.zeros((4, 3)), metric="precomputed")
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert S is S2
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert S is S2
# Test always returns float dtype
S = func(np.array([[1]], dtype="int"), metric="precomputed")
assert "f" == S.dtype.kind
# Test converts list to array-like
S = func([[1.0]], metric="precomputed")
assert isinstance(S, np.ndarray)
def test_pairwise_precomputed_non_negative():
# Test non-negative values
with pytest.raises(ValueError, match=".* non-negative values.*"):
pairwise_distances(np.full((5, 5), -1), metric="precomputed")
_minkowski_kwds = {"w": np.arange(1, 5).astype("double", copy=False), "p": 1}
def callable_rbf_kernel(x, y, **kwds):
xp, _ = get_namespace(x, y)
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(
xpx.atleast_nd(x, ndim=2, xp=xp), xpx.atleast_nd(y, ndim=2, xp=xp), **kwds
)
# unpack the output since this is a scalar packed in a 0-dim array
# Note below is array API version of numpys `item()`
if K.ndim > 0:
K_flat = xp.reshape(K, (-1,))
if K_flat.shape == (1,):
return float(K_flat[0])
raise ValueError("can only convert an array of size 1 to a Python scalar")
@pytest.mark.parametrize(
"func, metric, kwds",
[
(pairwise_distances, "euclidean", {}),
(
pairwise_distances,
minkowski,
_minkowski_kwds,
),
(
pairwise_distances,
"minkowski",
_minkowski_kwds,
),
(pairwise_kernels, "polynomial", {"degree": 1}),
(pairwise_kernels, callable_rbf_kernel, {"gamma": 0.1}),
],
)
@pytest.mark.parametrize("dtype", [np.float64, np.float32, int])
def test_pairwise_parallel(func, metric, kwds, dtype):
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((5, 4)), dtype=dtype)
Y = np.array(5 * rng.random_sample((3, 4)), dtype=dtype)
S = func(X, metric=metric, n_jobs=1, **kwds)
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_allclose(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_allclose(S, S2)
@pytest.mark.parametrize(
"array_namespace, device, dtype_name",
yield_namespace_device_dtype_combinations(),
ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize(
"func, metric, kwds",
[
(pairwise_distances, "euclidean", {}),
(pairwise_distances, "manhattan", {}),
(pairwise_kernels, "polynomial", {"degree": 1}),
(pairwise_kernels, callable_rbf_kernel, {"gamma": 0.1}),
(pairwise_kernels, "laplacian", {"gamma": 0.1}),
],
)
def test_pairwise_parallel_array_api(
func, metric, kwds, array_namespace, device, dtype_name
):
xp = _array_api_for_tests(array_namespace, device)
rng = np.random.RandomState(0)
X_np = np.array(5 * rng.random_sample((5, 4)), dtype=dtype_name)
Y_np = np.array(5 * rng.random_sample((3, 4)), dtype=dtype_name)
X_xp = xp.asarray(X_np, device=device)
Y_xp = xp.asarray(Y_np, device=device)
with config_context(array_api_dispatch=True):
for y_val in (None, "not none"):
Y_xp = None if y_val is None else Y_xp
Y_np = None if y_val is None else Y_np
n_job1_xp = func(X_xp, Y_xp, metric=metric, n_jobs=1, **kwds)
n_job1_xp_np = _convert_to_numpy(n_job1_xp, xp=xp)
assert get_namespace(n_job1_xp)[0].__name__ == xp.__name__
assert n_job1_xp.device == X_xp.device
assert n_job1_xp.dtype == X_xp.dtype
n_job2_xp = func(X_xp, Y_xp, metric=metric, n_jobs=2, **kwds)
n_job2_xp_np = _convert_to_numpy(n_job2_xp, xp=xp)
assert get_namespace(n_job2_xp)[0].__name__ == xp.__name__
assert n_job2_xp.device == X_xp.device
assert n_job2_xp.dtype == X_xp.dtype
n_job2_np = func(X_np, metric=metric, n_jobs=2, **kwds)
assert_allclose(n_job1_xp_np, n_job2_xp_np)
assert_allclose(n_job2_xp_np, n_job2_np)
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert pairwise_distances([[1.0]], metric=lambda x, y: 5)[0, 0] == 5
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
@pytest.mark.parametrize(
"metric",
["rbf", "laplacian", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pairwise_kernels(metric, csr_container):
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_allclose(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_allclose(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_allclose(K1, K2)
# Test with sparse X and Y
X_sparse = csr_container(X)
Y_sparse = csr_container(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
return
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_allclose(K1, K2)
@pytest.mark.parametrize(
"array_namespace, device, dtype_name",
yield_namespace_device_dtype_combinations(),
ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize(
"metric",
["rbf", "sigmoid", "polynomial", "linear", "laplacian", "chi2", "additive_chi2"],
)
def test_pairwise_kernels_array_api(metric, array_namespace, device, dtype_name):
# Test array API support in pairwise_kernels.
xp = _array_api_for_tests(array_namespace, device)
rng = np.random.RandomState(0)
X_np = 10 * rng.random_sample((5, 4))
X_np = X_np.astype(dtype_name, copy=False)
Y_np = 10 * rng.random_sample((2, 4))
Y_np = Y_np.astype(dtype_name, copy=False)
X_xp = xp.asarray(X_np, device=device)
Y_xp = xp.asarray(Y_np, device=device)
with config_context(array_api_dispatch=True):
# Test with Y=None
K_xp = pairwise_kernels(X_xp, metric=metric)
K_xp_np = _convert_to_numpy(K_xp, xp=xp)
assert get_namespace(K_xp)[0].__name__ == xp.__name__
assert K_xp.device == X_xp.device
assert K_xp.dtype == X_xp.dtype
K_np = pairwise_kernels(X_np, metric=metric)
assert_allclose(K_xp_np, K_np)
# Test with Y=Y_np/Y_xp
K_xp = pairwise_kernels(X_xp, Y=Y_xp, metric=metric)
K_xp_np = _convert_to_numpy(K_xp, xp=xp)
assert get_namespace(K_xp)[0].__name__ == xp.__name__
assert K_xp.device == X_xp.device
assert K_xp.dtype == X_xp.dtype
K_np = pairwise_kernels(X_np, Y=Y_np, metric=metric)
assert_allclose(K_xp_np, K_np)
def test_pairwise_kernels_callable():
# Test the pairwise_kernels helper function
# with a callable function, with given keywords.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
metric = callable_rbf_kernel
kwds = {"gamma": 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_allclose(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_allclose(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_allclose(K, K2)
with pytest.raises(TypeError):
pairwise_kernels(X, Y, metric="rbf", **params)
@pytest.mark.parametrize("metric, func", PAIRED_DISTANCES.items())
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_paired_distances(metric, func, csr_container):
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_allclose(S, S2)
S3 = func(csr_container(X), csr_container(Y))
assert_allclose(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_allclose(distances, S)
def test_paired_distances_callable(global_dtype):
# Test the paired_distance helper function
# with the callable implementation
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4)).astype(global_dtype, copy=False)
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4)).astype(global_dtype, copy=False)
S = paired_distances(X, Y, metric="manhattan")
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_allclose(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
with pytest.raises(ValueError):
paired_distances(X, Y)
# XXX: thread-safety bug tracked at:
# https://github.com/scikit-learn/scikit-learn/issues/31884
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("dok_container", DOK_CONTAINERS)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_pairwise_distances_argmin_min(dok_container, csr_container, global_dtype):
# Check pairwise minimum distances computation for any metric
X = np.asarray([[0], [1]], dtype=global_dtype)
Y = np.asarray([[-2], [3]], dtype=global_dtype)
Xsp = dok_container(X)
Ysp = csr_container(Y, dtype=global_dtype)
expected_idx = [0, 1]
expected_vals = [2, 2]
expected_vals_sq = [4, 4]
# euclidean metric
idx, vals = pairwise_distances_argmin_min(X, Y, metric="euclidean")
idx2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_allclose(idx, expected_idx)
assert_allclose(idx2, expected_idx)
assert_allclose(vals, expected_vals)
# sparse matrix case
idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
idxsp2 = pairwise_distances_argmin(Xsp, Ysp, metric="euclidean")
assert_allclose(idxsp, expected_idx)
assert_allclose(idxsp2, expected_idx)
assert_allclose(valssp, expected_vals)
# We don't want np.matrix here
assert type(idxsp) == np.ndarray
assert type(valssp) == np.ndarray
# Squared Euclidean metric
idx, vals = pairwise_distances_argmin_min(X, Y, metric="sqeuclidean")
idx2, vals2 = pairwise_distances_argmin_min(
X, Y, metric="euclidean", metric_kwargs={"squared": True}
)
idx3 = pairwise_distances_argmin(X, Y, metric="sqeuclidean")
idx4 = pairwise_distances_argmin(
X, Y, metric="euclidean", metric_kwargs={"squared": True}
)
assert_allclose(vals, expected_vals_sq)
assert_allclose(vals2, expected_vals_sq)
assert_allclose(idx, expected_idx)
assert_allclose(idx2, expected_idx)
assert_allclose(idx3, expected_idx)
assert_allclose(idx4, expected_idx)
# Non-euclidean scikit-learn metric
idx, vals = pairwise_distances_argmin_min(X, Y, metric="manhattan")
idx2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_allclose(idx, expected_idx)
assert_allclose(idx2, expected_idx)
assert_allclose(vals, expected_vals)
# sparse matrix case
idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
idxsp2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_allclose(idxsp, expected_idx)
assert_allclose(idxsp2, expected_idx)
assert_allclose(valssp, expected_vals)
# Non-euclidean Scipy distance (callable)
idx, vals = pairwise_distances_argmin_min(
X, Y, metric=minkowski, metric_kwargs={"p": 2}
)
assert_allclose(idx, expected_idx)
assert_allclose(vals, expected_vals)
# Non-euclidean Scipy distance (string)
idx, vals = pairwise_distances_argmin_min(
X, Y, metric="minkowski", metric_kwargs={"p": 2}
)
assert_allclose(idx, expected_idx)
assert_allclose(vals, expected_vals)
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan"
)
assert_allclose(dist_orig_ind, dist_chunked_ind, rtol=1e-7)
assert_allclose(dist_orig_val, dist_chunked_val, rtol=1e-7)
# Changing the axis and permuting datasets must give the same results
argmin_0, dist_0 = pairwise_distances_argmin_min(X, Y, axis=0)
argmin_1, dist_1 = pairwise_distances_argmin_min(Y, X, axis=1)
assert_allclose(dist_0, dist_1)
assert_array_equal(argmin_0, argmin_1)
argmin_0, dist_0 = pairwise_distances_argmin_min(X, X, axis=0)
argmin_1, dist_1 = pairwise_distances_argmin_min(X, X, axis=1)
assert_allclose(dist_0, dist_1)
assert_array_equal(argmin_0, argmin_1)
# Changing the axis and permuting datasets must give the same results
argmin_0 = pairwise_distances_argmin(X, Y, axis=0)
argmin_1 = pairwise_distances_argmin(Y, X, axis=1)
assert_array_equal(argmin_0, argmin_1)
argmin_0 = pairwise_distances_argmin(X, X, axis=0)
argmin_1 = pairwise_distances_argmin(X, X, axis=1)
assert_array_equal(argmin_0, argmin_1)
# F-contiguous arrays must be supported and must return identical results.
argmin_C_contiguous = pairwise_distances_argmin(X, Y)
argmin_F_contiguous = pairwise_distances_argmin(
np.asfortranarray(X), np.asfortranarray(Y)
)
assert_array_equal(argmin_C_contiguous, argmin_F_contiguous)
def _reduce_func(dist, start):
return dist[:, :100]
def test_pairwise_distances_chunked_reduce(global_dtype):
rng = np.random.RandomState(0)
X = rng.random_sample((400, 4)).astype(global_dtype, copy=False)
# Reduced Euclidean distance
S = pairwise_distances(X)[:, :100]
S_chunks = pairwise_distances_chunked(
X, None, reduce_func=_reduce_func, working_memory=2**-16
)
assert isinstance(S_chunks, GeneratorType)
S_chunks = list(S_chunks)
assert len(S_chunks) > 1
assert S_chunks[0].dtype == X.dtype
# atol is for diagonal where S is explicitly zeroed on the diagonal
assert_allclose(np.vstack(S_chunks), S, atol=1e-7)
def test_pairwise_distances_chunked_reduce_none(global_dtype):
# check that the reduce func is allowed to return None
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4)).astype(global_dtype, copy=False)
S_chunks = pairwise_distances_chunked(
X, None, reduce_func=lambda dist, start: None, working_memory=2**-16
)
assert isinstance(S_chunks, GeneratorType)
S_chunks = list(S_chunks)
assert len(S_chunks) > 1
assert all(chunk is None for chunk in S_chunks)
@pytest.mark.parametrize(
"good_reduce",
[
lambda D, start: list(D),
lambda D, start: np.array(D),
lambda D, start: (list(D), list(D)),
]
+ [
lambda D, start, scipy_csr_type=scipy_csr_type: scipy_csr_type(D)
for scipy_csr_type in CSR_CONTAINERS
]
+ [
lambda D, start, scipy_dok_type=scipy_dok_type: (
scipy_dok_type(D),
np.array(D),
list(D),
)
for scipy_dok_type in DOK_CONTAINERS
],
)
def test_pairwise_distances_chunked_reduce_valid(good_reduce):
X = np.arange(10).reshape(-1, 1)
S_chunks = pairwise_distances_chunked(
X, None, reduce_func=good_reduce, working_memory=64
)
next(S_chunks)
@pytest.mark.parametrize(
("bad_reduce", "err_type", "message"),
[
(
lambda D, s: np.concatenate([D, D[-1:]]),
ValueError,
r"length 11\..* input: 10\.",
),
(
lambda D, s: (D, np.concatenate([D, D[-1:]])),
ValueError,
r"length \(10, 11\)\..* input: 10\.",
),
(lambda D, s: (D[:9], D), ValueError, r"length \(9, 10\)\..* input: 10\."),
(
lambda D, s: 7,
TypeError,
r"returned 7\. Expected sequence\(s\) of length 10\.",
),
(
lambda D, s: (7, 8),
TypeError,
r"returned \(7, 8\)\. Expected sequence\(s\) of length 10\.",
),
(
lambda D, s: (np.arange(10), 9),
TypeError,
r", 9\)\. Expected sequence\(s\) of length 10\.",
),
],
)
def test_pairwise_distances_chunked_reduce_invalid(
global_dtype, bad_reduce, err_type, message
):
X = np.arange(10).reshape(-1, 1).astype(global_dtype, copy=False)
S_chunks = pairwise_distances_chunked(
X, None, reduce_func=bad_reduce, working_memory=64
)
with pytest.raises(err_type, match=message):
next(S_chunks)
def check_pairwise_distances_chunked(X, Y, working_memory, metric="euclidean"):
gen = pairwise_distances_chunked(X, Y, working_memory=working_memory, metric=metric)
assert isinstance(gen, GeneratorType)
blockwise_distances = list(gen)
Y = X if Y is None else Y
min_block_mib = len(Y) * 8 * 2**-20
for block in blockwise_distances:
memory_used = block.nbytes
assert memory_used <= max(working_memory, min_block_mib) * 2**20
blockwise_distances = np.vstack(blockwise_distances)
S = pairwise_distances(X, Y, metric=metric)
assert_allclose(blockwise_distances, S, atol=1e-7)
@pytest.mark.parametrize("metric", ("euclidean", "l2", "sqeuclidean"))
def test_pairwise_distances_chunked_diagonal(metric, global_dtype):
rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 10), scale=1e10).astype(global_dtype, copy=False)
chunks = list(pairwise_distances_chunked(X, working_memory=1, metric=metric))
assert len(chunks) > 1
assert_allclose(np.diag(np.vstack(chunks)), 0, rtol=1e-10)
@pytest.mark.parametrize("metric", ("euclidean", "l2", "sqeuclidean"))
def test_parallel_pairwise_distances_diagonal(metric, global_dtype):
rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 10), scale=1e10).astype(global_dtype, copy=False)
distances = pairwise_distances(X, metric=metric, n_jobs=2)
assert_allclose(np.diag(distances), 0, atol=1e-10)
@pytest.mark.filterwarnings("ignore:Could not adhere to working_memory config")
def test_pairwise_distances_chunked(global_dtype):
    # Exercise the pairwise_distances_chunked helper over a range of inputs.
    rng = np.random.RandomState(0)

    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((200, 4)).astype(global_dtype, copy=False)
    check_pairwise_distances_chunked(X, None, working_memory=1, metric="euclidean")

    # Very small working-memory budgets must still produce correct results.
    for power in range(-16, 0):
        check_pairwise_distances_chunked(
            X, None, working_memory=2**power, metric="euclidean"
        )

    # X given as a plain Python list.
    check_pairwise_distances_chunked(
        X.tolist(), None, working_memory=1, metric="euclidean"
    )

    # Euclidean distance with Y != X, as arrays and as lists.
    Y = rng.random_sample((100, 4)).astype(global_dtype, copy=False)
    check_pairwise_distances_chunked(X, Y, working_memory=1, metric="euclidean")
    check_pairwise_distances_chunked(
        X.tolist(), Y.tolist(), working_memory=1, metric="euclidean"
    )

    # An absurdly large working_memory yields a single chunk.
    check_pairwise_distances_chunked(X, Y, working_memory=10000, metric="euclidean")

    # "cityblock" uses scikit-learn metric, cityblock (function) is
    # scipy.spatial.
    check_pairwise_distances_chunked(X, Y, working_memory=1, metric="cityblock")

    # A precomputed matrix is yielded back in a single shot, identically.
    D = pairwise_distances(X)
    chunk_iter = pairwise_distances_chunked(
        D, working_memory=2**-16, metric="precomputed"
    )
    assert isinstance(chunk_iter, GeneratorType)
    assert next(chunk_iter) is D
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/_supervised.py | sklearn/metrics/cluster/_supervised.py | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from math import log
from numbers import Real
import numpy as np
from scipy import sparse as sp
from sklearn.metrics.cluster._expected_mutual_info_fast import (
expected_mutual_information,
)
from sklearn.utils import deprecated
from sklearn.utils._array_api import (
_max_precision_float_dtype,
get_namespace_and_device,
)
from sklearn.utils._param_validation import (
Hidden,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_array, check_consistent_length
def check_clusterings(labels_true, labels_pred):
    """Check that the labels arrays are 1D and of same dimension.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        The true labels.

    labels_pred : array-like of shape (n_samples,)
        The predicted labels.

    Returns
    -------
    labels_true, labels_pred : ndarray of shape (n_samples,)
        The validated label arrays.
    """

    def _validate(labels):
        # Accept 1D label vectors (possibly empty) of any dtype.
        return check_array(
            labels,
            ensure_2d=False,
            ensure_min_samples=0,
            dtype=None,
        )

    labels_true = _validate(labels_true)
    labels_pred = _validate(labels_pred)

    # Warn (but do not fail) when continuous values are handed to a
    # clustering metric, which expects discrete cluster assignments.
    type_label = type_of_target(labels_true)
    type_pred = type_of_target(labels_pred)
    if "continuous" in (type_pred, type_label):
        msg = (
            "Clustering metrics expects discrete values but received"
            f" {type_label} values for label, and {type_pred} values "
            "for target"
        )
        warnings.warn(msg, UserWarning)

    # Shape checks.
    if labels_true.ndim != 1:
        raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,))
    if labels_pred.ndim != 1:
        raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
    check_consistent_length(labels_true, labels_pred)

    return labels_true, labels_pred
def _generalized_average(U, V, average_method):
"""Return a particular mean of two numbers."""
if average_method == "min":
return min(U, V)
elif average_method == "geometric":
return np.sqrt(U * V)
elif average_method == "arithmetic":
return np.mean([U, V])
elif average_method == "max":
return max(U, V)
else:
raise ValueError(
"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'"
)
@validate_params(
    {
        "labels_true": ["array-like", None],
        "labels_pred": ["array-like", None],
        "eps": [Interval(Real, 0, None, closed="left"), None],
        "sparse": ["boolean"],
        "dtype": "no_validation",  # delegate the validation to SciPy
    },
    prefer_skip_nested_validation=True,
)
def contingency_matrix(
    labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
):
    """Build a contingency matrix describing the relationship between labels.

    Read more in the :ref:`User Guide <contingency_matrix>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    eps : float, default=None
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    sparse : bool, default=False
        If `True`, return a sparse CSR contingency matrix. If `eps` is not
        `None` and `sparse` is `True` will raise ValueError.

        .. versionadded:: 0.18

    dtype : numeric type, default=np.int64
        Output dtype. Ignored if `eps` is not `None`.

        .. versionadded:: 0.24

    Returns
    -------
    contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer unless set
        otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
        will be float.
        Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``.

    Examples
    --------
    >>> from sklearn.metrics.cluster import contingency_matrix
    >>> labels_true = [0, 0, 1, 1, 2, 2]
    >>> labels_pred = [1, 0, 2, 1, 0, 2]
    >>> contingency_matrix(labels_true, labels_pred)
    array([[1, 1, 0],
           [0, 1, 1],
           [1, 0, 1]])
    """
    if eps is not None and sparse:
        raise ValueError("Cannot set 'eps' when sparse=True")

    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)

    # Histogram over (class, cluster) index pairs. coo_matrix accumulates
    # duplicate coordinates, which is currently faster than histogram2d for
    # consecutive-integer bins.
    contingency = sp.coo_matrix(
        (np.ones(class_idx.shape[0]), (class_idx, cluster_idx)),
        shape=(classes.shape[0], clusters.shape[0]),
        dtype=dtype,
    )

    if sparse:
        contingency = contingency.tocsr()
        contingency.sum_duplicates()
        return contingency

    contingency = contingency.toarray()
    if eps is not None:
        # Not +=: contingency has an integer dtype while eps may be float.
        contingency = contingency + eps
    return contingency
# clustering measures
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def pair_confusion_matrix(labels_true, labels_pred):
    """Pair confusion matrix arising from two clusterings.

    The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
    between two clusterings by considering all pairs of samples and counting
    pairs that are assigned into the same or into different clusters under
    the true and predicted clusterings [1]_.

    Considering a pair of samples that is clustered together a positive pair,
    then as in binary classification the count of true negatives is
    :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
    :math:`C_{11}` and false positives is :math:`C_{01}`.

    Read more in the :ref:`User Guide <pair_confusion_matrix>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    C : ndarray of shape (2, 2), dtype=np.int64
        The contingency matrix.

    See Also
    --------
    sklearn.metrics.rand_score : Rand Score.
    sklearn.metrics.adjusted_rand_score : Adjusted Rand Score.
    sklearn.metrics.adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`

    Examples
    --------
    Perfectly matching labelings have all non-zero entries on the
    diagonal regardless of actual label values:

    >>> from sklearn.metrics.cluster import pair_confusion_matrix
    >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
    array([[8, 0],
           [0, 4]]...

    Labelings that assign all classes members to the same clusters
    are complete but may be not always pure, hence penalized, and
    have some off-diagonal non-zero entries:

    >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
    array([[8, 2],
           [0, 2]]...

    Note that the matrix is not symmetric.
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = np.int64(labels_true.shape[0])

    # All ordered-pair counts can be derived from the contingency table.
    contingency = contingency_matrix(
        labels_true, labels_pred, sparse=True, dtype=np.int64
    )
    row_sums = np.ravel(contingency.sum(axis=1))
    col_sums = np.ravel(contingency.sum(axis=0))
    sq_sum = (contingency.data**2).sum()

    pairs = np.empty((2, 2), dtype=np.int64)
    pairs[1, 1] = sq_sum - n_samples
    pairs[0, 1] = contingency.dot(col_sums).sum() - sq_sum
    pairs[1, 0] = contingency.transpose().dot(row_sums).sum() - sq_sum
    pairs[0, 0] = n_samples**2 - pairs[0, 1] - pairs[1, 0] - sq_sum
    return pairs
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def rand_score(labels_true, labels_pred):
    """Rand index.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings [1]_ [2]_.

    The raw RI score [3]_ is:

    .. code-block:: text

        RI = (number of agreeing pairs) / (number of pairs)

    Read more in the :ref:`User Guide <rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    RI : float
        Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for
        perfect match.

    See Also
    --------
    adjusted_rand_score: Adjusted Rand Score.
    adjusted_mutual_info_score: Adjusted Mutual Information.

    References
    ----------
    .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions."
       Journal of Classification 2, 193–218 (1985).
       <10.1007/BF01908075>`.

    .. [2] `Wikipedia: Simple Matching Coefficient
        <https://en.wikipedia.org/wiki/Simple_matching_coefficient>`_

    .. [3] `Wikipedia: Rand Index <https://en.wikipedia.org/wiki/Rand_index>`_

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

    >>> from sklearn.metrics.cluster import rand_score
    >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized:

    >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1])
    0.83
    """
    pairs = pair_confusion_matrix(labels_true, labels_pred)
    agreements = pairs.diagonal().sum()
    total = pairs.sum()
    if agreements == total or total == 0:
        # Special limit cases: no clustering since the data is not split;
        # or trivial clustering where each document is assigned a unique
        # cluster. These are perfect matches hence return 1.0.
        return 1.0
    return float(agreements / total)
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation). The adjusted Rand index is bounded below by -0.5 for
    especially discordant clusterings.

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=int
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=int
        Cluster labels to evaluate.

    Returns
    -------
    ARI : float
        Similarity score between -0.5 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted Mutual Information.

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075

    .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie
      adjusted Rand index, Psychological Methods 2004

    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    .. [Chacon] :doi:`Minimum adjusted Rand index for two clusterings of a given size,
      2022, J. E. Chacón and A. I. Rastrojo <10.1007/s11634-022-00491-w>`

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

    >>> from sklearn.metrics.cluster import adjusted_rand_score
    >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
    1.0
    >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized::

    >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])
    0.57

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

    >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])
    0.57

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

    >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
    0.0

    ARI may take a negative value for especially discordant labelings that
    are a worse choice than the expected value of random labels::

    >>> adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1])
    -0.5

    See :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py`
    for a more detailed example.
    """
    pairs = pair_confusion_matrix(labels_true, labels_pred)
    # Convert to Python integers to avoid int64 overflow in the products
    # below.
    tn = int(pairs[0, 0])
    fp = int(pairs[0, 1])
    fn = int(pairs[1, 0])
    tp = int(pairs[1, 1])

    # Special cases: empty data or full agreement.
    if fn == 0 and fp == 0:
        return 1.0

    numerator = 2.0 * (tp * tn - fn * fp)
    denominator = (tp + fn) * (fn + tn) + (tp + fp) * (fp + tn)
    return numerator / denominator
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "beta": [Interval(Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
    """Compute the homogeneity and completeness and V-Measure scores at once.

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness. V-Measure is identical to
    :func:`normalized_mutual_info_score` with the arithmetic averaging
    method.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    v_measure : float
        Harmonic mean of the first two.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    Examples
    --------
    >>> from sklearn.metrics import homogeneity_completeness_v_measure
    >>> y_true, y_pred = [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 2, 2]
    >>> homogeneity_completeness_v_measure(y_true, y_pred)
    (0.71, 0.771, 0.74)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Degenerate case: no samples at all is treated as a perfect match.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = _entropy(labels_true)
    entropy_K = _entropy(labels_pred)

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    MI = mutual_info_score(None, None, contingency=contingency)

    # A zero-entropy labeling (single class / single cluster) is trivially
    # homogeneous / complete.
    homogeneity = MI / (entropy_C) if entropy_C else 1.0
    completeness = MI / (entropy_K) if entropy_K else 1.0

    # NOTE: renamed from `v_measure_score`, which shadowed the module-level
    # function of the same name inside this scope.
    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        # Weighted harmonic mean of homogeneity and completeness.
        v_measure = (
            (1 + beta)
            * homogeneity
            * completeness
            / (beta * homogeneity + completeness)
        )

    return float(homogeneity), float(completeness), float(v_measure)
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.

    See Also
    --------
    completeness_score : Completeness metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      1.000000
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      1.000000

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0...
    """
    # First element of the (homogeneity, completeness, v_measure) triple.
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return homogeneity
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def completeness_score(labels_true, labels_pred):
    """Compute completeness metric of a cluster labeling given a ground truth.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    v_measure_score : V-Measure (NMI with arithmetic mean option).

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.999

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    # Second element of the (homogeneity, completeness, v_measure) triple.
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return completeness
@validate_params(
    {
        "labels_true": ["array-like"],
        "labels_pred": ["array-like"],
        "beta": [Interval(Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def v_measure_score(labels_true, labels_pred, *, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score` with
    the ``'arithmetic'`` option for averaging.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.

    See Also
    --------
    homogeneity_score : Homogeneity metric of cluster labeling.
    completeness_score : Completeness metric of cluster labeling.
    normalized_mutual_info_score : Normalized Mutual Information.

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      0.8
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.67

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but un-necessary splits harm completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      0.8
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      0.67

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0
    """
    # Third element of the (homogeneity, completeness, v_measure) triple.
    _, _, v_measure = homogeneity_completeness_v_measure(
        labels_true, labels_pred, beta=beta
    )
    return v_measure
@validate_params(
    {
        "labels_true": ["array-like", None],
        "labels_pred": ["array-like", None],
        "contingency": ["array-like", "sparse matrix", None],
    },
    prefer_skip_nested_validation=True,
)
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of the samples
    in cluster :math:`U_i` and :math:`|V_j|` is the number of the
    samples in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
        \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (i.e
    ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the
    same score value. This can be useful to measure the agreement of two
    independent label assignments strategies on the same dataset when the
    real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.

    contingency : {array-like, sparse matrix} of shape \
            (n_classes_true, n_classes_pred), default=None
        A contingency matrix given by the
        :func:`~sklearn.metrics.cluster.contingency_matrix` function. If value
        is ``None``, it will be computed, otherwise the given value is used,
        with ``labels_true`` and ``labels_pred`` ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value, measured in nats using the
        natural logarithm.

    See Also
    --------
    adjusted_mutual_info_score : Adjusted against chance Mutual Information.
    normalized_mutual_info_score : Normalized Mutual Information.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).

    Examples
    --------
    >>> from sklearn.metrics import mutual_info_score
    >>> labels_true = [0, 1, 1, 0, 1, 0]
    >>> labels_pred = [0, 1, 0, 0, 1, 1]
    >>> mutual_info_score(labels_true, labels_pred)
    0.0566
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        contingency = check_array(
            contingency,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[int, np.int32, np.int64],
        )

    # Extract the non-zero cells (zero cells contribute nothing to MI).
    if isinstance(contingency, np.ndarray):
        nz_rows, nz_cols = np.nonzero(contingency)
        nz_vals = contingency[nz_rows, nz_cols]
    else:
        nz_rows, nz_cols, nz_vals = sp.find(contingency)

    total = contingency.sum()
    row_totals = np.ravel(contingency.sum(axis=1))
    col_totals = np.ravel(contingency.sum(axis=0))

    # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e.
    # containing a single cluster, implies MI = 0.
    if row_totals.size == 1 or col_totals.size == 1:
        return 0.0

    log_nz = np.log(nz_vals)
    p_nz = nz_vals / total
    # Only the outer product at non-zero cells is needed; int64 avoids
    # overflow for large counts.
    outer = row_totals.take(nz_rows).astype(np.int64, copy=False) * col_totals.take(
        nz_cols
    ).astype(np.int64, copy=False)
    log_outer = -np.log(outer) + log(row_totals.sum()) + log(col_totals.sum())

    mi = p_nz * (log_nz - log(total)) + p_nz * log_outer
    # Flush tiny negative/positive rounding noise to exactly zero.
    mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
    return float(np.clip(mi.sum(), 0.0, None))
@validate_params(
{
"labels_true": ["array-like"],
"labels_pred": ["array-like"],
"average_method": [StrOptions({"arithmetic", "max", "min", "geometric"})],
},
prefer_skip_nested_validation=True,
)
def adjusted_mutual_info_score(
labels_true, labels_pred, *, average_method="arithmetic"
):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching :math:`U` (``label_true``)
with :math:`V` (``labels_pred``) will return the same score value. This can
be useful to measure the agreement of two independent label assignments
strategies on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/_bicluster.py | sklearn/metrics/cluster/_bicluster.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.utils._param_validation import StrOptions, validate_params
from sklearn.utils.validation import check_array, check_consistent_length
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape.

    Parameters
    ----------
    a, b : tuple (rows, columns)
        Row and column indicator collections for two sets of biclusters.

    Returns
    -------
    a_rows, a_cols, b_rows, b_cols : ndarray
        The validated indicator arrays.
    """
    check_consistent_length(*a)
    check_consistent_length(*b)

    # PEP 8 (E731): use a nested def rather than assigning a lambda to a name.
    def _validate(indicators):
        # ensure_2d=False: indicators are 1D (or 2D stacks of) boolean vectors.
        return check_array(indicators, ensure_2d=False)

    a_rows, a_cols = map(_validate, a)
    b_rows, b_cols = map(_validate, b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the similarity (e.g. Jaccard coefficient) of a's
    bicluster i and b's bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    rows = []
    for i in range(n_a):
        rows.append(
            [similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)]
        )
    return np.array(rows)
@validate_params(
    {
        "a": [tuple],
        "b": [tuple],
        "similarity": [callable, StrOptions({"jaccard"})],
    },
    prefer_skip_nested_validation=True,
)
def consensus_score(a, b, *, similarity="jaccard"):
    """The similarity of two sets of biclusters.
    Similarity between individual biclusters is computed. Then the best
    matching between sets is found by solving a linear sum assignment problem,
    using a modified Jonker-Volgenant algorithm.
    The final score is the sum of similarities divided by the size of
    the larger set.
    Read more in the :ref:`User Guide <biclustering>`.
    Parameters
    ----------
    a : tuple (rows, columns)
        Tuple of row and column indicators for a set of biclusters.
    b : tuple (rows, columns)
        Another set of biclusters like ``a``.
    similarity : 'jaccard' or callable, default='jaccard'
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).
    Returns
    -------
    consensus_score : float
        Consensus score, a non-negative value, sum of similarities
        divided by size of larger set.
    See Also
    --------
    scipy.optimize.linear_sum_assignment : Solve the linear sum assignment problem.
    References
    ----------
    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
    Examples
    --------
    >>> from sklearn.metrics import consensus_score
    >>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
    >>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
    >>> consensus_score(a, b, similarity='jaccard')
    1.0
    """
    # Resolve the string shorthand to the Jaccard similarity helper.
    if similarity == "jaccard":
        similarity = _jaccard
    matrix = _pairwise_similarity(a, b, similarity)
    # linear_sum_assignment minimizes cost, so turn similarities into costs;
    # the resulting index pairs give the best one-to-one bicluster matching.
    row_indices, col_indices = linear_sum_assignment(1.0 - matrix)
    n_a = len(a[0])
    n_b = len(b[0])
    # Normalize by the larger set so unmatched biclusters lower the score.
    return float(matrix[row_indices, col_indices].sum() / max(n_a, n_b))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/__init__.py | sklearn/metrics/cluster/__init__.py | """Evaluation metrics for cluster analysis results.
- Supervised evaluation uses a ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.metrics.cluster._bicluster import consensus_score
from sklearn.metrics.cluster._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from sklearn.metrics.cluster._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
# Public names exported by the cluster-metrics subpackage (kept sorted).
__all__ = [
    "adjusted_mutual_info_score",
    "adjusted_rand_score",
    "calinski_harabasz_score",
    "completeness_score",
    "consensus_score",
    "contingency_matrix",
    "davies_bouldin_score",
    # TODO(1.10): Remove
    "entropy",
    "expected_mutual_information",
    "fowlkes_mallows_score",
    "homogeneity_completeness_v_measure",
    "homogeneity_score",
    "mutual_info_score",
    "normalized_mutual_info_score",
    "pair_confusion_matrix",
    "rand_score",
    "silhouette_samples",
    "silhouette_score",
    "v_measure_score",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/_unsupervised.py | sklearn/metrics/cluster/_unsupervised.py | """Unsupervised evaluation metrics."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
from numbers import Integral
import numpy as np
from scipy.sparse import issparse
from sklearn.externals.array_api_compat import is_numpy_array
from sklearn.metrics.pairwise import (
_VALID_METRICS,
pairwise_distances,
pairwise_distances_chunked,
)
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import _safe_indexing, check_random_state, check_X_y
from sklearn.utils._array_api import (
_average,
_convert_to_numpy,
_is_numpy_namespace,
_max_precision_float_dtype,
get_namespace_and_device,
xpx,
)
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
def check_number_of_labels(n_labels, n_samples):
    """Validate that ``n_labels`` lies strictly between 1 and ``n_samples``.

    Parameters
    ----------
    n_labels : int
        Number of labels.
    n_samples : int
        Number of samples.

    Raises
    ------
    ValueError
        If ``n_labels`` is outside the range [2, n_samples - 1].
    """
    if n_labels <= 1 or n_labels >= n_samples:
        raise ValueError(
            "Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)"
            % n_labels
        )
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "labels": ["array-like"],
        "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
        "sample_size": [Interval(Integral, 1, None, closed="left"), None],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def silhouette_score(
    X, labels, *, metric="euclidean", sample_size=None, random_state=None, **kwds
):
    """Compute the mean Silhouette Coefficient of all samples.
    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``. To clarify, ``b`` is the distance between a sample and the nearest
    cluster that the sample is not a part of.
    Note that Silhouette Coefficient is only defined if number of labels
    is ``2 <= n_labels <= n_samples - 1``.
    This function returns the mean Silhouette Coefficient over all samples.
    To obtain the values for each sample, use :func:`silhouette_samples`.
    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters. Negative values generally indicate that a sample has
    been assigned to the wrong cluster, as a different cluster is more similar.
    Read more in the :ref:`User Guide <silhouette_coefficient>`.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \
            "precomputed" or (n_samples_a, n_features) otherwise
        An array of pairwise distances between samples, or a feature array.
    labels : array-like of shape (n_samples,)
        Predicted labels for each sample.
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`~sklearn.metrics.pairwise_distances`. If ``X`` is
        the distance array itself, use ``metric="precomputed"``.
    sample_size : int, default=None
        The size of the sample to use when computing the Silhouette Coefficient
        on a random subset of the data.
        If ``sample_size is None``, no sampling is used.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for selecting a subset of samples.
        Used when ``sample_size is not None``.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    silhouette : float
        Mean Silhouette Coefficient for all samples.
    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> from sklearn.cluster import KMeans
    >>> from sklearn.metrics import silhouette_score
    >>> X, y = make_blobs(random_state=42)
    >>> kmeans = KMeans(n_clusters=2, random_state=42)
    >>> silhouette_score(X, kmeans.fit_predict(X))
    0.49...
    """
    if sample_size is not None:
        X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])
        random_state = check_random_state(random_state)
        # Draw `sample_size` indices without replacement.
        indices = random_state.permutation(X.shape[0])[:sample_size]
        if metric == "precomputed":
            # X[indices].T[indices].T selects the same subset of both the
            # rows and the columns of the square distance matrix.
            X, labels = X[indices].T[indices].T, labels[indices]
        else:
            X, labels = X[indices], labels[indices]
    # The score is the mean of the per-sample silhouette coefficients.
    return float(np.mean(silhouette_samples(X, labels, metric=metric, **kwds)))
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk. If a sparse matrix is provided,
only CSR format is accepted.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
n_chunk_samples = D_chunk.shape[0]
# accumulate distances from each sample to each cluster
cluster_distances = np.zeros(
(n_chunk_samples, len(label_freqs)), dtype=D_chunk.dtype
)
if issparse(D_chunk):
if D_chunk.format != "csr":
raise TypeError(
"Expected CSR matrix. Please pass sparse matrix in CSR format."
)
for i in range(n_chunk_samples):
indptr = D_chunk.indptr
indices = D_chunk.indices[indptr[i] : indptr[i + 1]]
sample_weights = D_chunk.data[indptr[i] : indptr[i + 1]]
sample_labels = np.take(labels, indices)
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
else:
for i in range(n_chunk_samples):
sample_weights = D_chunk[i]
sample_labels = labels
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
# intra_index selects intra-cluster distances within cluster_distances
end = start + n_chunk_samples
intra_index = (np.arange(n_chunk_samples), labels[start:end])
# intra_cluster_distances are averaged over cluster size outside this function
intra_cluster_distances = cluster_distances[intra_index]
# of the remaining distances we normalise and extract the minimum
cluster_distances[intra_index] = np.inf
cluster_distances /= label_freqs
inter_cluster_distances = cluster_distances.min(axis=1)
return intra_cluster_distances, inter_cluster_distances
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "labels": ["array-like"],
        "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
    },
    prefer_skip_nested_validation=True,
)
def silhouette_samples(X, labels, *, metric="euclidean", **kwds):
    """Compute the Silhouette Coefficient for each sample.
    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.
    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.
    Note that Silhouette Coefficient is only defined if number of labels
    is 2 ``<= n_labels <= n_samples - 1``.
    This function returns the Silhouette Coefficient for each sample.
    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.
    Read more in the :ref:`User Guide <silhouette_coefficient>`.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_a, n_samples_a) if metric == \
            "precomputed" or (n_samples_a, n_features) otherwise
        An array of pairwise distances between samples, or a feature array. If
        a sparse matrix is provided, CSR format should be favoured avoiding
        an additional copy.
    labels : array-like of shape (n_samples,)
        Label values for each sample.
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`~sklearn.metrics.pairwise_distances`.
        If ``X`` is the distance array itself, use "precomputed" as the metric.
        Precomputed distance matrices must have 0 along the diagonal.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    silhouette : array-like of shape (n_samples,)
        Silhouette Coefficients for each sample.
    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    Examples
    --------
    >>> from sklearn.metrics import silhouette_samples
    >>> from sklearn.datasets import make_blobs
    >>> from sklearn.cluster import KMeans
    >>> X, y = make_blobs(n_samples=50, random_state=42)
    >>> kmeans = KMeans(n_clusters=3, random_state=42)
    >>> labels = kmeans.fit_predict(X)
    >>> silhouette_samples(X, labels)
    array([...])
    """
    X, labels = check_X_y(X, labels, accept_sparse=["csr"])
    # Check for non-zero diagonal entries in precomputed distance matrix
    if metric == "precomputed":
        error_msg = ValueError(
            "The precomputed distance matrix contains non-zero "
            "elements on the diagonal. Use np.fill_diagonal(X, 0)."
        )
        if X.dtype.kind == "f":
            # Tolerate tiny floating point noise on the diagonal.
            atol = np.finfo(X.dtype).eps * 100
            if np.any(np.abs(X.diagonal()) > atol):
                raise error_msg
        elif np.any(X.diagonal() != 0):  # integral dtype
            raise error_msg
    # Encode labels as {0, ..., n_clusters - 1} so the reducer can use
    # bincount-style accumulation.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)
    kwds["metric"] = metric
    reduce_func = functools.partial(
        _silhouette_reduce, labels=labels, label_freqs=label_freqs
    )
    # Process the pairwise distances chunk by chunk; each chunk yields the
    # (intra, inter) statistics for its rows.
    results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)
    # Mean intra-cluster distance excludes the sample itself, hence the -1;
    # `clip` keeps the denominator at 0 for singleton clusters.
    denom = (label_freqs - 1).take(labels, mode="clip")
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom
    sil_samples = inter_clust_dists - intra_clust_dists
    with np.errstate(divide="ignore", invalid="ignore"):
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # nan values are for clusters of size 1, and should be 0
    return xpx.nan_to_num(sil_samples)
@validate_params(
    {
        "X": ["array-like"],
        "labels": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def calinski_harabasz_score(X, labels):
    """Compute the Calinski and Harabasz score.
    It is also known as the Variance Ratio Criterion.
    The score is defined as ratio of the sum of between-cluster dispersion and
    of within-cluster dispersion.
    Read more in the :ref:`User Guide <calinski_harabasz_index>`.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        A list of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like of shape (n_samples,)
        Predicted labels for each sample.
    Returns
    -------
    score : float
        The resulting Calinski-Harabasz score.
    References
    ----------
    .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
       analysis". Communications in Statistics
       <https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> from sklearn.cluster import KMeans
    >>> from sklearn.metrics import calinski_harabasz_score
    >>> X, _ = make_blobs(random_state=0)
    >>> kmeans = KMeans(n_clusters=3, random_state=0,).fit(X)
    >>> calinski_harabasz_score(X, kmeans.labels_)
    114.8...
    """
    xp, _, device_ = get_namespace_and_device(X, labels)
    if _is_numpy_namespace(xp) and not is_numpy_array(X):
        # This is required to handle the case where `array_api_dispatch` is False but
        # we are still dealing with `X` as a non-NumPy array e.g. a PyTorch tensor.
        X = _convert_to_numpy(X, xp=xp)
    else:
        # Compute in the widest float precision available on this device.
        X = xp.astype(X, _max_precision_float_dtype(xp, device_), copy=False)
    X, labels = check_X_y(X, labels)
    # Encode labels as {0, ..., n_labels - 1} so clusters can be selected by
    # integer comparison below.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = le.classes_.shape[0]
    check_number_of_labels(n_labels, n_samples)
    # extra_disp: between-cluster dispersion; intra_disp: within-cluster
    # dispersion (both as sums of squared deviations).
    extra_disp, intra_disp = 0.0, 0.0
    mean = xp.mean(X, axis=0)
    for k in range(n_labels):
        cluster_k = X[labels == k]
        mean_k = xp.mean(cluster_k, axis=0)
        extra_disp += cluster_k.shape[0] * xp.sum((mean_k - mean) ** 2)
        intra_disp += xp.sum((cluster_k - mean_k) ** 2)
    # Degenerate case: zero within-cluster dispersion is scored as 1.0 rather
    # than dividing by zero.
    return float(
        1.0
        if intra_disp == 0.0
        else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
    )
@validate_params(
    {
        "X": ["array-like"],
        "labels": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def davies_bouldin_score(X, labels):
    """Compute the Davies-Bouldin score.
    The score is defined as the average similarity measure of each cluster with
    its most similar cluster, where similarity is the ratio of within-cluster
    distances to between-cluster distances. Thus, clusters which are farther
    apart and less dispersed will result in a better score.
    The minimum score is zero, with lower values indicating better clustering.
    Read more in the :ref:`User Guide <davies-bouldin_index>`.
    .. versionadded:: 0.20
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        A list of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like of shape (n_samples,)
        Predicted labels for each sample.
    Returns
    -------
    score: float
        The resulting Davies-Bouldin score.
    References
    ----------
    .. [1] Davies, David L.; Bouldin, Donald W. (1979).
       `"A Cluster Separation Measure"
       <https://ieeexplore.ieee.org/document/4766909>`__.
       IEEE Transactions on Pattern Analysis and Machine Intelligence.
       PAMI-1 (2): 224-227
    Examples
    --------
    >>> from sklearn.metrics import davies_bouldin_score
    >>> X = [[0, 1], [1, 1], [3, 4]]
    >>> labels = [0, 0, 1]
    >>> davies_bouldin_score(X, labels)
    0.12...
    """
    xp, _, device_ = get_namespace_and_device(X, labels)
    X, labels = check_X_y(X, labels)
    # Encode labels as {0, ..., n_labels - 1}.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = le.classes_.shape[0]
    check_number_of_labels(n_labels, n_samples)
    dtype = _max_precision_float_dtype(xp, device_)
    # intra_dists[k]: mean distance of cluster k's members to its centroid.
    intra_dists = xp.zeros(n_labels, dtype=dtype, device=device_)
    centroids = xp.zeros((n_labels, X.shape[1]), dtype=dtype, device=device_)
    for k in range(n_labels):
        cluster_k = _safe_indexing(X, xp.nonzero(labels == k)[0])
        centroid = _average(cluster_k, axis=0, xp=xp)
        centroids[k, ...] = centroid
        intra_dists[k] = _average(
            pairwise_distances(cluster_k, xp.stack([centroid])), xp=xp
        )
    centroid_distances = pairwise_distances(centroids)
    zero = xp.asarray(0.0, device=device_, dtype=dtype)
    # If every cluster is perfectly tight, or every centroid coincides, the
    # score is defined to be 0.
    if xp.all(xpx.isclose(intra_dists, zero)) or xp.all(
        xpx.isclose(centroid_distances, zero)
    ):
        return 0.0
    # Zero entries (diagonal self-distances, coincident centroids) become inf
    # so their ratios evaluate to 0 and never win the max below.
    centroid_distances[centroid_distances == 0] = xp.inf
    # R_ij = (s_i + s_j) / d_ij; the score is the mean over clusters of
    # max_j R_ij.
    combined_intra_dists = intra_dists[:, None] + intra_dists
    scores = xp.max(combined_intra_dists / centroid_distances, axis=1)
    return float(_average(scores, xp=xp))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/tests/test_bicluster.py | sklearn/metrics/cluster/tests/test_bicluster.py | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.metrics import consensus_score
from sklearn.metrics.cluster._bicluster import _jaccard
from sklearn.utils._testing import assert_almost_equal
def test_jaccard():
    # Check _jaccard against hand-computed values for a few indicator vectors.
    v_half = np.array([True, True, False, False])
    v_full = np.array([True, True, True, True])
    v_mid = np.array([False, True, True, False])
    v_tail = np.array([False, False, True, True])
    assert _jaccard(v_half, v_half, v_half, v_half) == 1
    assert _jaccard(v_half, v_half, v_full, v_full) == 0.25
    assert _jaccard(v_half, v_half, v_mid, v_mid) == 1.0 / 7
    assert _jaccard(v_half, v_half, v_tail, v_tail) == 0
def test_consensus_score():
    # Matching bicluster sets score 1; fully swapped assignments score 0.
    rows = [[True, True, False, False], [False, False, True, True]]
    swapped = rows[::-1]
    perfect_pairs = [
        ((rows, rows), (rows, rows)),
        ((rows, rows), (swapped, swapped)),
        ((rows, swapped), (rows, swapped)),
        ((rows, swapped), (swapped, rows)),
    ]
    for left, right in perfect_pairs:
        assert consensus_score(left, right) == 1
    disjoint_pairs = [
        ((rows, rows), (swapped, rows)),
        ((rows, rows), (rows, swapped)),
        ((swapped, swapped), (rows, swapped)),
        ((swapped, swapped), (swapped, rows)),
    ]
    for left, right in disjoint_pairs:
        assert consensus_score(left, right) == 0
def test_consensus_score_issue2445():
    """Different number of biclusters in A and B"""
    indicator = np.array(
        [
            [True, True, False, False],
            [False, False, True, True],
            [False, False, False, True],
        ]
    )
    # B keeps only the first and third biclusters of A.
    kept = [0, 2]
    score = consensus_score(
        (indicator, indicator), (indicator[kept], indicator[kept])
    )
    # B contains 2 of the 3 biclusters in A, so score should be 2/3
    assert_almost_equal(score, 2.0 / 3.0)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/tests/test_common.py | sklearn/metrics/cluster/tests/test_common.py | from functools import partial
from itertools import chain
import numpy as np
import pytest
from sklearn.metrics.cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
calinski_harabasz_score,
completeness_score,
davies_bouldin_score,
fowlkes_mallows_score,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
rand_score,
silhouette_score,
v_measure_score,
)
from sklearn.metrics.tests.test_common import check_array_api_metric
from sklearn.utils._array_api import (
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import assert_allclose
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
# - SUPERVISED_METRICS: all supervised cluster metrics - (when given a
# ground truth value)
# - UNSUPERVISED_METRICS: all unsupervised cluster metrics
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
# Supervised metrics compare a predicted labeling against a ground truth.
SUPERVISED_METRICS = {
    "adjusted_mutual_info_score": adjusted_mutual_info_score,
    "adjusted_rand_score": adjusted_rand_score,
    "rand_score": rand_score,
    "completeness_score": completeness_score,
    "homogeneity_score": homogeneity_score,
    "mutual_info_score": mutual_info_score,
    "normalized_mutual_info_score": normalized_mutual_info_score,
    "v_measure_score": v_measure_score,
    "fowlkes_mallows_score": fowlkes_mallows_score,
}
# Unsupervised metrics take the data matrix X and a single labeling.
UNSUPERVISED_METRICS = {
    "silhouette_score": silhouette_score,
    "silhouette_manhattan": partial(silhouette_score, metric="manhattan"),
    "calinski_harabasz_score": calinski_harabasz_score,
    "davies_bouldin_score": davies_bouldin_score,
}
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics
# that are symmetric with respect to their input argument y_true and y_pred.
#
# --------------------------------------------------------------------
# Symmetric with respect to their input arguments y_true and y_pred.
# Symmetric metrics only apply to supervised clusters.
SYMMETRIC_METRICS = [
    "adjusted_rand_score",
    "rand_score",
    "v_measure_score",
    "mutual_info_score",
    "adjusted_mutual_info_score",
    "normalized_mutual_info_score",
    "fowlkes_mallows_score",
]
NON_SYMMETRIC_METRICS = ["homogeneity_score", "completeness_score"]
# Metrics whose upper bound is 1
NORMALIZED_METRICS = [
    "adjusted_rand_score",
    "rand_score",
    "homogeneity_score",
    "completeness_score",
    "v_measure_score",
    "adjusted_mutual_info_score",
    "fowlkes_mallows_score",
    "normalized_mutual_info_score",
]
# Two fixed random 3-class labelings shared by the (non-)symmetry tests below.
rng = np.random.RandomState(0)
y1 = rng.randint(3, size=30)
y2 = rng.randint(3, size=30)
def test_symmetric_non_symmetric_union():
    # The symmetric and non-symmetric lists together must cover exactly the
    # supervised metrics.
    combined = sorted(SYMMETRIC_METRICS + NON_SYMMETRIC_METRICS)
    assert combined == sorted(SUPERVISED_METRICS)
@pytest.mark.parametrize(
    "metric_name, y1, y2", [(name, y1, y2) for name in SYMMETRIC_METRICS]
)
def test_symmetry(metric_name, y1, y2):
    # Swapping the two labelings must not change a symmetric metric.
    score_fn = SUPERVISED_METRICS[metric_name]
    forward = score_fn(y1, y2)
    backward = score_fn(y2, y1)
    assert forward == pytest.approx(backward)
@pytest.mark.parametrize(
    "metric_name, y1, y2", [(name, y1, y2) for name in NON_SYMMETRIC_METRICS]
)
def test_non_symmetry(metric_name, y1, y2):
    # Non-symmetric metrics must depend on the argument order.
    score_fn = SUPERVISED_METRICS[metric_name]
    assert score_fn(y1, y2) != pytest.approx(score_fn(y2, y1))
@pytest.mark.parametrize("metric_name", NORMALIZED_METRICS)
def test_normalized_output(metric_name):
    # Normalized metrics are bounded: positive for partial agreement, below 1
    # for imperfect agreement, exactly 1 for identical labelings, and never
    # negative at the lower bound.
    metric = SUPERVISED_METRICS[metric_name]
    partial_pairs = [
        ([0, 0, 0, 1, 1], [0, 0, 0, 1, 2]),
        ([0, 0, 1, 1, 2], [0, 0, 1, 1, 1]),
    ]
    for labels_a, labels_b in partial_pairs:
        assert metric(labels_a, labels_b) > 0.0
    imperfect_a = [0, 0, 0, 1, 2]
    imperfect_b = [0, 1, 1, 1, 1]
    assert metric(imperfect_a, imperfect_b) < 1.0
    assert metric(imperfect_a, imperfect_b) < 1.0
    identical = [0, 0, 0, 1, 1, 1]
    assert metric(identical, list(identical)) == pytest.approx(1.0)
    constant = [0, 0, 0, 0, 0, 0]
    all_distinct = [0, 1, 2, 3, 4, 5]
    scores = np.array(
        [metric(constant, all_distinct), metric(all_distinct, constant)]
    )
    assert not (scores < 0).any()
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
def test_permute_labels(metric_name):
    # Relabeling the clusters (exchanging 0 and 1) must leave every
    # clustering metric unchanged.
    y_label = np.array([0, 0, 0, 1, 1, 0, 1])
    y_pred = np.array([1, 0, 1, 0, 1, 1, 0])
    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        baseline = metric(y_pred, y_label)
        permuted_pairs = [
            (1 - y_pred, y_label),
            (1 - y_pred, 1 - y_label),
            (y_pred, 1 - y_label),
        ]
        for pred, truth in permuted_pairs:
            assert_allclose(baseline, metric(pred, truth))
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(7, 10))
        baseline = metric(X, y_pred)
        assert_allclose(baseline, metric(X, 1 - y_pred))
@pytest.mark.parametrize("metric_name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
# For all clustering metrics Input parameters can be both
# in the form of arrays lists, positive, negative or string
def test_format_invariance(metric_name):
    y_true = [0, 0, 0, 0, 1, 1, 1, 1]
    y_pred = [0, 1, 2, 3, 4, 5, 6, 7]

    def generate_formats(y):
        # Yield equivalent representations of `y` alongside a description.
        arr = np.array(y)
        yield arr, "array of ints"
        yield arr.tolist(), "list of ints"
        as_strings = [str(value) + "-a" for value in arr.tolist()]
        yield as_strings, "list of strs"
        yield np.array(as_strings, dtype=object), "array of strs"
        yield arr - 1, "including negative ints"
        yield arr + 1, "strictly positive ints"

    if metric_name in SUPERVISED_METRICS:
        metric = SUPERVISED_METRICS[metric_name]
        expected = metric(y_true, y_pred)
        paired_formats = zip(generate_formats(y_true), generate_formats(y_pred))
        for (y_true_fmt, _), (y_pred_fmt, _) in paired_formats:
            assert expected == metric(y_true_fmt, y_pred_fmt)
    else:
        metric = UNSUPERVISED_METRICS[metric_name]
        X = np.random.randint(10, size=(8, 10))
        expected = metric(X, y_true)
        assert expected == metric(X.astype(float), y_true)
        for y_true_fmt, _ in generate_formats(y_true):
            assert expected == metric(X, y_true_fmt)
@pytest.mark.parametrize("metric", SUPERVISED_METRICS.values())
def test_single_sample(metric):
    # only the supervised metrics support single sample
    for true_label in (0, 1):
        for pred_label in (0, 1):
            metric([true_label], [pred_label])
@pytest.mark.parametrize(
    "metric_name, metric_func", dict(SUPERVISED_METRICS, **UNSUPERVISED_METRICS).items()
)
def test_inf_nan_input(metric_name, metric_func):
    """Check that every metric rejects inputs containing NaN or infinity.

    The ``pytest.raises`` context must wrap each call individually: with the
    loop *inside* the context (the previous form), the context exits as soon
    as the first call raises, so the remaining invalid inputs were never
    exercised.
    """
    if metric_name in SUPERVISED_METRICS:
        invalids = [
            ([0, 1], [np.inf, np.inf]),
            ([0, 1], [np.nan, np.nan]),
            ([0, 1], [np.nan, np.inf]),
        ]
    else:
        X = np.random.randint(10, size=(2, 10))
        invalids = [(X, [np.inf, np.inf]), (X, [np.nan, np.nan]), (X, [np.nan, np.inf])]
    for args in invalids:
        with pytest.raises(ValueError, match=r"contains (NaN|infinity)"):
            metric_func(*args)
@pytest.mark.parametrize("name", chain(SUPERVISED_METRICS, UNSUPERVISED_METRICS))
def test_returned_value_consistency(name):
    """Ensure that the returned values of all metrics are consistent.
    It can only be a float. It should not be a numpy float64 or float32.
    """
    rng = np.random.RandomState(0)
    # Draw order matters for reproducibility: X first, then the labelings.
    X = rng.randint(10, size=(20, 10))
    labels_true = rng.randint(0, 3, size=(20,))
    labels_pred = rng.randint(0, 3, size=(20,))
    if name in SUPERVISED_METRICS:
        score = SUPERVISED_METRICS[name](labels_true, labels_pred)
    else:
        score = UNSUPERVISED_METRICS[name](X, labels_pred)
    assert isinstance(score, float)
    assert not isinstance(score, (np.float64, np.float32))
def check_array_api_unsupervised_metric(metric, array_namespace, device, dtype_name):
    # Unsupervised metrics take (X, labels); delegate the namespace/device/
    # dtype round-trip checks to the shared helper.
    labels = np.array([1, 0, 1, 0, 1, 1, 0])
    features = np.random.randint(10, size=(7, 10))
    check_array_api_metric(
        metric,
        array_namespace,
        device,
        dtype_name,
        a_np=features,
        b_np=labels,
    )
# Map each array-API-capable metric to the list of checks it must pass.
array_api_metric_checkers = {
    calinski_harabasz_score: [
        check_array_api_unsupervised_metric,
    ],
    davies_bouldin_score: [
        check_array_api_unsupervised_metric,
    ],
}
def yield_metric_checker_combinations(metric_checkers=array_api_metric_checkers):
    """Yield one ``(metric, checker)`` pair per registered check."""
    for metric, checker_list in metric_checkers.items():
        yield from ((metric, checker) for checker in checker_list)
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("metric, check_func", yield_metric_checker_combinations())
def test_array_api_compliance(metric, array_namespace, device, dtype_name, check_func):
    # Run every registered array API check against every namespace/device/
    # dtype combination.
    check_func(metric, array_namespace, device, dtype_name)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/tests/test_unsupervised.py | sklearn/metrics/cluster/tests/test_unsupervised.py | import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.sparse import issparse
from sklearn import datasets
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
from sklearn.metrics.cluster._unsupervised import _silhouette_reduce
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import (
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
@pytest.mark.parametrize(
    "sparse_container",
    [None] + CSR_CONTAINERS + CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
@pytest.mark.parametrize("sample_size", [None, "half"])
def test_silhouette(sparse_container, sample_size):
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X, y = dataset.data, dataset.target
    if sparse_container is not None:
        X = sparse_container(X)
    if sample_size == "half":
        sample_size = int(X.shape[0] / 2)
    D = pairwise_distances(X, metric="euclidean")
    # Ground-truth labels should give a positive score, and the precomputed
    # path must agree with the feature-based path.
    score_from_distances = silhouette_score(
        D, y, metric="precomputed", sample_size=sample_size, random_state=0
    )
    score_from_features = silhouette_score(
        X, y, metric="euclidean", sample_size=sample_size, random_state=0
    )
    assert score_from_distances > 0
    assert score_from_features > 0
    assert score_from_distances == pytest.approx(score_from_features)
def test_cluster_size_1():
    """Silhouette conventions for degenerate clusters.

    A singleton cluster gets score 0 (Rousseeuw's convention); a cluster of
    identical points gets score 1 (our own choice, not covered by the paper).
    """
    # Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
    # (cluster 0). We also test the case where there are identical samples
    # as the only members of a cluster (cluster 2). To our knowledge, this case
    # is not discussed in reference material, and we choose for it a sample
    # score of 1.
    X = [[0.0], [1.0], [1.0], [2.0], [3.0], [3.0]]
    labels = np.array([0, 1, 1, 1, 2, 2])
    # Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
    # Cluster 1: intra-cluster = [.5, .5, 1]
    #            inter-cluster = [1, 1, 1]
    #            silhouette    = [.5, .5, 0]
    # Cluster 2: intra-cluster = [0, 0]
    #            inter-cluster = [arbitrary, arbitrary]
    #            silhouette    = [1., 1.]
    silhouette = silhouette_score(X, labels)
    assert not np.isnan(silhouette)
    ss = silhouette_samples(X, labels)
    assert_array_equal(ss, [0, 0.5, 0.5, 0, 1, 1])
def test_silhouette_paper_example():
    """Check per-sample silhouettes and overall scores against the values
    published in Rousseeuw (1987), Table 1 and Figures 2/3.

    BUG FIX: the original called ``pytest.approx(expected, actual, abs=...)``
    without an ``assert``, so no comparison was ever performed (and the second
    positional argument of ``pytest.approx`` is ``rel``, not the actual value).
    """
    # Explicitly check per-sample results against Rousseeuw (1987)
    # Data from Table 1: lower triangle of the 12x12 country dissimilarities.
    lower = [
        5.58,
        7.00,
        6.50,
        7.08,
        7.00,
        3.83,
        4.83,
        5.08,
        8.17,
        5.83,
        2.17,
        5.75,
        6.67,
        6.92,
        4.92,
        6.42,
        5.00,
        5.58,
        6.00,
        4.67,
        6.42,
        3.42,
        5.50,
        6.42,
        6.42,
        5.00,
        3.92,
        6.17,
        2.50,
        4.92,
        6.25,
        7.33,
        4.50,
        2.25,
        6.33,
        2.75,
        6.08,
        6.67,
        4.25,
        2.67,
        6.00,
        6.17,
        6.17,
        6.92,
        6.17,
        5.25,
        6.83,
        4.50,
        3.75,
        5.75,
        5.42,
        6.08,
        5.83,
        6.67,
        3.67,
        4.75,
        3.00,
        6.08,
        6.67,
        5.00,
        5.58,
        4.83,
        6.17,
        5.67,
        6.50,
        6.92,
    ]
    D = np.zeros((12, 12))
    D[np.tril_indices(12, -1)] = lower
    # Symmetrize; the diagonal stays exactly zero as required by "precomputed".
    D += D.T
    names = [
        "BEL",
        "BRA",
        "CHI",
        "CUB",
        "EGY",
        "FRA",
        "IND",
        "ISR",
        "USA",
        "USS",
        "YUG",
        "ZAI",
    ]
    # Data from Figure 2
    labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]
    expected1 = {
        "USA": 0.43,
        "BEL": 0.39,
        "FRA": 0.35,
        "ISR": 0.30,
        "BRA": 0.22,
        "EGY": 0.20,
        "ZAI": 0.19,
        "CUB": 0.40,
        "USS": 0.34,
        "CHI": 0.33,
        "YUG": 0.26,
        "IND": -0.04,
    }
    score1 = 0.28
    # Data from Figure 3
    labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]
    expected2 = {
        "USA": 0.47,
        "FRA": 0.44,
        "BEL": 0.42,
        "ISR": 0.37,
        "EGY": 0.02,
        "ZAI": 0.28,
        "BRA": 0.25,
        "IND": 0.17,
        "CUB": 0.48,
        "USS": 0.44,
        "YUG": 0.31,
        "CHI": 0.31,
    }
    score2 = 0.33
    for labels, expected, score in [
        (labels1, expected1, score1),
        (labels2, expected2, score2),
    ]:
        expected = [expected[name] for name in names]
        # we check to 2dp because that's what's in the paper
        assert silhouette_samples(
            D, np.array(labels), metric="precomputed"
        ) == pytest.approx(expected, abs=1e-2)
        assert silhouette_score(
            D, np.array(labels), metric="precomputed"
        ) == pytest.approx(score, abs=1e-2)
def test_correct_labelsize():
    """silhouette_score must reject degenerate label counts.

    Valid counts are 2 ... n_samples - 1 inclusive; both extremes
    (one cluster per sample, and a single cluster) must raise.
    """
    X = datasets.load_iris().data
    degenerate_labelings = (
        np.arange(X.shape[0]),  # n_labels == n_samples
        np.zeros(X.shape[0]),  # n_labels == 1
    )
    for y in degenerate_labelings:
        err_msg = (
            r"Number of labels is %d\. Valid values are 2 "
            r"to n_samples - 1 \(inclusive\)" % len(np.unique(y))
        )
        with pytest.raises(ValueError, match=err_msg):
            silhouette_score(X, y)
def test_non_encoded_labels():
    """Silhouette must be invariant to relabeling (affine shift of label ids)."""
    dataset = datasets.load_iris()
    X = dataset.data
    labels = dataset.target
    # labels * 2 + 10 induces the same partition, so scores must be identical.
    assert silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels)
    assert_array_equal(
        silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels)
    )
def test_non_numpy_labels():
    """Plain Python lists must yield the same score as numpy arrays."""
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    assert silhouette_score(list(X), list(y)) == silhouette_score(X, y)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_silhouette_nonzero_diag(dtype):
    """Precomputed distance matrices must have a (numerically) zero diagonal."""
    # Make sure silhouette_samples requires diagonal to be zero.
    # Non-regression test for #12178
    # Construct a zero-diagonal matrix
    dists = pairwise_distances(
        np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T
    )
    labels = [0, 0, 0, 1, 1, 1]
    # small values on the diagonal are OK
    dists[2][2] = np.finfo(dists.dtype).eps * 10
    silhouette_samples(dists, labels, metric="precomputed")
    # values bigger than eps * 100 are not
    dists[2][2] = np.finfo(dists.dtype).eps * 1000
    with pytest.raises(ValueError, match="contains non-zero"):
        silhouette_samples(dists, labels, metric="precomputed")
@pytest.mark.parametrize(
    "sparse_container",
    CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
def test_silhouette_samples_precomputed_sparse(sparse_container):
    """Check that silhouette_samples works for sparse matrices correctly."""
    # Sparse and dense precomputed distance matrices must agree elementwise.
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    assert issparse(pdist_sparse)
    output_with_sparse_input = silhouette_samples(pdist_sparse, y, metric="precomputed")
    output_with_dense_input = silhouette_samples(pdist_dense, y, metric="precomputed")
    assert_allclose(output_with_sparse_input, output_with_dense_input)
@pytest.mark.parametrize(
    "sparse_container",
    CSC_CONTAINERS + CSR_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS,
)
def test_silhouette_samples_euclidean_sparse(sparse_container):
    """Check that silhouette_samples works for sparse matrices correctly."""
    # NOTE(review): despite the `pdist_*` names (copied from the precomputed
    # test above), the matrix is fed as a plain feature matrix here -- the
    # default euclidean metric is used, not metric="precomputed".
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    assert issparse(pdist_sparse)
    output_with_sparse_input = silhouette_samples(pdist_sparse, y)
    output_with_dense_input = silhouette_samples(pdist_dense, y)
    assert_allclose(output_with_sparse_input, output_with_dense_input)
@pytest.mark.parametrize(
    "sparse_container", CSC_CONTAINERS + DOK_CONTAINERS + LIL_CONTAINERS
)
def test_silhouette_reduce(sparse_container):
    """Check for non-CSR input to private method `_silhouette_reduce`."""
    # Every sparse format except CSR must be rejected with a TypeError.
    X = np.array([[0.2, 0.1, 0.1, 0.2, 0.1, 1.6, 0.2, 0.1]], dtype=np.float32).T
    pdist_dense = pairwise_distances(X)
    pdist_sparse = sparse_container(pdist_dense)
    y = [0, 0, 0, 0, 1, 1, 1, 1]
    label_freqs = np.bincount(y)
    with pytest.raises(
        TypeError,
        match="Expected CSR matrix. Please pass sparse matrix in CSR format.",
    ):
        _silhouette_reduce(pdist_sparse, start=0, labels=y, label_freqs=label_freqs)
def assert_raises_on_only_one_label(func):
    """Assert `func` raises ValueError when every sample shares one label."""
    rng = np.random.RandomState(seed=0)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), np.zeros(10))
def assert_raises_on_all_points_same_cluster(func):
    """Assert `func` raises ValueError when every sample is its own cluster."""
    rng = np.random.RandomState(seed=0)
    with pytest.raises(ValueError, match="Number of labels is"):
        func(rng.rand(10, 2), np.arange(10))
def test_calinski_harabasz_score():
    """Calinski-Harabasz index: degenerate inputs and a handcrafted value.

    BUG FIX: the final check called ``pytest.approx(...)`` without ``assert``,
    so the general-case value was never actually compared.
    """
    assert_raises_on_only_one_label(calinski_harabasz_score)
    assert_raises_on_all_points_same_cluster(calinski_harabasz_score)
    # Assert the value is 1. when all samples are equals
    assert 1.0 == calinski_harabasz_score(np.ones((10, 2)), [0] * 5 + [1] * 5)
    # Assert the value is 0. when all the mean cluster are equal
    assert 0.0 == calinski_harabasz_score([[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10)
    # General case (with non numpy arrays): 4 well-separated clusters of 10
    # points each; CH = (between_SS / (k-1)) / (within_SS / (n-k)) = 108.
    X = (
        [[0, 0], [1, 1]] * 5
        + [[3, 3], [4, 4]] * 5
        + [[0, 4], [1, 3]] * 5
        + [[3, 1], [4, 0]] * 5
    )
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    assert calinski_harabasz_score(X, labels) == pytest.approx(
        45 * (40 - 4) / (5 * (4 - 1))
    )
def test_davies_bouldin_score():
    """Davies-Bouldin index: degenerate inputs, handcrafted values, and the
    absence of divide-by-zero warnings.

    BUG FIX: both general-case checks called ``pytest.approx(...)`` without
    ``assert``, so the expected values were never actually compared.
    """
    assert_raises_on_only_one_label(davies_bouldin_score)
    assert_raises_on_all_points_same_cluster(davies_bouldin_score)
    # Assert the value is 0. when all samples are equals
    assert davies_bouldin_score(np.ones((10, 2)), [0] * 5 + [1] * 5) == pytest.approx(
        0.0
    )
    # Assert the value is 0. when all the mean cluster are equal
    assert davies_bouldin_score(
        [[-1, -1], [1, 1]] * 10, [0] * 10 + [1] * 10
    ) == pytest.approx(0.0)
    # General case (with non numpy arrays): cluster dispersions are sqrt(0.5),
    # nearest centroids lie 3 apart, so DB = (2 * sqrt(0.5)) / 3.
    X = (
        [[0, 0], [1, 1]] * 5
        + [[3, 3], [4, 4]] * 5
        + [[0, 4], [1, 3]] * 5
        + [[3, 1], [4, 0]] * 5
    )
    labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    assert davies_bouldin_score(X, labels) == pytest.approx(2 * np.sqrt(0.5) / 3)
    # Ensure divide by zero warning is not raised in general case
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        davies_bouldin_score(X, labels)
    # General case - cluster have one sample
    X = [[0, 0], [2, 2], [3, 3], [5, 5]]
    labels = [0, 0, 1, 2]
    assert davies_bouldin_score(X, labels) == pytest.approx((5.0 / 4) / 3)
def test_silhouette_score_integer_precomputed():
    """Check that silhouette_score works for precomputed metrics that are integers.
    Non-regression test for #22107.
    """
    result = silhouette_score(
        [[0, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
    )
    assert result == pytest.approx(1 / 6)
    # non-zero on diagonal for ints raises an error
    with pytest.raises(ValueError, match="contains non-zero"):
        silhouette_score(
            [[1, 1, 2], [1, 0, 1], [2, 1, 0]], [0, 0, 1], metric="precomputed"
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/tests/test_supervised.py | sklearn/metrics/cluster/tests/test_supervised.py | import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from sklearn.base import config_context
from sklearn.metrics.cluster import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from sklearn.metrics.cluster._supervised import (
_entropy,
_generalized_average,
check_clusterings,
entropy,
)
from sklearn.utils import assert_all_finite
from sklearn.utils._array_api import (
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._testing import _array_api_for_tests, assert_almost_equal
# Supervised clustering metrics shared by the generic tests below; all are
# permutation-invariant scores comparing two labelings of the same samples.
score_funcs = [
    adjusted_rand_score,
    rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
@pytest.mark.parametrize("score_func", score_funcs)
def test_error_messages_on_wrong_input(score_func):
    """Mismatched lengths and non-1D labelings must raise informative errors."""
    expected = r"Found input variables with inconsistent numbers of samples: \[2, 3\]"
    with pytest.raises(ValueError, match=expected):
        score_func([0, 1], [1, 1, 1])
    expected = r"labels_true must be 1D: shape is \(2"
    with pytest.raises(ValueError, match=expected):
        score_func([[0, 1], [1, 0]], [1, 1, 1])
    expected = r"labels_pred must be 1D: shape is \(2"
    with pytest.raises(ValueError, match=expected):
        score_func([0, 1, 0], [[1, 1], [0, 0]])
def test_generalized_average():
    """Generalized averages are ordered min <= geometric <= arithmetic <= max,
    and all coincide when both operands are equal."""
    a, b = 1, 2
    methods = ["min", "geometric", "arithmetic", "max"]
    means = [_generalized_average(a, b, method) for method in methods]
    assert means[0] <= means[1] <= means[2] <= means[3]
    c, d = 12, 12
    means = [_generalized_average(c, d, method) for method in methods]
    assert means[0] == means[1] == means[2] == means[3]
@pytest.mark.parametrize("score_func", score_funcs)
def test_perfect_matches(score_func):
    """Any labeling inducing the same partition as the truth scores 1.0,
    including empty input, a single sample, and renamed/float labels."""
    assert score_func([], []) == pytest.approx(1.0)
    assert score_func([0], [1]) == pytest.approx(1.0)
    assert score_func([0, 0, 0], [0, 0, 0]) == pytest.approx(1.0)
    assert score_func([0, 1, 0], [42, 7, 42]) == pytest.approx(1.0)
    assert score_func([0.0, 1.0, 0.0], [42.0, 7.0, 42.0]) == pytest.approx(1.0)
    assert score_func([0.0, 1.0, 2.0], [42.0, 7.0, 2.0]) == pytest.approx(1.0)
    assert score_func([0, 1, 2], [42, 7, 2]) == pytest.approx(1.0)
@pytest.mark.parametrize(
    "score_func",
    [
        normalized_mutual_info_score,
        adjusted_mutual_info_score,
    ],
)
@pytest.mark.parametrize("average_method", ["min", "geometric", "arithmetic", "max"])
def test_perfect_matches_with_changing_means(score_func, average_method):
    """Perfect matches must score 1.0 for every normalization method of the
    (adjusted/normalized) mutual information."""
    assert score_func([], [], average_method=average_method) == pytest.approx(1.0)
    assert score_func([0], [1], average_method=average_method) == pytest.approx(1.0)
    assert score_func(
        [0, 0, 0], [0, 0, 0], average_method=average_method
    ) == pytest.approx(1.0)
    assert score_func(
        [0, 1, 0], [42, 7, 42], average_method=average_method
    ) == pytest.approx(1.0)
    assert score_func(
        [0.0, 1.0, 0.0], [42.0, 7.0, 42.0], average_method=average_method
    ) == pytest.approx(1.0)
    assert score_func(
        [0.0, 1.0, 2.0], [42.0, 7.0, 2.0], average_method=average_method
    ) == pytest.approx(1.0)
    assert score_func(
        [0, 1, 2], [42, 7, 2], average_method=average_method
    ) == pytest.approx(1.0)
    # Non-regression tests for: https://github.com/scikit-learn/scikit-learn/issues/30950
    assert score_func([0, 1], [0, 1], average_method=average_method) == pytest.approx(
        1.0
    )
    assert score_func(
        [0, 1, 2, 3], [0, 1, 2, 3], average_method=average_method
    ) == pytest.approx(1.0)
def test_homogeneous_but_not_complete_labeling():
    """Pure clusters that split a true class: h == 1, c < 1, v in between."""
    # homogeneous but not complete clustering
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    """Clusters covering whole classes but mixing them: c == 1, h < 1."""
    # complete but not homogeneous clustering
    h, c, v = homogeneity_completeness_v_measure([0, 0, 1, 1, 2, 2], [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    """Imperfect clustering: both h and c strictly between 0 and 1."""
    # neither complete nor homogeneous but not so bad either
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
def test_beta_parameter():
    """The beta-weighted V-measure must equal (1+b)*h*c / (b*h + c)."""
    # test for when beta passed to
    # homogeneity_completeness_v_measure
    # and v_measure_score
    beta_test = 0.2
    # h and c values taken from test_not_complete_and_not_homogeneous_labeling.
    h_test = 0.67
    c_test = 0.42
    v_test = (1 + beta_test) * h_test * c_test / (beta_test * h_test + c_test)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test
    )
    assert_almost_equal(h, h_test, 2)
    assert_almost_equal(c, c_test, 2)
    assert_almost_equal(v, v_test, 2)
    v = v_measure_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2], beta=beta_test)
    assert_almost_equal(v, v_test, 2)
def test_non_consecutive_labels():
    """Scores must be unchanged when label ids have gaps (e.g. 0/2 or 0/4)."""
    # regression tests for labels with gaps
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 2, 2, 2], [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    h, c, v = homogeneity_completeness_v_measure([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
    ri_1 = rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ri_2 = rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ri_1, 0.66, 2)
    assert_almost_equal(ri_2, 0.66, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10, seed=42):
    """Score `n_runs` pairs of independent uniform labelings for each k.

    Returns an array of shape (len(k_range), n_runs) where entry (i, j) is
    `score_func` applied to two labelings drawn uniformly from range(k_range[i]).
    """
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(k_range), n_runs))
    for row, n_clusters in enumerate(k_range):
        for col in range(n_runs):
            # Draw order (a then b) matters for seeded reproducibility.
            first = rng.randint(low=0, high=n_clusters, size=n_samples)
            second = rng.randint(low=0, high=n_clusters, size=n_samples)
            scores[row, col] = score_func(first, second)
    return scores
def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs
    )
    # Worst score per cluster count stays near zero for random labelings.
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided sparse contingency
    C = contingency_matrix(labels_a, labels_b, sparse=True)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided dense contingency
    C = contingency_matrix(labels_a, labels_b)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    n_samples = C.sum()
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27821, 5)
    # Identical partitions under renamed labels score exactly 1.
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert ami == pytest.approx(1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    assert_almost_equal(ami, 0.38, 2)
def test_expected_mutual_info_overflow():
    """EMI must stay <= 1 even for huge contingency cells (overflow guard)."""
    # Test for regression where contingency cell exceeds 2**16
    # leading to overflow in np.outer, resulting in EMI > 1
    assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_int_overflow_mutual_info_fowlkes_mallows_score():
    # Test overflow in mutual_info_classif and fowlkes_mallows_score
    # x/y encode the same ~75k-sample contingency table; scores must be finite.
    x = np.array(
        [1] * (52632 + 2529)
        + [2] * (14660 + 793)
        + [3] * (3271 + 204)
        + [4] * (814 + 39)
        + [5] * (316 + 20)
    )
    y = np.array(
        [0] * 52632
        + [1] * 2529
        + [0] * 14660
        + [1] * 793
        + [0] * 3271
        + [1] * 204
        + [0] * 814
        + [1] * 39
        + [0] * 316
        + [1] * 20
    )
    assert_all_finite(mutual_info_score(x, y))
    assert_all_finite(fowlkes_mallows_score(x, y))
# TODO(1.10): Remove
def test_public_entropy_deprecation():
    """The public `entropy` alias must emit a FutureWarning until removal."""
    with pytest.warns(FutureWarning, match="Function entropy is deprecated"):
        entropy([0, 0, 42.0])
def test_entropy():
    """Spot-check `_entropy` on mixed, empty and constant labelings."""
    assert_almost_equal(_entropy([0, 0, 42.0]), 0.6365141, 5)
    # Empty input returns 1 by convention.
    assert_almost_equal(_entropy([]), 1)
    assert _entropy([1, 1, 1, 1]) == 0
@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(),
    ids=_get_namespace_device_dtype_ids,
)
def test_entropy_array_api(array_namespace, device, dtype_name):
    """`_entropy` must produce the same values under array API dispatch."""
    xp = _array_api_for_tests(array_namespace, device)
    float_labels = xp.asarray(np.asarray([0, 0, 42.0], dtype=dtype_name), device=device)
    empty_int32_labels = xp.asarray([], dtype=xp.int32, device=device)
    int_labels = xp.asarray([1, 1, 1, 1], device=device)
    with config_context(array_api_dispatch=True):
        # Same expectations as test_entropy, but on xp arrays.
        assert _entropy(float_labels) == pytest.approx(0.6365141, abs=1e-5)
        assert _entropy(empty_int32_labels) == 1
        assert _entropy(int_labels) == 0
def test_contingency_matrix():
    """Contingency matrix must match an independent histogram2d computation."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    # np.histogram2d serves as the reference implementation here.
    C2 = np.histogram2d(labels_a, labels_b, bins=(np.arange(1, 5), np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    # eps is added uniformly to every cell.
    C = contingency_matrix(labels_a, labels_b, eps=0.1)
    assert_array_almost_equal(C, C2 + 0.1)
def test_contingency_matrix_sparse():
    """Sparse output must match dense, and eps must be rejected with sparse."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
    assert_array_almost_equal(C, C_sparse)
    with pytest.raises(ValueError, match="Cannot set 'eps' when sparse=True"):
        contingency_matrix(labels_a, labels_b, eps=1e-10, sparse=True)
def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    # (constant truth vs. all-singleton prediction) across sample sizes.
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = (np.ones(i, dtype=int), np.arange(i, dtype=int))
        assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0)
        assert v_measure_score(labels_a, labels_b) == pytest.approx(0.0)
        assert adjusted_mutual_info_score(labels_a, labels_b) == 0.0
        assert normalized_mutual_info_score(labels_a, labels_b) == pytest.approx(0.0)
        # Zero must hold for every normalization method as well.
        for method in ["min", "geometric", "arithmetic", "max"]:
            assert (
                adjusted_mutual_info_score(labels_a, labels_b, average_method=method)
                == 0.0
            )
            assert normalized_mutual_info_score(
                labels_a, labels_b, average_method=method
            ) == pytest.approx(0.0)
def test_v_measure_and_mutual_information(seed=36):
    # Check relation between v_measure, entropy and mutual information:
    # V == 2 * MI / (H(a) + H(b)) == NMI with arithmetic averaging.
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (
            random_state.randint(0, 10, i),
            random_state.randint(0, 10, i),
        )
        assert_almost_equal(
            v_measure_score(labels_a, labels_b),
            2.0
            * mutual_info_score(labels_a, labels_b)
            / (_entropy(labels_a) + _entropy(labels_b)),
            0,
        )
        avg = "arithmetic"
        assert_almost_equal(
            v_measure_score(labels_a, labels_b),
            normalized_mutual_info_score(labels_a, labels_b, average_method=avg),
        )
def test_fowlkes_mallows_score():
    """FMI on a handcrafted case plus the perfect and worst extremes."""
    # General case
    score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2])
    assert_almost_equal(score, 4.0 / np.sqrt(12.0 * 6.0))
    # Perfect match but where the label names changed
    perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0])
    assert_almost_equal(perfect_score, 1.0)
    # Worst case
    worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0], [0, 1, 2, 3, 4, 5])
    assert_almost_equal(worst_score, 0.0)
def test_fowlkes_mallows_score_properties():
    """FMI must be symmetric and invariant under label permutation."""
    # handcrafted example
    labels_a = np.array([0, 0, 0, 1, 1, 2])
    labels_b = np.array([1, 1, 2, 2, 0, 0])
    expected = 1.0 / np.sqrt((1.0 + 3.0) * (1.0 + 2.0))
    # FMI = TP / sqrt((TP + FP) * (TP + FN))
    score_original = fowlkes_mallows_score(labels_a, labels_b)
    assert_almost_equal(score_original, expected)
    # symmetric property
    score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symmetric, expected)
    # permutation property
    score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
    assert_almost_equal(score_permuted, expected)
    # symmetric and permutation(both together)
    score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
    assert_almost_equal(score_both, expected)
@pytest.mark.parametrize(
    "labels_true, labels_pred",
    [
        (["a"] * 6, [1, 1, 0, 0, 1, 1]),
        ([1] * 6, [1, 1, 0, 0, 1, 1]),
        ([1, 1, 0, 0, 1, 1], ["a"] * 6),
        ([1, 1, 0, 0, 1, 1], [1] * 6),
        (["a"] * 6, ["a"] * 6),
    ],
)
def test_mutual_info_score_positive_constant_label(labels_true, labels_pred):
    # Check that MI = 0 when one or both labelling are constant
    # non-regression test for #16355
    assert mutual_info_score(labels_true, labels_pred) == 0
def test_check_clustering_error():
    # Test warning message for continuous values
    # (clustering metrics only make sense for discrete labels).
    rng = np.random.RandomState(42)
    noise = rng.rand(500)
    wavelength = np.linspace(0.01, 1, 500) * 1e-6
    msg = (
        "Clustering metrics expects discrete values but received "
        "continuous values for label, and continuous values for "
        "target"
    )
    with pytest.warns(UserWarning, match=msg):
        check_clusterings(wavelength, noise)
def test_pair_confusion_matrix_fully_dispersed():
    # edge case: every element is its own cluster
    # -> all N*(N-1) ordered pairs land in the "different/different" cell.
    N = 100
    clustering1 = list(range(N))
    clustering2 = clustering1
    expected = np.array([[N * (N - 1), 0], [0, 0]])
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)
def test_pair_confusion_matrix_single_cluster():
    # edge case: only one cluster
    # -> all N*(N-1) ordered pairs land in the "same/same" cell.
    N = 100
    clustering1 = np.zeros((N,))
    clustering2 = clustering1
    expected = np.array([[0, 0], [0, N * (N - 1)]])
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)
def test_pair_confusion_matrix():
    # regular case: different non-trivial clusterings
    # compared against a brute-force O(N^2) reference over all ordered pairs.
    n = 10
    N = n**2
    clustering1 = np.hstack([[i + 1] * n for i in range(n)])
    clustering2 = np.hstack([[i + 1] * (n + 1) for i in range(n)])[:N]
    # basic quadratic implementation
    expected = np.zeros(shape=(2, 2), dtype=np.int64)
    for i in range(len(clustering1)):
        for j in range(len(clustering2)):
            if i != j:
                same_cluster_1 = int(clustering1[i] == clustering1[j])
                same_cluster_2 = int(clustering2[i] == clustering2[j])
                expected[same_cluster_1, same_cluster_2] += 1
    assert_array_equal(pair_confusion_matrix(clustering1, clustering2), expected)
@pytest.mark.parametrize(
    "clustering1, clustering2",
    [(list(range(100)), list(range(100))), (np.zeros((100,)), np.zeros((100,)))],
)
def test_rand_score_edge_cases(clustering1, clustering2):
    # edge case 1: every element is its own cluster
    # edge case 2: only one cluster
    # Identical partitions always give a perfect Rand index.
    assert_allclose(rand_score(clustering1, clustering2), 1.0)
def test_rand_score():
    # regular case: different non-trivial clusterings,
    # with the expected value derived by hand from the pair confusion matrix.
    clustering1 = [0, 0, 0, 1, 1, 1]
    clustering2 = [0, 1, 0, 1, 2, 2]
    # pair confusion matrix (ordered pairs, hence the factor 2)
    D11 = 2 * 2  # ordered pairs (1, 3), (5, 6)
    D10 = 2 * 4  # ordered pairs (1, 2), (2, 3), (4, 5), (4, 6)
    D01 = 2 * 1  # ordered pair (2, 4)
    D00 = 5 * 6 - D11 - D01 - D10  # the remaining pairs
    # rand score = (agreements) / (all pairs)
    expected_numerator = D00 + D11
    expected_denominator = D00 + D01 + D10 + D11
    expected = expected_numerator / expected_denominator
    assert_allclose(rand_score(clustering1, clustering2), expected)
def test_adjusted_rand_score_overflow():
    """Check that large amount of data will not lead to overflow in
    `adjusted_rand_score`.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20305
    """
    rng = np.random.RandomState(0)
    # int8 inputs with 100k samples force large intermediate pair counts.
    y_true = rng.randint(0, 2, 100_000, dtype=np.int8)
    y_pred = rng.randint(0, 2, 100_000, dtype=np.int8)
    # Any overflow would surface as a RuntimeWarning, promoted to an error.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        adjusted_rand_score(y_true, y_pred)
@pytest.mark.parametrize("average_method", ["min", "arithmetic", "geometric", "max"])
def test_normalized_mutual_info_score_bounded(average_method):
    """Check that nmi returns a score between 0 (included) and 1 (excluded
    for non-perfect match)
    Non-regression test for issue #13836
    """
    labels1 = [0] * 469
    labels2 = [1] + labels1[1:]
    labels3 = [0, 1] + labels1[2:]
    # labels1 is constant. The mutual info between labels1 and any other labelling is 0.
    nmi = normalized_mutual_info_score(labels1, labels2, average_method=average_method)
    assert nmi == 0
    # non constant, non perfect matching labels
    nmi = normalized_mutual_info_score(labels2, labels3, average_method=average_method)
    assert 0 <= nmi < 1
# TODO(1.9): remove
@pytest.mark.parametrize("sparse", [True, False])
def test_fowlkes_mallows_sparse_deprecated(sparse):
    """Check deprecation warning for 'sparse' parameter of fowlkes_mallows_score."""
    # Passing `sparse` at all (either value) must warn until removal.
    with pytest.warns(
        FutureWarning, match="The 'sparse' parameter was deprecated in 1.7"
    ):
        fowlkes_mallows_score([0, 1], [1, 1], sparse=sparse)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/cluster/tests/__init__.py | sklearn/metrics/cluster/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py | sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import abstractmethod
from typing import List
import numpy as np
from scipy.sparse import issparse
from sklearn import get_config
from sklearn.metrics._dist_metrics import BOOL_METRICS, METRIC_MAPPING64, DistanceMetric
from sklearn.metrics._pairwise_distances_reduction._argkmin import ArgKmin32, ArgKmin64
from sklearn.metrics._pairwise_distances_reduction._argkmin_classmode import (
ArgKminClassMode32,
ArgKminClassMode64,
)
from sklearn.metrics._pairwise_distances_reduction._base import (
_sqeuclidean_row_norms32,
_sqeuclidean_row_norms64,
)
from sklearn.metrics._pairwise_distances_reduction._radius_neighbors import (
RadiusNeighbors32,
RadiusNeighbors64,
)
from sklearn.metrics._pairwise_distances_reduction._radius_neighbors_classmode import (
RadiusNeighborsClassMode32,
RadiusNeighborsClassMode64,
)
def sqeuclidean_row_norms(X, num_threads):
    """Compute the squared euclidean norm of the rows of X in parallel.

    Parameters
    ----------
    X : ndarray or CSR matrix of shape (n_samples, n_features)
        Input data. Must be c-contiguous.

    num_threads : int
        The number of OpenMP threads to use.

    Returns
    -------
    sqeuclidean_row_norms : ndarray of shape (n_samples,)
        Arrays containing the squared euclidean norm of each row of X.
    """
    # Dispatch on dtype to the matching fused-type Cython kernel.
    if X.dtype == np.float64:
        norms = _sqeuclidean_row_norms64(X, num_threads)
    elif X.dtype == np.float32:
        norms = _sqeuclidean_row_norms32(X, num_threads)
    else:
        raise ValueError(
            "Only float64 or float32 datasets are supported at this time, "
            f"got: X.dtype={X.dtype}."
        )
    return np.asarray(norms)
class BaseDistancesReductionDispatcher:
    """Abstract base dispatcher for pairwise distance computation & reduction.
    Each dispatcher extending the base :class:`BaseDistancesReductionDispatcher`
    dispatcher must implement the :meth:`compute` classmethod.
    """
    @classmethod
    def valid_metrics(cls) -> List[str]:
        # Metrics the Cython back-end supports: all registered 64-bit metrics
        # plus "sqeuclidean", minus the explicitly excluded ones below.
        excluded = {
            # PyFunc cannot be supported because it necessitates interacting with
            # the CPython interpreter to call user defined functions.
            "pyfunc",
            "mahalanobis",  # is numerically unstable
            # In order to support discrete distance metrics, we need to have a
            # stable simultaneous sort which preserves the order of the indices
            # because there generally is a lot of occurrences for a given values
            # of distances in this case.
            # TODO: implement a stable simultaneous_sort.
            "hamming",
            *BOOL_METRICS,
        }
        return sorted(({"sqeuclidean"} | set(METRIC_MAPPING64.keys())) - excluded)
    @classmethod
    def is_usable_for(cls, X, Y, metric) -> bool:
        """Return True if the dispatcher can be used for the
        given parameters.
        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
            Input data.
        Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features)
            Input data.
        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        Returns
        -------
        True if the dispatcher can be used, else False.
        """
        # FIXME: the current Cython implementation is too slow for a large number of
        # features. We temporarily disable it to fallback on SciPy's implementation.
        # See: https://github.com/scikit-learn/scikit-learn/issues/28191
        if (
            issparse(X)
            and issparse(Y)
            and isinstance(metric, str)
            and "euclidean" in metric
        ):
            return False
        def is_numpy_c_ordered(X):
            # True for dense arrays stored in C (row-major) order.
            return hasattr(X, "flags") and getattr(X.flags, "c_contiguous", False)
        def is_valid_sparse_matrix(X):
            return (
                issparse(X)
                and X.format == "csr"
                and
                # TODO: support CSR matrices without non-zeros elements
                X.nnz > 0
                and
                # TODO: support CSR matrices with int64 indices and indptr
                # See: https://github.com/scikit-learn/scikit-learn/issues/23653
                X.indices.dtype == X.indptr.dtype == np.int32
            )
        # Usable only when the global config allows it, both operands have a
        # supported layout, dtypes match and are float32/float64, and the
        # metric is either a supported string or a DistanceMetric instance.
        is_usable = (
            get_config().get("enable_cython_pairwise_dist", True)
            and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X))
            and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y))
            and X.dtype == Y.dtype
            and X.dtype in (np.float32, np.float64)
            and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric))
        )
        return is_usable
    @classmethod
    @abstractmethod
    def compute(
        cls,
        X,
        Y,
        **kwargs,
    ):
        """Compute the reduction.
        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        **kwargs : additional parameters for the reduction
        Notes
        -----
        This method is an abstract class method: it has to be implemented
        for all subclasses.
        """
class ArgKmin(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y.

    For each row vector of X, computes the indices of the k first row
    vectors of Y with the smallest distances.

    ArgKmin is typically used to perform
    bruteforce k-nearest neighbors queries.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
    ):
        """Compute the argkmin reduction.

        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        k : int
            The k for the argkmin reduction.
        metric : str, default='euclidean'
            The distance metric to use for argkmin.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        return_distance : boolean, default=False
            Return distances between each X vector and its
            argkmin if set to True.

        Returns
        -------
        If return_distance=False:
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.

        If return_distance=True:
          - argkmin_distances : ndarray of shape (n_samples_X, k)
            Distances to the argkmin for each vector in X.
          - argkmin_indices : ndarray of shape (n_samples_X, k)
            Indices of the argkmin for each vector in X.

        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`ArgKmin`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        # Both dtype-specialized implementations take the exact same arguments,
        # so bundle them once and branch only on the (common) dtype of X and Y.
        dispatch_kwargs = dict(
            X=X,
            Y=Y,
            k=k,
            metric=metric,
            chunk_size=chunk_size,
            metric_kwargs=metric_kwargs,
            strategy=strategy,
            return_distance=return_distance,
        )
        if X.dtype == Y.dtype == np.float64:
            return ArgKmin64.compute(**dispatch_kwargs)
        if X.dtype == Y.dtype == np.float32:
            return ArgKmin32.compute(**dispatch_kwargs)
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class RadiusNeighbors(BaseDistancesReductionDispatcher):
    """Compute radius-based neighbors for two sets of vectors.

    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:

        dist(X[i], Y[j]) <= radius

    The distance function `dist` depends on the values of the `metric`
    and `metric_kwargs` parameters.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
        return_distance=False,
        sort_results=False,
    ):
        """Return the results of the reduction for the given arguments.

        Parameters
        ----------
        X : ndarray or CSR matrix of shape (n_samples_X, n_features)
            Input data.
        Y : ndarray or CSR matrix of shape (n_samples_Y, n_features)
            Input data.
        radius : float
            The radius defining the neighborhood.
        metric : str, default='euclidean'
            The distance metric to use.
            For a list of available metrics, see the documentation of
            :class:`~sklearn.metrics.DistanceMetric`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.
        return_distance : boolean, default=False
            Return distances between each X vector and its neighbors if set to True.
        sort_results : boolean, default=False
            Sort results with respect to distances between each X vector and its
            neighbors if set to True.

        Returns
        -------
        If return_distance=False:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.

        If return_distance=True:
          - neighbors_indices : ndarray of n_samples_X ndarray
            Indices of the neighbors for each vector in X.
          - neighbors_distances : ndarray of n_samples_X ndarray
            Distances to the neighbors for each vector in X.

        Notes
        -----
        This classmethod inspects the arguments values to dispatch to the
        dtype-specialized implementation of :class:`RadiusNeighbors`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        # The two dtype-specialized implementations share one signature:
        # bundle the arguments and branch only on the (common) dtype.
        dispatch_kwargs = dict(
            X=X,
            Y=Y,
            radius=radius,
            metric=metric,
            chunk_size=chunk_size,
            metric_kwargs=metric_kwargs,
            strategy=strategy,
            sort_results=sort_results,
            return_distance=return_distance,
        )
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighbors64.compute(**dispatch_kwargs)
        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighbors32.compute(**dispatch_kwargs)
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class ArgKminClassMode(BaseDistancesReductionDispatcher):
    """Compute the argkmin of row vectors of X on the ones of Y with labels.

    For each row vector of X, computes the indices of the k first row
    vectors of Y with the smallest distances. Computes weighted mode of labels.

    ArgKminClassMode is typically used to perform bruteforce k-nearest neighbors
    queries when the weighted mode of the labels for the k-nearest neighbors
    are required, such as in `predict` methods.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def valid_metrics(cls) -> List[str]:
        excluded = {
            # Euclidean is technically usable for ArgKminClassMode
            # but its current implementation would not be competitive.
            # TODO: implement Euclidean specialization using GEMM.
            "euclidean",
            "sqeuclidean",
        }
        # Return a sorted list so the result is deterministic (set iteration
        # order is not) and consistent with
        # RadiusNeighborsClassMode.valid_metrics.
        return sorted(set(BaseDistancesReductionDispatcher.valid_metrics()) - excluded)

    @classmethod
    def compute(
        cls,
        X,
        Y,
        k,
        weights,
        Y_labels,
        unique_Y_labels,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Compute the argkmin reduction.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.
        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership are provided through the
            `Y_labels` parameter.
        k : int
            The number of nearest neighbors to consider.
        weights : ndarray
            The weights applied over the `Y_labels` of `Y` when computing the
            weighted mode of the labels.
        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.
        unique_Y_labels : ndarray
            An array containing all unique indices contained in the
            corresponding `Y_labels` array.
        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.

        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.

        Notes
        -----
        This classmethod is responsible for introspecting the arguments
        values to dispatch to the most appropriate dtype-specialized
        implementation of :class:`ArgKminClassMode`.

        This allows decoupling the API entirely from the implementation details
        whilst maintaining RAII: all temporarily allocated datastructures necessary
        for the concrete implementation are therefore freed when this classmethod
        returns.
        """
        # Validate eagerly so the error does not surface deep inside the
        # Cython implementations.
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        if X.dtype == Y.dtype == np.float64:
            return ArgKminClassMode64.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                # The Cython implementations require intp-typed label arrays.
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        if X.dtype == Y.dtype == np.float32:
            return ArgKminClassMode32.compute(
                X=X,
                Y=Y,
                k=k,
                weights=weights,
                Y_labels=np.array(Y_labels, dtype=np.intp),
                unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
                metric=metric,
                chunk_size=chunk_size,
                metric_kwargs=metric_kwargs,
                strategy=strategy,
            )
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
class RadiusNeighborsClassMode(BaseDistancesReductionDispatcher):
    """Compute radius-based class modes of row vectors of X using the
    those of Y.

    For each row-vector X[i] of the queries X, find all the indices j of
    row-vectors in Y such that:

        dist(X[i], Y[j]) <= radius

    RadiusNeighborsClassMode is typically used to perform bruteforce
    radius neighbors queries when the weighted mode of the labels for
    the nearest neighbors within the specified radius are required,
    such as in `predict` methods.

    This class is not meant to be instantiated, one should only use
    its :meth:`compute` classmethod which handles allocation and
    deallocation consistently.
    """

    @classmethod
    def valid_metrics(cls) -> List[str]:
        # Euclidean is technically usable for RadiusNeighborsClassMode
        # but it would not be competitive.
        # TODO: implement Euclidean specialization using GEMM.
        unsupported = {"euclidean", "sqeuclidean"}
        supported = set(BaseDistancesReductionDispatcher.valid_metrics()) - unsupported
        return sorted(supported)

    @classmethod
    def compute(
        cls,
        X,
        Y,
        radius,
        weights,
        Y_labels,
        unique_Y_labels,
        outlier_label,
        metric="euclidean",
        chunk_size=None,
        metric_kwargs=None,
        strategy=None,
    ):
        """Return the results of the reduction for the given arguments.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            The input array to be labelled.
        Y : ndarray of shape (n_samples_Y, n_features)
            The input array whose class membership is provided through
            the `Y_labels` parameter.
        radius : float
            The radius defining the neighborhood.
        weights : ndarray
            The weights applied to the `Y_labels` when computing the
            weighted mode of the labels.
        Y_labels : ndarray
            An array containing the index of the class membership of the
            associated samples in `Y`. This is used in labeling `X`.
        unique_Y_labels : ndarray
            An array containing all unique class labels.
        outlier_label : int, default=None
            Label for outlier samples (samples with no neighbors in given
            radius). In the default case when the value is None if any
            outlier is detected, a ValueError will be raised. The outlier
            label should be selected from among the unique 'Y' labels. If
            it is specified with a different value a warning will be raised
            and all class probabilities of outliers will be assigned to be 0.
        metric : str, default='euclidean'
            The distance metric to use. For a list of available metrics, see
            the documentation of :class:`~sklearn.metrics.DistanceMetric`.
            Currently does not support `'precomputed'`.
        chunk_size : int, default=None,
            The number of vectors per chunk. If None (default) looks-up in
            scikit-learn configuration for `pairwise_dist_chunk_size`,
            and use 256 if it is not set.
        metric_kwargs : dict, default=None
            Keyword arguments to pass to specified metric function.
        strategy : str, {'auto', 'parallel_on_X', 'parallel_on_Y'}, default=None
            The chunking strategy defining which dataset parallelization are made on.

            For both strategies the computations happens with two nested loops,
            respectively on chunks of X and chunks of Y.
            Strategies differ on which loop (outer or inner) is made to run
            in parallel with the Cython `prange` construct:

            - 'parallel_on_X' dispatches chunks of X uniformly on threads.
              Each thread then iterates on all the chunks of Y. This strategy is
              embarrassingly parallel and comes with no datastructures
              synchronisation.
            - 'parallel_on_Y' dispatches chunks of Y uniformly on threads.
              Each thread processes all the chunks of X in turn. This strategy is
              a sequence of embarrassingly parallel subtasks (the inner loop on Y
              chunks) with intermediate datastructures synchronisation at each
              iteration of the sequential outer loop on X chunks.
            - 'auto' relies on a simple heuristic to choose between
              'parallel_on_X' and 'parallel_on_Y': when `X.shape[0]` is large enough,
              'parallel_on_X' is usually the most efficient strategy.
              When `X.shape[0]` is small but `Y.shape[0]` is large, 'parallel_on_Y'
              brings more opportunity for parallelism and is therefore more efficient
              despite the synchronization step at each iteration of the outer loop
              on chunks of `X`.
            - None (default) looks-up in scikit-learn configuration for
              `pairwise_dist_parallel_strategy`, and use 'auto' if it is not set.

        Returns
        -------
        probabilities : ndarray of shape (n_samples_X, n_classes)
            An array containing the class probabilities for each sample.
        """
        # Validate eagerly so the error does not surface deep inside the
        # Cython implementations.
        if weights not in {"uniform", "distance"}:
            raise ValueError(
                "Only the 'uniform' or 'distance' weights options are supported"
                f" at this time. Got: {weights=}."
            )
        # Both dtype-specialized implementations take the exact same arguments;
        # bundle them once (labels coerced to intp) and branch only on dtype.
        dispatch_kwargs = dict(
            X=X,
            Y=Y,
            radius=radius,
            weights=weights,
            Y_labels=np.array(Y_labels, dtype=np.intp),
            unique_Y_labels=np.array(unique_Y_labels, dtype=np.intp),
            outlier_label=outlier_label,
            metric=metric,
            chunk_size=chunk_size,
            metric_kwargs=metric_kwargs,
            strategy=strategy,
        )
        if X.dtype == Y.dtype == np.float64:
            return RadiusNeighborsClassMode64.compute(**dispatch_kwargs)
        if X.dtype == Y.dtype == np.float32:
            return RadiusNeighborsClassMode32.compute(**dispatch_kwargs)
        raise ValueError(
            "Only float64 or float32 datasets pairs are supported at this time, "
            f"got: X.dtype={X.dtype} and Y.dtype={Y.dtype}."
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/metrics/_pairwise_distances_reduction/__init__.py | sklearn/metrics/_pairwise_distances_reduction/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
#
# Pairwise Distances Reductions
# =============================
#
# Overview
# --------
#
# This module provides routines to compute pairwise distances between a set
# of row vectors of X and another set of row vectors of Y and apply a
# reduction on top. The canonical example is the brute-force computation
# of the top k nearest neighbors by leveraging the arg-k-min reduction.
#
# The reduction takes a matrix of pairwise distances between rows of X and Y
# as input and outputs an aggregate data-structure for each row of X. The
# aggregate values are typically smaller than the number of rows in Y, hence
# the term reduction.
#
# For computational reasons, the reduction are performed on the fly on chunks
# of rows of X and Y so as to keep intermediate data-structures in CPU cache
# and avoid unnecessary round trips of large distance arrays with the RAM
# that would otherwise severely degrade the speed by making the overall
# processing memory-bound.
#
# Finally, the routines follow a generic parallelization template to process
# chunks of data with OpenMP loops (via Cython prange), either on rows of X
# or rows of Y depending on their respective sizes.
#
#
# Dispatching to specialized implementations
# ------------------------------------------
#
# Dispatchers are meant to be used in the Python code. Under the hood, a
# dispatcher must only define the logic to choose at runtime to the correct
# dtype-specialized :class:`BaseDistancesReductionDispatcher` implementation based
# on the dtype of X and of Y.
#
#
# High-level diagram
# ------------------
#
# Legend:
#
# A ---⊳ B: A inherits from B
# A ---x B: A dispatches to B
#
#
# (base dispatcher)
# BaseDistancesReductionDispatcher
# ∆
# |
# |
# +------------------+---------------+---------------+------------------+
# | | | |
# | (dispatcher) (dispatcher) |
# | ArgKmin RadiusNeighbors |
# | | | |
# | | | |
# | | (float{32,64} implem.) | |
# | | BaseDistancesReduction{32,64} | |
# | | ∆ | |
# (dispatcher) | | | (dispatcher)
# ArgKminClassMode | | | RadiusNeighborsClassMode
# | | +----------+----------+ | |
# | | | | | |
# | | | | | |
# | x | | x |
# | +-------⊳ ArgKmin{32,64} RadiusNeighbors{32,64} ⊲---+ |
# x | | ∆ ∆ | | x
# ArgKminClassMode{32,64} | | | | RadiusNeighborsClassMode{32,64}
# ===================================== Specializations ============================================
# | | | |
# | | | |
# x | | x
# EuclideanArgKmin{32,64} EuclideanRadiusNeighbors{32,64}
#
#
# For instance :class:`ArgKmin` dispatches to:
# - :class:`ArgKmin64` if X and Y are two `float64` array-likes
# - :class:`ArgKmin32` if X and Y are two `float32` array-likes
#
# In addition, if the metric parameter is set to "euclidean" or "sqeuclidean",
# then some direct subclass of `BaseDistancesReduction{32,64}` further dispatches
# to one of their subclass for euclidean-specialized implementation. For instance,
# :class:`ArgKmin64` dispatches to :class:`EuclideanArgKmin64`.
#
# Those Euclidean-specialized implementations rely on optimal implementations of
# a decomposition of the squared euclidean distance matrix into a sum of three terms
# (see :class:`MiddleTermComputer{32,64}`).
#
from sklearn.metrics._pairwise_distances_reduction._dispatcher import (
ArgKmin,
ArgKminClassMode,
BaseDistancesReductionDispatcher,
RadiusNeighbors,
RadiusNeighborsClassMode,
sqeuclidean_row_norms,
)
__all__ = [
"ArgKmin",
"ArgKminClassMode",
"BaseDistancesReductionDispatcher",
"RadiusNeighbors",
"RadiusNeighborsClassMode",
"sqeuclidean_row_norms",
]
# ruff: noqa: E501
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_build_utils/version.py | sklearn/_build_utils/version.py | #!/usr/bin/env python3
"""Extract version number from __init__.py"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os

# Locate sklearn/__init__.py relative to this script so it works from any CWD.
sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py")
# Use a context manager so the file handle is closed deterministically
# (the original `open(...).readlines()` leaked it) and decode explicitly.
with open(sklearn_init, encoding="utf-8") as init_file:
    data = init_file.readlines()
# Find the `__version__ = "..."` assignment and strip quotes from its value.
version_line = next(line for line in data if line.startswith("__version__"))
version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "")
print(version)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_build_utils/tempita.py | sklearn/_build_utils/tempita.py | #!/usr/bin/env python3
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
    """Process tempita templated file and write out the result.

    The template file is expected to end in `.c.tp` or `.pyx.tp`:
    e.g. processing `template.c.tp` generates `template.c`.

    Parameters
    ----------
    fromfile : str
        Path to the `.tp` template file.
    outfile : str, default=None
        Path of the rendered output file. If None, it is derived from
        `fromfile` by stripping the trailing `.tp` extension (the original
        default crashed with `open(None)`).
    """
    if outfile is None:
        # `template.c.tp` -> `template.c`
        outfile = os.path.splitext(fromfile)[0]

    with open(fromfile, "r", encoding="utf-8") as f:
        template_content = f.read()

    template = tempita.Template(template_content)
    content = template.substitute()

    with open(outfile, "w", encoding="utf-8") as f:
        f.write(content)
def main():
    """CLI entry point: render one tempita template into ``--outdir``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str, help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
    parser.add_argument(
        "-i",
        "--ignore",
        type=str,
        help=(
            "An ignored input - may be useful to add a "
            "dependency between custom targets"
        ),
    )
    args = parser.parse_args()

    # Validate the CLI contract with guard clauses before doing any work.
    if not args.infile.endswith(".tp"):
        raise ValueError(f"Unexpected extension: {args.infile}")
    if not args.outdir:
        raise ValueError("Missing `--outdir` argument to tempita.py")

    # Output name: the template's basename with the `.tp` suffix stripped,
    # placed inside the (CWD-anchored) output directory.
    template_basename = os.path.split(args.infile)[1]
    rendered_basename = os.path.splitext(template_basename)[0]
    destination = os.path.join(os.getcwd(), args.outdir, rendered_basename)
    process_tempita(args.infile, destination)
if __name__ == "__main__":
main()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_build_utils/__init__.py | sklearn/_build_utils/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/_label_propagation.py | sklearn/semi_supervised/_label_propagation.py | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset given
label assignments over an initial subset. In one variant, the algorithm does
not allow for any errors in the initial assignment (hard-clamping) while
in another variant, the algorithm allows for some wiggle room for the initial
assignments, allowing them to change by a fraction alpha in each iteration
(soft-clamping).
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, ClassifierMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.fixes import laplacian as csgraph_laplacian
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_is_fitted, validate_data
class BaseLabelPropagation(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'} or callable, default='rbf'
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape (n_samples, n_features),
and return a (n_samples, n_samples) shaped weight matrix.
gamma : float, default=20
Parameter for rbf kernel.
n_neighbors : int, default=7
Parameter for knn kernel. Need to be strictly positive.
alpha : float, default=1.0
Clamping factor.
max_iter : int, default=30
Change maximum number of iterations allowed.
tol : float, default=1e-3
Convergence tolerance: threshold to consider the system at steady
state.
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
"""
_parameter_constraints: dict = {
"kernel": [StrOptions({"knn", "rbf"}), callable],
"gamma": [Interval(Real, 0, None, closed="left")],
"n_neighbors": [Interval(Integral, 0, None, closed="neither")],
"alpha": [None, Interval(Real, 0, 1, closed="neither")],
"max_iter": [Interval(Integral, 0, None, closed="neither")],
"tol": [Interval(Real, 0, None, closed="left")],
"n_jobs": [None, Integral],
}
def __init__(
    self,
    kernel="rbf",
    *,
    gamma=20,
    n_neighbors=7,
    alpha=1,
    max_iter=30,
    tol=1e-3,
    n_jobs=None,
):
    """Store the hyper-parameters; no validation or computation happens here."""
    # Kernel configuration.
    self.kernel = kernel
    self.gamma = gamma
    self.n_neighbors = n_neighbors
    # Clamping factor.
    self.alpha = alpha
    # Iteration control.
    self.max_iter = max_iter
    self.tol = tol
    # Parallelism.
    self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
    """Build the affinity between `X` and `y` (or `X` with itself when `y` is None).

    Note: for the 'knn' kernel with `y` given, neighbor *indices* are
    returned rather than a weight matrix (callers index into
    `label_distributions_` with them).
    """
    if self.kernel == "rbf":
        if y is None:
            return rbf_kernel(X, X, gamma=self.gamma)
        else:
            return rbf_kernel(X, y, gamma=self.gamma)
    elif self.kernel == "knn":
        # Cache the fitted NearestNeighbors estimator on the instance so
        # repeated kernel evaluations reuse the same index structure.
        # NOTE(review): assumes `self.nn_fit` was initialized (to None)
        # before the first call — set elsewhere in the class; confirm.
        if self.nn_fit is None:
            self.nn_fit = NearestNeighbors(
                n_neighbors=self.n_neighbors, n_jobs=self.n_jobs
            ).fit(X)
        if y is None:
            # Sparse connectivity graph of the training points with themselves.
            return self.nn_fit.kneighbors_graph(
                self.nn_fit._fit_X, self.n_neighbors, mode="connectivity"
            )
        else:
            return self.nn_fit.kneighbors(y, return_distance=False)
    elif callable(self.kernel):
        if y is None:
            return self.kernel(X, X)
        else:
            return self.kernel(X, y)
@abstractmethod
def _build_graph(self):
    """Build the graph over the training data.

    Subclasses must override this; the base implementation only raises.
    """
    raise NotImplementedError(
        "Graph construction must be implemented to fit a label propagation model."
    )
def predict(self, X):
    """Perform inductive inference across the model.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data matrix.

    Returns
    -------
    y : ndarray of shape (n_samples,)
        Predictions for input data.
    """
    # Note: since `predict` does not accept semi-supervised labels as input,
    # `fit(X, y).predict(X) != fit(X, y).transduction_`.
    # Hence, `fit_predict` is not implemented.
    # See https://github.com/scikit-learn/scikit-learn/pull/24898
    class_probas = self.predict_proba(X)
    winning_class_indices = class_probas.argmax(axis=1)
    return self.classes_[winning_class_indices].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes)
Normalized probability distributions across
class labels.
"""
check_is_fitted(self)
X_2d = validate_data(
self,
X,
accept_sparse=["csc", "csr", "coo", "dok", "bsr", "lil", "dia"],
reset=False,
)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == "knn":
probabilities = np.array(
[
np.sum(self.label_distributions_[weight_matrix], axis=0)
for weight_matrix in weight_matrices
]
)
else:
weight_matrices = weight_matrices.T
probabilities = safe_sparse_dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model to X.

        The input samples (labeled and unlabeled) are provided by matrix X,
        and target labels are provided by matrix y. We conventionally apply the
        label -1 to unlabeled samples in matrix y in a semi-supervised
        classification.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : array-like of shape (n_samples,)
            Target class values with unlabeled points marked as -1.
            All unlabeled samples will be transductively assigned labels
            internally, which are stored in `transduction_`.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc"],
            reset=True,
        )
        self.X_ = X
        check_classification_targets(y)
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only;
        # -1 is the conventional marker for unlabeled samples.
        classes = np.unique(y)
        classes = classes[classes != -1]
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        y = np.asarray(y)
        unlabeled = y == -1
        # initialize distributions: one-hot rows for labeled samples,
        # all-zero rows for unlabeled ones.
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1
        # `y_static` is the clamped component re-imposed at every iteration.
        y_static = np.copy(self.label_distributions_)
        if self._variant == "propagation":
            # LabelPropagation: hard clamping, only labeled rows contribute.
            y_static[unlabeled] = 0
        else:
            # LabelSpreading: soft clamping weighted by (1 - alpha).
            y_static *= 1 - self.alpha
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        unlabeled = unlabeled[:, np.newaxis]
        if sparse.issparse(graph_matrix):
            # CSR gives fast row-oriented matmul in the iteration below.
            graph_matrix = graph_matrix.tocsr()
        for self.n_iter_ in range(self.max_iter):
            # Converged once the distributions move less than `tol` (L1 norm).
            if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
                break
            l_previous = self.label_distributions_
            # Diffuse the current label distributions along the graph.
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_
            )
            if self._variant == "propagation":
                normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
                normalizer[normalizer == 0] = 1
                self.label_distributions_ /= normalizer
                # Hard clamp: restore the original labels on labeled rows.
                self.label_distributions_ = np.where(
                    unlabeled, self.label_distributions_, y_static
                )
            else:
                # clamp (soft): blend the diffusion result with `y_static`.
                self.label_distributions_ = (
                    np.multiply(self.alpha, self.label_distributions_) + y_static
                )
        else:
            # The for-else branch runs only when the loop was never broken,
            # i.e. `max_iter` was exhausted without reaching `tol`.
            warnings.warn(
                "max_iter=%d was reached without convergence." % self.max_iter,
                category=ConvergenceWarning,
            )
            self.n_iter_ += 1
        # Final row-normalization so each row is a proper distribution.
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        normalizer[normalizer == 0] = 1
        self.label_distributions_ /= normalizer
        # set the transduction item: hard labels for every training sample.
        transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)]
        self.transduction_ = transduction.ravel()
        return self
    def __sklearn_tags__(self):
        # Advertise sparse-input support to the common estimator checks.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        return tags
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'} or callable, default='rbf'
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape (n_samples, n_features),
        and return a (n_samples, n_samples) shaped weight matrix.
    gamma : float, default=20
        Parameter for rbf kernel.
    n_neighbors : int, default=7
        Parameter for knn kernel which need to be strictly positive.
    max_iter : int, default=1000
        Change maximum number of iterations allowed.
    tol : float, default=1e-3
        Convergence tolerance: threshold to consider the system at steady
        state.
    n_jobs : int, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    X_ : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input array.
    classes_ : ndarray of shape (n_classes,)
        The distinct labels used in classifying instances.
    label_distributions_ : ndarray of shape (n_samples, n_classes)
        Categorical distribution for each item.
    transduction_ : ndarray of shape (n_samples)
        Label assigned to each item during :term:`fit`.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise.

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    LabelPropagation(...)
    """
    # Selects the hard-clamping branch of BaseLabelPropagation.fit.
    _variant = "propagation"
    # LabelPropagation does not use a clamping factor: copy the inherited
    # constraints and drop "alpha" (the constructor forces alpha=None).
    _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints}
    _parameter_constraints.pop("alpha")
    def __init__(
        self,
        kernel="rbf",
        *,
        gamma=20,
        n_neighbors=7,
        max_iter=1000,
        tol=1e-3,
        n_jobs=None,
    ):
        super().__init__(
            kernel=kernel,
            gamma=gamma,
            n_neighbors=n_neighbors,
            max_iter=max_iter,
            tol=tol,
            n_jobs=n_jobs,
            alpha=None,
        )
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample."""
        if self.kernel == "knn":
            # Force a fresh NearestNeighbors index for this fit.
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Row-normalize the affinities into a transition matrix.
        normalizer = affinity_matrix.sum(axis=1)
        # handle spmatrix (make normalizer 1D)
        if sparse.isspmatrix(affinity_matrix):
            normalizer = np.ravel(normalizer)
        # TODO: when SciPy 1.12+ is min dependence, replace up to ---- with:
        # affinity_matrix /= normalizer[:, np.newaxis]
        if sparse.issparse(affinity_matrix):
            inv_normalizer = sparse.diags(1.0 / normalizer)
            affinity_matrix = inv_normalizer @ affinity_matrix
        else:  # Dense affinity_matrix
            affinity_matrix /= normalizer[:, np.newaxis]
        # ----
        return affinity_matrix
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model to X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : array-like of shape (n_samples,)
            Target class values with unlabeled points marked as -1.
            All unlabeled samples will be transductively assigned labels
            internally, which are stored in `transduction_`.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        return super().fit(X, y)
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning.

    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'} or callable, default='rbf'
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape (n_samples, n_features),
        and return a (n_samples, n_samples) shaped weight matrix.
    gamma : float, default=20
        Parameter for rbf kernel.
    n_neighbors : int, default=7
        Parameter for knn kernel which is a strictly positive integer.
    alpha : float, default=0.2
        Clamping factor. A value in (0, 1) that specifies the relative amount
        that an instance should adopt the information from its neighbors as
        opposed to its initial label.
        alpha=0 means keeping the initial label information; alpha=1 means
        replacing all initial information.
    max_iter : int, default=30
        Maximum number of iterations allowed.
    tol : float, default=1e-3
        Convergence tolerance: threshold to consider the system at steady
        state.
    n_jobs : int, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    X_ : ndarray of shape (n_samples, n_features)
        Input array.
    classes_ : ndarray of shape (n_classes,)
        The distinct labels used in classifying instances.
    label_distributions_ : ndarray of shape (n_samples, n_classes)
        Categorical distribution for each item.
    transduction_ : ndarray of shape (n_samples,)
        Label assigned to each item during :term:`fit`.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning.

    References
    ----------
    `Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    <https://citeseerx.ist.psu.edu/doc_view/pid/d74c37aabf2d5cae663007cbd8718175466aea8c>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    LabelSpreading(...)
    """
    # Selects the soft-clamping branch of BaseLabelPropagation.fit.
    _variant = "spreading"
    # Restrict alpha to the open interval (0, 1); None is not allowed here.
    _parameter_constraints: dict = {**BaseLabelPropagation._parameter_constraints}
    _parameter_constraints["alpha"] = [Interval(Real, 0, 1, closed="neither")]
    def __init__(
        self,
        kernel="rbf",
        *,
        gamma=20,
        n_neighbors=7,
        alpha=0.2,
        max_iter=30,
        tol=1e-3,
        n_jobs=None,
    ):
        # this one has different base parameters
        super().__init__(
            kernel=kernel,
            gamma=gamma,
            n_neighbors=n_neighbors,
            alpha=alpha,
            max_iter=max_iter,
            tol=tol,
            n_jobs=n_jobs,
        )
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == "knn":
            # Force a fresh NearestNeighbors index for this fit.
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = csgraph_laplacian(affinity_matrix, normed=True)
        # Negate so the off-diagonal entries become the normalized
        # similarities used by the propagation iteration.
        laplacian = -laplacian
        if sparse.issparse(laplacian):
            # csgraph_laplacian returns COO here; zero the diagonal in place.
            diag_mask = laplacian.row == laplacian.col
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[:: n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/_self_training.py | sklearn/semi_supervised/_self_training.py | import warnings
from numbers import Integral, Real
import numpy as np
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
MetaEstimatorMixin,
_fit_context,
clone,
)
from sklearn.utils import Bunch, get_tags, safe_mask
from sklearn.utils._param_validation import HasMethods, Interval, StrOptions
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import _estimator_has, check_is_fitted, validate_data
__all__ = ["SelfTrainingClassifier"]
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
class SelfTrainingClassifier(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
    """Self-training classifier.

    This :term:`metaestimator` allows a given supervised classifier to function as a
    semi-supervised classifier, allowing it to learn from unlabeled data. It
    does this by iteratively predicting pseudo-labels for the unlabeled data
    and adding them to the training set.

    The classifier will continue iterating until either max_iter is reached, or
    no pseudo-labels were added to the training set in the previous iteration.

    Read more in the :ref:`User Guide <self_training>`.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and `predict_proba`.
        Invoking the `fit` method will fit a clone of the passed estimator,
        which will be stored in the `estimator_` attribute.
        .. versionadded:: 1.6
            `estimator` was added to replace `base_estimator`.
    threshold : float, default=0.75
        The decision threshold for use with `criterion='threshold'`.
        Should be in [0, 1). When using the `'threshold'` criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.
    criterion : {'threshold', 'k_best'}, default='threshold'
        The selection criterion used to select which labels to add to the
        training set. If `'threshold'`, pseudo-labels with prediction
        probabilities above `threshold` are added to the dataset. If `'k_best'`,
        the `k_best` pseudo-labels with highest prediction probabilities are
        added to the dataset. When using the 'threshold' criterion, a
        :ref:`well calibrated classifier <calibration>` should be used.
    k_best : int, default=10
        The amount of samples to add in each iteration. Only used when
        `criterion='k_best'`.
    max_iter : int or None, default=10
        Maximum number of iterations allowed. Should be greater than or equal
        to 0. If it is `None`, the classifier will continue to predict labels
        until no new pseudo-labels are added, or all unlabeled samples have
        been labeled.
    verbose : bool, default=False
        Enable verbose output.

    Attributes
    ----------
    estimator_ : estimator object
        The fitted estimator.
    classes_ : ndarray or list of ndarray of shape (n_classes,)
        Class labels for each output. (Taken from the trained
        `estimator_`).
    transduction_ : ndarray of shape (n_samples,)
        The labels used for the final fit of the classifier, including
        pseudo-labels added during fit.
    labeled_iter_ : ndarray of shape (n_samples,)
        The iteration in which each sample was labeled. When a sample has
        iteration 0, the sample was already labeled in the original dataset.
        When a sample has iteration -1, the sample was not labeled in any
        iteration.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        The number of rounds of self-training, that is the number of times the
        base estimator is fitted on relabeled variants of the training set.
    termination_condition_ : {'max_iter', 'no_change', 'all_labeled'}
        The reason that fitting was stopped.
        - `'max_iter'`: `n_iter_` reached `max_iter`.
        - `'no_change'`: no new labels were predicted.
        - `'all_labeled'`: all unlabeled samples were labeled before `max_iter`
          was reached.

    See Also
    --------
    LabelPropagation : Label propagation classifier.
    LabelSpreading : Label spreading model for semi-supervised learning.

    References
    ----------
    :doi:`David Yarowsky. 1995. Unsupervised word sense disambiguation rivaling
    supervised methods. In Proceedings of the 33rd annual meeting on
    Association for Computational Linguistics (ACL '95). Association for
    Computational Linguistics, Stroudsburg, PA, USA, 189-196.
    <10.3115/981658.981684>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import SelfTrainingClassifier
    >>> from sklearn.svm import SVC
    >>> rng = np.random.RandomState(42)
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3
    >>> iris.target[random_unlabeled_points] = -1
    >>> svc = SVC(probability=True, gamma="auto")
    >>> self_training_model = SelfTrainingClassifier(svc)
    >>> self_training_model.fit(iris.data, iris.target)
    SelfTrainingClassifier(...)
    """
    _parameter_constraints: dict = {
        # We don't require `predict_proba` here to allow passing a meta-estimator
        # that only exposes `predict_proba` after fitting.
        "estimator": [HasMethods(["fit"])],
        "threshold": [Interval(Real, 0.0, 1.0, closed="left")],
        "criterion": [StrOptions({"threshold", "k_best"})],
        "k_best": [Interval(Integral, 1, None, closed="left")],
        "max_iter": [Interval(Integral, 0, None, closed="left"), None],
        "verbose": ["verbose"],
    }
    def __init__(
        self,
        estimator=None,
        threshold=0.75,
        criterion="threshold",
        k_best=10,
        max_iter=10,
        verbose=False,
    ):
        self.estimator = estimator
        self.threshold = threshold
        self.criterion = criterion
        self.k_best = k_best
        self.max_iter = max_iter
        self.verbose = verbose
    def _get_estimator(self):
        """Get the estimator.

        Returns
        -------
        estimator_ : estimator object
            The cloned estimator object.
        """
        return clone(self.estimator)
    @_fit_context(
        # SelfTrainingClassifier.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **params):
        """
        Fit self-training classifier using `X`, `y` as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        y : {array-like, sparse matrix} of shape (n_samples,)
            Array representing the labels. Unlabeled samples should have the
            label -1.
        **params : dict
            Parameters to pass to the underlying estimators.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_params(params, self, "fit")
        self.estimator_ = self._get_estimator()
        # we need row slicing support for sparse matrices, but costly finiteness check
        # can be delegated to the base estimator.
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc", "lil", "dok"],
            ensure_all_finite=False,
        )
        if y.dtype.kind in ["U", "S"]:
            raise ValueError(
                "y has dtype string. If you wish to predict on "
                "string targets, use dtype object, and use -1"
                " as the label for unlabeled samples."
            )
        has_label = y != -1
        if np.all(has_label):
            warnings.warn("y contains no unlabeled samples", UserWarning)
        if self.criterion == "k_best" and (
            self.k_best > X.shape[0] - np.sum(has_label)
        ):
            warnings.warn(
                (
                    "k_best is larger than the amount of unlabeled "
                    "samples. All unlabeled samples will be labeled in "
                    "the first iteration"
                ),
                UserWarning,
            )
        if _routing_enabled():
            routed_params = process_routing(self, "fit", **params)
        else:
            routed_params = Bunch(estimator=Bunch(fit={}))
        # transduction_ starts as a copy of y and accumulates pseudo-labels;
        # labeled_iter_ records in which round each sample was labeled
        # (0 = originally labeled, -1 = never labeled).
        self.transduction_ = np.copy(y)
        self.labeled_iter_ = np.full_like(y, -1)
        self.labeled_iter_[has_label] = 0
        self.n_iter_ = 0
        while not np.all(has_label) and (
            self.max_iter is None or self.n_iter_ < self.max_iter
        ):
            self.n_iter_ += 1
            # Refit on all currently-labeled samples (original + pseudo).
            self.estimator_.fit(
                X[safe_mask(X, has_label)],
                self.transduction_[has_label],
                **routed_params.estimator.fit,
            )
            # Predict on the unlabeled samples
            prob = self.estimator_.predict_proba(X[safe_mask(X, ~has_label)])
            pred = self.estimator_.classes_[np.argmax(prob, axis=1)]
            max_proba = np.max(prob, axis=1)
            # Select new labeled samples
            if self.criterion == "threshold":
                # Boolean mask over the unlabeled subset.
                selected = max_proba > self.threshold
            else:
                n_to_select = min(self.k_best, max_proba.shape[0])
                if n_to_select == max_proba.shape[0]:
                    selected = np.ones_like(max_proba, dtype=bool)
                else:
                    # NB these are indices, not a mask
                    selected = np.argpartition(-max_proba, n_to_select)[:n_to_select]
            # Map selected indices into original array
            selected_full = np.nonzero(~has_label)[0][selected]
            # Add newly labeled confident predictions to the dataset
            self.transduction_[selected_full] = pred[selected]
            has_label[selected_full] = True
            self.labeled_iter_[selected_full] = self.n_iter_
            if selected_full.shape[0] == 0:
                # no changed labels
                self.termination_condition_ = "no_change"
                break
            if self.verbose:
                print(
                    f"End of iteration {self.n_iter_},"
                    f" added {selected_full.shape[0]} new labels."
                )
        # "all_labeled" takes precedence over "max_iter" when both apply.
        if self.n_iter_ == self.max_iter:
            self.termination_condition_ = "max_iter"
        if np.all(has_label):
            self.termination_condition_ = "all_labeled"
        # Final refit on everything that ended up labeled.
        self.estimator_.fit(
            X[safe_mask(X, has_label)],
            self.transduction_[has_label],
            **routed_params.estimator.fit,
        )
        self.classes_ = self.estimator_.classes_
        return self
    @available_if(_estimator_has("predict"))
    def predict(self, X, **params):
        """Predict the classes of `X`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        **params : dict of str -> object
            Parameters to pass to the underlying estimator's ``predict`` method.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            Array with predicted labels.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "predict")
        if _routing_enabled():
            # metadata routing is enabled.
            routed_params = process_routing(self, "predict", **params)
        else:
            routed_params = Bunch(estimator=Bunch(predict={}))
        X = validate_data(
            self,
            X,
            accept_sparse=True,
            ensure_all_finite=False,
            reset=False,
        )
        return self.estimator_.predict(X, **routed_params.estimator.predict)
    @available_if(_estimator_has("predict_proba"))
    def predict_proba(self, X, **params):
        """Predict probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        **params : dict of str -> object
            Parameters to pass to the underlying estimator's
            ``predict_proba`` method.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with prediction probabilities.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "predict_proba")
        if _routing_enabled():
            # metadata routing is enabled.
            routed_params = process_routing(self, "predict_proba", **params)
        else:
            routed_params = Bunch(estimator=Bunch(predict_proba={}))
        X = validate_data(
            self,
            X,
            accept_sparse=True,
            ensure_all_finite=False,
            reset=False,
        )
        return self.estimator_.predict_proba(X, **routed_params.estimator.predict_proba)
    @available_if(_estimator_has("decision_function"))
    def decision_function(self, X, **params):
        """Call decision function of the `estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        **params : dict of str -> object
            Parameters to pass to the underlying estimator's
            ``decision_function`` method.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Result of the decision function of the `estimator`.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "decision_function")
        if _routing_enabled():
            # metadata routing is enabled.
            routed_params = process_routing(self, "decision_function", **params)
        else:
            routed_params = Bunch(estimator=Bunch(decision_function={}))
        X = validate_data(
            self,
            X,
            accept_sparse=True,
            ensure_all_finite=False,
            reset=False,
        )
        return self.estimator_.decision_function(
            X, **routed_params.estimator.decision_function
        )
    @available_if(_estimator_has("predict_log_proba"))
    def predict_log_proba(self, X, **params):
        """Predict log probability for each possible outcome.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        **params : dict of str -> object
            Parameters to pass to the underlying estimator's
            ``predict_log_proba`` method.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        y : ndarray of shape (n_samples, n_features)
            Array with log prediction probabilities.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "predict_log_proba")
        if _routing_enabled():
            # metadata routing is enabled.
            routed_params = process_routing(self, "predict_log_proba", **params)
        else:
            routed_params = Bunch(estimator=Bunch(predict_log_proba={}))
        X = validate_data(
            self,
            X,
            accept_sparse=True,
            ensure_all_finite=False,
            reset=False,
        )
        return self.estimator_.predict_log_proba(
            X, **routed_params.estimator.predict_log_proba
        )
    @available_if(_estimator_has("score"))
    def score(self, X, y, **params):
        """Call score on the `estimator`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Array representing the data.
        y : array-like of shape (n_samples,)
            Array representing the labels.
        **params : dict of str -> object
            Parameters to pass to the underlying estimator's ``score`` method.
            .. versionadded:: 1.6
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        score : float
            Result of calling score on the `estimator`.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "score")
        if _routing_enabled():
            # metadata routing is enabled.
            routed_params = process_routing(self, "score", **params)
        else:
            routed_params = Bunch(estimator=Bunch(score={}))
        X = validate_data(
            self,
            X,
            accept_sparse=True,
            ensure_all_finite=False,
            reset=False,
        )
        return self.estimator_.score(X, y, **routed_params.estimator.score)
    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.6

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = MetadataRouter(owner=self)
        router.add(
            estimator=self.estimator,
            method_mapping=(
                MethodMapping()
                .add(callee="fit", caller="fit")
                .add(callee="score", caller="fit")
                .add(callee="predict", caller="predict")
                .add(callee="predict_proba", caller="predict_proba")
                .add(callee="decision_function", caller="decision_function")
                .add(callee="predict_log_proba", caller="predict_log_proba")
                .add(callee="score", caller="score")
            ),
        )
        return router
    def __sklearn_tags__(self):
        # Sparse-input support mirrors whatever the wrapped estimator declares.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/__init__.py | sklearn/semi_supervised/__init__.py | """Semi-supervised learning algorithms.
These algorithms utilize small amounts of labeled data and large amounts of unlabeled
data for classification tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.semi_supervised._label_propagation import LabelPropagation, LabelSpreading
from sklearn.semi_supervised._self_training import SelfTrainingClassifier
# Public API of the ``sklearn.semi_supervised`` subpackage.
__all__ = ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/tests/test_self_training.py | sklearn/semi_supervised/tests/test_self_training.py | from math import ceil
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.base import clone
from sklearn.datasets import load_iris, make_blobs
from sklearn.ensemble import StackingClassifier
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from sklearn.tests.test_pipeline import SimpleEstimator
from sklearn.tree import DecisionTreeClassifier
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# load the iris dataset and randomly permute it
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, random_state=0
)
# Number of leading training samples that keep their true label; every sample
# after this index is marked unlabeled with the conventional -1.
n_labeled_samples = 50
y_train_missing_labels = y_train.copy()
y_train_missing_labels[n_labeled_samples:] = -1
# String-target variant of the same problem (object dtype), used to check that
# numeric and string labels behave identically. -1 still marks unlabeled rows.
mapping = {0: "A", 1: "B", 2: "C", -1: "-1"}
y_train_missing_strings = np.vectorize(mapping.get)(y_train_missing_labels).astype(
    object
)
y_train_missing_strings[y_train_missing_labels == -1] = -1
def test_warns_k_best():
    # `k_best` larger than the unlabeled pool should trigger a UserWarning
    # and label everything in the first round.
    clf = SelfTrainingClassifier(
        KNeighborsClassifier(), criterion="k_best", k_best=1000
    )
    with pytest.warns(UserWarning, match="k_best is larger than"):
        clf.fit(X_train, y_train_missing_labels)
    assert clf.termination_condition_ == "all_labeled"
@pytest.mark.parametrize(
    "estimator",
    [KNeighborsClassifier(), LogisticRegression()],
)
@pytest.mark.parametrize("selection_crit", ["threshold", "k_best"])
def test_classification(estimator, selection_crit):
    # Check classification for various parameter settings and assert that
    # numeric and string targets produce identical predictions.
    estimator = clone(estimator)  # Avoid side effects from previous tests.
    threshold = 0.75
    max_iter = 10
    model = SelfTrainingClassifier(
        estimator, max_iter=max_iter, threshold=threshold, criterion=selection_crit
    )
    model.fit(X_train, y_train_missing_labels)
    pred = model.predict(X_test)
    proba = model.predict_proba(X_test)
    model_strings = SelfTrainingClassifier(
        estimator, max_iter=max_iter, criterion=selection_crit, threshold=threshold
    )
    model_strings.fit(X_train, y_train_missing_strings)
    pred_strings = model_strings.predict(X_test)
    proba_strings = model_strings.predict_proba(X_test)
    assert_array_equal(np.vectorize(mapping.get)(pred), pred_strings)
    assert_array_equal(proba, proba_strings)
    assert model.termination_condition_ == model_strings.termination_condition_
    labeled = y_train_missing_labels != -1
    # Originally labeled samples are tagged with iteration 0 ...
    assert_array_equal(model.labeled_iter_ == 0, labeled)
    # ... and never change label during training.
    assert_array_equal(y_train_missing_labels[labeled], model.transduction_[labeled])
    # The last labeling iteration cannot exceed the number of iterations run,
    # which in turn is bounded by max_iter.
    assert np.max(model.labeled_iter_) <= model.n_iter_ <= max_iter
    assert np.max(model_strings.labeled_iter_) <= model_strings.n_iter_ <= max_iter
    # labeled_iter_ and transduction_ are aligned sample-wise.
    assert model.labeled_iter_.shape == model.transduction_.shape
    assert model_strings.labeled_iter_.shape == model_strings.transduction_.shape
def test_k_best():
    """The k_best criterion labels exactly k samples per iteration."""
    k = 10
    clf = SelfTrainingClassifier(
        KNeighborsClassifier(n_neighbors=1),
        criterion="k_best",
        k_best=k,
        max_iter=None,
    )
    # Start from a single labeled sample; everything else is unlabeled.
    y_single_label = np.copy(y_train)
    y_single_label[1:] = -1
    total = y_train.shape[0]
    expected_iterations = ceil((total - 1) / k)
    clf.fit(X_train, y_single_label)
    assert clf.n_iter_ == expected_iterations
    # Iteration 0 holds the one originally labeled sample; each subsequent
    # full iteration adds exactly k labels, and the last adds the remainder.
    assert np.sum(clf.labeled_iter_ == 0) == 1
    for iteration in range(1, expected_iterations):
        assert np.sum(clf.labeled_iter_ == iteration) == k
    assert np.sum(clf.labeled_iter_ == expected_iterations) == (total - 1) % k
    assert clf.termination_condition_ == "all_labeled"
def test_sanity_classification():
    """Self-training should beat the purely supervised baseline."""
    base = SVC(gamma="scale", probability=True)
    base.fit(X_train[n_labeled_samples:], y_train[n_labeled_samples:])
    self_training = SelfTrainingClassifier(base)
    self_training.fit(X_train, y_train_missing_labels)
    baseline_pred = base.predict(X_test)
    st_pred = self_training.predict(X_test)
    # The two models must actually disagree somewhere...
    assert not np.array_equal(baseline_pred, st_pred)
    # ...and the semi-supervised model must generalize better.
    acc_supervised = accuracy_score(base.predict(X_test), y_test)
    acc_self_training = accuracy_score(self_training.predict(X_test), y_test)
    assert acc_self_training > acc_supervised
def test_none_iter():
    """With max_iter=None, fitting runs until every sample is labeled."""
    clf = SelfTrainingClassifier(KNeighborsClassifier(), threshold=0.55, max_iter=None)
    clf.fit(X_train, y_train_missing_labels)
    # All samples should become labeled within a handful of iterations.
    assert clf.n_iter_ < 10
    assert clf.termination_condition_ == "all_labeled"
@pytest.mark.parametrize(
    "estimator",
    [KNeighborsClassifier(), SVC(gamma="scale", probability=True, random_state=0)],
)
@pytest.mark.parametrize("y", [y_train_missing_labels, y_train_missing_strings])
def test_zero_iterations(estimator, y):
    """max_iter=0 must reduce to plain supervised learning on the labeled subset.

    Also exercises string targets.
    """
    estimator = clone(estimator)  # Avoid side effects from previous tests.
    self_training = SelfTrainingClassifier(estimator, max_iter=0)
    self_training.fit(X_train, y)
    # Fit the underlying estimator directly on the labeled samples only.
    supervised = estimator.fit(X_train[:n_labeled_samples], y[:n_labeled_samples])
    assert_array_equal(self_training.predict(X_test), supervised.predict(X_test))
    assert self_training.termination_condition_ == "max_iter"
def test_prefitted_throws_error():
    """Predicting before fit raises NotFittedError even for a pre-fitted base."""
    base = KNeighborsClassifier()
    base.fit(X_train, y_train)
    self_training = SelfTrainingClassifier(base)
    # Fitting the wrapped estimator beforehand must not count as fitting the
    # meta-estimator itself.
    with pytest.raises(
        NotFittedError,
        match="This SelfTrainingClassifier instance is not fitted yet",
    ):
        self_training.predict(X_train)
@pytest.mark.parametrize("max_iter", range(1, 5))
def test_labeled_iter(max_iter):
    """Iteration 0 of labeled_iter_ contains exactly the initial labels."""
    clf = SelfTrainingClassifier(KNeighborsClassifier(), max_iter=max_iter)
    clf.fit(X_train, y_train_missing_labels)
    n_initial = np.sum(clf.labeled_iter_ == 0)
    assert n_initial == n_labeled_samples
    # No sample can be labeled after the last performed iteration, and the
    # iteration count is bounded by max_iter.
    assert np.max(clf.labeled_iter_) <= clf.n_iter_ <= max_iter
def test_no_unlabeled():
    """Fully labeled input: warn, and match the plain supervised classifier."""
    base = KNeighborsClassifier()
    base.fit(X_train, y_train)
    self_training = SelfTrainingClassifier(base)
    with pytest.warns(UserWarning, match="y contains no unlabeled samples"):
        self_training.fit(X_train, y_train)
    assert_array_equal(base.predict(X_test), self_training.predict(X_test))
    # Every sample was already labeled, hence assigned to iteration 0, and
    # the procedure terminates immediately.
    assert np.all(self_training.labeled_iter_ == 0)
    assert self_training.termination_condition_ == "all_labeled"
def test_early_stopping():
    """Training stops once no prediction clears the confidence threshold."""
    clf = SelfTrainingClassifier(SVC(gamma="scale", probability=True))
    X_easy = [[1], [0], [1], [0.5]]
    y_easy = [1, 0, -1, -1]
    # The sample at 0.5 never reaches a confident prediction, so the loop
    # terminates after a single iteration with "no_change".
    clf.fit(X_easy, y_easy)
    assert clf.n_iter_ == 1
    assert clf.termination_condition_ == "no_change"
def test_strings_dtype():
    """String targets with a non-object dtype are rejected with a dtype error."""
    clf = SelfTrainingClassifier(KNeighborsClassifier())
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    # np.take yields a fixed-width unicode array, not dtype=object.
    y_as_strings = np.take(["one", "two", "three"], y)
    with pytest.raises(ValueError, match="dtype"):
        clf.fit(X, y_as_strings)
@pytest.mark.parametrize("verbose", [True, False])
def test_verbose(capsys, verbose):
    """Progress lines are printed if and only if verbose=True."""
    clf = SelfTrainingClassifier(KNeighborsClassifier(), verbose=verbose)
    clf.fit(X_train, y_train_missing_labels)
    output = capsys.readouterr().out
    assert ("iteration" in output) == verbose
def test_verbose_k_best(capsys):
    """Verbose k_best mode reports how many labels each iteration added."""
    k = 10
    clf = SelfTrainingClassifier(
        KNeighborsClassifier(n_neighbors=1),
        criterion="k_best",
        k_best=k,
        verbose=True,
        max_iter=None,
    )
    # Single labeled sample; the rest is filled in k at a time.
    y_single_label = np.copy(y_train)
    y_single_label[1:] = -1
    total = y_train.shape[0]
    expected_iterations = ceil((total - 1) / k)
    clf.fit(X_train, y_single_label)
    output = capsys.readouterr().out
    template = "End of iteration {}, added {} new labels."
    # Full iterations each add k labels; the last one adds the remainder.
    for iteration in range(1, expected_iterations):
        assert template.format(iteration, k) in output
    assert template.format(expected_iterations, (total - 1) % k) in output
def test_k_best_selects_best():
    """The k_best criterion adds exactly the k most confident predictions."""
    base = LogisticRegression(random_state=0)
    clf = SelfTrainingClassifier(base, criterion="k_best", max_iter=1, k_best=10)
    has_label = y_train_missing_labels != -1
    clf.fit(X_train, y_train_missing_labels)
    newly_labeled = ~has_label & (clf.transduction_ != -1)
    # Reproduce the first iteration by hand: fit on the labeled subset and
    # rank the unlabeled samples by predicted confidence.
    base.fit(X_train[has_label], y_train_missing_labels[has_label])
    confidences = np.max(base.predict_proba(X_train[~has_label]), axis=1)
    ten_most_confident = X_train[~has_label][np.argsort(confidences)[-10:]]
    added_samples = X_train[np.where(newly_labeled)].tolist()
    for candidate in ten_most_confident.tolist():
        assert candidate in added_samples
def test_estimator_meta_estimator():
    """A meta-estimator whose predict_proba only exists after fitting works.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19119
    """
    stacking_with_proba = StackingClassifier(
        estimators=[
            ("svc_1", SVC(probability=True)),
            ("svc_2", SVC(probability=True)),
        ],
        final_estimator=SVC(probability=True),
        cv=2,
    )
    assert hasattr(stacking_with_proba, "predict_proba")
    clf = SelfTrainingClassifier(estimator=stacking_with_proba)
    clf.fit(X_train, y_train_missing_labels)
    clf.predict_proba(X_test)

    # Without probability estimates anywhere in the stack, fitting the
    # self-training wrapper must fail with an AttributeError.
    stacking_without_proba = StackingClassifier(
        estimators=[
            ("svc_1", SVC(probability=False)),
            ("svc_2", SVC(probability=False)),
        ],
        final_estimator=SVC(probability=False),
        cv=2,
    )
    assert not hasattr(stacking_without_proba, "predict_proba")
    clf = SelfTrainingClassifier(estimator=stacking_without_proba)
    with pytest.raises(AttributeError):
        clf.fit(X_train, y_train_missing_labels)
def test_self_training_estimator_attribute_error():
    """Check that we raise the proper AttributeErrors when the `estimator`
    does not implement the `predict_proba` method, which is called from within
    `fit`, or `decision_function`, which is decorated with `available_if`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28108
    """
    # `SVC(probability=False)` never grows a 'predict_proba', which `fit` of
    # `SelfTrainingClassifier` requires internally.
    no_proba_estimator = SVC(probability=False, gamma="scale")
    self_training = SelfTrainingClassifier(no_proba_estimator)
    with pytest.raises(AttributeError, match="has no attribute 'predict_proba'"):
        self_training.fit(X_train, y_train_missing_labels)

    # `DecisionTreeClassifier` lacks 'decision_function'; the `available_if`
    # machinery must surface a chained AttributeError naming both objects.
    self_training = SelfTrainingClassifier(estimator=DecisionTreeClassifier())
    outer_msg = "This 'SelfTrainingClassifier' has no attribute 'decision_function'"
    inner_msg = "'DecisionTreeClassifier' object has no attribute 'decision_function'"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        self_training.fit(X_train, y_train_missing_labels).decision_function(X_train)
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg in str(exec_info.value.__cause__)
# Metadata routing tests
# =================================================================
@pytest.mark.filterwarnings("ignore:y contains no unlabeled samples:UserWarning")
@pytest.mark.parametrize(
    "method", ["decision_function", "predict_log_proba", "predict_proba", "predict"]
)
def test_routing_passed_metadata_not_supported(method):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""
    expected_msg = "is only supported if enable_metadata_routing=True"

    clf = SelfTrainingClassifier(estimator=SimpleEstimator())
    with pytest.raises(ValueError, match=expected_msg):
        clf.fit([[1], [1]], [1, 1], sample_weight=[1], prop="a")

    clf = SelfTrainingClassifier(estimator=SimpleEstimator())
    with pytest.raises(ValueError, match=expected_msg):
        # make sure that the estimator thinks it is already fitted
        clf.fitted_params_ = True
        getattr(clf, method)([[1]], sample_weight=[1], prop="a")
# End of routing tests
# ====================
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/tests/test_label_propagation.py | sklearn/semi_supervised/tests/test_label_propagation.py | """test the label propagation module"""
import warnings
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.semi_supervised import _label_propagation as label_propagation
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_array_equal,
)
SPARSE_TYPES = ("sparse_csr", "sparse_csc", "sparse_csr_array", "sparse_csc_array")
CONSTRUCTOR_TYPES = ("array",) + SPARSE_TYPES
ESTIMATORS = [
(label_propagation.LabelPropagation, {"kernel": "rbf"}),
(label_propagation.LabelPropagation, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelPropagation,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
(label_propagation.LabelSpreading, {"kernel": "rbf"}),
(label_propagation.LabelSpreading, {"kernel": "knn", "n_neighbors": 2}),
(
label_propagation.LabelSpreading,
{"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)},
),
]
LP_ESTIMATORS = [
(klass, params)
for (klass, params) in ESTIMATORS
if klass == label_propagation.LabelPropagation
]
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_fit_transduction(global_dtype, Estimator, parameters):
    """The unlabeled sample is transduced to the class of its neighbor."""
    X = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype)
    y = [0, 1, -1]
    model = Estimator(**parameters).fit(X, y)
    # Sample 2 lies closest to the class-1 sample.
    assert model.transduction_[2] == 1
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_distribution(global_dtype, Estimator, parameters):
    """An equidistant unlabeled point gets a ~uniform label distribution."""
    if parameters["kernel"] == "knn":
        pytest.skip(
            "Unstable test for this configuration: changes in k-NN ordering break it."
        )
    X = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=global_dtype)
    y = [0, 1, -1]
    model = Estimator(**parameters).fit(X, y)
    assert_allclose(model.label_distributions_[2], [0.5, 0.5], atol=1e-2)
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_predict(global_dtype, Estimator, parameters):
    """A new point near the class-1 region is predicted as class 1."""
    X = np.asarray([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], dtype=global_dtype)
    y = [0, 1, -1]
    model = Estimator(**parameters).fit(X, y)
    assert_array_equal(model.predict([[0.5, 2.5]]), np.array([1]))
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_predict_proba(global_dtype, Estimator, parameters):
    """A point equidistant from both labeled samples gets 50/50 probabilities."""
    X = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], dtype=global_dtype)
    y = [0, 1, -1]
    model = Estimator(**parameters).fit(X, y)
    assert_allclose(model.predict_proba([[1.0, 1.0]]), np.array([[0.5, 0.5]]))
@pytest.mark.parametrize("alpha", [0.1, 0.3, 0.5, 0.7, 0.9])
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_label_spreading_closed_form(global_dtype, Estimator, parameters, alpha):
    """Iterative LabelSpreading must match the closed-form solution.

    NOTE(review): the `Estimator, parameters` parametrization is not used in
    the body -- `LabelSpreading` is instantiated directly -- so those cases
    only repeat the same check. Confirm whether the parametrize is intended.
    """
    n_classes = 2
    X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
    X = X.astype(global_dtype, copy=False)
    # Mark every third sample as unlabeled (-1).
    y[::3] = -1
    gamma = 0.1
    clf = label_propagation.LabelSpreading(gamma=gamma).fit(X, y)
    # adopting notation from Zhou et al (2004):
    S = clf._build_graph()
    # One-hot encode y with an extra column for the -1 marker, then drop that
    # column so unlabeled rows are all-zero.
    Y = np.zeros((len(y), n_classes + 1), dtype=X.dtype)
    Y[np.arange(len(y)), y] = 1
    Y = Y[:, :-1]
    # Closed form: F = (I - alpha * S)^-1 Y, then row-normalize.
    expected = np.dot(np.linalg.inv(np.eye(len(S), dtype=S.dtype) - alpha * S), Y)
    expected /= expected.sum(axis=1)[:, np.newaxis]
    clf = label_propagation.LabelSpreading(
        max_iter=100, alpha=alpha, tol=1e-10, gamma=gamma
    )
    clf.fit(X, y)
    assert_allclose(expected, clf.label_distributions_)
def test_label_propagation_closed_form(global_dtype):
    """Iterative LabelPropagation must match the closed-form solution."""
    n_classes = 2
    X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
    X = X.astype(global_dtype, copy=False)
    # Mark every third sample as unlabeled (-1).
    y[::3] = -1
    # One-hot encode y with an extra column flagging unlabeled samples.
    Y = np.zeros((len(y), n_classes + 1))
    Y[np.arange(len(y)), y] = 1
    unlabelled_idx = Y[:, (-1,)].nonzero()[0]
    labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
    clf = label_propagation.LabelPropagation(max_iter=100, tol=1e-10, gamma=0.1)
    clf.fit(X, y)
    # adopting notation from Zhu et al 2002
    T_bar = clf._build_graph()
    # Extract the unlabeled/unlabeled and unlabeled/labeled blocks of the
    # transition matrix.
    Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
    Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
    Y = Y[:, :-1]
    Y_l = Y[labelled_idx, :]
    # Closed form: Y_u = (I - Tuu)^-1 Tul Y_l, then row-normalize.
    Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
    expected = Y.copy()
    expected[unlabelled_idx, :] = Y_u
    expected /= expected.sum(axis=1)[:, np.newaxis]
    assert_allclose(expected, clf.label_distributions_, atol=1e-4)
@pytest.mark.parametrize("accepted_sparse_type", SPARSE_TYPES)
@pytest.mark.parametrize("index_dtype", [np.int32, np.int64])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("Estimator, parameters", ESTIMATORS)
def test_sparse_input_types(
    accepted_sparse_type, index_dtype, dtype, Estimator, parameters
):
    """Sparse inputs of any container/value/index dtype are accepted.

    This is non-regression test for #17085.
    """
    X = _convert_container([[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]], accepted_sparse_type)
    # Force every combination of value and index dtypes in place.
    X.data = X.data.astype(dtype, copy=False)
    X.indices = X.indices.astype(index_dtype, copy=False)
    X.indptr = X.indptr.astype(index_dtype, copy=False)
    y = [0, 1, -1]
    model = Estimator(**parameters).fit(X, y)
    assert_array_equal(model.predict([[0.5, 2.5]]), np.array([1]))
@pytest.mark.parametrize("constructor", CONSTRUCTOR_TYPES)
@pytest.mark.parametrize("Estimator, parameters", LP_ESTIMATORS)
def test_label_propagation_build_graph_normalized(constructor, Estimator, parameters):
    """_build_graph must row-normalize a custom affinity matrix."""
    # X and labels are required by fit but irrelevant to this check.
    X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 3.0]])
    labels = [0, 1, -1]
    affinity = np.array([[1.0, 1.0, 0.0], [2.0, 1.0, 1.0], [0.0, 1.0, 3.0]])
    expected = np.array([[0.5, 0.5, 0.0], [0.5, 0.25, 0.25], [0.0, 0.25, 0.75]])

    def constant_affinity_kernel(x, y=None):
        # Ignore the inputs and return the fixed affinity matrix in the
        # requested container type.
        return _convert_container(affinity, constructor)

    model = Estimator(kernel=constant_affinity_kernel).fit(X, labels)
    graph = model._build_graph()
    assert_allclose(graph.sum(axis=1), 1)  # every row sums to one
    if issparse(graph):
        graph = graph.toarray()
    assert_allclose(graph, expected)
@pytest.mark.parametrize("constructor_type", CONSTRUCTOR_TYPES)
def test_convergence_speed(constructor_type):
    """LabelSpreading converges in a few iterations on a tiny problem.

    This is a non-regression test for #5774.
    """
    X = _convert_container([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]], constructor_type)
    y = np.array([0, 1, -1])
    model = label_propagation.LabelSpreading(kernel="rbf", max_iter=5000)
    model.fit(X, y)
    # Convergence should be reached well before the iteration budget.
    assert model.n_iter_ < 10
    assert_array_equal(model.predict(X), [0, 1, 1])
def test_convergence_warning():
    """A ConvergenceWarning is raised iff max_iter is exhausted.

    This is a non-regression test for #5774.
    """
    X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
    y = np.array([0, 1, -1])
    warn_msg = "max_iter=1 was reached without convergence."
    models = (label_propagation.LabelSpreading, label_propagation.LabelPropagation)

    # max_iter=1 cannot converge: both estimators must warn.
    for Model in models:
        mdl = Model(kernel="rbf", max_iter=1)
        with pytest.warns(ConvergenceWarning, match=warn_msg):
            mdl.fit(X, y)
        assert mdl.n_iter_ == mdl.max_iter

    # With a generous budget no ConvergenceWarning may be emitted; escalate
    # it to an error so any occurrence fails the test.
    for Model in models:
        mdl = Model(kernel="rbf", max_iter=500)
        with warnings.catch_warnings():
            warnings.simplefilter("error", ConvergenceWarning)
            mdl.fit(X, y)
@pytest.mark.parametrize(
    "LabelPropagationCls",
    [label_propagation.LabelSpreading, label_propagation.LabelPropagation],
)
def test_label_propagation_non_zero_normalizer(LabelPropagationCls):
    """No division by zero when a row normalizer is null.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/pull/15946
    https://github.com/scikit-learn/scikit-learn/issues/9292
    """
    # Duplicate points make some rows of the knn graph degenerate.
    X = np.array([[100.0, 100.0], [100.0, 100.0], [0.0, 0.0], [0.0, 0.0]])
    y = np.array([0, 1, -1, -1])
    model = LabelPropagationCls(kernel="knn", max_iter=100, n_neighbors=1)
    # Escalate RuntimeWarning (e.g. invalid value in divide) into an error so
    # any zero-division fails the test.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        model.fit(X, y)
def test_predict_sparse_callable_kernel(global_dtype):
    """Callable kernels returning sparse matrices are supported.

    This is a non-regression test for #15866.
    """

    # Custom sparse kernel (top-K RBF)
    def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
        # BUG FIX: the neighbor count was hard-coded as ``n_neighbors=10``,
        # silently ignoring the function's own ``n_neighbors`` parameter.
        # Pass it through instead; the default (10) keeps behavior identical.
        nn = NearestNeighbors(n_neighbors=n_neighbors, metric="euclidean", n_jobs=2)
        nn.fit(X)
        W = -1 * nn.kneighbors_graph(Y, mode="distance").power(2) * gamma
        np.exp(W.data, out=W.data)
        assert issparse(W)
        return W.T

    n_classes = 4
    n_samples = 500
    n_test = 10
    X, y = make_classification(
        n_classes=n_classes,
        n_samples=n_samples,
        n_features=20,
        n_informative=20,
        n_redundant=0,
        n_repeated=0,
        random_state=0,
    )
    X = X.astype(global_dtype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=0
    )
    # Both semi-supervised estimators must accept the sparse callable kernel
    # and reach a sensible accuracy on the held-out split.
    model = label_propagation.LabelSpreading(kernel=topk_rbf)
    model.fit(X_train, y_train)
    assert model.score(X_test, y_test) >= 0.9
    model = label_propagation.LabelPropagation(kernel=topk_rbf)
    model.fit(X_train, y_train)
    assert model.score(X_test, y_test) >= 0.9
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/semi_supervised/tests/__init__.py | sklearn/semi_supervised/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/graph.py | sklearn/utils/graph.py | """Graph utilities and algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils._param_validation import Integral, Interval, validate_params
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
@validate_params(
    {
        "graph": ["array-like", "sparse matrix"],
        "source": [Interval(Integral, 0, None, closed="left")],
        "cutoff": [Interval(Integral, 0, None, closed="left"), None],
    },
    prefer_skip_nested_validation=True,
)
def single_source_shortest_path_length(graph, source, *, cutoff=None):
    """Return the length of the shortest path from source to all reachable nodes.

    Implemented as a breadth-first search: nodes are visited level by level
    and each node is recorded with the level (number of hops) at which it was
    first reached.

    Parameters
    ----------
    graph : {array-like, sparse matrix} of shape (n_nodes, n_nodes)
        Adjacency matrix of the graph. Sparse matrix of format LIL is
        preferred.

    source : int
        Start node for path.

    cutoff : int, default=None
        Depth to stop the search - only paths of length <= cutoff are returned.

    Returns
    -------
    paths : dict
        Reachable end nodes mapped to length of path from source,
        i.e. `{end: path_length}`.

    Examples
    --------
    >>> from sklearn.utils.graph import single_source_shortest_path_length
    >>> import numpy as np
    >>> graph = np.array([[ 0, 1, 0, 0],
    ...                   [ 1, 0, 1, 0],
    ...                   [ 0, 1, 0, 0],
    ...                   [ 0, 0, 0, 0]])
    >>> single_source_shortest_path_length(graph, 0)
    {0: 0, 1: 1, 2: 2}
    >>> graph = np.ones((6, 6))
    >>> sorted(single_source_shortest_path_length(graph, 2).items())
    [(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
    """
    # LIL format exposes each row's neighbor indices via ``graph.rows``.
    if sparse.issparse(graph):
        graph = graph.tolil()
    else:
        graph = sparse.lil_matrix(graph)
    seen = {}  # node -> number of hops at which it was first reached
    level = 0  # current BFS depth
    frontier = [source]  # nodes discovered at the previous level
    while frontier:
        successors = set()  # candidate nodes for the next level
        for node in frontier:
            if node not in seen:
                seen[node] = level
                successors.update(graph.rows[node])
        # Stop expanding once the requested depth has been reached.
        if cutoff is not None and cutoff <= level:
            break
        level += 1
        frontier = successors
    return seen
def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
if metric == "precomputed" and sparse.issparse(X):
raise RuntimeError(
"_fix_connected_components with metric='precomputed' requires the "
"full distance matrix in X, and does not work with a sparse "
"neighbors graph."
)
for i in range(n_connected_components):
idx_i = np.flatnonzero(component_labels == i)
Xi = X[idx_i]
for j in range(i):
idx_j = np.flatnonzero(component_labels == j)
Xj = X[idx_j]
if metric == "precomputed":
D = X[np.ix_(idx_i, idx_j)]
else:
D = pairwise_distances(Xi, Xj, metric=metric, **kwargs)
ii, jj = np.unravel_index(D.argmin(axis=None), D.shape)
if mode == "connectivity":
graph[idx_i[ii], idx_j[jj]] = 1
graph[idx_j[jj], idx_i[ii]] = 1
elif mode == "distance":
graph[idx_i[ii], idx_j[jj]] = D[ii, jj]
graph[idx_j[jj], idx_i[ii]] = D[ii, jj]
else:
raise ValueError(
"Unknown mode=%r, should be one of ['connectivity', 'distance']."
% mode
)
return graph
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_missing.py | sklearn/utils/_missing.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import math
import numbers
from contextlib import suppress
def is_scalar_nan(x):
    """Test if x is NaN.

    This function is meant to overcome the issue that np.isnan does not allow
    non-numerical types as input, and that np.nan is not float('nan').

    Parameters
    ----------
    x : any type
        Any scalar value.

    Returns
    -------
    bool
        Returns true if x is NaN, and false otherwise.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils._missing import is_scalar_nan
    >>> is_scalar_nan(np.nan)
    True
    >>> is_scalar_nan(float("nan"))
    True
    >>> is_scalar_nan(None)
    False
    >>> is_scalar_nan("")
    False
    >>> is_scalar_nan([np.nan])
    False
    """
    # Integral values (including bool) can never be NaN; for the remaining
    # real numbers delegate to math.isnan. Non-numbers are never NaN.
    if isinstance(x, numbers.Integral):
        return False
    return isinstance(x, numbers.Real) and math.isnan(x)
def is_pandas_na(x):
    """Test if x is pandas.NA.

    We intentionally do not use this function to return `True` for `pd.NA` in
    `is_scalar_nan`, because estimators that support `pd.NA` are the exception
    rather than the rule at the moment. When `pd.NA` is more universally
    supported, we may reconsider this decision.

    Parameters
    ----------
    x : any type
        The input value to test.

    Returns
    -------
    boolean
        True if `x` is `pandas.NA`, False otherwise.
    """
    # Without pandas installed there is no NA singleton to compare against.
    try:
        from pandas import NA
    except ImportError:
        return False
    return x is NA
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_bunch.py | sklearn/utils/_bunch.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
class Bunch(dict):
    """Container object exposing keys as attributes.

    Bunch objects are sometimes used as an output for functions and methods.
    They extend dictionaries by enabling values to be accessed by key,
    `bunch["value_key"]`, or by an attribute, `bunch.value_key`.

    Examples
    --------
    >>> from sklearn.utils import Bunch
    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        super().__init__(kwargs)
        # Deprecated key -> warning message to emit on access. Stored in
        # __dict__ directly so it does not show up as a dict entry.
        self.__dict__["_deprecated_key_to_warnings"] = {}

    def __getitem__(self, key):
        # Emit a FutureWarning when a deprecated alias key is read.
        deprecations = self.__dict__.get("_deprecated_key_to_warnings", {})
        if key in deprecations:
            warnings.warn(deprecations[key], FutureWarning)
        return super().__getitem__(key)

    def _set_deprecated(self, value, *, new_key, deprecated_key, warning_message):
        """Set key in dictionary to be deprecated with its warning message."""
        self.__dict__["_deprecated_key_to_warnings"][deprecated_key] = warning_message
        self[new_key] = self[deprecated_key] = value

    def __setattr__(self, key, value):
        # Attribute assignment writes straight through to the dict entry.
        self[key] = value

    def __dir__(self):
        return self.keys()

    def __getattr__(self, key):
        # Only reached when regular attribute lookup fails: fall back to the
        # dict entry, translating a missing key into AttributeError.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have an non
        # empty __dict__. This causes a surprising behaviour when
        # loading these pickles scikit-learn 0.17: reading bunch.key
        # uses __dict__ but assigning to bunch.key use __setattr__ and
        # only changes bunch['key']. More details can be found at:
        # https://github.com/scikit-learn/scikit-learn/issues/6196.
        # Overriding __setstate__ to be a noop has the effect of
        # ignoring the pickled __dict__
        pass
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_response.py | sklearn/utils/_response.py | """Utilities to get the response values of a classifier or a regressor.
It allows to make uniform checks and validation.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.base import is_classifier
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _check_response_method, check_is_fitted
def _process_predict_proba(*, y_pred, target_type, classes, pos_label):
"""Get the response values when the response method is `predict_proba`.
This function process the `y_pred` array in the binary and multi-label cases.
In the binary case, it selects the column corresponding to the positive
class. In the multi-label case, it stacks the predictions if they are not
in the "compressed" format `(n_samples, n_outputs)`.
Parameters
----------
y_pred : ndarray
Output of `estimator.predict_proba`. The shape depends on the target type:
- for binary classification, it is a 2d array of shape `(n_samples, 2)`;
- for multiclass classification, it is a 2d array of shape
`(n_samples, n_classes)`;
- for multilabel classification, it is either a list of 2d arrays of shape
`(n_samples, 2)` (e.g. `RandomForestClassifier` or `KNeighborsClassifier`) or
an array of shape `(n_samples, n_outputs)` (e.g. `MLPClassifier` or
`RidgeClassifier`).
target_type : {"binary", "multiclass", "multilabel-indicator"}
Type of the target.
classes : ndarray of shape (n_classes,) or list of such arrays
Class labels as reported by `estimator.classes_`.
pos_label : int, float, bool or str
Only used with binary and multiclass targets.
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_output)
Compressed predictions format as requested by the metrics.
"""
if target_type == "binary" and y_pred.shape[1] < 2:
# We don't handle classifiers trained on a single class.
raise ValueError(
f"Got predict_proba of shape {y_pred.shape}, but need "
"classifier with two classes."
)
if target_type == "binary":
col_idx = np.flatnonzero(classes == pos_label)[0]
return y_pred[:, col_idx]
elif target_type == "multilabel-indicator":
# Use a compress format of shape `(n_samples, n_output)`.
# Only `MLPClassifier` and `RidgeClassifier` return an array of shape
# `(n_samples, n_outputs)`.
if isinstance(y_pred, list):
# list of arrays of shape `(n_samples, 2)`
return np.vstack([p[:, -1] for p in y_pred]).T
else:
# array of shape `(n_samples, n_outputs)`
return y_pred
return y_pred
def _process_decision_function(*, y_pred, target_type, classes, pos_label):
"""Get the response values when the response method is `decision_function`.
This function process the `y_pred` array in the binary and multi-label cases.
In the binary case, it inverts the sign of the score if the positive label
is not `classes[1]`. In the multi-label case, it stacks the predictions if
they are not in the "compressed" format `(n_samples, n_outputs)`.
Parameters
----------
y_pred : ndarray
Output of `estimator.decision_function`. The shape depends on the target type:
- for binary classification, it is a 1d array of shape `(n_samples,)` where the
sign is assuming that `classes[1]` is the positive class;
- for multiclass classification, it is a 2d array of shape
`(n_samples, n_classes)`;
- for multilabel classification, it is a 2d array of shape `(n_samples,
n_outputs)`.
target_type : {"binary", "multiclass", "multilabel-indicator"}
Type of the target.
classes : ndarray of shape (n_classes,) or list of such arrays
Class labels as reported by `estimator.classes_`.
pos_label : int, float, bool or str
Only used with binary and multiclass targets.
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_output)
Compressed predictions format as requested by the metrics.
"""
if target_type == "binary" and pos_label == classes[0]:
return -1 * y_pred
return y_pred
def _get_response_values(
    estimator,
    X,
    response_method,
    pos_label=None,
    return_response_method_used=False,
):
    """Compute the response values of a classifier, an outlier detector, or a regressor.
    The response values are predictions such that it follows the following shape:
    - for binary classification, it is a 1d array of shape `(n_samples,)`;
    - for multiclass classification
      - with response_method="predict", it is a 1d array of shape `(n_samples,)`;
      - otherwise, it is a 2d array of shape `(n_samples, n_classes)`;
    - for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
    - for outlier detection, it is a 1d array of shape `(n_samples,)`;
    - for regression, it is a 1d array of shape `(n_samples,)`.
    If `estimator` is a binary classifier, also return the label for the
    effective positive class.
    This utility is used primarily in the displays and the scikit-learn scorers.
    .. versionadded:: 1.3
    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier, outlier detector, or regressor or a
        fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
        classifier, an outlier detector, or a regressor.
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input values.
    response_method : {"predict_proba", "predict_log_proba", "decision_function", \
            "predict"} or list of such str
        Specifies the response method to use get prediction from an estimator
        (i.e. :term:`predict_proba`, :term:`predict_log_proba`,
        :term:`decision_function` or :term:`predict`). Possible choices are:
        - if `str`, it corresponds to the name to the method to return;
        - if a list of `str`, it provides the method names in order of
          preference. The method returned corresponds to the first method in
          the list and which is implemented by `estimator`.
    pos_label : int, float, bool or str, default=None
        The class considered as the positive class when computing
        the metrics. If `None` and target is 'binary', `estimators.classes_[1]` is
        considered as the positive class.
    return_response_method_used : bool, default=False
        Whether to return the response method used to compute the response
        values.
        .. versionadded:: 1.4
    Returns
    -------
    y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
            (n_samples, n_outputs)
        Target scores calculated from the provided `response_method`
        and `pos_label`.
    pos_label : int, float, bool, str or None
        The class considered as the positive class when computing
        the metrics. Returns `None` if `estimator` is a regressor or an outlier
        detector.
    response_method_used : str
        The response method used to compute the response values. Only returned
        if `return_response_method_used` is `True`.
        .. versionadded:: 1.4
    Raises
    ------
    ValueError
        If `pos_label` is not a valid label.
        If the shape of `y_pred` is not consistent for binary classifier.
        If the response method can be applied to a classifier only and
        `estimator` is a regressor.
    """
    # NOTE(review): local import presumably avoids a circular import with
    # `sklearn.base` — confirm before moving to module level.
    from sklearn.base import is_classifier, is_outlier_detector

    if is_classifier(estimator):
        prediction_method = _check_response_method(estimator, response_method)
        classes = estimator.classes_
        # The target type is inferred from the fitted classes, not from `y`.
        target_type = type_of_target(classes)
        if target_type in ("binary", "multiclass"):
            if pos_label is not None and pos_label not in classes.tolist():
                raise ValueError(
                    f"pos_label={pos_label} is not a valid label: It should be "
                    f"one of {classes}"
                )
            elif pos_label is None and target_type == "binary":
                # Default positive class: the last (== second) fitted class,
                # matching the scikit-learn convention `classes_[1]`.
                pos_label = classes[-1]
        y_pred = prediction_method(X)
        # Probability-like and decision-score outputs are post-processed into
        # the compressed formats documented above; `predict` needs no
        # post-processing.
        if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
            y_pred = _process_predict_proba(
                y_pred=y_pred,
                target_type=target_type,
                classes=classes,
                pos_label=pos_label,
            )
        elif prediction_method.__name__ == "decision_function":
            y_pred = _process_decision_function(
                y_pred=y_pred,
                target_type=target_type,
                classes=classes,
                pos_label=pos_label,
            )
    elif is_outlier_detector(estimator):
        # Outlier detectors have no notion of a positive label.
        prediction_method = _check_response_method(estimator, response_method)
        y_pred, pos_label = prediction_method(X), None
    else:  # estimator is a regressor
        # Regressors only support the "predict" response method.
        if response_method != "predict":
            raise ValueError(
                f"{estimator.__class__.__name__} should either be a classifier to be "
                f"used with response_method={response_method} or the response_method "
                "should be 'predict'. Got a regressor with response_method="
                f"{response_method} instead."
            )
        prediction_method = estimator.predict
        y_pred, pos_label = prediction_method(X), None
    if return_response_method_used:
        return y_pred, pos_label, prediction_method.__name__
    return y_pred, pos_label
def _get_response_values_binary(
    estimator, X, response_method, pos_label=None, return_response_method_used=False
):
    """Compute the response values of a binary classifier.

    Thin wrapper around `_get_response_values` that first validates that
    `estimator` is a fitted binary classifier and resolves the `"auto"`
    response method.

    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
        in which the last estimator is a binary classifier.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Input values.

    response_method : {'auto', 'predict_proba', 'decision_function'}
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. If set to 'auto',
        :term:`predict_proba` is tried first and if it does not exist
        :term:`decision_function` is tried next.

    pos_label : int, float, bool or str, default=None
        The class considered as the positive class when computing
        the metrics. By default, `estimators.classes_[1]` is
        considered as the positive class.

    return_response_method_used : bool, default=False
        Whether to return the response method used to compute the response
        values.

        .. versionadded:: 1.5

    Returns
    -------
    y_pred : ndarray of shape (n_samples,)
        Target scores calculated from the provided response_method
        and pos_label.

    pos_label : int, float, bool or str
        The class considered as the positive class when computing
        the metrics.

    response_method_used : str
        The response method used to compute the response values. Only returned
        if `return_response_method_used` is `True`.

        .. versionadded:: 1.5
    """
    check_is_fitted(estimator)
    base_msg = "Expected 'estimator' to be a binary classifier."
    if not is_classifier(estimator):
        raise ValueError(f"{base_msg} Got {estimator.__class__.__name__} instead.")
    if len(estimator.classes_) != 2:
        raise ValueError(f"{base_msg} Got {len(estimator.classes_)} classes instead.")
    if response_method == "auto":
        # Prefer probabilities; fall back to decision scores.
        response_method = ["predict_proba", "decision_function"]
    return _get_response_values(
        estimator,
        X,
        response_method,
        pos_label=pos_label,
        return_response_method_used=return_response_method_used,
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/metadata_routing.py | sklearn/utils/metadata_routing.py | """Utilities to route metadata within scikit-learn estimators."""
# This module is not a separate sub-folder since that would result in a circular
# import issue.
#
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.utils._metadata_requests import ( # noqa: F401
UNCHANGED,
UNUSED,
WARN,
MetadataRequest,
MetadataRouter,
MethodMapping,
_MetadataRequester,
_raise_for_params,
_raise_for_unsupported_routing,
_routing_enabled,
_RoutingNotSupportedMixin,
get_routing_for_object,
process_routing,
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_metadata_requests.py | sklearn/utils/_metadata_requests.py | """
Metadata Routing Utility
In order to better understand the components implemented in this file, one
needs to understand their relationship to one another.
The only relevant public API for end users are the ``set_{method}_request`` methods,
e.g. ``estimator.set_fit_request(sample_weight=True)``. However, third-party
developers and users who implement custom meta-estimators, need to deal with
the objects implemented in this file.
The routing is coordinated by building ``MetadataRequest`` objects
for objects that consume metadata, and ``MetadataRouter`` objects for objects that
can route metadata, which are then aligned during a call to `process_routing()`. This
function returns a Bunch object (dictionary-like) with all the information on the
consumers and which metadata they had requested and the actual metadata values. A
routing method (such as `fit` in a meta-estimator) can now provide the metadata to the
relevant consuming method (such as `fit` in a sub-estimator).
The ``MetadataRequest`` and ``MetadataRouter`` objects are constructed via a
``get_metadata_routing`` method, which all scikit-learn estimators provide.
This method is automatically implemented via ``BaseEstimator`` for all simple
estimators, but needs a custom implementation for meta-estimators.
MetadataRequest
~~~~~~~~~~~~~~~
In non-routing consumers, the simplest case, e.g. ``SVM``, ``get_metadata_routing``
returns a ``MetadataRequest`` object which is assigned to the consumer's
`_metadata_request` attribute. It stores which metadata is required by each method of
the consumer by including one ``MethodMetadataRequest`` per method in ``METHODS``
(e.g. ``fit``, ``score``, etc.).
Users and developers almost never need to directly add a new ``MethodMetadataRequest``
to the consumer's `_metadata_request` attribute, since these are generated
automatically. This attribute is modified while running `set_{method}_request` methods
(such as `set_fit_request()`), which adds the request via
`method_metadata_request.add_request(param=prop, alias=alias)`.
The ``alias`` in the ``add_request`` method has to be either a string (an alias),
or one of ``[True (requested), False (unrequested), None (error if passed)]``. There
are some other special values such as ``UNUSED`` and ``WARN`` which are used
for purposes such as warning of removing a metadata in a child class, but not
used by the end users.
MetadataRouter
~~~~~~~~~~~~~~
In routers (such as meta-estimators or multi metric scorers), ``get_metadata_routing``
returns a ``MetadataRouter`` object. It provides information about which method, from
the router object, calls which method in a consumer's object, and also, which metadata
had been requested by the consumer's methods, thus specifying how metadata is to be
passed. If a sub-estimator is a router as well, their routing information is also stored
in the meta-estimators router.
Conceptually, this information looks like:
```
{
"sub_estimator1": (
mapping=[(caller="fit", callee="transform"), ...],
router=MetadataRequest(...), # or another MetadataRouter
),
...
}
```
The `MetadataRouter` objects are never stored and are always recreated anew whenever
the object's `get_metadata_routing` method is called.
An object that is both a router and a consumer, e.g. a meta-estimator which
consumes ``sample_weight`` and routes ``sample_weight`` to its sub-estimators
also returns a ``MetadataRouter`` object. Its routing information includes both
information about what metadata is required by the object itself (added via
``MetadataRouter.add_self_request``), as well as the routing information for its
sub-estimators (added via ``MetadataRouter.add``).
Implementation Details
~~~~~~~~~~~~~~~~~~~~~~
To give the above representation some structure, we use the following objects:
- ``(caller=..., callee=...)`` is a namedtuple called ``MethodPair``.
- The list of ``MethodPair`` stored in the ``mapping`` field of a `RouterMappingPair` is
a ``MethodMapping`` object.
- ``(mapping=..., router=...)`` is a namedtuple called ``RouterMappingPair``.
The ``set_{method}_request`` methods are dynamically generated for estimators
which inherit from ``BaseEstimator``. This is done by attaching instances
of the ``RequestMethod`` descriptor to classes, which is done in the
``_MetadataRequester`` class, and ``BaseEstimator`` inherits from this mixin.
This mixin also implements the ``get_metadata_routing``, which meta-estimators
need to override, but it works for simple consumers as is.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import inspect
from collections import defaultdict, namedtuple
from copy import deepcopy
from typing import TYPE_CHECKING, Optional, Union
from warnings import warn
from sklearn import get_config
from sklearn.exceptions import UnsetMetadataPassedError
from sklearn.utils._bunch import Bunch
# Only the following methods are supported in the routing mechanism. Adding new
# methods at the moment involves monkeypatching this list.
# Note that if this list is changed or monkeypatched, the corresponding method
# needs to be added under a TYPE_CHECKING condition like the one done here in
# _MetadataRequester
SIMPLE_METHODS = [
    "fit",
    "partial_fit",
    "predict",
    "predict_proba",
    "predict_log_proba",
    "decision_function",
    "score",
    "split",
    "transform",
    "inverse_transform",
]
# These methods are a composite of other methods and one cannot set their
# requests directly. Instead they should be set by setting the requests of the
# simple methods which make the composite ones.
COMPOSITE_METHODS = {
    "fit_transform": ["fit", "transform"],
    "fit_predict": ["fit", "predict"],
}
# All routable methods: the simple ones first, then the composite ones,
# preserving definition order.
METHODS = SIMPLE_METHODS + list(COMPOSITE_METHODS.keys())
def _routing_repr(obj):
"""Get a representation suitable for messages printed in the routing machinery.
This is different than `repr(obj)`, since repr(estimator) can be verbose when
there are many constructor arguments set by the user.
This is most suitable for Scorers as it gives a nice representation of what they
are. This is done by implementing a `_routing_repr` method on the object.
Since the `owner` object could be the type name (str), we return that string if the
given `obj` is a string, otherwise we return the object's type name.
.. versionadded:: 1.8
"""
try:
return obj._routing_repr()
except AttributeError:
return obj if isinstance(obj, str) else type(obj).__name__
def _routing_enabled():
    """Return whether metadata routing is enabled.

    The flag is read from the global scikit-learn configuration; when the
    option has never been set, routing is considered disabled.

    .. versionadded:: 1.3

    Returns
    -------
    enabled : bool
        Whether metadata routing is enabled. If the config is not set, it
        defaults to False.
    """
    config = get_config()
    return config.get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method, allow=None):
    """Raise an error if metadata routing is not enabled and params are passed.

    .. versionadded:: 1.4

    Parameters
    ----------
    params : dict
        The metadata passed to a method.

    owner : object
        The object to which the method belongs.

    method : str
        The name of the method, e.g. "fit".

    allow : list of str, default=None
        A list of parameters which are allowed to be passed even if metadata
        routing is not enabled.

    Raises
    ------
    ValueError
        If metadata routing is not enabled and params are passed.
    """
    if allow is None:
        allow = {}
    if _routing_enabled():
        return
    if not (params.keys() - allow):
        # Nothing beyond the allowed parameters was passed.
        return
    if method:
        caller = f"{_routing_repr(owner)}.{method}"
    else:
        caller = _routing_repr(owner)
    raise ValueError(
        f"Passing extra keyword arguments to {caller} is only supported if"
        " enable_metadata_routing=True, which you can set using"
        " `sklearn.set_config`. See the User Guide"
        " <https://scikit-learn.org/stable/metadata_routing.html> for more"
        f" details. Extra parameters passed are: {set(params)}"
    )
def _raise_for_unsupported_routing(obj, method, **kwargs):
    """Raise when metadata routing is enabled and metadata is passed.

    Used in meta-estimators that have not implemented metadata routing, to
    prevent silent bugs. There is no need to call this if the meta-estimator
    does not accept any metadata, especially in `fit`, since a meta-estimator
    that accepts metadata would do so in `fit` as well.

    Parameters
    ----------
    obj : estimator
        The estimator for which we're raising the error.

    method : str
        The method where the error is raised.

    **kwargs : dict
        The metadata passed to the method.
    """
    # `None`-valued metadata counts as "not passed".
    passed = {key: value for key, value in kwargs.items() if value is not None}
    if not passed or not _routing_enabled():
        return
    cls_name = _routing_repr(obj)
    raise NotImplementedError(
        f"{cls_name}.{method} cannot accept given metadata ({set(passed.keys())})"
        f" since metadata routing is not yet implemented for {cls_name}."
    )
class _RoutingNotSupportedMixin:
    """Mixin that removes the default `get_metadata_routing`.

    Meta-estimators for which metadata routing is not yet implemented inherit
    from this mixin so that calling `get_metadata_routing` fails loudly, and
    so the limitation is clear in our rendered documentation.
    """

    def get_metadata_routing(self):
        """Raise `NotImplementedError`.
        This estimator does not support metadata routing yet."""
        message = f"{_routing_repr(self)} has not implemented metadata routing yet."
        raise NotImplementedError(message)
# Request values
# ==============
# Each request value needs to be one of the following values, or an alias.
# this is used in `__metadata_request__*` attributes to indicate that a
# metadata is not present even though it may be present in the
# corresponding method's signature.
UNUSED = "$UNUSED$"
# this is used whenever a default value is changed, and therefore the user
# should explicitly set the value, otherwise a warning is shown. An example
# is when a meta-estimator is only a router, but then becomes also a
# consumer in a new release.
WARN = "$WARN$"
# this is the default used in `set_{method}_request` methods to indicate no
# change requested by the user.
UNCHANGED = "$UNCHANGED$"
VALID_REQUEST_VALUES = [False, True, None, UNUSED, WARN]


def request_is_valid(item):
    """Check if an item is a valid request value (and not an alias).

    Parameters
    ----------
    item : object
        The given item to be checked.

    Returns
    -------
    result : bool
        Whether the given item is one of ``VALID_REQUEST_VALUES``.
    """
    return item in VALID_REQUEST_VALUES


def request_is_alias(item):
    """Check if an item is a valid string alias for a metadata.

    Values in ``VALID_REQUEST_VALUES`` are never considered aliases; anything
    else is an alias iff it is a string that is a valid Python identifier.

    Parameters
    ----------
    item : object
        The given item to be checked if it can be an alias for the metadata.

    Returns
    -------
    result : bool
        Whether the given item is a valid alias.
    """
    return (
        not request_is_valid(item)
        and isinstance(item, str)
        and item.isidentifier()
    )
# Metadata Request for Simple Consumers
# =====================================
# This section includes MethodMetadataRequest and MetadataRequest which are
# used in simple consumers.
class MethodMetadataRequest:
    """Container for metadata requests associated with a single method.
    Instances of this class get used within a :class:`MetadataRequest` - one per each
    public method (`fit`, `transform`, ...) that its owning consumer has.
    .. versionadded:: 1.3
    Parameters
    ----------
    owner : object
        The object owning these requests.
    method : str
        The name of the method to which these requests belong.
    requests : dict of {str: bool, None or str}, default=None
        The initial requests for this method.
    """
    def __init__(self, owner, method, requests=None):
        # `requests or dict()` also replaces a passed-in empty dict with a
        # fresh one, so instances never share mutable state.
        self._requests = requests or dict()
        self.owner = owner
        self.method = method
    @property
    def requests(self):
        """Dictionary of the form: ``{key: alias}``."""
        return self._requests
    def add_request(
        self,
        *,
        param,
        alias,
    ):
        """Add request info for a metadata.
        Parameters
        ----------
        param : str
            The metadata for which a request is set.
        alias : str, or {True, False, None}
            Specifies which metadata should be routed to the method that owns this
            `MethodMetadataRequest`.
            - str: the name (or alias) of metadata given to a meta-estimator that
              should be routed to the method that owns this `MethodMetadataRequest`.
            - True: requested
            - False: not requested
            - None: error if passed
        """
        if not request_is_alias(alias) and not request_is_valid(alias):
            raise ValueError(
                f"The alias you're setting for `{param}` should be either a "
                "valid identifier or one of {None, True, False}, but given "
                f"value is: `{alias}`"
            )
        # An alias identical to the parameter name is equivalent to a plain
        # "requested" flag.
        if alias == param:
            alias = True
        if alias == UNUSED:
            # UNUSED removes an existing request, e.g. when a child class
            # drops a metadata that its parent accepted.
            if param in self._requests:
                del self._requests[param]
            else:
                raise ValueError(
                    f"Trying to remove parameter {param} with UNUSED which doesn't"
                    " exist."
                )
        else:
            self._requests[param] = alias
        return self
    def _get_param_names(self, return_alias):
        """Get names of all metadata that can be consumed or routed by this method.
        This method returns the names of all metadata except those whose
        request value is explicitly set to ``False``.
        Parameters
        ----------
        return_alias : bool
            Controls whether original or aliased names should be returned. If
            ``False``, aliases are ignored and original names are returned.
        Returns
        -------
        names : set of str
            A set of strings with the names of all metadata.
        """
        # String aliases are kept (optionally under their aliased name);
        # among the reserved request values only `False` is filtered out.
        return set(
            alias if return_alias and not request_is_valid(alias) else prop
            for prop, alias in self._requests.items()
            if not request_is_valid(alias) or alias is not False
        )
    def _check_warnings(self, *, params):
        """Check whether metadata is passed which is marked as WARN.
        If any metadata is passed which is marked as WARN, a warning is raised.
        Parameters
        ----------
        params : dict
            The metadata passed to a method.
        """
        params = {} if params is None else params
        warn_params = {
            prop
            for prop, alias in self._requests.items()
            if alias == WARN and prop in params
        }
        for param in warn_params:
            warn(
                f"Support for {param} has recently been added to {self.owner} class. "
                "To maintain backward compatibility, it is ignored now. "
                f"Using `set_{self.method}_request({param}={{True, False}})` "
                "on this method of the class, you can set the request value "
                "to False to silence this warning, or to True to consume and "
                "use the metadata."
            )
    def _route_params(self, params, parent, caller):
        """Prepare the given metadata to be passed to the method.
        The output of this method can be used directly as the input to the
        corresponding method as **kwargs.
        Parameters
        ----------
        params : dict
            A dictionary of provided metadata.
        parent : object
            Parent class object, that routes the metadata.
        caller : str
            Method from the parent class object, where the metadata is routed from.
        Returns
        -------
        params : Bunch
            A :class:`~sklearn.utils.Bunch` of {metadata: value} which can be
            passed to the corresponding method.
        """
        self._check_warnings(params=params)
        unrequested = dict()
        # `None`-valued metadata is treated as "not passed" throughout.
        args = {arg: value for arg, value in params.items() if value is not None}
        res = Bunch()
        for prop, alias in self._requests.items():
            if alias is False or alias == WARN:
                # Not requested (or only warned about): never forwarded.
                continue
            elif alias is True and prop in args:
                res[prop] = args[prop]
            elif alias is None and prop in args:
                # Passed but neither requested nor unrequested: collected to
                # raise UnsetMetadataPassedError below.
                unrequested[prop] = args[prop]
            elif alias in args:
                # String alias: forward under the original parameter name.
                res[prop] = args[alias]
        if unrequested:
            # Composite methods (fit_transform, fit_predict) point the user at
            # the underlying simple methods' set_{method}_request helpers.
            if self.method in COMPOSITE_METHODS:
                callee_methods = COMPOSITE_METHODS[self.method]
            else:
                callee_methods = [self.method]
            set_requests_on = "".join(
                [
                    f".set_{method}_request({{metadata}}=True/False)"
                    for method in callee_methods
                ]
            )
            message = (
                f"[{', '.join([key for key in unrequested])}] are passed but are not"
                " explicitly set as requested or not requested for"
                f" {_routing_repr(self.owner)}.{self.method}, which is used within"
                f" {_routing_repr(parent)}.{caller}. Call `{_routing_repr(self.owner)}"
                + set_requests_on
                + "` for each metadata you want to request/ignore. See the"
                " Metadata Routing User guide"
                " <https://scikit-learn.org/stable/metadata_routing.html> for more"
                " information."
            )
            raise UnsetMetadataPassedError(
                message=message,
                unrequested_params=unrequested,
                routed_params=res,
            )
        return res
    def _consumes(self, params):
        """Return subset of `params` consumed by the method that owns this instance.
        Parameters
        ----------
        params : iterable of str
            An iterable of parameter names to test for consumption.
        Returns
        -------
        consumed_params : set of str
            A subset of parameters from `params` which are consumed by this method.
        """
        params = set(params)
        consumed_params = set()
        for metadata_name, alias in self._requests.items():
            if alias is True and metadata_name in params:
                consumed_params.add(metadata_name)
            elif isinstance(alias, str) and alias in params:
                # Aliased metadata is consumed under its alias name.
                consumed_params.add(alias)
        return consumed_params
    def _serialize(self):
        """Serialize the object.
        Returns
        -------
        obj : dict
            A serialized version of the instance in the form of a dictionary.
        """
        return self._requests
    def __repr__(self):
        return str(self._serialize())
    def __str__(self):
        return str(repr(self))
class MetadataRequest:
    """Contains the metadata request info of a consumer.
    Instances of `MethodMetadataRequest` are used in this class for each
    available method under `metadatarequest.{method}`.
    Consumer-only classes such as simple estimators return a serialized
    version of this class as the output of `get_metadata_routing()`.
    .. versionadded:: 1.3
    Parameters
    ----------
    owner : object
        The object to which these requests belong.
    """
    # this is here for us to use this attribute's value instead of doing
    # `isinstance` in our checks, so that we avoid issues when people vendor
    # this file instead of using it directly from scikit-learn.
    _type = "metadata_request"
    def __init__(self, owner):
        self.owner = owner
        # One MethodMetadataRequest per simple method; composite methods
        # (fit_transform, fit_predict) are synthesized lazily in __getattr__.
        for method in SIMPLE_METHODS:
            setattr(
                self,
                method,
                MethodMetadataRequest(owner=owner, method=method),
            )
    def consumes(self, method, params):
        """Return params consumed as metadata in a :term:`consumer`.
        This method returns the subset of given `params` that are consumed by the
        given `method`. It can be used to check if parameters are used as metadata in
        the specified method of the :term:`consumer` that owns this `MetadataRequest`
        instance.
        .. versionadded:: 1.4
        Parameters
        ----------
        method : str
            The name of the method for which to determine consumed parameters.
        params : iterable of str
            An iterable of parameter names to test for consumption.
        Returns
        -------
        consumed_params : set of str
            A subset of parameters from `params` which are consumed by the given method.
        """
        return getattr(self, method)._consumes(params=params)
    def __getattr__(self, name):
        # Called when the default attribute access fails with an AttributeError
        # (either __getattribute__() raises an AttributeError because name is
        # not an instance attribute or an attribute in the class tree for self;
        # or __get__() of a name property raises AttributeError). This method
        # should either return the (computed) attribute value or raise an
        # AttributeError exception.
        # https://docs.python.org/3/reference/datamodel.html#object.__getattr__
        if name not in COMPOSITE_METHODS:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'"
            )
        # Merge the requests of the underlying simple methods; conflicting
        # request values for the same metadata name are an error.
        requests = {}
        for method in COMPOSITE_METHODS[name]:
            mmr = getattr(self, method)
            existing = set(requests.keys())
            upcoming = set(mmr.requests.keys())
            common = existing & upcoming
            conflicts = [key for key in common if requests[key] != mmr._requests[key]]
            if conflicts:
                raise ValueError(
                    f"Conflicting metadata requests for {', '.join(conflicts)} while"
                    f" composing the requests for {name}. Metadata with the same name"
                    f" for methods {', '.join(COMPOSITE_METHODS[name])} should have the"
                    " same request value."
                )
            requests.update(mmr._requests)
        return MethodMetadataRequest(owner=self.owner, method=name, requests=requests)
    def _get_param_names(self, method, return_alias, ignore_self_request=None):
        """Get names of all metadata that can be consumed or routed by specified \
        method.
        This method returns the names of all metadata, even the ``False``
        ones.
        Parameters
        ----------
        method : str
            The name of the method for which metadata names are requested.
        return_alias : bool
            Controls whether original or aliased names should be returned. If
            ``False``, aliases are ignored and original names are returned.
        ignore_self_request : bool
            Ignored. Present for API compatibility.
        Returns
        -------
        names : set of str
            A set of strings with the names of all metadata.
        """
        return getattr(self, method)._get_param_names(return_alias=return_alias)
    def _route_params(self, *, params, method, parent, caller):
        """Prepare the given parameters to be passed to the method.
        The output of this method can be used directly as the input to the
        corresponding method as extra keyword arguments to pass metadata.
        Parameters
        ----------
        params : dict
            A dictionary of provided metadata.
        method : str
            The name of the method for which the parameters are requested and
            routed.
        parent : object
            Parent class object, that routes the metadata.
        caller : str
            Method from the parent class object, where the metadata is routed from.
        Returns
        -------
        params : Bunch
            A :class:`~sklearn.utils.Bunch` of {metadata: value} which can be given to
            the corresponding method.
        """
        return getattr(self, method)._route_params(
            params=params, parent=parent, caller=caller
        )
    def _check_warnings(self, *, method, params):
        """Check whether metadata is passed which is marked as WARN.
        If any metadata is passed which is marked as WARN, a warning is raised.
        Parameters
        ----------
        method : str
            The name of the method for which the warnings should be checked.
        params : dict
            The metadata passed to a method.
        """
        getattr(self, method)._check_warnings(params=params)
    def _serialize(self):
        """Serialize the object.
        Returns
        -------
        obj : dict
            A serialized version of the instance in the form of a dictionary.
        """
        output = dict()
        # Only methods with at least one request are included.
        for method in SIMPLE_METHODS:
            mmr = getattr(self, method)
            if len(mmr.requests):
                output[method] = mmr._serialize()
        return output
    def __repr__(self):
        return str(self._serialize())
    def __str__(self):
        return str(repr(self))
# Metadata Request for Routers
# ============================
# This section includes all objects required for MetadataRouter which is used
# in routers, returned by their ``get_metadata_routing``.
# `RouterMappingPair` is used to store a (mapping, router) tuple where `mapping` is a
# `MethodMapping` object and `router` is the output of `get_metadata_routing`.
# `MetadataRouter` stores a collection of `RouterMappingPair` objects in its
# `_route_mappings` attribute.
RouterMappingPair = namedtuple("RouterMappingPair", ["mapping", "router"])
# `MethodPair` is used to store a single method routing. `MethodMapping` stores a list
# of `MethodPair` objects in its `_routes` attribute.
# Fields: `caller` is the router's method, `callee` the consumer's method.
MethodPair = namedtuple("MethodPair", ["caller", "callee"])
class MethodMapping:
    """Stores the mapping between caller and callee methods for a :term:`router`.

    Used inside a router object's ``get_metadata_routing()`` to declare which
    of the router's methods (callers) invoke which methods of a sub-object
    (a sub-estimator or a scorer; callees). Iterating over an instance yields
    ``MethodPair(caller, callee)`` entries in insertion order.

    .. versionadded:: 1.3
    """

    def __init__(self):
        # Ordered list of MethodPair entries.
        self._routes = []

    def __iter__(self):
        yield from self._routes

    def add(self, *, caller, callee):
        """Add a method mapping.

        Parameters
        ----------
        caller : str
            Parent estimator's method name in which the ``callee`` is called.

        callee : str
            Child object's method name. This method is called in ``caller``.

        Returns
        -------
        self : MethodMapping
            Returns self.
        """
        # Both ends of the mapping must be known routable methods.
        for role, method_name in (("caller", caller), ("callee", callee)):
            if method_name not in METHODS:
                raise ValueError(
                    f"Given {role}:{method_name} is not a valid method. Valid"
                    f" methods are: {METHODS}"
                )
        self._routes.append(MethodPair(caller=caller, callee=callee))
        return self

    def _serialize(self):
        """Serialize the object.

        Returns
        -------
        obj : list
            A serialized version of the instance in the form of a list.
        """
        return [
            {"caller": route.caller, "callee": route.callee} for route in self._routes
        ]

    def __repr__(self):
        return str(self._serialize())

    def __str__(self):
        return str(repr(self))
class MetadataRouter:
"""Coordinates metadata routing for a :term:`router` object.
This class is used by :term:`meta-estimators` or functions that can route metadata,
to handle their metadata routing. Routing information is stored in a
dictionary-like structure of the form ``{"object_name":
RouterMappingPair(mapping, router)}``, where ``mapping``
is an instance of :class:`~sklearn.utils.metadata_routing.MethodMapping` and
``router`` is either a
:class:`~sklearn.utils.metadata_routing.MetadataRequest` or another
:class:`~sklearn.utils.metadata_routing.MetadataRouter` instance.
.. versionadded:: 1.3
Parameters
----------
owner : object
The object to which these requests belong.
"""
# this is here for us to use this attribute's value instead of doing
# `isinstance`` in our checks, so that we avoid issues when people vendor
# this file instead of using it directly from scikit-learn.
_type = "metadata_router"
def __init__(self, owner):
self._route_mappings = dict()
# `_self_request` is used if the router is also a consumer.
# _self_request, (added using `add_self_request()`) is treated
# differently from the other consumer objects which are stored in
# _route_mappings.
self._self_request = None
self.owner = owner
def add_self_request(self, obj):
"""Add `self` (as a :term:`consumer`) to the `MetadataRouter`.
This method is used if the :term:`router` is also a :term:`consumer`, and hence
the router itself needs to be included in the routing. The passed object
can be an estimator or a
:class:`~sklearn.utils.metadata_routing.MetadataRequest`.
A router should add itself using this method instead of `add` since it
should be treated differently than the other consumer objects to which metadata
is routed by the router.
Parameters
----------
obj : object
This is typically the router instance, i.e. `self` in a
``get_metadata_routing()`` implementation. It can also be a
``MetadataRequest`` instance.
Returns
-------
self : MetadataRouter
Returns `self`.
"""
if getattr(obj, "_type", None) == "metadata_request":
self._self_request = deepcopy(obj)
elif hasattr(obj, "_get_metadata_request"):
self._self_request = deepcopy(obj._get_metadata_request())
else:
raise ValueError(
"Given `obj` is neither a `MetadataRequest` nor does it implement the"
" required API. Inheriting from `BaseEstimator` implements the required"
" API."
)
return self
def add(self, *, method_mapping, **objs):
    """Add :term:`consumers <consumer>` to the `MetadataRouter`.

    The estimators that consume metadata are passed as named objects along with a
    method mapping, that defines how their methods relate to those of the
    :term:`router`.

    Parameters
    ----------
    method_mapping : MethodMapping
        The mapping between the child (:term:`consumer`) and the parent's
        (:term:`router`'s) methods.

    **objs : dict
        A dictionary of objects, whose requests are extracted by calling
        :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them.

    Returns
    -------
    self : MetadataRouter
        Returns `self`.
    """
    # Copy once up front; all named children share the same mapping object.
    mapping_copy = deepcopy(method_mapping)
    for name, obj in objs.items():
        self._route_mappings[name] = RouterMappingPair(
            mapping=mapping_copy, router=get_routing_for_object(obj)
        )
    return self
def consumes(self, method, params):
"""Return params consumed as metadata in a :term:`router` or its sub-estimators.
This method returns the subset of `params` that are consumed by the
`method`. A `param` is considered consumed if it is used in the specified
method of the :term:`router` itself or any of its sub-estimators (or their
sub-estimators).
.. versionadded:: 1.4
Parameters
----------
method : str
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_tags.py | sklearn/utils/_tags.py | from __future__ import annotations
from dataclasses import dataclass, field
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
@dataclass(slots=True)
class InputTags:
"""Tags for the input data.
Parameters
----------
one_d_array : bool, default=False
Whether the input can be a 1D array.
two_d_array : bool, default=True
Whether the input can be a 2D array. Note that most common
tests currently run only if this flag is set to ``True``.
three_d_array : bool, default=False
Whether the input can be a 3D array.
sparse : bool, default=False
Whether the input can be a sparse matrix.
categorical : bool, default=False
Whether the input can be categorical.
string : bool, default=False
Whether the input can be an array-like of strings.
dict : bool, default=False
Whether the input can be a dictionary.
positive_only : bool, default=False
Whether the estimator requires positive X.
allow_nan : bool, default=False
Whether the estimator supports data with missing values encoded as `np.nan`.
pairwise : bool, default=False
This boolean attribute indicates whether the data (`X`),
:term:`fit` and similar methods consists of pairwise measures
over samples rather than a feature representation for each
sample. It is usually `True` where an estimator has a
`metric` or `affinity` or `kernel` parameter with value
'precomputed'. Its primary purpose is to support a
:term:`meta-estimator` or a cross validation procedure that
extracts a sub-sample of data intended for a pairwise
estimator, where the data needs to be indexed on both axes.
Specifically, this tag is used by
`sklearn.utils.metaestimators._safe_split` to slice rows and
columns.
Note that if setting this tag to ``True`` means the estimator can take only
positive values, the `positive_only` tag must reflect it and also be set to
``True``.
"""
one_d_array: bool = False
two_d_array: bool = True
three_d_array: bool = False
sparse: bool = False
categorical: bool = False
string: bool = False
dict: bool = False
positive_only: bool = False
allow_nan: bool = False
pairwise: bool = False
@dataclass(slots=True)
class TargetTags:
"""Tags for the target data.
Parameters
----------
required : bool
Whether the estimator requires y to be passed to `fit`,
`fit_predict` or `fit_transform` methods. The tag is ``True``
for estimators inheriting from `~sklearn.base.RegressorMixin`
and `~sklearn.base.ClassifierMixin`.
one_d_labels : bool, default=False
Whether the input is a 1D labels (y).
two_d_labels : bool, default=False
Whether the input is a 2D labels (y).
positive_only : bool, default=False
Whether the estimator requires a positive y (only applicable
for regression).
multi_output : bool, default=False
Whether a regressor supports multi-target outputs or a classifier supports
multi-class multi-output.
See :term:`multi-output` in the glossary.
single_output : bool, default=True
Whether the target can be single-output. This can be ``False`` if the
estimator supports only multi-output cases.
"""
required: bool
one_d_labels: bool = False
two_d_labels: bool = False
positive_only: bool = False
multi_output: bool = False
single_output: bool = True
@dataclass(slots=True)
class TransformerTags:
"""Tags for the transformer.
Parameters
----------
preserves_dtype : list[str], default=["float64"]
Applies only on transformers. It corresponds to the data types
which will be preserved such that `X_trans.dtype` is the same
as `X.dtype` after calling `transformer.transform(X)`. If this
list is empty, then the transformer is not expected to
preserve the data type. The first value in the list is
considered as the default data type, corresponding to the data
type of the output when the input data type is not going to be
preserved.
"""
preserves_dtype: list[str] = field(default_factory=lambda: ["float64"])
@dataclass(slots=True)
class ClassifierTags:
"""Tags for the classifier.
Parameters
----------
poor_score : bool, default=False
Whether the estimator fails to provide a "reasonable" test-set
score, which currently for classification is an accuracy of
0.83 on ``make_blobs(n_samples=300, random_state=0)``. The
datasets and values are based on current estimators in scikit-learn
and might be replaced by something more systematic.
multi_class : bool, default=True
Whether the classifier can handle multi-class
classification. Note that all classifiers support binary
classification. Therefore this flag indicates whether the
classifier is a binary-classifier-only or not.
See :term:`multi-class` in the glossary.
multi_label : bool, default=False
Whether the classifier supports multi-label output: a data point can
be predicted to belong to a variable number of classes.
See :term:`multi-label` in the glossary.
"""
poor_score: bool = False
multi_class: bool = True
multi_label: bool = False
@dataclass(slots=True)
class RegressorTags:
"""Tags for the regressor.
Parameters
----------
poor_score : bool, default=False
Whether the estimator fails to provide a "reasonable" test-set
score, which currently for regression is an R2 of 0.5 on
``make_regression(n_samples=200, n_features=10,
n_informative=1, bias=5.0, noise=20, random_state=42)``. The
dataset and values are based on current estimators in scikit-learn
and might be replaced by something more systematic.
"""
poor_score: bool = False
@dataclass(slots=True)
class Tags:
    """Top-level container for all estimator tags.

    See :ref:`estimator_tags` for more information.

    Parameters
    ----------
    estimator_type : str or None
        The type of the estimator. Can be one of:

        - "classifier"
        - "regressor"
        - "transformer"
        - "clusterer"
        - "outlier_detector"
        - "density_estimator"

    target_tags : :class:`TargetTags`
        The target(y) tags.

    transformer_tags : :class:`TransformerTags` or None
        The transformer tags.

    classifier_tags : :class:`ClassifierTags` or None
        The classifier tags.

    regressor_tags : :class:`RegressorTags` or None
        The regressor tags.

    array_api_support : bool, default=False
        Whether the estimator supports Array API compatible inputs.

    no_validation : bool, default=False
        Whether the estimator skips input-validation. This is only meant for
        stateless and dummy transformers!

    non_deterministic : bool, default=False
        Whether the estimator is not deterministic given a fixed ``random_state``.

    requires_fit : bool, default=True
        Whether the estimator requires to be fitted before calling one of
        `transform`, `predict`, `predict_proba`, or `decision_function`.

    _skip_test : bool, default=False
        Whether to skip common tests entirely. Don't use this unless
        you have a *very good* reason.

    input_tags : :class:`InputTags`
        The input data(X) tags.
    """

    estimator_type: str | None
    target_tags: TargetTags
    transformer_tags: TransformerTags | None = None
    classifier_tags: ClassifierTags | None = None
    regressor_tags: RegressorTags | None = None
    array_api_support: bool = False
    no_validation: bool = False
    non_deterministic: bool = False
    requires_fit: bool = True
    _skip_test: bool = False
    input_tags: InputTags = field(default_factory=InputTags)
def get_tags(estimator) -> Tags:
    """Get estimator tags.

    :class:`~sklearn.BaseEstimator` provides the estimator tags machinery.
    For scikit-learn built-in estimators, we should still rely on
    `self.__sklearn_tags__()`. `get_tags(est)` should be used when we
    are not sure where `est` comes from: typically
    `get_tags(self.estimator)` where `self` is a meta-estimator, or in
    the common checks.

    .. versionadded:: 1.6

    Parameters
    ----------
    estimator : estimator object
        The estimator from which to get the tag.

    Returns
    -------
    tags : :class:`~.sklearn.utils.Tags`
        The estimator tags.
    """
    try:
        return estimator.__sklearn_tags__()
    except AttributeError as exc:
        # Any AttributeError raised *inside* a user's `__sklearn_tags__` is
        # propagated untouched; only a missing `__sklearn_tags__` gets the
        # more helpful message below.
        if "object has no attribute '__sklearn_tags__'" not in str(exc):
            raise
        # Typically happens when a class only inherits from Mixins and every
        # class in the MRO calls `super().__sklearn_tags__()` without any
        # implementation at the root.
        raise AttributeError(
            f"The following error was raised: {exc}. It seems that "
            "there are no classes that implement `__sklearn_tags__` "
            "in the MRO and/or all classes in the MRO call "
            "`super().__sklearn_tags__()`. Make sure to inherit from "
            "`BaseEstimator` which implements `__sklearn_tags__` (or "
            "alternatively define `__sklearn_tags__` but we don't recommend "
            "this approach). Note that `BaseEstimator` needs to be on the "
            "right side of other Mixins in the inheritance order."
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_encode.py | sklearn/utils/_encode.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from collections import Counter
from contextlib import suppress
from typing import NamedTuple
import numpy as np
from sklearn.utils._array_api import _isin, device, get_namespace, xpx
from sklearn.utils._missing import is_scalar_nan
def _unique(values, *, return_inverse=False, return_counts=False):
    """Helper function to find unique values with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    return_counts : bool, default=False
        If True, also return the number of times each unique item appears in
        values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.

    unique_counts : ndarray
        The number of times each of the unique values comes up in the original
        array. Only provided if `return_counts` is True.
    """
    # Object arrays (mixed python objects, None, str) need the pure-python
    # path; everything numeric goes through the array-API path.
    handler = _unique_python if values.dtype == object else _unique_np
    return handler(values, return_inverse=return_inverse, return_counts=return_counts)
def _unique_np(values, return_inverse=False, return_counts=False):
    """Helper function to find unique values for numpy arrays that correctly
    accounts for nans. See `_unique` documentation for details."""
    xp, _ = get_namespace(values)

    inverse, counts = None, None

    # Pick the cheapest array-API uniqueness variant for what was requested.
    if return_inverse and return_counts:
        uniques, _, inverse, counts = xp.unique_all(values)
    elif return_inverse:
        uniques, inverse = xp.unique_inverse(values)
    elif return_counts:
        uniques, counts = xp.unique_counts(values)
    else:
        uniques = xp.unique_values(values)

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        # `uniques` is sorted with all nans grouped at the end; searchsorted
        # locates the first nan so only one nan entry is kept.
        nan_idx = xp.searchsorted(uniques, xp.nan)
        uniques = uniques[: nan_idx + 1]
        if return_inverse:
            # Indices that pointed at a duplicate nan are redirected to the
            # single surviving nan entry.
            inverse[inverse > nan_idx] = nan_idx

        if return_counts:
            # Fold the counts of all duplicate nans into the surviving one.
            counts[nan_idx] = xp.sum(counts[nan_idx:])
            counts = counts[: nan_idx + 1]

    # Assemble the result tuple in the same order as np.unique.
    ret = (uniques,)

    if return_inverse:
        ret += (inverse,)

    if return_counts:
        ret += (counts,)

    return ret[0] if len(ret) == 1 else ret
class MissingValues(NamedTuple):
    """Records which missing-value markers (nan and/or None) were observed."""

    nan: bool
    none: bool

    def to_list(self):
        """Convert tuple to a list where None is always first."""
        markers = []
        if self.none:
            markers.append(None)
        if self.nan:
            markers.append(np.nan)
        return markers
def _extract_missing(values):
    """Extract missing values from `values`.

    Parameters
    ----------
    values: set
        Set of values to extract missing from.

    Returns
    -------
    output: set
        Set with missing values extracted.

    missing_values: MissingValues
        Object with missing value information.
    """
    missing = {v for v in values if v is None or is_scalar_nan(v)}

    if not missing:
        return values, MissingValues(nan=False, none=False)

    has_none = None in missing
    # A set can hold at most one `None`; any extra member of `missing` beyond
    # it must therefore be a float nan.
    has_nan = len(missing) > 1 if has_none else True

    # Return a new set without the missing-value markers.
    return values - missing, MissingValues(nan=has_nan, none=has_none)
class _nandict(dict):
    """Dictionary with support for nans."""

    def __init__(self, mapping):
        super().__init__(mapping)
        # nan != nan, so a nan key can never be found by normal lookup;
        # remember the first nan's value for `__missing__` to serve.
        for key, value in mapping.items():
            if is_scalar_nan(key):
                self.nan_value = value
                break

    def __missing__(self, key):
        if is_scalar_nan(key) and hasattr(self, "nan_value"):
            return self.nan_value
        raise KeyError(key)
def _map_to_integer(values, uniques):
    """Map values based on its position in uniques."""
    xp, _ = get_namespace(values, uniques)
    # nan-aware lookup table: value -> its index within `uniques`.
    encoding = _nandict({item: idx for idx, item in enumerate(uniques)})
    return xp.asarray([encoding[item] for item in values], device=device(values))
def _unique_python(values, *, return_inverse, return_counts):
    # Only used in `_uniques`, see docstring there for details
    try:
        # Missing markers (None/nan) are pulled out before sorting, since
        # they don't compare against the regular values, then appended last.
        uniques_set, missing_values = _extract_missing(set(values))
        uniques = sorted(uniques_set) + missing_values.to_list()
        uniques = np.array(uniques, dtype=values.dtype)
    except TypeError:
        # Mixed, unorderable types (e.g. str and int) fail in `sorted`.
        types = sorted(t.__qualname__ for t in {type(v) for v in values})
        raise TypeError(
            "Encoders require their input argument must be uniformly "
            f"strings or numbers. Got {types}"
        )

    ret = (uniques,)

    if return_inverse:
        ret += (_map_to_integer(values, uniques),)

    if return_counts:
        ret += (_get_counts(values, uniques),)

    return ret[0] if len(ret) == 1 else ret
def _encode(values, *, uniques, check_unknown=True):
    """Helper function to encode values into [0, n_uniques - 1].

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    The numpy method has the limitation that the `uniques` need to
    be sorted. Importantly, this is not checked but assumed to already be
    the case. The calling method needs to ensure this for all non-object
    values.

    Parameters
    ----------
    values : ndarray
        Values to encode.
    uniques : ndarray
        The unique values in `values`. If the dtype is not object, then
        `uniques` needs to be sorted.
    check_unknown : bool, default=True
        If True, check for values in `values` that are not in `unique`
        and raise an error. This is ignored for object dtype, and treated as
        True in this case. This parameter is useful for
        _BaseEncoder._transform() to avoid calling _check_unknown()
        twice.

    Returns
    -------
    encoded : ndarray
        Encoded values
    """
    xp, _ = get_namespace(values, uniques)
    if xp.isdtype(values.dtype, "numeric"):
        # Numeric path relies on `uniques` being sorted.
        if check_unknown:
            diff = _check_unknown(values, uniques)
            if diff:
                raise ValueError(f"y contains previously unseen labels: {diff}")
        return xp.searchsorted(uniques, values)
    # Non-numeric path: dict lookup; a missing key means an unseen label.
    try:
        return _map_to_integer(values, uniques)
    except KeyError as e:
        raise ValueError(f"y contains previously unseen labels: {e}")
def _check_unknown(values, known_values, return_mask=False):
    """
    Helper function to check for unknowns in values to be encoded.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : array
        Values to check for unknowns.
    known_values : array
        Known values. Must be unique.
    return_mask : bool, default=False
        If True, return a mask of the same shape as `values` indicating
        the valid values.

    Returns
    -------
    diff : list
        The unique values present in `values` and not in `known_values`.
    valid_mask : boolean array
        Additionally returned if ``return_mask=True``.
    """
    xp, _ = get_namespace(values, known_values)
    valid_mask = None

    if not xp.isdtype(values.dtype, "numeric"):
        # Object-dtype path: plain python sets, with None/nan extracted
        # separately because nan != nan breaks ordinary set membership.
        values_set = set(values)
        values_set, missing_in_values = _extract_missing(values_set)

        uniques_set = set(known_values)
        uniques_set, missing_in_uniques = _extract_missing(uniques_set)
        diff = values_set - uniques_set

        # A missing-value marker only counts as unknown when it appears in
        # `values` but not in `known_values`.
        nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan
        none_in_diff = missing_in_values.none and not missing_in_uniques.none

        def is_valid(value):
            # Valid if a known regular value, or a known missing marker.
            return (
                value in uniques_set
                or (missing_in_uniques.none and value is None)
                or (missing_in_uniques.nan and is_scalar_nan(value))
            )

        if return_mask:
            if diff or nan_in_diff or none_in_diff:
                valid_mask = xp.array([is_valid(value) for value in values])
            else:
                # Nothing unknown: all entries are valid.
                valid_mask = xp.ones(len(values), dtype=xp.bool)

        diff = list(diff)
        if none_in_diff:
            diff.append(None)
        if nan_in_diff:
            diff.append(np.nan)
    else:
        # Numeric path: vectorized set difference.
        unique_values = xp.unique_values(values)
        diff = xpx.setdiff1d(unique_values, known_values, assume_unique=True, xp=xp)
        if return_mask:
            if diff.size:
                valid_mask = _isin(values, known_values, xp)
            else:
                valid_mask = xp.ones(len(values), dtype=xp.bool)

        # check for nans in the known_values
        if xp.any(xp.isnan(known_values)):
            diff_is_nan = xp.isnan(diff)
            if xp.any(diff_is_nan):
                # nan is actually known: flip the nan positions in the mask
                # back to valid (`_isin` cannot match nan against nan).
                if diff.size and return_mask:
                    is_nan = xp.isnan(values)
                    valid_mask[is_nan] = 1

                # remove nan from diff
                diff = diff[~diff_is_nan]
        diff = list(diff)

    if return_mask:
        return diff, valid_mask
    return diff
class _NaNCounter(Counter):
    """Counter with support for nan values."""

    def __init__(self, items):
        super().__init__(self._generate_items(items))

    def _generate_items(self, items):
        """Generate items without nans. Stores the nan counts separately."""
        for item in items:
            if is_scalar_nan(item):
                # nan != nan, so nans are tallied on the side instead of
                # being passed to the underlying Counter.
                self.nan_count = getattr(self, "nan_count", 0) + 1
            else:
                yield item

    def __missing__(self, key):
        if is_scalar_nan(key) and hasattr(self, "nan_count"):
            return self.nan_count
        raise KeyError(key)
def _get_counts(values, uniques):
    """Get the count of each of the `uniques` in `values`.

    The counts will use the order passed in by `uniques`. For non-object dtypes,
    `uniques` is assumed to be sorted and `np.nan` is at the end.
    """
    if values.dtype.kind in "OU":
        # Object/unicode dtype: count with a nan-aware Counter.
        counter = _NaNCounter(values)
        output = np.zeros(len(uniques), dtype=np.int64)
        for i, item in enumerate(uniques):
            # Uniques that never occur in `values` keep their zero count.
            with suppress(KeyError):
                output[i] = counter[item]
        return output

    unique_values, counts = _unique_np(values, return_counts=True)

    # Reorder unique_values based on input: `uniques`
    uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
    if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
        # np.isin cannot match nan against nan; both arrays keep nan at the
        # end (sorted convention), so patch that entry manually.
        uniques_in_values[-1] = True

    # Map each requested unique to its position in the sorted unique_values,
    # then scatter the corresponding counts into the output order.
    unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
    output = np.zeros_like(uniques, dtype=np.int64)
    output[uniques_in_values] = counts[unique_valid_indices]
    return output
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/sparsefuncs.py | sklearn/utils/sparsefuncs.py | """A collection of utilities to work with sparse matrices and arrays."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import LinearOperator
from sklearn.utils.fixes import _sparse_min_max, _sparse_nan_min_max
from sklearn.utils.sparsefuncs_fast import (
csc_mean_variance_axis0 as _csc_mean_var_axis0,
)
from sklearn.utils.sparsefuncs_fast import (
csr_matmul_csr_to_dense,
)
from sklearn.utils.sparsefuncs_fast import (
csr_mean_variance_axis0 as _csr_mean_var_axis0,
)
from sklearn.utils.sparsefuncs_fast import (
incr_mean_variance_axis0 as _incr_mean_var_axis0,
)
from sklearn.utils.validation import _check_sample_weight
def _raise_typeerror(X):
"""Raises a TypeError if X is not a CSR or CSC matrix"""
input_type = X.format if sp.issparse(X) else type(X)
err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
raise TypeError(err)
def _raise_error_wrong_axis(axis):
if axis not in (0, 1):
raise ValueError(
"Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis
)
def inplace_csr_column_scale(X, scale):
    """Inplace column scaling of a CSR matrix.

    Multiplies every column ``j`` of ``X`` by ``scale[j]``, assuming an
    ``(n_samples, n_features)`` shape, without densifying the matrix.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix to normalize using the variance of the features.
        It should be of CSR format.

    scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
        Array of precomputed feature-wise values to use for scaling.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 3, 4, 4, 4])
    >>> indices = np.array([0, 1, 2, 2])
    >>> data = np.array([8, 1, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> sparsefuncs.inplace_csr_column_scale(csr, np.array([2, 3, 2]))
    >>> csr.todense()
    matrix([[16,  3,  4],
            [ 0,  0, 10],
            [ 0,  0,  0],
            [ 0,  0,  0]])
    """
    n_features = X.shape[1]
    assert scale.shape[0] == n_features
    # `X.indices[k]` is the column of stored entry `X.data[k]`; `take` with
    # mode="clip" gathers the matching per-column factor for every entry.
    X.data *= scale.take(X.indices, mode="clip")
def inplace_csr_row_scale(X, scale):
    """Inplace row scaling of a CSR matrix.

    Multiplies every row ``i`` of ``X`` by ``scale[i]``, assuming an
    ``(n_samples, n_features)`` shape, without densifying the matrix.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix to be scaled. It should be of CSR format.

    scale : ndarray of float of shape (n_samples,)
        Array of precomputed sample-wise values to use for scaling.
    """
    n_samples = X.shape[0]
    assert scale.shape[0] == n_samples
    # Row i owns indptr[i+1] - indptr[i] stored values; repeat its scale
    # factor that many times to line up with `X.data`.
    row_nnz = np.diff(X.indptr)
    X.data *= np.repeat(scale, row_nnz)
def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
    """Compute mean and variance along an axis on a CSR or CSC matrix.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Input data. It can be of CSR or CSC format.

    axis : {0, 1}
        Axis along which the axis should be computed.

    weights : ndarray of shape (n_samples,) or (n_features,), default=None
        If axis is set to 0 shape is (n_samples,) or
        if axis is set to 1 shape is (n_features,).
        If it is set to None, then samples are equally weighted.

        .. versionadded:: 0.24

    return_sum_weights : bool, default=False
        If True, returns the sum of weights seen for each feature
        if `axis=0` or each sample if `axis=1`.

        .. versionadded:: 0.24

    Returns
    -------
    means : ndarray of shape (n_features,), dtype=floating
        Feature-wise means.

    variances : ndarray of shape (n_features,), dtype=floating
        Feature-wise variances.

    sum_weights : ndarray of shape (n_features,), dtype=floating
        Returned if `return_sum_weights` is `True`.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 3, 4, 4, 4])
    >>> indices = np.array([0, 1, 2, 2])
    >>> data = np.array([8, 1, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> sparsefuncs.mean_variance_axis(csr, axis=0)
    (array([2.  , 0.25, 1.75]), array([12.    ,  0.1875,  4.1875]))
    """
    _raise_error_wrong_axis(axis)

    if not sp.issparse(X) or X.format not in ("csr", "csc"):
        _raise_typeerror(X)

    # Both Cython helpers work along axis 0 only. For axis=1 we transpose,
    # which also flips the sparse format between CSR and CSC.
    if axis == 1:
        X = X.T

    if X.format == "csr":
        return _csr_mean_var_axis0(
            X, weights=weights, return_sum_weights=return_sum_weights
        )
    return _csc_mean_var_axis0(
        X, weights=weights, return_sum_weights=return_sum_weights
    )
def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
    """Compute incremental mean and variance along an axis on a CSR or CSC matrix.

    last_mean, last_var are the statistics computed at the last step by this
    function. Both must be initialized to 0-arrays of the proper size, i.e.
    the number of features in X. last_n is the number of samples encountered
    until now.

    Parameters
    ----------
    X : CSR or CSC sparse matrix of shape (n_samples, n_features)
        Input data.

    axis : {0, 1}
        Axis along which the axis should be computed.

    last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
        Array of means to update with the new data X.
        Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.

    last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
        Array of variances to update with the new data X.
        Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.

    last_n : float or ndarray of shape (n_features,) or (n_samples,), \
            dtype=floating
        Sum of the weights seen so far, excluding the current weights
        If not float, it should be of shape (n_features,) if
        axis=0 or (n_samples,) if axis=1. If float it corresponds to
        having same weights for all samples (or features).

    weights : ndarray of shape (n_samples,) or (n_features,), default=None
        If axis is set to 0 shape is (n_samples,) or
        if axis is set to 1 shape is (n_features,).
        If it is set to None, then samples are equally weighted.

        .. versionadded:: 0.24

    Returns
    -------
    means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
        Updated feature-wise means if axis = 0 or
        sample-wise means if axis = 1.

    variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
        Updated feature-wise variances if axis = 0 or
        sample-wise variances if axis = 1.

    n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
        Updated number of seen samples per feature if axis=0
        or number of seen features per sample if axis=1.

        If weights is not None, n is a sum of the weights of the seen
        samples or features instead of the actual number of seen
        samples or features.

    Notes
    -----
    NaNs are ignored in the algorithm.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 3, 4, 4, 4])
    >>> indices = np.array([0, 1, 2, 2])
    >>> data = np.array([8, 1, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> csr.todense()
    matrix([[8, 1, 2],
            [0, 0, 5],
            [0, 0, 0],
            [0, 0, 0]])
    >>> sparsefuncs.incr_mean_variance_axis(
    ...     csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
    ... )
    (array([1.33, 0.167, 1.17]), array([8.88, 0.139, 3.47]),
    array([6., 6., 6.]))
    """
    _raise_error_wrong_axis(axis)

    if not (sp.issparse(X) and X.format in ("csc", "csr")):
        _raise_typeerror(X)

    # A scalar `last_n` means uniform weights so far: broadcast it to one
    # entry per tracked statistic.
    if np.size(last_n) == 1:
        last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)

    if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):
        raise ValueError("last_mean, last_var, last_n do not have the same shapes.")

    # The statistics arrays must match the length of the chosen axis.
    if axis == 1:
        if np.size(last_mean) != X.shape[0]:
            raise ValueError(
                "If axis=1, then last_mean, last_n, last_var should be of "
                f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})."
            )
    else:  # axis == 0
        if np.size(last_mean) != X.shape[1]:
            raise ValueError(
                "If axis=0, then last_mean, last_n, last_var should be of "
                f"size n_features {X.shape[1]} (Got {np.size(last_mean)})."
            )

    # The Cython helper works along axis 0 only; transpose for axis=1.
    X = X.T if axis == 1 else X

    if weights is not None:
        weights = _check_sample_weight(weights, X, dtype=X.dtype)

    return _incr_mean_var_axis0(
        X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights
    )
def inplace_column_scale(X, scale):
    """Inplace column scaling of a CSC/CSR matrix.

    Multiplies every column ``j`` of ``X`` by ``scale[j]``, assuming an
    ``(n_samples, n_features)`` shape, without densifying the matrix.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix to normalize using the variance of the features. It should be
        of CSC or CSR format.

    scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
        Array of precomputed feature-wise values to use for scaling.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 3, 4, 4, 4])
    >>> indices = np.array([0, 1, 2, 2])
    >>> data = np.array([8, 1, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> sparsefuncs.inplace_column_scale(csr, np.array([2, 3, 2]))
    >>> csr.todense()
    matrix([[16,  3,  4],
            [ 0,  0, 10],
            [ 0,  0,  0],
            [ 0,  0,  0]])
    """
    if not sp.issparse(X):
        _raise_typeerror(X)
    if X.format == "csc":
        # `X.T` views a CSC matrix as CSR, turning column scaling into
        # row scaling of the transpose (shares the same data buffer).
        inplace_csr_row_scale(X.T, scale)
    elif X.format == "csr":
        inplace_csr_column_scale(X, scale)
    else:
        _raise_typeerror(X)
def inplace_row_scale(X, scale):
    """Inplace row scaling of a CSR or CSC matrix.

    Scale each row of the data matrix by multiplying with specific scale
    provided by the caller assuming a (n_samples, n_features) shape.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix to be scaled. It should be of CSR or CSC format.

    scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
        Array of precomputed sample-wise values to use for scaling.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 2, 3, 4, 5])
    >>> indices = np.array([0, 1, 2, 3, 3])
    >>> data = np.array([8, 1, 2, 5, 6])
    >>> scale = np.array([2, 3, 4, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> csr.todense()
    matrix([[8, 1, 0, 0],
            [0, 0, 2, 0],
            [0, 0, 0, 5],
            [0, 0, 0, 6]])
    >>> sparsefuncs.inplace_row_scale(csr, scale)
    >>> csr.todense()
    matrix([[16,  2,  0,  0],
            [ 0,  0,  6,  0],
            [ 0,  0,  0, 20],
            [ 0,  0,  0, 30]])
    """
    fmt = X.format if sp.issparse(X) else None
    if fmt == "csc":
        # Scaling the rows of a CSC matrix amounts to scaling the columns of
        # its transpose, which is a CSR view sharing the same buffers.
        inplace_csr_column_scale(X.T, scale)
    elif fmt == "csr":
        inplace_csr_row_scale(X, scale)
    else:
        _raise_typeerror(X)
def inplace_swap_row_csc(X, m, n):
    """Swap two rows of a CSC matrix in-place.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix whose two rows are to be swapped. It should be of
        CSC format.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.
    """
    if isinstance(m, np.ndarray) or isinstance(n, np.ndarray):
        raise TypeError("m and n should be valid integers")

    n_rows = X.shape[0]
    if m < 0:
        m += n_rows
    if n < 0:
        n += n_rows

    # In CSC format ``indices`` stores the row index of every stored value,
    # so swapping two rows is just exchanging the two row labels there.
    rows_equal_m = X.indices == m
    X.indices[X.indices == n] = m
    X.indices[rows_equal_m] = n
def inplace_swap_row_csr(X, m, n):
    """Swap two rows of a CSR matrix in-place.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix whose two rows are to be swapped. It should be of
        CSR format.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.
    """
    if isinstance(m, np.ndarray) or isinstance(n, np.ndarray):
        raise TypeError("m and n should be valid integers")

    n_rows = X.shape[0]
    if m < 0:
        m += n_rows
    if n < 0:
        n += n_rows

    # Order the two indices so that ``m`` refers to the earlier row; the
    # slicing below relies on m <= n.
    m, n = min(m, n), max(m, n)

    start_m, stop_m = X.indptr[m], X.indptr[m + 1]
    start_n, stop_n = X.indptr[n], X.indptr[n + 1]
    nnz_m = stop_m - start_m
    nnz_n = stop_n - start_n

    if nnz_m != nnz_n:
        # The rows hold different numbers of stored values: every row strictly
        # between them shifts by the difference, and the boundaries of the two
        # swapped rows must be patched before rebuilding indices/data.
        X.indptr[m + 2 : n] += nnz_n - nnz_m
        X.indptr[m + 1] = start_m + nnz_n
        X.indptr[n] = stop_n - nnz_m

    # Rebuild indices and data with the two row segments exchanged; the
    # slice boundaries were captured before indptr was modified.
    X.indices = np.concatenate(
        [
            X.indices[:start_m],
            X.indices[start_n:stop_n],
            X.indices[stop_m:start_n],
            X.indices[start_m:stop_m],
            X.indices[stop_n:],
        ]
    )
    X.data = np.concatenate(
        [
            X.data[:start_m],
            X.data[start_n:stop_n],
            X.data[stop_m:start_n],
            X.data[start_m:stop_m],
            X.data[stop_n:],
        ]
    )
def inplace_swap_row(X, m, n):
    """
    Swap two rows of a CSC/CSR matrix in-place.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix whose two rows are to be swapped. It should be of CSR or
        CSC format.

    m : int
        Index of the row of X to be swapped.

    n : int
        Index of the row of X to be swapped.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 2, 3, 3, 3])
    >>> indices = np.array([0, 2, 2])
    >>> data = np.array([8, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> csr.todense()
    matrix([[8, 0, 2],
            [0, 0, 5],
            [0, 0, 0],
            [0, 0, 0]])
    >>> sparsefuncs.inplace_swap_row(csr, 0, 1)
    >>> csr.todense()
    matrix([[0, 0, 5],
            [8, 0, 2],
            [0, 0, 0],
            [0, 0, 0]])
    """
    # Dispatch to the format-specific implementation.
    fmt = X.format if sp.issparse(X) else None
    if fmt == "csc":
        inplace_swap_row_csc(X, m, n)
    elif fmt == "csr":
        inplace_swap_row_csr(X, m, n)
    else:
        _raise_typeerror(X)
def inplace_swap_column(X, m, n):
    """
    Swap two columns of a CSC/CSR matrix in-place.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Matrix whose two columns are to be swapped. It should be of
        CSR or CSC format.

    m : int
        Index of the column of X to be swapped.

    n : int
        Index of the column of X to be swapped.

    Examples
    --------
    >>> from sklearn.utils import sparsefuncs
    >>> from scipy import sparse
    >>> import numpy as np
    >>> indptr = np.array([0, 2, 3, 3, 3])
    >>> indices = np.array([0, 2, 2])
    >>> data = np.array([8, 2, 5])
    >>> csr = sparse.csr_matrix((data, indices, indptr))
    >>> csr.todense()
    matrix([[8, 0, 2],
            [0, 0, 5],
            [0, 0, 0],
            [0, 0, 0]])
    >>> sparsefuncs.inplace_swap_column(csr, 0, 1)
    >>> csr.todense()
    matrix([[0, 8, 2],
            [0, 0, 5],
            [0, 0, 0],
            [0, 0, 0]])
    """
    n_features = X.shape[1]
    if m < 0:
        m += n_features
    if n < 0:
        n += n_features
    # Columns of a CSC matrix are laid out exactly like rows of a CSR matrix
    # (and vice versa), so each format reuses the *other* format's row-swap
    # routine. The apparent cross-dispatch below is intentional.
    fmt = X.format if sp.issparse(X) else None
    if fmt == "csc":
        inplace_swap_row_csr(X, m, n)
    elif fmt == "csr":
        inplace_swap_row_csc(X, m, n)
    else:
        _raise_typeerror(X)
def min_max_axis(X, axis, ignore_nan=False):
    """Compute minimum and maximum along an axis on a CSR or CSC matrix.

    Optionally ignore NaN values.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Input data. It should be of CSR or CSC format.

    axis : {0, 1}
        Axis along which the axis should be computed.

    ignore_nan : bool, default=False
        Ignore or passing through NaN values.

        .. versionadded:: 0.20

    Returns
    -------
    mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
        Feature-wise minima.

    maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
        Feature-wise maxima.
    """
    if sp.issparse(X) and X.format in ("csr", "csc"):
        # Pick the NaN-aware reduction only when requested.
        reduction = _sparse_nan_min_max if ignore_nan else _sparse_min_max
        return reduction(X, axis=axis)
    _raise_typeerror(X)
def count_nonzero(X, axis=None, sample_weight=None):
    """A variant of X.getnnz() with extension to weighting on axis 0.

    Useful in efficiently calculating multilabel metrics.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_labels)
        Input data. It should be of CSR format.

    axis : {0, 1, -1, -2}, default=None
        The axis on which the data is aggregated. Negative values count from
        the end: -1 is equivalent to 1 and -2 to 0. If None, the total count
        is returned.

    sample_weight : array-like of shape (n_samples,), default=None
        Weight for each row of X.

    Returns
    -------
    nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
        Number of non-zero values in the array along a given axis. Otherwise,
        the total number of non-zero values in the array is returned.
    """
    # Bug fix: this format check used to live in the axis-normalization
    # ``elif`` chain below, so it was silently skipped when axis was -1 or -2
    # and non-CSR input produced wrong results instead of raising.
    if X.format != "csr":
        raise TypeError("Expected CSR sparse format, got {0}".format(X.format))
    # Normalize negative axis values.
    if axis == -1:
        axis = 1
    elif axis == -2:
        axis = 0

    # We rely here on the fact that np.diff(Y.indptr) for a CSR
    # will return the number of nonzero entries in each row.
    # A bincount over Y.indices will return the number of nonzeros
    # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
    if axis is None:
        if sample_weight is None:
            return X.nnz
        else:
            return np.dot(np.diff(X.indptr), sample_weight)
    elif axis == 1:
        out = np.diff(X.indptr)
        if sample_weight is None:
            # astype here is for consistency with axis=0 dtype
            return out.astype("intp")
        return out * sample_weight
    elif axis == 0:
        if sample_weight is None:
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            weights = np.repeat(sample_weight, np.diff(X.indptr))
            return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
    else:
        raise ValueError("Unsupported axis: {0}".format(axis))
def _get_median(data, n_zeros):
    """Compute the median of data with n_zeros additional zeros.

    This function is used to support sparse matrices; it modifies data
    in-place.
    """
    n_total = len(data) + n_zeros
    if not n_total:
        # Nothing to take a median of.
        return np.nan
    # Count of negative entries: the implicit zeros sort right after them.
    n_negative = np.count_nonzero(data < 0)
    half, odd = divmod(n_total, 2)
    data.sort()

    if odd:
        return _get_elem_at_rank(half, data, n_negative, n_zeros)

    # Even count: average the two middle ranks.
    lower = _get_elem_at_rank(half - 1, data, n_negative, n_zeros)
    upper = _get_elem_at_rank(half, data, n_negative, n_zeros)
    return (lower + upper) / 2.0
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
def csc_median_axis_0(X):
    """Find the median across axis 0 of a CSC matrix.

    It is equivalent to doing np.median(X, axis=0).

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Input data. It should be of CSC format.

    Returns
    -------
    median : ndarray of shape (n_features,)
        Median.
    """
    if not (sp.issparse(X) and X.format == "csc"):
        raise TypeError("Expected matrix of CSC format, got %s" % X.format)

    n_samples, n_features = X.shape
    medians = np.zeros(n_features)
    # Consecutive indptr entries delimit the stored values of each column.
    for col, (begin, end) in enumerate(itertools.pairwise(X.indptr)):
        # Copy so that the in-place sort inside _get_median cannot mutate X.
        col_data = np.copy(X.data[begin:end])
        n_implicit_zeros = n_samples - col_data.size
        medians[col] = _get_median(col_data, n_implicit_zeros)

    return medians
def _implicit_column_offset(X, offset):
"""Create an implicitly offset linear operator.
This is used by PCA on sparse data to avoid densifying the whole data
matrix.
Params
------
X : sparse matrix of shape (n_samples, n_features)
offset : ndarray of shape (n_features,)
Returns
-------
centered : LinearOperator
"""
offset = offset[None, :]
XT = X.T
return LinearOperator(
matvec=lambda x: X @ x - offset @ x,
matmat=lambda x: X @ x - offset @ x,
rmatvec=lambda x: XT @ x - (offset * x.sum()),
rmatmat=lambda x: XT @ x - offset.T @ x.sum(axis=0)[None, :],
dtype=X.dtype,
shape=X.shape,
)
def sparse_matmul_to_dense(A, B, out=None):
    """Compute A @ B for sparse and 2-dim A and B while returning an ndarray.

    Parameters
    ----------
    A : sparse matrix of shape (n1, n2) and format CSC or CSR
        Left-side input matrix.

    B : sparse matrix of shape (n2, n3) and format CSC or CSR
        Right-side input matrix.

    out : ndarray of shape (n1, n3) or None
        Optional ndarray into which the result is written. Must be
        C-contiguous with the same dtype as ``A``.

    Returns
    -------
    out
        An ndarray, new created if out=None.
    """
    if not (sp.issparse(A) and A.format in ("csc", "csr") and A.ndim == 2):
        raise ValueError("Input 'A' must be a sparse 2-dim CSC or CSR array.")
    if not (sp.issparse(B) and B.format in ("csc", "csr") and B.ndim == 2):
        raise ValueError("Input 'B' must be a sparse 2-dim CSC or CSR array.")
    if A.shape[1] != B.shape[0]:
        msg = (
            "Shapes must fulfil A.shape[1] == B.shape[0], "
            f"got {A.shape[1]} == {B.shape[0]}."
        )
        raise ValueError(msg)
    n1, n2 = A.shape
    n3 = B.shape[1]
    if A.dtype != B.dtype or A.dtype not in (np.float32, np.float64):
        msg = "Dtype of A and B must be the same, either both float32 or float64."
        raise ValueError(msg)
    if out is None:
        out = np.empty((n1, n3), dtype=A.data.dtype)
    else:
        if out.shape[0] != n1 or out.shape[1] != n3:
            # Bug fix: this message was missing its f-string prefix and
            # printed the literal placeholders instead of the actual shapes.
            raise ValueError(f"Shape of out must be ({n1}, {n3}), got {out.shape}.")
        if out.dtype != A.data.dtype:
            # Bug fix: removed the doubled trailing period.
            raise ValueError("Dtype of out must match that of input A.")

    transpose_out = False
    if A.format == "csc":
        if B.format == "csc":
            # out.T = (A @ B).T = B.T @ A.T, note that A.T and B.T are csr
            transpose_out = True
            A, B, out = B.T, A.T, out.T
            n1, n3 = n3, n1
        else:
            # It seems best to just convert to csr.
            A = A.tocsr()
    elif B.format == "csc":
        # It seems best to just convert to csr.
        B = B.tocsr()
    # Dedicated Cython kernel for dense_C = csr_A @ csr_B.
    csr_matmul_csr_to_dense(
        A.data, A.indices, A.indptr, B.data, B.indices, B.indptr, out, n1, n2, n3
    )
    if transpose_out:
        out = out.T
    return out
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/discovery.py | sklearn/utils/discovery.py | """Utilities to discover scikit-learn objects."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import inspect
import pkgutil
from importlib import import_module
from operator import itemgetter
from pathlib import Path
# Sub-module names skipped while crawling the ``sklearn`` package with
# ``pkgutil.walk_packages``: test suites, vendored code, build helpers and
# modules whose import has side effects or exposes no public API members.
_MODULE_TO_IGNORE = {
    "tests",
    "externals",
    "setup",
    "conftest",
    "experimental",
    "estimator_checks",
}
def all_estimators(type_filter=None):
    """Get a list of all estimators from `sklearn`.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.

    Parameters
    ----------
    type_filter : {"classifier", "regressor", "cluster", "transformer"} \
            or list of such str, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.

    Raises
    ------
    ValueError
        If ``type_filter`` contains an entry other than the four supported
        estimator kinds.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_estimators
    >>> estimators = all_estimators()
    >>> type(estimators)
    <class 'list'>
    >>> type(estimators[0])
    <class 'tuple'>
    >>> estimators[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
    >>> classifiers = all_estimators(type_filter="classifier")
    >>> classifiers[:2]
    [('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
     ('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
    >>> regressors = all_estimators(type_filter="regressor")
    >>> regressors[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostRegressor',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
    >>> both = all_estimators(type_filter=["classifier", "regressor"])
    >>> both[:2]
    [('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
     ('AdaBoostClassifier',
      <class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
    """
    # lazy import to avoid circular imports from sklearn.base
    from sklearn.base import (
        BaseEstimator,
        ClassifierMixin,
        ClusterMixin,
        RegressorMixin,
        TransformerMixin,
    )
    from sklearn.utils._testing import ignore_warnings

    def is_abstract(c):
        # A class is considered abstract only when it still has unimplemented
        # abstract methods; a class merely deriving from ABC is concrete.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    root = str(Path(__file__).parent.parent)  # sklearn package
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            module_parts = module_name.split(".")
            # Skip ignored sub-packages and any private module ("._" anywhere
            # in the dotted path).
            if (
                any(part in _MODULE_TO_IGNORE for part in module_parts)
                or "._" in module_name
            ):
                continue
            module = import_module(module_name)
            classes = inspect.getmembers(module, inspect.isclass)
            # Keep only publicly named classes.
            classes = [
                (name, est_cls) for name, est_cls in classes if not name.startswith("_")
            ]
            all_classes.extend(classes)

    # The same class can be re-exported from several modules; deduplicate.
    all_classes = set(all_classes)

    # An estimator is any concrete public subclass of BaseEstimator (the base
    # class itself excluded).
    estimators = [
        c
        for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {
            "classifier": ClassifierMixin,
            "regressor": RegressorMixin,
            "transformer": TransformerMixin,
            "cluster": ClusterMixin,
        }
        # Remove each recognized kind from the (copied) filter list as it is
        # processed; anything left over afterwards is an invalid entry.
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend(
                    [est for est in estimators if issubclass(est[1], mixin)]
                )
        estimators = filtered_estimators
        if type_filter:
            raise ValueError(
                "Parameter type_filter must be 'classifier', "
                "'regressor', 'transformer', 'cluster' or "
                "None, got"
                f" {type_filter!r}."
            )

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
def all_displays():
    """Get a list of all displays from `sklearn`.

    Returns
    -------
    displays : list of tuples
        List of (name, class), where ``name`` is the display class name as
        string and ``class`` is the actual type of the class.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_displays
    >>> displays = all_displays()
    >>> displays[0]
    ('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
    """
    # lazy import to avoid circular imports from sklearn.base
    from sklearn.utils._testing import ignore_warnings

    root = str(Path(__file__).parent.parent)  # sklearn package
    found = []
    # Importing sklearn sub-modules can emit deprecation warnings; silence
    # them while crawling the package.
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            parts = module_name.split(".")
            # Skip ignored sub-packages and private modules.
            if "._" in module_name or any(p in _MODULE_TO_IGNORE for p in parts):
                continue
            module = import_module(module_name)
            for name, klass in inspect.getmembers(module, inspect.isclass):
                # Public display classes follow the ``*Display`` naming
                # convention.
                if name.endswith("Display") and not name.startswith("_"):
                    found.append((name, klass))
    # Deduplicate re-exports and sort by name for reproducibility.
    return sorted(set(found), key=itemgetter(0))
def _is_checked_function(item):
if not inspect.isfunction(item):
return False
if item.__name__.startswith("_"):
return False
mod = item.__module__
if not mod.startswith("sklearn.") or mod.endswith("estimator_checks"):
return False
return True
def all_functions():
    """Get a list of all functions from `sklearn`.

    Returns
    -------
    functions : list of tuples
        List of (name, function), where ``name`` is the function name as
        string and ``function`` is the actual function.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_functions
    >>> functions = all_functions()
    >>> name, function = functions[0]
    >>> name
    'accuracy_score'
    """
    # lazy import to avoid circular imports from sklearn.base
    from sklearn.utils._testing import ignore_warnings

    root = str(Path(__file__).parent.parent)  # sklearn package
    collected = []
    # Silence deprecation warnings raised while importing sub-modules.
    with ignore_warnings(category=FutureWarning):
        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            parts = module_name.split(".")
            # Skip ignored sub-packages and private modules.
            if any(p in _MODULE_TO_IGNORE for p in parts) or "._" in module_name:
                continue
            module = import_module(module_name)
            for name, func in inspect.getmembers(module, _is_checked_function):
                if name.startswith("_"):
                    continue
                # Record ``func.__name__`` rather than the attribute name the
                # function happens to be bound to, so aliases collapse to one
                # canonical entry during deduplication.
                collected.append((func.__name__, func))

    # drop duplicates, sort for reproducibility; itemgetter keeps the sort
    # from comparing the function objects themselves.
    return sorted(set(collected), key=itemgetter(0))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_arpack.py | sklearn/utils/_arpack.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.utils.validation import check_random_state
def _init_arpack_v0(size, random_state):
    """Initialize the starting vector for iteration in ARPACK functions.

    Initialize an ndarray with values sampled from the uniform distribution on
    [-1, 1]. This initialization model has been chosen to be consistent with
    the ARPACK one as another initialization can lead to convergence issues.

    Parameters
    ----------
    size : int
        The size of the eigenvalue vector to be initialized.

    random_state : int, RandomState instance or None, default=None
        The seed of the pseudo random number generator used to generate a
        uniform distribution. If int, random_state is the seed used by the
        random number generator; If RandomState instance, random_state is the
        random number generator; If None, the random number generator is the
        RandomState instance used by `np.random`.

    Returns
    -------
    v0 : ndarray of shape (size,)
        The initialized vector.
    """
    rng = check_random_state(random_state)
    return rng.uniform(-1, 1, size)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/extmath.py | sklearn/utils/extmath.py | """Utilities to perform optimal mathematical operations in scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import inspect
import warnings
from contextlib import nullcontext
from functools import partial
from numbers import Integral
import numpy as np
from scipy import linalg, sparse
from sklearn.utils._array_api import (
_average,
_is_numpy_namespace,
_max_precision_float_dtype,
_nanmean,
_nansum,
device,
get_namespace,
get_namespace_and_device,
)
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.deprecation import deprecated
from sklearn.utils.sparsefuncs import sparse_matmul_to_dense
from sklearn.utils.sparsefuncs_fast import csr_row_norms
from sklearn.utils.validation import check_array, check_random_state
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Faster than norm(x) ** 2.

    Parameters
    ----------
    x : array-like
        The input array which could be either be a vector or a 2 dimensional array.

    Returns
    -------
    float
        The Euclidean norm when x is a vector, the Frobenius norm when x
        is a matrix (2-d array).
    """
    flat = np.ravel(x, order="K")
    if np.issubdtype(flat.dtype, np.integer):
        # Integer inputs can silently overflow in the dot product below.
        warnings.warn(
            (
                "Array type is integer, np.dot may overflow. "
                "Data should be float type to avoid this issue"
            ),
            UserWarning,
        )
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.

    Performs no input validation.

    Parameters
    ----------
    X : array-like
        The input array.

    squared : bool, default=False
        If True, return squared norms.

    Returns
    -------
    array-like
        The row-wise (squared) Euclidean norm of X.
    """
    if sparse.issparse(X):
        # Dedicated Cython routine working directly on the CSR buffers.
        norms = csr_row_norms(X.tocsr())
        return norms if squared else np.sqrt(norms)

    xp, _ = get_namespace(X)
    if _is_numpy_namespace(xp):
        # einsum computes the row-wise dot products without an X-sized
        # temporary.
        X = np.asarray(X)
        norms = xp.asarray(np.einsum("ij,ij->i", X, X))
    else:
        norms = xp.sum(xp.multiply(X, X), axis=1)
    return norms if squared else xp.sqrt(norms)
def fast_logdet(A):
    """Compute logarithm of determinant of a square matrix.

    The (natural) logarithm of the determinant of a square matrix
    is returned if det(A) is non-negative and well defined.
    If the determinant is zero or negative returns -Inf.

    Equivalent to : np.log(np.det(A)) but more robust.

    Parameters
    ----------
    A : array_like of shape (n, n)
        The square matrix.

    Returns
    -------
    logdet : float
        When det(A) is strictly positive, log(det(A)) is returned.
        When det(A) is non-positive or not defined, then -inf is returned.

    See Also
    --------
    numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
        of an array.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import fast_logdet
    >>> a = np.array([[5, 1], [2, 8]])
    >>> fast_logdet(a)
    np.float64(3.6375861597263857)
    """
    xp, _ = get_namespace(A)
    sign, logdet = xp.linalg.slogdet(A)
    # A non-positive sign means det(A) <= 0 (or undefined): return -inf
    # rather than a complex or NaN logarithm.
    return logdet if sign > 0 else -xp.inf
def density(w):
    """Compute density of a sparse vector.

    Parameters
    ----------
    w : {ndarray, sparse matrix}
        The input data can be numpy ndarray or a sparse matrix.

    Returns
    -------
    float
        The density of w, between 0 and 1.

    Examples
    --------
    >>> from scipy import sparse
    >>> from sklearn.utils.extmath import density
    >>> X = sparse.random(10, 10, density=0.25, random_state=0)
    >>> density(X)
    0.25
    """
    if hasattr(w, "toarray"):
        # Sparse input: stored non-zeros over the full matrix size.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    if w is None:
        return 0
    # Dense input: fraction of non-zero entries.
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, *, dense_output=False):
    """Dot product that handle the sparse matrix case correctly.

    Parameters
    ----------
    a : {ndarray, sparse matrix}
        Left operand.

    b : {ndarray, sparse matrix}
        Right operand.

    dense_output : bool, default=False
        When False, ``a`` and ``b`` both being sparse will yield sparse output.
        When True, output will always be a dense array.

    Returns
    -------
    dot_product : {ndarray, sparse matrix}
        Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from sklearn.utils.extmath import safe_sparse_dot
    >>> X = csr_matrix([[1, 2], [3, 4], [5, 6]])
    >>> dot_product = safe_sparse_dot(X, X.T)
    >>> dot_product.toarray()
    array([[ 5, 11, 17],
           [11, 25, 39],
           [17, 39, 61]])
    """
    xp, _ = get_namespace(a, b)

    if a.ndim > 2 or b.ndim > 2:
        # Sparse matrices cannot participate directly in >2-dim products, so
        # the higher-dimensional operand is reshaped to 2D, multiplied, and
        # reshaped back.
        if sparse.issparse(a):
            # sparse is always 2D. Implies b is 3D+
            # [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n]
            b_ = np.rollaxis(b, -2)
            b_2d = b_.reshape((b.shape[-2], -1))
            ret = a @ b_2d
            ret = ret.reshape(a.shape[0], *b_.shape[1:])
        elif sparse.issparse(b):
            # sparse is always 2D. Implies a is 3D+
            # [k, ..., l, m] @ [i, j] -> [k, ..., l, j]
            a_2d = a.reshape(-1, a.shape[-1])
            ret = a_2d @ b
            ret = ret.reshape(*a.shape[:-1], b.shape[1])
        else:
            # Alternative for `np.dot` when dealing with a or b having
            # more than 2 dimensions, that works with the array api.
            # If b is 1-dim then the last axis for b is taken otherwise
            # if b is >= 2-dim then the second to last axis is taken.
            b_axis = -1 if b.ndim == 1 else -2
            ret = xp.tensordot(a, b, axes=[-1, b_axis])
    elif (
        dense_output
        and a.ndim == 2
        and b.ndim == 2
        and a.dtype in (np.float32, np.float64)
        and b.dtype in (np.float32, np.float64)
        and (sparse.issparse(a) and a.format in ("csc", "csr"))
        and (sparse.issparse(b) and b.format in ("csc", "csr"))
    ):
        # Use dedicated fast method for dense_C = sparse_A @ sparse_B
        return sparse_matmul_to_dense(a, b)
    else:
        ret = a @ b

    # A sparse @ sparse product yields a sparse result; densify it here when
    # the caller asked for dense output and the fast path above did not apply.
    if (
        sparse.issparse(a)
        and sparse.issparse(b)
        and dense_output
        and hasattr(ret, "toarray")
    ):
        return ret.toarray()
    return ret
def randomized_range_finder(
    A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
    """Compute an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data matrix.

    size : int
        Size of the return array.

    n_iter : int
        Number of power iterations used to stabilize the result.

    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

        .. versionadded:: 0.18

    random_state : int, RandomState instance or None, default=None
        The seed of the pseudo random number generator to use when shuffling
        the data, i.e. getting the random vectors to initialize the algorithm.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    Q : ndarray of shape (size, size)
        A projection matrix, the range of which approximates well the range of the
        input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of
    :arxiv:`"Finding structure with randomness:
    Stochastic algorithms for constructing approximate matrix decompositions"
    <0909.4061>`
    Halko, et al. (2009)

    An implementation of a randomized algorithm for principal component
    analysis
    A. Szlam et al. 2014

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import randomized_range_finder
    >>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> randomized_range_finder(A, size=2, n_iter=2, random_state=42)
    array([[-0.214,  0.887],
           [-0.521,  0.249],
           [-0.826, -0.388]])
    """
    # Validate the input once, then delegate to the unchecked implementation.
    A_validated = check_array(A, accept_sparse=True)
    return _randomized_range_finder(
        A_validated,
        size=size,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        random_state=random_state,
    )
def _randomized_range_finder(
    A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
    """Body of randomized_range_finder without input validation.

    See :func:`randomized_range_finder` for the parameter and return
    documentation. ``A`` must already be a validated 2D array (or sparse
    matrix, or array-API array).
    """
    xp, is_array_api_compliant = get_namespace(A)
    random_state = check_random_state(random_state)

    # Generating normal random vectors with shape: (A.shape[1], size)
    # XXX: generate random number directly from xp if it's possible
    # one day.
    Q = xp.asarray(random_state.normal(size=(A.shape[1], size)))
    if hasattr(A, "dtype") and xp.isdtype(A.dtype, kind="real floating"):
        # Use float32 computation and components if A has a float32 dtype.
        Q = xp.astype(Q, A.dtype, copy=False)

    # Move Q to device if needed only after converting to float32 if needed to
    # avoid allocating unnecessary memory on the device.

    # Note: we cannot combine the astype and to_device operations in one go
    # using xp.asarray(..., dtype=dtype, device=device) because downcasting
    # from float64 to float32 in asarray might not always be accepted as only
    # casts following type promotion rules are guaranteed to work.
    # https://github.com/data-apis/array-api/issues/647
    if is_array_api_compliant:
        Q = xp.asarray(Q, device=device(A))

    # Resolve the "auto" normalization mode before selecting the normalizer.
    if power_iteration_normalizer == "auto":
        if n_iter <= 2:
            # Few iterations: numerical drift is small enough to skip
            # normalization entirely.
            power_iteration_normalizer = "none"
        elif is_array_api_compliant:
            # XXX: https://github.com/data-apis/array-api/issues/627
            warnings.warn(
                "Array API does not support LU factorization, falling back to QR"
                " instead. Set `power_iteration_normalizer='QR'` explicitly to silence"
                " this warning."
            )
            power_iteration_normalizer = "QR"
        else:
            power_iteration_normalizer = "LU"
    elif power_iteration_normalizer == "LU" and is_array_api_compliant:
        raise ValueError(
            "Array API does not support LU factorization. Set "
            "`power_iteration_normalizer='QR'` instead."
        )

    if is_array_api_compliant:
        qr_normalizer = partial(xp.linalg.qr, mode="reduced")
    else:
        # Use scipy.linalg instead of numpy.linalg when not explicitly
        # using the Array API.
        qr_normalizer = partial(linalg.qr, mode="economic", check_finite=False)

    if power_iteration_normalizer == "QR":
        normalizer = qr_normalizer
    elif power_iteration_normalizer == "LU":
        normalizer = partial(linalg.lu, permute_l=True, check_finite=False)
    else:
        # "none": identity normalizer matching the (Q, extra) return shape.
        normalizer = lambda x: (x, None)

    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q
    for _ in range(n_iter):
        Q, _ = normalizer(A @ Q)
        Q, _ = normalizer(A.T @ Q)

    # Sample the range of A by linear projection of Q.
    # Extract an orthonormal basis (always via QR, whatever the power
    # iteration normalizer was).
    Q, _ = qr_normalizer(A @ Q)

    return Q
@validate_params(
    {
        "M": ["array-like", "sparse matrix"],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "n_oversamples": [Interval(Integral, 0, None, closed="left")],
        "n_iter": [Interval(Integral, 0, None, closed="left"), StrOptions({"auto"})],
        "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
        "transpose": ["boolean", StrOptions({"auto"})],
        "flip_sign": ["boolean"],
        "random_state": ["random_state"],
        "svd_lapack_driver": [StrOptions({"gesdd", "gesvd"})],
    },
    prefer_skip_nested_validation=True,
)
def randomized_svd(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    transpose="auto",
    flip_sign=True,
    random_state=None,
    svd_lapack_driver="gesdd",
):
    """Compute a truncated randomized SVD.

    This method solves the fixed-rank approximation problem described in [1]_
    (problem (1.5), p5).

    Refer to
    :ref:`sphx_glr_auto_examples_applications_wikipedia_principal_eigenvector.py`
    for a typical example where the power iteration algorithm is used to rank web pages.
    This algorithm is also known to be used as a building block in Google's PageRank
    algorithm.

    Parameters
    ----------
    M : {array-like, sparse matrix} of shape (n_samples, n_features)
        Matrix to decompose.

    n_components : int
        Number of singular values and vectors to extract.

    n_oversamples : int, default=10
        Additional number of random vectors to sample the range of `M` so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of `M` is `n_components + n_oversamples`. Smaller
        number can improve speed but can negatively impact the quality of
        approximation of singular vectors and singular values. Users might wish
        to increase this parameter up to `2*k - n_components` where k is the
        effective rank, for large matrices, noisy problems, matrices with
        slowly decaying spectrums, or to increase precision accuracy. See [1]_
        (pages 5, 23 and 26).

    n_iter : int or 'auto', default='auto'
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)) in which case `n_iter` is set to 7.
        This improves precision with few components. Note that in general
        users should rather increase `n_oversamples` before increasing `n_iter`
        as the principle of the randomized method is to avoid usage of these
        more costly power iterations steps. When `n_components` is equal
        or greater to the effective matrix rank and the spectrum does not
        present a slow decay, `n_iter=0` or `1` should even work fine in theory
        (see [1]_ page 9).

        .. versionchanged:: 0.18

    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

        .. versionadded:: 0.18

    transpose : bool or 'auto', default='auto'
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.

        .. versionchanged:: 0.18

    flip_sign : bool, default=True
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.

    random_state : int, RandomState instance or None, default=None
        The seed of the pseudo random number generator to use when
        shuffling the data, i.e. getting the random vectors to initialize
        the algorithm. Pass an int for reproducible results across multiple
        function calls. See :term:`Glossary <random_state>`.

        .. versionchanged:: 1.2
            The default value changed from 0 to None.

    svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
        Whether to use the more efficient divide-and-conquer approach
        (`"gesdd"`) or more general rectangular approach (`"gesvd"`) to compute
        the SVD of the matrix B, which is the projection of M into a low
        dimensional subspace, as described in [1]_.

        .. versionadded:: 1.2

    Returns
    -------
    u : ndarray of shape (n_samples, n_components)
        Unitary matrix having left singular vectors with signs flipped as columns.

    s : ndarray of shape (n_components,)
        The singular values, sorted in non-increasing order.

    vh : ndarray of shape (n_components, n_features)
        Unitary matrix having right singular vectors with signs flipped as rows.

    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components. In order to
    obtain further speed up, `n_iter` can be set <=2 (at the cost of
    loss of precision). To increase the precision it is recommended to
    increase `n_oversamples`, up to `2*k-n_components` where k is the
    effective rank. Usually, `n_components` is chosen to be greater than k
    so increasing `n_oversamples` up to `n_components` should be enough.

    References
    ----------
    .. [1] :arxiv:`"Finding structure with randomness:
      Stochastic algorithms for constructing approximate matrix decompositions"
      <0909.4061>`
      Halko, et al. (2009)

    .. [2] `"A randomized algorithm for the decomposition of matrices"
      <https://doi.org/10.1016/j.acha.2010.02.003>`_
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert (2011)

    .. [3] :arxiv:`"An implementation of a randomized algorithm for principal
      component analysis" <1412.3510>` A. Szlam et al. (2014)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import randomized_svd
    >>> a = np.array([[1, 2, 3, 5],
    ...               [3, 4, 5, 6],
    ...               [7, 8, 9, 10]])
    >>> U, s, Vh = randomized_svd(a, n_components=2, random_state=0)
    >>> U.shape, s.shape, Vh.shape
    ((3, 2), (2,), (2, 4))
    """
    # Validate once here; `_randomized_svd` is the unvalidated work-horse also
    # used internally by other estimators.
    M = check_array(M, accept_sparse=True)
    return _randomized_svd(
        M,
        n_components=n_components,
        n_oversamples=n_oversamples,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        transpose=transpose,
        flip_sign=flip_sign,
        random_state=random_state,
        svd_lapack_driver=svd_lapack_driver,
    )
def _randomized_svd(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    transpose="auto",
    flip_sign=True,
    random_state=None,
    svd_lapack_driver="gesdd",
):
    """Body of randomized_svd without input validation."""
    xp, is_array_api_compliant = get_namespace(M)

    if sparse.issparse(M) and M.format in ("lil", "dok"):
        warnings.warn(
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(type(M).__name__),
            sparse.SparseEfficiencyWarning,
        )

    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape

    if n_iter == "auto":
        # 7 iterations were found to be a good compromise for PCA when few
        # components are requested, 4 otherwise. See issue #5299.
        n_iter = 7 if n_components < 0.1 * min(M.shape) else 4

    if transpose == "auto":
        transpose = n_samples < n_features
    if transpose:
        # This implementation is a bit faster with smaller shape[1].
        M = M.T

    Q = _randomized_range_finder(
        M,
        size=n_random,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        random_state=random_state,
    )

    # Project M onto the (k + p)-dimensional subspace spanned by Q's columns.
    B = Q.T @ M

    # Compute the SVD on the thin projected matrix, only (k + p) rows tall.
    if is_array_api_compliant:
        Uhat, s, Vt = xp.linalg.svd(B, full_matrices=False)
    else:
        # When array_api_dispatch is disabled, rely on scipy.linalg instead of
        # numpy.linalg to avoid introducing a behavior change w.r.t. previous
        # versions of scikit-learn.
        Uhat, s, Vt = linalg.svd(
            B, full_matrices=False, lapack_driver=svd_lapack_driver
        )
    del B

    # Lift the left singular vectors back into the original space.
    U = Q @ Uhat

    if flip_sign:
        # When M was transposed the original rows live in Vt, so the sign
        # decision must be based on v instead of u (and vice versa).
        U, Vt = svd_flip(U, Vt, u_based_decision=not transpose)

    if transpose:
        # Transpose back the results according to the input convention.
        return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
    return U[:, :n_components], s[:n_components], Vt[:n_components, :]
def _randomized_eigsh(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    selection="module",
    random_state=None,
):
    """Computes a truncated eigendecomposition using randomized methods

    This method solves the fixed-rank approximation problem described in the
    Halko et al paper.

    The choice of which components to select can be tuned with the `selection`
    parameter.

    .. versionadded:: 0.24

    Parameters
    ----------
    M : ndarray or sparse matrix
        Matrix to decompose, it should be real symmetric square or complex
        hermitian

    n_components : int
        Number of eigenvalues and vectors to extract.

    n_oversamples : int, default=10
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples. Smaller
        number can improve speed but can negatively impact the quality of
        approximation of eigenvectors and eigenvalues. Users might wish
        to increase this parameter up to `2*k - n_components` where k is the
        effective rank, for large matrices, noisy problems, matrices with
        slowly decaying spectrums, or to increase precision accuracy. See Halko
        et al (pages 5, 23 and 26).

    n_iter : int or 'auto', default='auto'
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)) in which case `n_iter` is set to 7.
        This improves precision with few components. Note that in general
        users should rather increase `n_oversamples` before increasing `n_iter`
        as the principle of the randomized method is to avoid usage of these
        more costly power iterations steps. When `n_components` is equal
        or greater to the effective matrix rank and the spectrum does not
        present a slow decay, `n_iter=0` or `1` should even work fine in theory
        (see Halko et al paper, page 9).

    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

    selection : {'value', 'module'}, default='module'
        Strategy used to select the n components. When `selection` is `'value'`
        (not yet implemented, will become the default when implemented), the
        components corresponding to the n largest eigenvalues are returned.
        When `selection` is `'module'`, the components corresponding to the n
        eigenvalues with largest modules are returned.

    random_state : int, RandomState instance, default=None
        The seed of the pseudo random number generator to use when shuffling
        the data, i.e. getting the random vectors to initialize the algorithm.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    eigendecomposition using randomized methods to speed up the computations.

    This method is particularly fast on large matrices on which
    you wish to extract only a small number of components. In order to
    obtain further speed up, `n_iter` can be set <=2 (at the cost of
    loss of precision). To increase the precision it is recommended to
    increase `n_oversamples`, up to `2*k-n_components` where k is the
    effective rank. Usually, `n_components` is chosen to be greater than k
    so increasing `n_oversamples` up to `n_components` should be enough.

    Strategy 'value': not implemented yet.
    Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
    candidates for a future implementation.

    Strategy 'module':
    The principle is that for diagonalizable matrices, the singular values and
    eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
    singular value of A. This method relies on a randomized SVD to find the n
    singular components corresponding to the n singular values with largest
    modules, and then uses the signs of the singular vectors to find the true
    sign of t: if the sign of left and right singular vectors are different
    then the corresponding eigenvalue is negative.

    Returns
    -------
    eigvals : 1D array of shape (n_components,) containing the `n_components`
        eigenvalues selected (see ``selection`` parameter).
    eigvecs : 2D array of shape (M.shape[0], n_components) containing the
        `n_components` eigenvectors corresponding to the `eigvals`, in the
        corresponding order. Note that this follows the `scipy.linalg.eigh`
        convention.

    See Also
    --------
    :func:`randomized_svd`

    References
    ----------
    * :arxiv:`"Finding structure with randomness:
      Stochastic algorithms for constructing approximate matrix decompositions"
      (Algorithm 4.3 for strategy 'module') <0909.4061>`
      Halko, et al. (2009)
    """
    if selection == "value":  # pragma: no cover
        # to do : an algorithm can be found in the Halko et al reference
        raise NotImplementedError()
    if selection != "module":  # pragma: no cover
        raise ValueError("Invalid `selection`: %r" % selection)

    # Deterministic U and Vt (flip_sign=True) are not needed here: only the
    # products of matching columns of U and Vt are used below, and those are
    # invariant under a simultaneous sign flip.
    U, S, Vt = randomized_svd(
        M,
        n_components=n_components,
        n_oversamples=n_oversamples,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        flip_sign=False,
        random_state=random_state,
    )

    eigvecs = U[:, :n_components]
    eigvals = S[:n_components]

    # Conversion of singular values into eigenvalues: for any eigenvalue t,
    # the corresponding singular value is |t|; a negative t makes the left (U)
    # and right (V) singular vectors point in opposite directions, which the
    # diagonal of Vt @ U reveals.
    # Fastest way: see <https://stackoverflow.com/a/61974002/7262247>
    diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components])
    eigvals = eigvals * np.sign(diag_VtU)

    return eigvals, eigvecs
def weighted_mode(a, w, *, axis=0):
    """Return an array of the weighted modal (most common) value in the passed array.

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.

    This is an extension of the algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array-like of shape (n_samples,)
        Array of which values to find mode(s).

    w : array-like of shape (n_samples,)
        Array of weights for each value.

    axis : int, default=0
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.

    score : ndarray
        Array of weighted counts for each mode.

    See Also
    --------
    scipy.stats.mode: Calculates the Modal (most common) value of array elements
        along specified axis.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([4.]), array([3.]))

    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.

    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([2.]), array([3.5]))

    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
    """
    if axis is None:
        # Flatten both arrays and reduce over the single remaining axis.
        a, w = np.ravel(a), np.ravel(w)
        axis = 0
    else:
        a, w = np.asarray(a), np.asarray(w)
        if a.shape != w.shape:
            # Broadcast scalar / mismatched weights to the shape of `a`.
            w = np.full(a.shape, w, dtype=w.dtype)

    candidate_values = np.unique(np.ravel(a))  # get ALL unique values
    result_shape = list(a.shape)
    result_shape[axis] = 1
    best_values = np.zeros(result_shape)
    best_scores = np.zeros(result_shape)

    for value in candidate_values:
        # Weighted count of `value` along `axis`: zero out all other entries.
        masked_weights = np.zeros(a.shape)
        hits = a == value
        masked_weights[hits] = w[hits]
        scores = np.expand_dims(np.sum(masked_weights, axis), axis)
        # Strict `>` keeps the first (smallest) value on ties.
        best_values = np.where(scores > best_scores, value, best_values)
        best_scores = np.maximum(scores, best_scores)

    return best_values, best_scores
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.

    out : ndarray of shape (M, len(arrays)), default=None
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray of shape (M, len(arrays))
        Array containing the cartesian products formed of input arrays.
        If not provided, the `dtype` of the output array is set to the most
        permissive `dtype` of the input arrays, according to NumPy type
        promotion.

        .. versionadded:: 1.2
           Add support for arrays of different types.

    Notes
    -----
    This function may not be used on more than 32 arrays
    because the underlying numpy functions do not support it.

    Examples
    --------
    >>> from sklearn.utils.extmath import cartesian
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    shape = (len(x) for x in arrays)

    # `ix` enumerates every combination of per-array indices, one row per
    # element of the product, one column per input array.
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T

    if out is None:
        dtype = np.result_type(*arrays)  # find the most permissive dtype
        out = np.empty_like(ix, dtype=dtype)

    # Use the enumerated array directly instead of re-indexing `arrays[n]`.
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]

    return out
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_set_output.py | sklearn/utils/_set_output.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import importlib
from functools import wraps
from typing import Protocol, runtime_checkable
import numpy as np
from scipy.sparse import issparse
from sklearn._config import get_config
from sklearn.utils._available_if import available_if
def check_library_installed(library):
    """Import the module named `library`, erroring helpfully when missing."""
    try:
        module = importlib.import_module(library)
    except ImportError as exc:
        message = (
            f"Setting output container to '{library}' requires {library} to be"
            " installed"
        )
        raise ImportError(message) from exc
    return module
def get_columns(columns):
    """Resolve `columns`, invoking it first when it is a callable.

    A callable that raises yields `None`, i.e. "no column names available".
    """
    if not callable(columns):
        return columns
    try:
        return columns()
    except Exception:
        # Best-effort lookup: any failure means we simply have no names.
        return None
@runtime_checkable
class ContainerAdapterProtocol(Protocol):
    """Structural interface implemented by dataframe-library adapters.

    An adapter knows how to create, recognize, rename and horizontally stack
    the dataframe containers of a single library (e.g. pandas or polars).
    """

    # Name of the dataframe library handled by the adapter, e.g. "pandas".
    container_lib: str

    def create_container(self, X_output, X_original, columns, inplace=False):
        """Create container from `X_output` with additional metadata.

        Parameters
        ----------
        X_output : {ndarray, dataframe}
            Data to wrap.

        X_original : {ndarray, dataframe}
            Original input dataframe. This is used to extract the metadata that should
            be passed to `X_output`, e.g. pandas row index.

        columns : callable, ndarray, or None
            The column names or a callable that returns the column names. The
            callable is useful if the column names require some computation. If `None`,
            then no columns are passed to the container's constructor.

        inplace : bool, default=False
            Whether or not we intend to modify `X_output` in-place. However, it does
            not guarantee that we return the same object if the in-place operation
            is not possible.

        Returns
        -------
        wrapped_output : container_type
            `X_output` wrapped into the container type.
        """

    def is_supported_container(self, X):
        """Return True if X is a supported container.

        Parameters
        ----------
        X : container
            Container to be checked.

        Returns
        -------
        is_supported_container : bool
            True if X is a supported container.
        """

    def rename_columns(self, X, columns):
        """Rename columns in `X`.

        Parameters
        ----------
        X : container
            Container which columns is updated.

        columns : ndarray of str
            Columns to update the `X`'s columns with.

        Returns
        -------
        updated_container : container
            Container with new names.
        """

    def hstack(self, Xs):
        """Stack containers horizontally (column-wise).

        Parameters
        ----------
        Xs : list of containers
            List of containers to stack.

        Returns
        -------
        stacked_Xs : container
            Stacked containers.
        """
class PandasAdapter:
    """Container adapter producing and manipulating pandas DataFrames."""

    container_lib = "pandas"

    def create_container(self, X_output, X_original, columns, inplace=True):
        """Wrap `X_output` in a DataFrame, reusing metadata from the inputs."""
        pd = check_library_installed("pandas")
        columns = get_columns(columns)

        if not inplace or not isinstance(X_output, pd.DataFrame):
            # In all these cases, we need to create a new DataFrame.
            # The row index comes from the output when it is already a
            # DataFrame, otherwise from the original input. We cannot use
            # `getattr(container, "index")` here because plain lists also
            # expose an `index` attribute (the method).
            if isinstance(X_output, pd.DataFrame):
                index = X_output.index
            elif isinstance(X_original, (pd.DataFrame, pd.Series)):
                index = X_original.index
            else:
                index = None
            # `columns` is deliberately not passed to the constructor: there
            # it would mean column *selection* rather than renaming.
            X_output = pd.DataFrame(X_output, index=index, copy=not inplace)

        if columns is None:
            return X_output
        return self.rename_columns(X_output, columns)

    def is_supported_container(self, X):
        """Return True when `X` is a pandas DataFrame."""
        pd = check_library_installed("pandas")
        return isinstance(X, pd.DataFrame)

    def rename_columns(self, X, columns):
        """Overwrite the column labels of `X` in place and return it."""
        # Direct assignment rather than `DataFrame.rename` (which takes a
        # dictionary) because `X` may still hold duplicate column names here.
        X.columns = columns
        return X

    def hstack(self, Xs):
        """Concatenate the given DataFrames column-wise."""
        pd = check_library_installed("pandas")
        return pd.concat(Xs, axis=1)
class PolarsAdapter:
    """Container adapter producing and manipulating polars DataFrames."""

    container_lib = "polars"

    def create_container(self, X_output, X_original, columns, inplace=True):
        """Wrap `X_output` in a polars DataFrame with the requested columns."""
        pl = check_library_installed("polars")
        columns = get_columns(columns)
        if isinstance(columns, np.ndarray):
            # polars expects a plain Python list as schema.
            columns = columns.tolist()

        if not inplace or not isinstance(X_output, pl.DataFrame):
            # In all these cases, we need to create a new DataFrame.
            return pl.DataFrame(X_output, schema=columns, orient="row")

        if columns is None:
            return X_output
        return self.rename_columns(X_output, columns)

    def is_supported_container(self, X):
        """Return True when `X` is a polars DataFrame."""
        pl = check_library_installed("polars")
        return isinstance(X, pl.DataFrame)

    def rename_columns(self, X, columns):
        """Overwrite the column labels of `X` in place and return it."""
        # Direct assignment rather than `rename` (which takes a dictionary)
        # because `X` may still hold duplicate column names here.
        X.columns = columns
        return X

    def hstack(self, Xs):
        """Concatenate the given DataFrames column-wise."""
        pl = check_library_installed("polars")
        return pl.concat(Xs, how="horizontal")
class ContainerAdaptersManager:
    """Registry mapping a dataframe-library name to its container adapter."""

    def __init__(self):
        # Maps `container_lib` names (e.g. "pandas") to adapter instances.
        self.adapters = {}

    @property
    def supported_outputs(self):
        """Values accepted by `set_output`: "default" plus registered libs."""
        return {"default", *self.adapters}

    def register(self, adapter):
        """Make `adapter` available under its `container_lib` name."""
        self.adapters[adapter.container_lib] = adapter
# Module-level registry shared across scikit-learn; pandas and polars are the
# dataframe libraries supported out of the box.
ADAPTERS_MANAGER = ContainerAdaptersManager()
ADAPTERS_MANAGER.register(PandasAdapter())
ADAPTERS_MANAGER.register(PolarsAdapter())
def _get_adapter_from_container(container):
    """Get the adapter that knows how to handle such container.

    See :class:`sklearn.utils._set_output.ContainerAdapterProtocol` for more
    details.
    """
    # The first component of the class's module path names the library that
    # defines the container (e.g. "pandas" for pandas.core.frame.DataFrame).
    library = container.__class__.__module__.split(".")[0]
    try:
        return ADAPTERS_MANAGER.adapters[library]
    except KeyError as exc:
        registered = list(ADAPTERS_MANAGER.adapters.keys())
        raise ValueError(
            "The container does not have a registered adapter in scikit-learn. "
            f"Available adapters are: {registered} while the container "
            f"provided is: {container!r}."
        ) from exc
def _get_container_adapter(method, estimator=None):
    """Return the adapter for `method`'s configured dense output, or None."""
    dense_config = _get_output_config(method, estimator)["dense"]
    # "default" has no adapter registered, so `.get` yields None for it.
    return ADAPTERS_MANAGER.adapters.get(dense_config)
def _get_output_config(method, estimator=None):
    """Get output config based on estimator and global configuration.

    Parameters
    ----------
    method : {"transform"}
        Estimator's method for which the output container is looked up.

    estimator : estimator instance or None
        Estimator to get the output configuration from. If `None`, check global
        configuration is used.

    Returns
    -------
    config : dict
        Dictionary with keys:

        - "dense": specifies the dense container for `method`. This can be
          `"default"` or `"pandas"`.
    """
    # A per-estimator setting (made via `set_output`) takes precedence over
    # the global configuration.
    local_config = getattr(estimator, "_sklearn_output_config", {})
    dense_config = (
        local_config[method]
        if method in local_config
        else get_config()[f"{method}_output"]
    )

    supported_outputs = ADAPTERS_MANAGER.supported_outputs
    if dense_config not in supported_outputs:
        raise ValueError(
            f"output config must be in {sorted(supported_outputs)}, got {dense_config}"
        )
    return {"dense": dense_config}
def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):
    """Wrap output with container based on an estimator's or global config.

    Parameters
    ----------
    method : {"transform"}
        Estimator's method to get container output for.

    data_to_wrap : {ndarray, dataframe}
        Data to wrap with container.

    original_input : {ndarray, dataframe}
        Original input of function.

    estimator : estimator instance
        Estimator with to get the output configuration from.

    Returns
    -------
    output : {ndarray, dataframe}
        If the output config is "default" or the estimator is not configured
        for wrapping return `data_to_wrap` unchanged.
        If the output config is "pandas", return `data_to_wrap` as a pandas
        DataFrame.
    """
    dense_config = _get_output_config(method, estimator)["dense"]
    if dense_config == "default" or not _auto_wrap_is_configured(estimator):
        return data_to_wrap

    if issparse(data_to_wrap):
        raise ValueError(
            "The transformer outputs a scipy sparse matrix. "
            "Try to set the transformer output to a dense array or disable "
            f"{dense_config.capitalize()} output with set_output(transform='default')."
        )

    # Column names are passed as the (possibly expensive) callable so that
    # they are only computed when the adapter actually needs them.
    adapter = ADAPTERS_MANAGER.adapters[dense_config]
    return adapter.create_container(
        data_to_wrap,
        original_input,
        columns=estimator.get_feature_names_out,
    )
def _wrap_method_output(f, method):
    """Wrapper used by `_SetOutputMixin` to automatically wrap methods."""

    @wraps(f)
    def wrapped(self, X, *args, **kwargs):
        raw_output = f(self, X, *args, **kwargs)
        if not isinstance(raw_output, tuple):
            return _wrap_data_with_container(method, raw_output, X, self)

        # Only wrap the first output (cross decomposition returns tuples).
        return_tuple = (
            _wrap_data_with_container(method, raw_output[0], X, self),
            *raw_output[1:],
        )
        # Support for namedtuples: `_make` is a documented API for namedtuples:
        # https://docs.python.org/3/library/collections.html#collections.somenamedtuple._make
        if hasattr(type(raw_output), "_make"):
            return type(raw_output)._make(return_tuple)
        return return_tuple

    return wrapped
def _auto_wrap_is_configured(estimator):
    """Return True if estimator is configured for auto-wrapping the transform method.

    `_SetOutputMixin` sets `_sklearn_auto_wrap_output_keys` to `set()` if auto
    wrapping is manually disabled.
    """
    if not hasattr(estimator, "get_feature_names_out"):
        return False
    auto_wrap_keys = getattr(estimator, "_sklearn_auto_wrap_output_keys", set())
    return "transform" in auto_wrap_keys
class _SetOutputMixin:
    """Mixin that dynamically wraps methods to return container based on config.

    Currently `_SetOutputMixin` wraps `transform` and `fit_transform` and configures
    it based on `set_output` of the global configuration.

    `set_output` is only defined if `get_feature_names_out` is defined and
    `auto_wrap_output_keys` is the default value.
    """

    def __init_subclass__(cls, auto_wrap_output_keys=("transform",), **kwargs):
        # Runs once per subclass definition (not per instance): this is where
        # `transform`/`fit_transform` get wrapped so their output can be
        # converted to the configured container.
        super().__init_subclass__(**kwargs)

        # Dynamically wraps `transform` and `fit_transform` and configure it's
        # output based on `set_output`.
        if not (
            isinstance(auto_wrap_output_keys, tuple) or auto_wrap_output_keys is None
        ):
            raise ValueError("auto_wrap_output_keys must be None or a tuple of keys.")

        # `auto_wrap_output_keys=None` disables auto-wrapping entirely; the
        # empty key set also makes `set_output` unavailable via its
        # `available_if(_auto_wrap_is_configured)` guard.
        if auto_wrap_output_keys is None:
            cls._sklearn_auto_wrap_output_keys = set()
            return

        # Mapping from method to key in configurations
        method_to_key = {
            "transform": "transform",
            "fit_transform": "transform",
        }
        cls._sklearn_auto_wrap_output_keys = set()

        for method, key in method_to_key.items():
            if not hasattr(cls, method) or key not in auto_wrap_output_keys:
                continue
            cls._sklearn_auto_wrap_output_keys.add(key)

            # Only wrap methods defined by cls itself: inherited methods were
            # already wrapped when the parent class was created, so wrapping
            # them again here would double-wrap.
            if method not in cls.__dict__:
                continue
            wrapped_method = _wrap_method_output(getattr(cls, method), key)
            setattr(cls, method, wrapped_method)

    @available_if(_auto_wrap_is_configured)
    def set_output(self, *, transform=None):
        """Set output container.

        See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
        for an example on how to use the API.

        Parameters
        ----------
        transform : {"default", "pandas", "polars"}, default=None
            Configure output of `transform` and `fit_transform`.

            - `"default"`: Default output format of a transformer
            - `"pandas"`: DataFrame output
            - `"polars"`: Polars output
            - `None`: Transform configuration is unchanged

            .. versionadded:: 1.4
                `"polars"` option was added.

        Returns
        -------
        self : estimator instance
            Estimator instance.
        """
        if transform is None:
            return self

        # Lazily create the per-instance config so the global configuration
        # keeps applying until `set_output` is explicitly called.
        if not hasattr(self, "_sklearn_output_config"):
            self._sklearn_output_config = {}

        self._sklearn_output_config["transform"] = transform
        return self
def _safe_set_output(estimator, *, transform=None):
    """Safely call estimator.set_output and error if it not available.

    This is used by meta-estimators to set the output for child estimators.

    Parameters
    ----------
    estimator : estimator instance
        Estimator instance.

    transform : {"default", "pandas", "polars"}, default=None
        Configure output of the following estimator's methods:

        - `"transform"`
        - `"fit_transform"`

        If `None`, this operation is a no-op.

    Returns
    -------
    estimator : estimator instance
        Estimator instance.
    """
    can_transform = hasattr(estimator, "transform") or (
        hasattr(estimator, "fit_transform") and transform is not None
    )
    if not can_transform:
        # An estimator that cannot transform never needs `set_output`.
        return None

    if not hasattr(estimator, "set_output"):
        raise ValueError(
            f"Unable to configure output for {estimator} because `set_output` "
            "is not available."
        )
    return estimator.set_output(transform=transform)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/metaestimators.py | sklearn/utils/metaestimators.py | """Utilities for meta-estimators."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
from contextlib import suppress
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import _safe_indexing
from sklearn.utils._available_if import available_if
from sklearn.utils._tags import get_tags
__all__ = ["available_if"]
class _BaseComposition(BaseEstimator, metaclass=ABCMeta):
    """Base class for estimators that are composed of named sub-estimators.

    This abstract class provides parameter management functionality for
    meta-estimators that contain collections of named estimators. It handles
    the complex logic for getting and setting parameters on nested estimators
    using the "estimator_name__parameter" syntax.

    The class is designed to work with any attribute containing a list of
    (name, estimator) tuples.
    """

    @abstractmethod
    def __init__(self):
        pass

    def _get_params(self, attr, deep=True):
        """Return own parameters plus, when `deep=True`, the parameters of the
        (name, estimator) tuples stored under the attribute named `attr`."""
        out = super().get_params(deep=deep)
        if not deep:
            return out

        estimators = getattr(self, attr)
        try:
            out.update(estimators)
        except (TypeError, ValueError):
            # Ignore TypeError for cases where estimators is not a list of
            # (name, estimator) and ignore ValueError when the list is not
            # formatted correctly. This is to prevent errors when calling
            # `set_params`. `BaseEstimator.set_params` calls `get_params` which
            # can error for invalid values for `estimators`.
            return out

        for name, estimator in estimators:
            if hasattr(estimator, "get_params"):
                for key, value in estimator.get_params(deep=True).items():
                    out["%s__%s" % (name, key)] = value
        return out

    def _set_params(self, attr, **params):
        """Set parameters: the whole `attr` list, individual named estimators,
        or nested `name__param` values, in that order."""
        # Ensure strict ordering of parameter setting:
        # 1. Replace the entire estimators collection
        if attr in params:
            setattr(self, attr, params.pop(attr))
        # 2. Replace individual estimators by name
        items = getattr(self, attr)
        if isinstance(items, list) and items:
            # Get item names used to identify valid names in params
            # `zip` raises a TypeError when `items` does not contains
            # elements of length 2
            with suppress(TypeError):
                item_names, _ = zip(*items)
                for name in list(params.keys()):
                    if "__" not in name and name in item_names:
                        self._replace_estimator(attr, name, params.pop(name))
        # 3. Individual estimator parameters and other initialisation arguments
        super().set_params(**params)
        return self

    def _replace_estimator(self, attr, name, new_val):
        """Swap the estimator stored under `name` in the `attr` list."""
        # assumes `name` is a valid estimator name
        new_estimators = list(getattr(self, attr))
        for i, (estimator_name, _) in enumerate(new_estimators):
            if estimator_name == name:
                new_estimators[i] = (name, new_val)
                break
        setattr(self, attr, new_estimators)

    def _validate_names(self, names):
        """Raise ValueError for duplicate names, names clashing with
        constructor arguments, or names containing "__"."""
        if len(set(names)) != len(names):
            raise ValueError("Names provided are not unique: {0!r}".format(list(names)))
        invalid_names = set(names).intersection(self.get_params(deep=False))
        if invalid_names:
            raise ValueError(
                "Estimator names conflict with constructor arguments: {0!r}".format(
                    sorted(invalid_names)
                )
            )
        invalid_names = [name for name in names if "__" in name]
        if invalid_names:
            raise ValueError(
                "Estimator names must not contain __: got {0!r}".format(invalid_names)
            )

    def _check_estimators_are_instances(self, estimators):
        """Raise TypeError if any element of `estimators` is an estimator
        class rather than an instance."""
        for estimator in estimators:
            if isinstance(estimator, type):
                # f-strings here: the message was previously a plain string,
                # so the `{estimator.__name__}` placeholders printed literally.
                raise TypeError(
                    f"Expected an estimator instance ({estimator.__name__}()), got "
                    f"estimator class instead ({estimator.__name__})."
                )
def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels.

    Slice X, y according to indices for cross-validation, but take care of
    precomputed kernel-matrices or pairwise affinities / distances.

    If the estimator is tagged as pairwise, X needs to be square and we slice
    rows and columns. If ``train_indices`` is not None, we slice rows using
    ``indices`` (assumed the test set) and columns using ``train_indices``,
    indicating the training set.

    Labels y are always indexed only along the first axis.

    Parameters
    ----------
    estimator : object
        Estimator to determine whether we should slice only rows or rows and
        columns.

    X : array-like, sparse matrix or iterable
        Data to be indexed. For a pairwise estimator this needs to be a square
        array-like or sparse matrix.

    y : array-like, sparse matrix or iterable
        Targets to be indexed.

    indices : array of int
        Rows to select from X and y. For a pairwise estimator with
        ``train_indices is None``, ``indices`` also selects the columns.

    train_indices : array of int or None, default=None
        For a pairwise estimator, the columns of X to select (the training
        set) when not None.

    Returns
    -------
    X_subset : array-like, sparse matrix or list
        Indexed data.

    y_subset : array-like, sparse matrix or list
        Indexed targets.
    """
    pairwise = get_tags(estimator).input_tags.pairwise
    if not pairwise:
        X_subset = _safe_indexing(X, indices)
    else:
        if not hasattr(X, "shape"):
            raise ValueError(
                "Precomputed kernels or affinity matrices have "
                "to be passed as arrays or sparse matrices."
            )
        # X is a precomputed square kernel matrix
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square kernel matrix")
        col_indices = indices if train_indices is None else train_indices
        X_subset = X[np.ix_(indices, col_indices)]

    y_subset = None if y is None else _safe_indexing(y, indices)

    return X_subset, y_subset
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_param_validation.py | sklearn/utils/_param_validation.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import math
import operator
import re
from abc import ABC, abstractmethod
from collections.abc import Iterable
from inspect import signature
from numbers import Integral, Real
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn._config import config_context, get_config
from sklearn.utils.validation import _is_arraylike_not_scalar
class InvalidParameterError(ValueError, TypeError):
    """Raised when a parameter of a class/method/function has an invalid
    type or value.
    """

    # Deriving from both ValueError and TypeError keeps backward
    # compatibility with the exceptions previously raised by ad-hoc
    # validation code.
def validate_parameter_constraints(parameter_constraints, params, caller_name):
    """Validate types and values of given parameters.

    Parameters
    ----------
    parameter_constraints : dict or {"no_validation"}
        If "no_validation", validation is skipped for this parameter.

        If a dict, it must be a dictionary `param_name: list of constraints`.
        A parameter is valid if it satisfies one of the constraints from the list.
        Constraints can be:
        - an Interval object, representing a continuous or discrete range of numbers
        - the string "array-like"
        - the string "sparse matrix"
        - the string "random_state"
        - callable
        - None, meaning that None is a valid value for the parameter
        - any type, meaning that any instance of this type is valid
        - an Options object, representing a set of elements of a given type
        - a StrOptions object, representing a set of strings
        - the string "boolean"
        - the string "verbose"
        - the string "cv_object"
        - the string "nan"
        - a MissingValues object representing markers for missing values
        - a HasMethods object, representing method(s) an object must have
        - a Hidden object, representing a constraint not meant to be exposed to the user

    params : dict
        A dictionary `param_name: param_value`. The parameters to validate against the
        constraints.

    caller_name : str
        The name of the estimator or function or method that called this function.
    """
    for param_name, param_val in params.items():
        # Parameters without a declared constraint are skipped so that third
        # party estimators can inherit from sklearn estimators without having
        # to necessarily use the validation tools.
        if param_name not in parameter_constraints:
            continue

        raw_constraints = parameter_constraints[param_name]
        if raw_constraints == "no_validation":
            continue

        constraints = [make_constraint(constraint) for constraint in raw_constraints]

        # Stop at the first satisfied constraint; nothing further to check.
        if any(constraint.is_satisfied_by(param_val) for constraint in constraints):
            continue

        # No constraint is satisfied: raise with an informative message built
        # only from constraints we want to expose (hidden ones are internal
        # or not officially supported).
        visible = [constraint for constraint in constraints if not constraint.hidden]
        if len(visible) == 1:
            constraints_str = f"{visible[0]}"
        else:
            constraints_str = (
                f"{', '.join([str(c) for c in visible[:-1]])} or"
                f" {visible[-1]}"
            )

        raise InvalidParameterError(
            f"The {param_name!r} parameter of {caller_name} must be"
            f" {constraints_str}. Got {param_val!r} instead."
        )
def make_constraint(constraint):
    """Convert the constraint into the appropriate Constraint object.

    Parameters
    ----------
    constraint : object
        The constraint to convert.

    Returns
    -------
    constraint : instance of _Constraint
        The converted constraint.
    """
    # String shorthands that map one-to-one onto a dedicated constraint class.
    string_constraints = {
        "array-like": _ArrayLikes,
        "sparse matrix": _SparseMatrices,
        "random_state": _RandomStates,
        "boolean": _Booleans,
        "verbose": _VerboseHelper,
        "cv_object": _CVObjects,
    }
    if isinstance(constraint, str):
        if constraint in string_constraints:
            return string_constraints[constraint]()
        if constraint == "nan":
            return _NanConstraint()
    if isinstance(constraint, float) and np.isnan(constraint):
        return _NanConstraint()
    # Identity check on the builtin: "callable" used as a constraint marker.
    if constraint is callable:
        return _Callables()
    if constraint is None:
        return _NoneConstraint()
    if isinstance(constraint, type):
        return _InstancesOf(constraint)
    if isinstance(
        constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)
    ):
        # Already a constraint object; pass it through unchanged.
        return constraint
    if isinstance(constraint, Hidden):
        # Unwrap and mark the inner constraint so it is excluded from
        # user-facing error messages.
        inner = make_constraint(constraint.constraint)
        inner.hidden = True
        return inner
    raise ValueError(f"Unknown constraint type: {constraint}")
def validate_params(parameter_constraints, *, prefer_skip_nested_validation):
    """Decorator to validate types and values of functions and methods.

    Parameters
    ----------
    parameter_constraints : dict
        A dictionary `param_name: list of constraints`. See the docstring of
        `validate_parameter_constraints` for a description of the accepted constraints.

        Note that the *args and **kwargs parameters are not validated and must not be
        present in the parameter_constraints dictionary.

    prefer_skip_nested_validation : bool
        If True, the validation of parameters of inner estimators or functions
        called by the decorated function will be skipped.

        This is useful to avoid validating many times the parameters passed by the
        user from the public facing API. It's also useful to avoid validating
        parameters that we pass internally to inner functions that are guaranteed to
        be valid by the test suite.

        It should be set to True for most functions, except for those that receive
        non-validated objects as parameters or that are just wrappers around classes
        because they only perform a partial validation.

    Returns
    -------
    decorated_function : function or method
        The decorated function.
    """

    def decorator(func):
        # The dict of parameter constraints is set as an attribute of the function
        # to make it possible to dynamically introspect the constraints for
        # automatic testing.
        setattr(func, "_skl_parameter_constraints", parameter_constraints)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Honor the global `skip_parameter_validation` config flag: when it
            # is set, bypass all validation work entirely.
            global_skip_validation = get_config()["skip_parameter_validation"]
            if global_skip_validation:
                return func(*args, **kwargs)

            func_sig = signature(func)

            # Map *args/**kwargs to the function signature
            params = func_sig.bind(*args, **kwargs)
            params.apply_defaults()

            # ignore self/cls and positional/keyword markers
            to_ignore = [
                p.name
                for p in func_sig.parameters.values()
                if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
            ]
            to_ignore += ["self", "cls"]
            params = {k: v for k, v in params.arguments.items() if k not in to_ignore}

            validate_parameter_constraints(
                parameter_constraints, params, caller_name=func.__qualname__
            )

            try:
                # Nested calls made by `func` see `skip_parameter_validation`
                # per `prefer_skip_nested_validation`, avoiding re-validation
                # of values already checked here.
                with config_context(
                    skip_parameter_validation=(
                        prefer_skip_nested_validation or global_skip_validation
                    )
                ):
                    return func(*args, **kwargs)
            except InvalidParameterError as e:
                # When the function is just a wrapper around an estimator, we allow
                # the function to delegate validation to the estimator, but we replace
                # the name of the estimator by the name of the function in the error
                # message to avoid confusion.
                msg = re.sub(
                    r"parameter of \w+ must be",
                    f"parameter of {func.__qualname__} must be",
                    str(e),
                )
                raise InvalidParameterError(msg) from e

        return wrapper

    return decorator
class RealNotInt(Real):
    """A type that represents reals that are not instances of int.

    Behaves like float, but also works with values extracted from numpy arrays.

    isinstance(1, RealNotInt) -> False
    isinstance(1.0, RealNotInt) -> True
    """


# Register float as a virtual subclass so plain Python floats satisfy
# isinstance checks against RealNotInt without inheriting from it.
RealNotInt.register(float)
def _type_name(t):
"""Convert type into human readable string."""
module = t.__module__
qualname = t.__qualname__
if module == "builtins":
return qualname
elif t == Real:
return "float"
elif t == Integral:
return "int"
return f"{module}.{qualname}"
class _Constraint(ABC):
"""Base class for the constraint objects."""
def __init__(self):
self.hidden = False
@abstractmethod
def is_satisfied_by(self, val):
"""Whether or not a value satisfies the constraint.
Parameters
----------
val : object
The value to check.
Returns
-------
is_satisfied : bool
Whether or not the constraint is satisfied by this value.
"""
@abstractmethod
def __str__(self):
"""A human readable representational string of the constraint."""
class _InstancesOf(_Constraint):
    """Constraint representing instances of a given type.

    Parameters
    ----------
    type : type
        The valid type.
    """

    def __init__(self, type):
        super().__init__()
        # Deliberately shadows the builtin `type` to mirror the parameter name.
        self.type = type

    def is_satisfied_by(self, val):
        return isinstance(val, self.type)

    def __str__(self):
        return f"an instance of {_type_name(self.type)!r}"
class _NoneConstraint(_Constraint):
    """Constraint representing the None singleton."""

    def is_satisfied_by(self, val):
        # Identity check: only the None singleton itself satisfies this.
        return val is None

    def __str__(self):
        return "None"
class _NanConstraint(_Constraint):
    """Constraint representing the indicator `np.nan`."""

    def is_satisfied_by(self, val):
        # Integral values can never be NaN, and the Real check guards
        # `math.isnan` against non-numeric input.
        return (
            not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val)
        )

    def __str__(self):
        return "numpy.nan"
class _PandasNAConstraint(_Constraint):
    """Constraint representing the indicator `pd.NA`."""

    def is_satisfied_by(self, val):
        try:
            import pandas as pd

            return isinstance(val, type(pd.NA)) and pd.isna(val)
        except ImportError:
            # pandas is optional: without it, nothing can be pd.NA.
            return False

    def __str__(self):
        return "pandas.NA"
class Options(_Constraint):
    """Constraint representing a finite set of instances of a given type.

    Parameters
    ----------
    type : type

    options : set
        The set of valid scalars.

    deprecated : set or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, type, options, *, deprecated=None):
        super().__init__()
        self.type = type
        self.options = options
        self.deprecated = deprecated or set()

        if self.deprecated - self.options:
            raise ValueError("The deprecated options must be a subset of the options.")

    def is_satisfied_by(self, val):
        # Check the type first: membership testing an unhashable value in a
        # set would raise, and a wrong-typed value must simply be rejected.
        if not isinstance(val, self.type):
            return False
        return val in self.options

    def _mark_if_deprecated(self, option):
        """Add a deprecated mark to an option if needed."""
        if option in self.deprecated:
            return f"{option!r} (deprecated)"
        return f"{option!r}"

    def __str__(self):
        marked = [self._mark_if_deprecated(option) for option in self.options]
        return f"a {_type_name(self.type)} among {{{', '.join(marked)}}}"
class StrOptions(Options):
    """Constraint representing a finite set of strings.

    Parameters
    ----------
    options : set of str
        The set of valid strings.

    deprecated : set of str or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, options, *, deprecated=None):
        # Specialization of Options with the element type fixed to str.
        super().__init__(type=str, options=options, deprecated=deprecated)
class Interval(_Constraint):
    """Constraint representing a typed interval.

    Parameters
    ----------
    type : {numbers.Integral, numbers.Real, RealNotInt}
        The set of numbers in which to set the interval.

        If RealNotInt, only reals that don't have the integer type
        are allowed. For example 1.0 is allowed but 1 is not.

    left : float or int or None
        The left bound of the interval. None means left bound is -∞.

    right : float, int or None
        The right bound of the interval. None means right bound is +∞.

    closed : {"left", "right", "both", "neither"}
        Whether the interval is open or closed. Possible choices are:

        - `"left"`: the interval is closed on the left and open on the right.
          It is equivalent to the interval `[ left, right )`.
        - `"right"`: the interval is closed on the right and open on the left.
          It is equivalent to the interval `( left, right ]`.
        - `"both"`: the interval is closed.
          It is equivalent to the interval `[ left, right ]`.
        - `"neither"`: the interval is open.
          It is equivalent to the interval `( left, right )`.

    Notes
    -----
    Setting a bound to `None` and setting the interval closed is valid. For instance,
    strictly speaking, `Interval(Real, 0, None, closed="both")` corresponds to
    `[0, +∞) U {+∞}`.
    """

    def __init__(self, type, left, right, *, closed):
        super().__init__()
        self.type = type
        self.left = left
        self.right = right
        self.closed = closed

        # Fail fast at construction time on inconsistent parameters.
        self._check_params()

    def _check_params(self):
        if self.type not in (Integral, Real, RealNotInt):
            raise ValueError(
                "type must be either numbers.Integral, numbers.Real or RealNotInt."
                f" Got {self.type} instead."
            )

        if self.closed not in ("left", "right", "both", "neither"):
            raise ValueError(
                "closed must be either 'left', 'right', 'both' or 'neither'. "
                f"Got {self.closed} instead."
            )

        if self.type is Integral:
            suffix = "for an interval over the integers."
            if self.left is not None and not isinstance(self.left, Integral):
                raise TypeError(f"Expecting left to be an int {suffix}")
            if self.right is not None and not isinstance(self.right, Integral):
                raise TypeError(f"Expecting right to be an int {suffix}")
            # A closed bound at infinity makes no sense over the integers.
            if self.left is None and self.closed in ("left", "both"):
                raise ValueError(
                    f"left can't be None when closed == {self.closed} {suffix}"
                )
            if self.right is None and self.closed in ("right", "both"):
                raise ValueError(
                    f"right can't be None when closed == {self.closed} {suffix}"
                )
        else:
            if self.left is not None and not isinstance(self.left, Real):
                raise TypeError("Expecting left to be a real number.")
            if self.right is not None and not isinstance(self.right, Real):
                raise TypeError("Expecting right to be a real number.")

        if self.right is not None and self.left is not None and self.right <= self.left:
            raise ValueError(
                f"right can't be less than left. Got left={self.left} and "
                f"right={self.right}"
            )

    def __contains__(self, val):
        # NaN never belongs to an interval; reject it explicitly.
        if not isinstance(val, Integral) and np.isnan(val):
            return False

        # The comparators are chosen so that they return True when `val`
        # violates the corresponding bound (strict vs non-strict depends on
        # whether that side is closed).
        left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
        right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge

        left = -np.inf if self.left is None else self.left
        right = np.inf if self.right is None else self.right

        if left_cmp(val, left):
            return False
        if right_cmp(val, right):
            return False
        return True

    def is_satisfied_by(self, val):
        if not isinstance(val, self.type):
            return False

        return val in self

    def __str__(self):
        type_str = "an int" if self.type is Integral else "a float"
        left_bracket = "[" if self.closed in ("left", "both") else "("
        left_bound = "-inf" if self.left is None else self.left
        right_bound = "inf" if self.right is None else self.right
        right_bracket = "]" if self.closed in ("right", "both") else ")"

        # better repr if the bounds were given as integers
        if not self.type == Integral and isinstance(self.left, Real):
            left_bound = float(left_bound)
        if not self.type == Integral and isinstance(self.right, Real):
            right_bound = float(right_bound)

        return (
            f"{type_str} in the range "
            f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
        )
class _ArrayLikes(_Constraint):
    """Constraint representing array-likes"""

    def is_satisfied_by(self, val):
        # Delegates to sklearn's array-like check, which excludes scalars.
        return _is_arraylike_not_scalar(val)

    def __str__(self):
        return "an array-like"
class _SparseMatrices(_Constraint):
    """Constraint representing sparse matrices."""

    def is_satisfied_by(self, val):
        # scipy.sparse.issparse accepts any scipy sparse container.
        return issparse(val)

    def __str__(self):
        return "a sparse matrix"
class _Callables(_Constraint):
    """Constraint representing callables."""

    def is_satisfied_by(self, val):
        # Builtin callable() covers functions, methods and objects defining
        # __call__.
        return callable(val)

    def __str__(self):
        return "a callable"
class _RandomStates(_Constraint):
    """Constraint representing random states.

    Convenience class for
    [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None]
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 0, 2**32 - 1, closed="both"),
            _InstancesOf(np.random.RandomState),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        # Satisfied as soon as any of the sub-constraints accepts the value.
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        return f"{', '.join(str(c) for c in head)} or {tail}"
class _Booleans(_Constraint):
    """Constraint representing boolean likes.

    Convenience class for
    [bool, np.bool_]
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
        ]

    def is_satisfied_by(self, val):
        # Satisfied as soon as any of the sub-constraints accepts the value.
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        return f"{', '.join(str(c) for c in head)} or {tail}"
class _VerboseHelper(_Constraint):
    """Helper constraint for the verbose parameter.

    Convenience class for
    [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_]
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 0, None, closed="left"),
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
        ]

    def is_satisfied_by(self, val):
        # Satisfied as soon as any of the sub-constraints accepts the value.
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        return f"{', '.join(str(c) for c in head)} or {tail}"
class MissingValues(_Constraint):
    """Helper constraint for the `missing_values` parameters.

    Convenience for
    [
        Integral,
        Interval(Real, None, None, closed="both"),
        str,  # when numeric_only is False
        None,  # when numeric_only is False
        _NanConstraint(),
        _PandasNAConstraint(),
    ]

    Parameters
    ----------
    numeric_only : bool, default=False
        Whether to consider only numeric missing value markers.
    """

    def __init__(self, numeric_only=False):
        super().__init__()

        self.numeric_only = numeric_only

        constraints = [
            _InstancesOf(Integral),
            # An interval of Real is used so that np.nan, which has its own
            # dedicated constraint, is not matched here.
            Interval(Real, None, None, closed="both"),
            _NanConstraint(),
            _PandasNAConstraint(),
        ]
        if not self.numeric_only:
            constraints += [_InstancesOf(str), _NoneConstraint()]
        self._constraints = constraints

    def is_satisfied_by(self, val):
        # Satisfied as soon as any of the sub-constraints accepts the value.
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        return f"{', '.join(str(c) for c in head)} or {tail}"
class HasMethods(_Constraint):
    """Constraint representing objects that expose specific methods.

    It is useful for parameters following a protocol and where we don't want to
    impose an affiliation to a specific module or class.

    Parameters
    ----------
    methods : str or list of str
        The method(s) that the object is expected to expose.
    """

    @validate_params(
        {"methods": [str, list]},
        prefer_skip_nested_validation=True,
    )
    def __init__(self, methods):
        super().__init__()
        # Normalize a single method name to a one-element list.
        self.methods = [methods] if isinstance(methods, str) else methods

    def is_satisfied_by(self, val):
        # Every required method must exist and be callable.
        for method in self.methods:
            if not callable(getattr(val, method, None)):
                return False
        return True

    def __str__(self):
        if len(self.methods) == 1:
            return f"an object implementing {self.methods[0]!r}"
        head = ", ".join(repr(method) for method in self.methods[:-1])
        return f"an object implementing {head} and {self.methods[-1]!r}"
class _IterablesNotString(_Constraint):
    """Constraint representing iterables that are not strings."""

    def is_satisfied_by(self, val):
        # str is itself an Iterable (of characters), so exclude it explicitly.
        return isinstance(val, Iterable) and not isinstance(val, str)

    def __str__(self):
        return "an iterable"
class _CVObjects(_Constraint):
    """Constraint representing cv objects.

    Convenient class for
    [
        Interval(Integral, 2, None, closed="left"),
        HasMethods(["split", "get_n_splits"]),
        _IterablesNotString(),
        None,
    ]
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 2, None, closed="left"),
            HasMethods(["split", "get_n_splits"]),
            _IterablesNotString(),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        # Satisfied as soon as any of the sub-constraints accepts the value.
        for constraint in self._constraints:
            if constraint.is_satisfied_by(val):
                return True
        return False

    def __str__(self):
        *head, tail = self._constraints
        return f"{', '.join(str(c) for c in head)} or {tail}"
class Hidden:
    """Class encapsulating a constraint not meant to be exposed to the user.

    Parameters
    ----------
    constraint : str or _Constraint instance
        The constraint to be used internally.
    """

    def __init__(self, constraint):
        # `make_constraint` unwraps this and sets `hidden = True` on the
        # resulting constraint, excluding it from error messages.
        self.constraint = constraint
def generate_invalid_param_val(constraint):
    """Return a value that does not satisfy the constraint.

    Raises a NotImplementedError if there exists no invalid value for this
    constraint.

    This is only useful for testing purpose.

    Parameters
    ----------
    constraint : _Constraint instance
        The constraint to generate a value for.

    Returns
    -------
    val : object
        A value that does not satisfy the constraint.
    """
    if isinstance(constraint, StrOptions):
        return f"not {' or '.join(constraint.options)}"

    if isinstance(constraint, MissingValues):
        return np.array([1, 2, 3])

    if isinstance(constraint, _VerboseHelper):
        return -1

    if isinstance(constraint, HasMethods):
        return type("HasNotMethods", (), {})()

    if isinstance(constraint, _IterablesNotString):
        return "a string"

    if isinstance(constraint, _CVObjects):
        return "not a cv object"

    if isinstance(constraint, Interval):
        if constraint.type is Integral:
            if constraint.left is not None:
                return constraint.left - 1
            if constraint.right is not None:
                return constraint.right + 1
            # There's no integer outside (-inf, +inf)
            raise NotImplementedError
        if constraint.type in (Real, RealNotInt):
            if constraint.left is not None:
                return constraint.left - 1e-6
            if constraint.right is not None:
                return constraint.right + 1e-6
            # Bounds are -inf, +inf: an open side still excludes the matching
            # infinity; a fully closed interval only excludes NaN.
            if constraint.closed in ("right", "neither"):
                return -np.inf
            if constraint.closed in ("left", "neither"):
                return np.inf
            # interval is [-inf, +inf]
            return np.nan

    raise NotImplementedError
def generate_valid_param(constraint):
    """Return a value that does satisfy a constraint.

    This is only useful for testing purpose.

    Parameters
    ----------
    constraint : Constraint instance
        The constraint to generate a value for.

    Returns
    -------
    val : object
        A value that does satisfy the constraint.
    """
    if isinstance(constraint, _ArrayLikes):
        return np.array([1, 2, 3])

    if isinstance(constraint, _SparseMatrices):
        return csr_matrix([[0, 1], [1, 0]])

    if isinstance(constraint, _RandomStates):
        return np.random.RandomState(42)

    if isinstance(constraint, _Callables):
        return lambda x: x

    if isinstance(constraint, _NoneConstraint):
        return None

    if isinstance(constraint, _InstancesOf):
        if constraint.type is np.ndarray:
            # special case for ndarray since it can't be instantiated without
            # arguments
            return np.array([1, 2, 3])
        if constraint.type in (Integral, Real):
            # special case for Integral and Real since they are abstract classes
            return 1
        return constraint.type()

    if isinstance(constraint, _Booleans):
        return True

    if isinstance(constraint, _VerboseHelper):
        return 1

    if isinstance(constraint, MissingValues):
        return np.nan if constraint.numeric_only else "missing"

    if isinstance(constraint, HasMethods):
        methods = {m: lambda self: None for m in constraint.methods}
        return type("ValidHasMethods", (), methods)()

    if isinstance(constraint, _IterablesNotString):
        return [1, 2, 3]

    if isinstance(constraint, _CVObjects):
        return 5

    if isinstance(constraint, Options):  # includes StrOptions
        for option in constraint.options:
            return option

    if isinstance(constraint, Interval):
        if constraint.left is None and constraint.right is None:
            return 0
        if constraint.left is None:
            return constraint.right - 1
        if constraint.right is None:
            return constraint.left + 1
        if constraint.type is Real:
            return (constraint.left + constraint.right) / 2
        return constraint.left + 1

    raise ValueError(f"Unknown constraint type: {constraint}")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/deprecation.py | sklearn/utils/deprecation.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
from inspect import signature
__all__ = ["deprecated"]
class deprecated:
    """Decorator to mark a function or class as deprecated.

    Issue a warning when the function is called/the class is instantiated and
    adds a warning to the docstring.

    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:

    Examples
    --------
    >>> from sklearn.utils import deprecated
    >>> deprecated()
    <sklearn.utils.deprecation.deprecated object at ...>
    >>> @deprecated()
    ... def some_function(): pass

    Parameters
    ----------
    extra : str, default=''
        To be added to the deprecation messages.
    """

    # Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=""):
        self.extra = extra

    def __call__(self, obj):
        """Call method

        Parameters
        ----------
        obj : object
            The class, property or function to decorate.
        """
        if isinstance(obj, type):
            return self._decorate_class(obj)
        elif isinstance(obj, property):
            # Note that this is only triggered properly if the `deprecated`
            # decorator is placed before the `property` decorator, like so:
            #
            # @deprecated(msg)
            # @property
            # def deprecated_attribute_(self):
            #     ...
            return self._decorate_property(obj)
        else:
            return self._decorate_fun(obj)

    def _decorate_class(self, cls):
        # Wrap __new__ so instantiating the class emits a FutureWarning.
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra

        new = cls.__new__
        sig = signature(cls)

        def wrapped(cls, *args, **kwargs):
            warnings.warn(msg, category=FutureWarning)
            if new is object.__new__:
                # object.__new__ rejects extra arguments, so call it bare.
                return object.__new__(cls)
            return new(cls, *args, **kwargs)

        cls.__new__ = wrapped

        wrapped.__name__ = "__new__"
        # Keep a handle on the original __new__ for introspection.
        wrapped.deprecated_original = new
        # Restore the original signature, see PEP 362.
        cls.__signature__ = sig

        return cls

    def _decorate_fun(self, fun):
        """Decorate function fun"""

        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra

        @functools.wraps(fun)
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=FutureWarning)
            return fun(*args, **kwargs)

        # Add a reference to the wrapped function so that we can introspect
        # on function arguments in Python 2 (already works in Python 3)
        wrapped.__wrapped__ = fun

        return wrapped

    def _decorate_property(self, prop):
        # For properties only `extra` is used as the warning message.
        msg = self.extra

        @property
        @functools.wraps(prop.fget)
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=FutureWarning)
            return prop.fget(*args, **kwargs)

        return wrapped
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_show_versions.py | sklearn/utils/_show_versions.py | """
Utility methods to print system info for debugging
adapted from :func:`pandas.show_versions`
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import sys
from threadpoolctl import threadpool_info
from sklearn import __version__
from sklearn.utils._openmp_helpers import _openmp_parallelism_enabled
def _get_sys_info():
"""System information
Returns
-------
sys_info : dict
system and Python version information
"""
python = sys.version.replace("\n", " ")
blob = [
("python", python),
("executable", sys.executable),
("machine", platform.platform()),
]
return dict(blob)
def _get_deps_info():
    """Overview of the installed version of main dependencies

    This function does not import the modules to collect the version numbers
    but instead relies on standard Python package metadata.

    Returns
    -------
    deps_info: dict
        version information on relevant Python libraries
    """
    from importlib.metadata import PackageNotFoundError, version

    deps_info = {"sklearn": __version__}

    for modname in (
        "pip",
        "setuptools",
        "numpy",
        "scipy",
        "Cython",
        "pandas",
        "matplotlib",
        "joblib",
        "threadpoolctl",
    ):
        try:
            deps_info[modname] = version(modname)
        except PackageNotFoundError:
            # Dependency not installed: report it explicitly as None.
            deps_info[modname] = None
    return deps_info
def show_versions():
    """Print useful debugging information.

    .. versionadded:: 0.20

    Examples
    --------
    >>> from sklearn import show_versions
    >>> show_versions()  # doctest: +SKIP
    """
    print("\nSystem:")
    for key, value in _get_sys_info().items():
        print("{k:>10}: {stat}".format(k=key, stat=value))

    print("\nPython dependencies:")
    for key, value in _get_deps_info().items():
        print("{k:>13}: {stat}".format(k=key, stat=value))

    print(
        "\n{k}: {stat}".format(
            k="Built with OpenMP", stat=_openmp_parallelism_enabled()
        )
    )

    # show threadpoolctl results
    threadpool_results = threadpool_info()
    if not threadpool_results:
        return
    print()
    print("threadpoolctl info:")
    last = len(threadpool_results) - 1
    for i, result in enumerate(threadpool_results):
        for key, val in result.items():
            print(f"{key:>15}: {val}")
        # Blank line between entries, but not after the last one.
        if i != last:
            print()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_array_api.py | sklearn/utils/_array_api.py | """Tools to support array_api."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import math
import os
import numpy
import scipy
import scipy.sparse as sp
import scipy.special as special
from sklearn._config import get_config
from sklearn.externals import array_api_compat
from sklearn.externals import array_api_extra as xpx
from sklearn.externals.array_api_compat import numpy as np_compat
from sklearn.utils._dataframe import is_df_or_series
from sklearn.utils.fixes import parse_version
# TODO: complete __all__
__all__ = ["xpx"] # we import xpx here just to re-export it, need this to appease ruff
_NUMPY_NAMESPACE_NAMES = {"numpy", "sklearn.externals.array_api_compat.numpy"}
def yield_namespaces(include_numpy_namespaces=True):
    """Yield the names of the supported array namespaces.

    This is meant to be used for testing purposes only.

    Parameters
    ----------
    include_numpy_namespaces : bool, default=True
        If True, also yield numpy namespaces.

    Returns
    -------
    array_namespace : str
        The name of the Array API namespace.
    """
    candidate_namespaces = (
        # Plain NumPy exercises the array_api_compat wrapper when
        # array_api_dispatch is enabled: the arrays used in the tests are
        # regular numpy arrays without any "device" attribute.
        "numpy",
        # Stricter NumPy-based Array API implementation whose Array
        # instances always expose a dummy "device" attribute.
        "array_api_strict",
        "cupy",
        "torch",
    )
    yield from (
        name
        for name in candidate_namespaces
        if include_numpy_namespaces or name not in _NUMPY_NAMESPACE_NAMES
    )
def yield_namespace_device_dtype_combinations(include_numpy_namespaces=True):
    """Yield supported namespace, device, dtype tuples for testing.

    Use this to test that an estimator works with all combinations.

    Use in conjunction with `ids=_get_namespace_device_dtype_ids` to give
    clearer pytest parametrization ID names.

    Parameters
    ----------
    include_numpy_namespaces : bool, default=True
        If True, also yield numpy namespaces.

    Returns
    -------
    array_namespace : str
        The name of the Array API namespace.

    device : str
        The name of the device on which to allocate the arrays. Can be None to
        indicate that the default value should be used.

    dtype_name : str
        The name of the data type to use for arrays. Can be None to indicate
        that the default value should be used.
    """
    for array_namespace in yield_namespaces(
        include_numpy_namespaces=include_numpy_namespaces
    ):
        if array_namespace == "torch":
            # Cross the main torch device types with both float dtypes;
            # "mps" is only combined with float32 here.
            for device, dtype in itertools.product(
                ("cpu", "cuda", "xpu"), ("float64", "float32")
            ):
                yield array_namespace, device, dtype
            yield array_namespace, "mps", "float32"
        elif array_namespace == "array_api_strict":
            try:
                import array_api_strict

                yield array_namespace, array_api_strict.Device("CPU_DEVICE"), "float64"
                yield array_namespace, array_api_strict.Device("device1"), "float32"
            except ImportError:
                # Those combinations will typically be skipped by pytest if
                # array_api_strict is not installed but we still need to see them in
                # the test output.
                yield array_namespace, "CPU_DEVICE", "float64"
                yield array_namespace, "device1", "float32"
        else:
            # Other namespaces: let the library pick default device and dtype.
            yield array_namespace, None, None
def _get_namespace_device_dtype_ids(param):
"""Get pytest parametrization IDs for `yield_namespace_device_dtype_combinations`"""
# Gives clearer IDs for array-api-strict devices, see #31042 for details
try:
import array_api_strict
except ImportError:
# `None` results in the default pytest representation
return None
else:
if param == array_api_strict.Device("CPU_DEVICE"):
return "CPU_DEVICE"
if param == array_api_strict.Device("device1"):
return "device1"
if param == array_api_strict.Device("device2"):
return "device2"
def _check_array_api_dispatch(array_api_dispatch):
"""Checks that array API support is functional.
In particular scipy needs to be recent enough and the environment variable
needs to be set: SCIPY_ARRAY_API=1.
"""
if not array_api_dispatch:
return
scipy_version = parse_version(scipy.__version__)
min_scipy_version = "1.14.0"
if scipy_version < parse_version(min_scipy_version):
raise ImportError(
f"SciPy must be {min_scipy_version} or newer"
" (found {scipy.__version__}) to dispatch array using"
" the array API specification"
)
if os.environ.get("SCIPY_ARRAY_API") != "1":
raise RuntimeError(
"Scikit-learn array API support was enabled but scipy's own support is "
"not enabled. Please set the SCIPY_ARRAY_API=1 environment variable "
"before importing sklearn or scipy. More details at: "
"https://docs.scipy.org/doc/scipy/dev/api-dev/array_api.html"
)
def _single_array_device(array):
    """Hardware device where the array data resides on."""
    # When array API dispatch is disabled, scikit-learn code is expected to
    # use np.asarray so that the resulting NumPy array implicitly uses the
    # CPU. In that case we stay as device neutral as possible and report
    # `device=None`, which is accepted by all libraries, before and after the
    # expected conversion to NumPy via np.asarray. The same neutral answer is
    # used for inputs that do not expose a "device" attribute at all.
    if hasattr(array, "device") and get_config()["array_api_dispatch"]:
        return array.device
    return None
def device(*array_list, remove_none=True, remove_types=(str,)):
    """Hardware device where the array data resides on.

    If the hardware device is not the same for all arrays, an error is raised.

    Parameters
    ----------
    *array_list : arrays
        List of array instances from NumPy or an array API compatible library.

    remove_none : bool, default=True
        Whether to ignore None objects passed in array_list.

    remove_types : tuple or list, default=(str,)
        Types to ignore in array_list.

    Returns
    -------
    out : device
        `device` object (see the "Device Support" section of the array API spec).
    """
    arrays = _remove_non_arrays(
        *array_list, remove_none=remove_none, remove_types=remove_types
    )
    if not arrays:
        return None

    # A Python `set` cannot be used for deduplication here because Array API
    # device objects are not guaranteed to be hashable (in particular CuPy
    # devices are not, at the time of writing), so every device is compared
    # against the first one instead.
    first_device = _single_array_device(arrays[0])
    for other_array in arrays[1:]:
        other_device = _single_array_device(other_array)
        if first_device != other_device:
            raise ValueError(
                f"Input arrays use different devices: {first_device}, {other_device}"
            )
    return first_device
def size(x):
    """Return the total number of elements of x.

    Parameters
    ----------
    x : array
        Array instance from NumPy or an array API compatible library.

    Returns
    -------
    out : int
        Total number of elements.
    """
    # Product of all dimensions; an empty shape (0-d array) yields 1.
    total = 1
    for dim in x.shape:
        total *= dim
    return total
def _is_numpy_namespace(xp):
    """Return True if xp is backed by NumPy."""
    # Covers both plain numpy and its vendored array_api_compat wrapper.
    return any(xp.__name__ == name for name in _NUMPY_NAMESPACE_NAMES)
def _union1d(a, b, xp):
    # Union of the elements of the 1D arrays `a` and `b` under namespace `xp`
    # (array API counterpart of `numpy.union1d`).
    if _is_numpy_namespace(xp):
        # avoid circular import
        from sklearn.utils._unique import cached_unique

        # cached_unique presumably reuses previously computed unique values
        # for these inputs — see sklearn.utils._unique.
        a_unique, b_unique = cached_unique(a, b, xp=xp)
        return xp.asarray(numpy.union1d(a_unique, b_unique))
    assert a.ndim == b.ndim == 1
    # Generic path: deduplicate each operand, concatenate, deduplicate again.
    return xp.unique_values(xp.concat([xp.unique_values(a), xp.unique_values(b)]))
def supported_float_dtypes(xp, device=None):
    """Supported floating point types for the namespace.

    Parameters
    ----------
    xp : module
        Array namespace to inspect.

    device : str or device instance from xp, default=None
        Device to use for dtype selection. If ``None``, then a default device
        is assumed.

    Returns
    -------
    supported_dtypes : tuple
        Tuple of real floating data types supported by the provided array namespace,
        ordered from the highest precision to lowest.

    See Also
    --------
    max_precision_float_dtype : Maximum float dtype for a namespace/device pair.

    Notes
    -----
    `float16` is not officially part of the Array API spec at the
    time of writing but scikit-learn estimators and functions can choose
    to accept it when xp.float16 is defined.

    Additionally, some devices available within a namespace may not support
    all floating-point types that the namespace provides.

    https://data-apis.org/array-api/latest/API_specification/data_types.html
    """
    available_dtypes = xp.__array_namespace_info__().dtypes(
        kind="real floating", device=device
    )
    # Highest precision first, keeping only what this device actually offers.
    supported = [
        available_dtypes[name]
        for name in ("float64", "float32")
        if name in available_dtypes
    ]
    if hasattr(xp, "float16"):
        supported.append(xp.float16)
    return tuple(supported)
def _remove_non_arrays(*arrays, remove_none=True, remove_types=(str,)):
    """Filter arrays to exclude None and/or specific types.

    Sparse arrays are always filtered out.

    Parameters
    ----------
    *arrays : array objects
        Array objects.

    remove_none : bool, default=True
        Whether to ignore None objects passed in arrays.

    remove_types : tuple or list, default=(str,)
        Types to ignore in the arrays.

    Returns
    -------
    filtered_arrays : list
        List of arrays filtered as requested. An empty list is returned if no input
        passes the filters.
    """
    skip_types = tuple(remove_types)

    def _is_kept(candidate):
        # Sparse containers and pandas DataFrame/Series are never kept.
        if remove_none and candidate is None:
            return False
        if isinstance(candidate, skip_types):
            return False
        if sp.issparse(candidate):
            return False
        return not is_df_or_series(candidate)

    return [candidate for candidate in arrays if _is_kept(candidate)]
def get_namespace(*arrays, remove_none=True, remove_types=(str,), xp=None):
    """Get namespace of arrays.

    Introspect `arrays` arguments and return their common Array API compatible
    namespace object, if any.

    Note that sparse arrays are filtered by default.

    See: https://numpy.org/neps/nep-0047-array-api-standard.html

    If `arrays` are regular numpy arrays, `array_api_compat.numpy` is returned instead.

    Namespace support is not enabled by default. To enable it call:

      sklearn.set_config(array_api_dispatch=True)

    or:

      with sklearn.config_context(array_api_dispatch=True):
          # your code here

    Otherwise `array_api_compat.numpy` is
    always returned irrespective of the fact that arrays implement the
    `__array_namespace__` protocol or not.

    Note that if no arrays pass the set filters, ``_NUMPY_API_WRAPPER_INSTANCE, False``
    is returned.

    Parameters
    ----------
    *arrays : array objects
        Array objects.

    remove_none : bool, default=True
        Whether to ignore None objects passed in arrays.

    remove_types : tuple or list, default=(str,)
        Types to ignore in the arrays.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    namespace : module
        Namespace shared by array objects. If any of the `arrays` are not arrays,
        the namespace defaults to the NumPy namespace.

    is_array_api_compliant : bool
        True if the arrays are containers that implement the array API spec (see
        https://data-apis.org/array-api/latest/index.html).
        Always False when array_api_dispatch=False.
    """
    array_api_dispatch = get_config()["array_api_dispatch"]
    if not array_api_dispatch:
        # Dispatch disabled: report "not array API" and fall back to the
        # NumPy compat namespace unless a namespace was explicitly provided.
        if xp is not None:
            return xp, False
        else:
            return np_compat, False

    if xp is not None:
        # Trust the caller-provided namespace and skip inspection entirely.
        return xp, True

    arrays = _remove_non_arrays(
        *arrays,
        remove_none=remove_none,
        remove_types=remove_types,
    )

    if not arrays:
        return np_compat, False

    _check_array_api_dispatch(array_api_dispatch)

    namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True

    if namespace.__name__ == "array_api_strict" and hasattr(
        namespace, "set_array_api_strict_flags"
    ):
        # Pin array_api_strict to a specific revision of the spec.
        namespace.set_array_api_strict_flags(api_version="2024.12")

    return namespace, is_array_api_compliant
def get_namespace_and_device(
    *array_list, remove_none=True, remove_types=(str,), xp=None
):
    """Combination into one single function of `get_namespace` and `device`.

    Parameters
    ----------
    *array_list : array objects
        Array objects.

    remove_none : bool, default=True
        Whether to ignore None objects passed in arrays.

    remove_types : tuple or list, default=(str,)
        Types to ignore in the arrays.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    namespace : module
        Namespace shared by array objects. If any of the `arrays` are not arrays,
        the namespace defaults to NumPy.

    is_array_api_compliant : bool
        True if the arrays are containers that implement the Array API spec.
        Always False when array_api_dispatch=False.

    device : device
        `device` object (see the "Device Support" section of the array API spec).
    """
    # Filtering is performed once here, so it is disabled in the nested
    # `device` and `get_namespace` calls below.
    no_filtering = dict(remove_none=False, remove_types=[])
    array_list = _remove_non_arrays(
        *array_list,
        remove_none=remove_none,
        remove_types=remove_types,
    )
    arrays_device = device(*array_list, **no_filtering)

    if xp is None:
        xp, is_array_api = get_namespace(*array_list, **no_filtering)
    else:
        # A caller-provided namespace is assumed to be array API compliant.
        is_array_api = True

    return xp, is_array_api, arrays_device
def move_to(*arrays, xp, device):
    """Move all arrays to `xp` and `device`.

    Each array will be moved to the reference namespace and device if
    it is not already using it. Otherwise the array is left unchanged.

    `array` may contain `None` entries, these are left unchanged.

    Sparse arrays are accepted (as pass through) if the reference namespace is
    NumPy, in which case they are returned unchanged. Otherwise a `TypeError`
    is raised.

    Parameters
    ----------
    *arrays : iterable of arrays
        Arrays to (potentially) move.

    xp : namespace
        Array API namespace to move arrays to.

    device : device
        Array API device to move arrays to.

    Returns
    -------
    arrays : tuple or array
        Tuple of arrays with the same namespace and device as reference. Single array
        returned if only one `arrays` input.

    Raises
    ------
    TypeError
        If any input is sparse while the target namespace is not NumPy.
    """
    sparse_mask = [sp.issparse(array) for array in arrays]
    none_mask = [array is None for array in arrays]

    if any(sparse_mask) and not _is_numpy_namespace(xp):
        raise TypeError(
            "Sparse arrays are only accepted (and passed through) when the target "
            "namespace is Numpy"
        )

    converted_arrays = []
    for array, is_sparse, is_none in zip(arrays, sparse_mask, none_mask):
        if is_none:
            converted_arrays.append(None)
        elif is_sparse:
            # Target namespace is NumPy here (checked above): pass through.
            converted_arrays.append(array)
        else:
            xp_array, _, device_array = get_namespace_and_device(array)
            if xp == xp_array and device == device_array:
                # Already on the requested namespace and device: no copy.
                converted_arrays.append(array)
            else:
                try:
                    # The dlpack protocol is the future proof and library agnostic
                    # method to transfer arrays across namespace and device boundaries
                    # hence this method is attempted first and going through NumPy is
                    # only used as fallback in case of failure.
                    # Note: copy=None is the default since array-api 2023.12. Namespace
                    # libraries should only trigger a copy automatically if needed.
                    array_converted = xp.from_dlpack(array, device=device)
                # `AttributeError` occurs when `__dlpack__` and `__dlpack_device__`
                # methods are not present on the input array
                # `TypeError` and `NotImplementedError` for packages that do not
                # yet support dlpack 1.0
                # (i.e. the `device`/`copy` kwargs, e.g., torch <= 2.8.0)
                # See https://github.com/data-apis/array-api/pull/741 for
                # more details about the introduction of the `copy` and `device`
                # kwargs in the from_dlpack method and their expected
                # meaning by namespaces implementing the array API spec.
                # TODO: try removing this once DLPack v1 more widely supported
                # TODO: ValueError should not be needed but is in practice:
                # https://github.com/numpy/numpy/issues/30341
                except (
                    AttributeError,
                    TypeError,
                    NotImplementedError,
                    BufferError,
                    ValueError,
                ):
                    # Converting to numpy is tricky, handle this via dedicated function
                    if _is_numpy_namespace(xp):
                        array_converted = _convert_to_numpy(array, xp_array)
                    # Convert from numpy, all array libraries can do this
                    elif _is_numpy_namespace(xp_array):
                        array_converted = xp.asarray(array, device=device)
                    else:
                        # There is no generic way to convert from namespace A to B
                        # So we first convert from A to numpy and then from numpy to B
                        # The way to avoid this round trip is to lobby for DLpack
                        # support in libraries A and B
                        array_np = _convert_to_numpy(array, xp_array)
                        array_converted = xp.asarray(array_np, device=device)
                converted_arrays.append(array_converted)

    return (
        converted_arrays[0] if len(converted_arrays) == 1 else tuple(converted_arrays)
    )
def _expit(X, xp=None):
    """Logistic sigmoid 1 / (1 + exp(-X)) for NumPy or Array API inputs."""
    xp, _ = get_namespace(X, xp=xp)
    if _is_numpy_namespace(xp):
        # Delegate to the SciPy implementation for NumPy-backed inputs.
        return xp.asarray(special.expit(numpy.asarray(X)))

    # Generic Array API fallback.
    exp_neg = xp.exp(-X)
    return 1.0 / (1.0 + exp_neg)
def _validate_diagonal_args(array, value, xp):
    """Validate arguments to `_fill_diagonal`/`_add_to_diagonal`."""
    if array.ndim != 2:
        raise ValueError(
            f"`array` should be 2D. Got array with shape {tuple(array.shape)}"
        )

    # Materialize `value` with the dtype and device of `array` so it can be
    # written onto (or added to) the diagonal without implicit conversions.
    value = xp.asarray(value, dtype=array.dtype, device=device(array))
    if value.ndim not in (0, 1):
        raise ValueError(
            "`value` needs to be a scalar or a 1D array, "
            f"got a {value.ndim}D array instead."
        )
    diag_len = min(array.shape)
    if value.ndim == 1 and value.shape[0] != diag_len:
        raise ValueError(
            "`value` needs to be a scalar or 1D array of the same length as the "
            f"diagonal of `array` ({diag_len}). Got {value.shape[0]}"
        )
    return value, diag_len
def _fill_diagonal(array, value, xp):
    """Minimal implementation of `numpy.fill_diagonal`.

    `wrap` is not supported (i.e. always False). `value` should be a scalar or
    1D of greater or equal length as the diagonal (i.e., `value` is never repeated
    when shorter).

    Note `array` is altered in place.
    """
    value, diag_len = _validate_diagonal_args(array, value, xp)

    if _is_numpy_namespace(xp):
        xp.fill_diagonal(array, value, wrap=False)
        return

    # TODO: when array libraries support `reshape(copy)`, use
    # `reshape(array, (-1,), copy=False)`, then fill with `[:end:step]` (within
    # `try/except`). This is faster than for loop, when no copy needs to be
    # made within `reshape`. See #31445 for details.
    is_scalar = value.ndim == 0
    for i in range(diag_len):
        array[i, i] = value if is_scalar else value[i]
def _add_to_diagonal(array, value, xp):
    """Add `value` to diagonal of `array`.

    Related to `fill_diagonal`. `value` should be a scalar or
    1D of greater or equal length as the diagonal (i.e., `value` is never repeated
    when shorter).

    Note `array` is altered in place.
    """
    value, diag_len = _validate_diagonal_args(array, value, xp)

    if _is_numpy_namespace(xp):
        # Strided write over the flat view hits exactly the diagonal entries.
        step = array.shape[1] + 1
        # Ensure we do not wrap
        end = array.shape[1] * array.shape[1]
        array.flat[:end:step] += value
        return

    # TODO: when array libraries support `reshape(copy)`, use
    # `reshape(array, (-1,), copy=False)`, then fill with `[:end:step]` (within
    # `try/except`). This is faster than for loop, when no copy needs to be
    # made within `reshape`. See #31445 for details.
    updated_diag = xp.linalg.diagonal(array) + value
    for i in range(diag_len):
        array[i, i] = updated_diag[i]
def _is_xp_namespace(xp, name):
return xp.__name__ in (
name,
f"array_api_compat.{name}",
f"sklearn.externals.array_api_compat.{name}",
)
def _max_precision_float_dtype(xp, device):
    """Return the float dtype with the highest precision supported by the device."""
    # TODO: Update to use `__array_namespace_info__()` from array-api v2023.12
    # when/if that becomes more widespread.
    is_torch_mps = _is_xp_namespace(xp, "torch") and str(device).startswith("mps")
    if is_torch_mps:  # pragma: no cover
        return xp.float32
    return xp.float64
def _find_matching_floating_dtype(*arrays, xp):
"""Find a suitable floating point dtype when computing with arrays.
If any of the arrays are floating point, return the dtype with the highest
precision by following official type promotion rules:
https://data-apis.org/array-api/latest/API_specification/type_promotion.html
If there are no floating point input arrays (all integral inputs for
instance), return the default floating point dtype for the namespace.
"""
dtyped_arrays = [xp.asarray(a) for a in arrays if hasattr(a, "dtype")]
floating_dtypes = [
a.dtype for a in dtyped_arrays if xp.isdtype(a.dtype, "real floating")
]
if floating_dtypes:
# Return the floating dtype with the highest precision:
return xp.result_type(*floating_dtypes)
# If none of the input arrays have a floating point dtype, they must be all
# integer arrays or containers of Python scalars: return the default
# floating point dtype for the namespace (implementation specific).
return xp.asarray(0.0).dtype
def _average(a, axis=None, weights=None, normalize=True, xp=None):
    """Partial port of np.average to support the Array API.

    It does a best effort at mimicking the return dtype rule described at
    https://numpy.org/doc/stable/reference/generated/numpy.average.html but
    only for the common cases needed in scikit-learn.

    Parameters
    ----------
    a : array
        Values to average.

    axis : int, default=None
        Axis along which to reduce; None reduces over all elements.

    weights : array, default=None
        Optional weights, either of the same shape as `a` or 1D of length
        `a.shape[axis]`.

    normalize : bool, default=True
        When True, divide the weighted sum by the sum of the weights
        (weighted mean); when False, return the plain (weighted) sum.

    xp : module, default=None
        Precomputed array namespace.
    """
    xp, _, device_ = get_namespace_and_device(a, weights, xp=xp)

    if _is_numpy_namespace(xp):
        # Delegate to NumPy for the cases it handles directly; the remaining
        # NumPy case (normalize=False with an axis or without weights) falls
        # through to the generic implementation below.
        if normalize:
            return xp.asarray(numpy.average(a, axis=axis, weights=weights))
        elif axis is None and weights is not None:
            return xp.asarray(numpy.dot(a, weights))

    a = xp.asarray(a, device=device_)
    if weights is not None:
        weights = xp.asarray(weights, device=device_)

    if weights is not None and a.shape != weights.shape:
        if axis is None:
            raise TypeError(
                f"Axis must be specified when the shape of a {tuple(a.shape)} and "
                f"weights {tuple(weights.shape)} differ."
            )

        if tuple(weights.shape) != (a.shape[axis],):
            raise ValueError(
                f"Shape of weights weights.shape={tuple(weights.shape)} must be "
                f"consistent with a.shape={tuple(a.shape)} and {axis=}."
            )

        # If weights are 1D, add singleton dimensions for broadcasting
        shape = [1] * a.ndim
        shape[axis] = a.shape[axis]
        weights = xp.reshape(weights, tuple(shape))

    if xp.isdtype(a.dtype, "complex floating"):
        raise NotImplementedError(
            "Complex floating point values are not supported by average."
        )
    if weights is not None and xp.isdtype(weights.dtype, "complex floating"):
        raise NotImplementedError(
            "Complex floating point values are not supported by average."
        )

    # Promote both operands to a common floating dtype before reducing.
    output_dtype = _find_matching_floating_dtype(a, weights, xp=xp)
    a = xp.astype(a, output_dtype)

    if weights is None:
        return (xp.mean if normalize else xp.sum)(a, axis=axis)

    weights = xp.astype(weights, output_dtype)

    sum_ = xp.sum(xp.multiply(a, weights), axis=axis)

    if not normalize:
        return sum_

    scale = xp.sum(weights, axis=axis)
    if xp.any(scale == 0.0):
        raise ZeroDivisionError("Weights sum to zero, can't be normalized")

    return sum_ / scale
def _median(x, axis=None, keepdims=False, xp=None):
    """Median of `x` along `axis` (subset of the `numpy.median` signature)."""
    # XXX: `median` is not included in the array API spec, but is implemented
    # in most array libraries, and all that we support (as of May 2025).
    # TODO: consider simplifying this code to use scipy instead once the oldest
    # supported SciPy version provides `scipy.stats.quantile` with native array API
    # support (likely scipy 1.16 at the time of writing). Proper benchmarking of
    # either option with popular array namespaces is required to evaluate the
    # impact of this choice.
    xp, _, device = get_namespace_and_device(x, xp=xp)

    # `torch.median` takes the lower of the two medians when `x` has even number
    # of elements, thus we use `torch.quantile(q=0.5)`, which gives mean of the two
    if array_api_compat.is_torch_namespace(xp):
        return xp.quantile(x, q=0.5, dim=axis, keepdim=keepdims)

    if hasattr(xp, "median"):
        return xp.median(x, axis=axis, keepdims=keepdims)

    # Intended mostly for array-api-strict (which as no "median", as per the spec)
    # as `_convert_to_numpy` does not necessarily work for all array types.
    x_np = _convert_to_numpy(x, xp=xp)
    return xp.asarray(numpy.median(x_np, axis=axis, keepdims=keepdims), device=device)
def _xlogy(x, y, xp=None):
    """Compute x * log(y) with the convention that the result is 0 where x == 0."""
    # TODO: Remove this once https://github.com/scipy/scipy/issues/21736 is fixed
    xp, _, device_ = get_namespace_and_device(x, y, xp=xp)

    # log(y) may emit divide/invalid warnings for y <= 0; those positions are
    # masked out below whenever x == 0.
    with numpy.errstate(divide="ignore", invalid="ignore"):
        product = x * xp.log(y)
    zero = xp.asarray(0.0, dtype=product.dtype, device=device_)
    return xp.where(x == 0.0, zero, product)
def _nanmin(X, axis=None, xp=None):
    """NaN-ignoring minimum (array API counterpart of np.nanmin)."""
    # TODO: refactor once nan-aware reductions are standardized:
    # https://github.com/data-apis/array-api/issues/621
    xp, _, device_ = get_namespace_and_device(X, xp=xp)
    if _is_numpy_namespace(xp):
        return xp.asarray(numpy.nanmin(X, axis=axis))

    # Mask NaNs with +inf so they never win the minimum.
    nan_mask = xp.isnan(X)
    pos_inf = xp.asarray(+xp.inf, dtype=X.dtype, device=device_)
    reduced = xp.min(xp.where(nan_mask, pos_inf, X), axis=axis)
    # Slices that contained only NaNs now hold +inf; restore NaN there.
    all_nan = xp.all(nan_mask, axis=axis)
    if xp.any(all_nan):
        nan_value = xp.asarray(xp.nan, dtype=reduced.dtype, device=device_)
        reduced = xp.where(all_nan, nan_value, reduced)
    return reduced
def _nanmax(X, axis=None, xp=None):
    """NaN-ignoring maximum (array API counterpart of np.nanmax)."""
    # TODO: refactor once nan-aware reductions are standardized:
    # https://github.com/data-apis/array-api/issues/621
    xp, _, device_ = get_namespace_and_device(X, xp=xp)
    if _is_numpy_namespace(xp):
        return xp.asarray(numpy.nanmax(X, axis=axis))

    # Mask NaNs with -inf so they never win the maximum.
    nan_mask = xp.isnan(X)
    neg_inf = xp.asarray(-xp.inf, dtype=X.dtype, device=device_)
    reduced = xp.max(xp.where(nan_mask, neg_inf, X), axis=axis)
    # Slices that contained only NaNs now hold -inf; restore NaN there.
    all_nan = xp.all(nan_mask, axis=axis)
    if xp.any(all_nan):
        nan_value = xp.asarray(xp.nan, dtype=reduced.dtype, device=device_)
        reduced = xp.where(all_nan, nan_value, reduced)
    return reduced
def _nanmean(X, axis=None, xp=None):
    """NaN-ignoring mean (array API counterpart of np.nanmean)."""
    # TODO: refactor once nan-aware reductions are standardized:
    # https://github.com/data-apis/array-api/issues/621
    xp, _, device_ = get_namespace_and_device(X, xp=xp)
    if _is_numpy_namespace(xp):
        return xp.asarray(numpy.nanmean(X, axis=axis))

    nan_mask = xp.isnan(X)
    zero = xp.asarray(0.0, dtype=X.dtype, device=device_)
    # Sum with NaNs replaced by zero, divided by the count of non-NaN entries.
    total = xp.sum(xp.where(nan_mask, zero, X), axis=axis)
    count = xp.sum(xp.astype(xp.logical_not(nan_mask), X.dtype), axis=axis)
    return total / count
def _nansum(X, axis=None, xp=None, keepdims=False, dtype=None):
    """Sum treating NaNs as zero (array API counterpart of np.nansum)."""
    # TODO: refactor once nan-aware reductions are standardized:
    # https://github.com/data-apis/array-api/issues/621
    xp, _, X_device = get_namespace_and_device(X, xp=xp)
    if _is_numpy_namespace(xp):
        return xp.asarray(numpy.nansum(X, axis=axis, keepdims=keepdims, dtype=dtype))

    zero = xp.asarray(0, device=X_device, dtype=X.dtype)
    cleaned = xp.where(xp.isnan(X), zero, X)
    return xp.sum(cleaned, axis=axis, keepdims=keepdims, dtype=dtype)
def _asarray_with_order(
    array, dtype=None, order=None, copy=None, *, xp=None, device=None
):
    """Helper to support the order kwarg only for NumPy-backed arrays

    Memory layout parameter `order` is not exposed in the Array API standard,
    however some input validation code in scikit-learn needs to work both
    for classes and functions that will leverage Array API only operations
    and for code that inherently relies on NumPy backed data containers with
    specific memory layout constraints (e.g. our own Cython code). The
    purpose of this helper is to make it possible to share code for data
    container validation without memory copies for both downstream use cases:
    the `order` parameter is only enforced if the input array implementation
    is NumPy based, otherwise `order` is just silently ignored.
    """
    xp, _ = get_namespace(array, xp=xp)
    if not _is_numpy_namespace(xp):
        # `order` is silently ignored for non-NumPy namespaces.
        return xp.asarray(array, dtype=dtype, copy=copy, device=device)

    # Use the NumPy API to honor `order`; `numpy.array` forces a copy while
    # `numpy.asarray` copies only when needed.
    convert = numpy.array if copy is True else numpy.asarray
    array = convert(array, order=order, dtype=dtype)
    # At this point array is a NumPy ndarray. We convert it to an array
    # container that is consistent with the input's namespace.
    return xp.asarray(array)
def _ravel(array, xp=None):
    """Array API compliant version of np.ravel.

    For non numpy namespaces, it just returns a flattened array, that might
    be or not be a copy.
    """
    xp, _ = get_namespace(array, xp=xp)
    if not _is_numpy_namespace(xp):
        return xp.reshape(array, shape=(-1,))
    # NumPy path: guarantee a C-order flattening, as np.ravel does.
    return xp.asarray(numpy.ravel(numpy.asarray(array), order="C"))
def _convert_to_numpy(array, xp):
    """Convert X into a NumPy ndarray on the CPU."""
    # Library-specific transfer paths come first, generic np.asarray last.
    if _is_xp_namespace(xp, "torch"):
        return array.cpu().numpy()
    if _is_xp_namespace(xp, "cupy"):  # pragma: nocover
        return array.get()
    if _is_xp_namespace(xp, "array_api_strict"):
        on_cpu = xp.asarray(array, device=xp.Device("CPU_DEVICE"))
        return numpy.asarray(on_cpu)
    return numpy.asarray(array)
def _estimator_with_converted_arrays(estimator, converter):
"""Create new estimator which converting all attributes that are arrays.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/optimize.py | sklearn/utils/optimize.py | """
Our own implementation of the Newton algorithm
Unlike the scipy.optimize version, this version of the Newton conjugate
gradient solver uses only one function call to retrieve the
func value, the gradient value and a callable for the Hessian matvec
product. If the function call is very expensive (e.g. for logistic
regression with large design matrix), this approach gives very
significant speedups.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# This is a modified file from scipy.optimize
# Original authors: Travis Oliphant, Eric Jones
import warnings
import numpy as np
import scipy
from scipy.optimize._linesearch import line_search_wolfe1, line_search_wolfe2
from sklearn.exceptions import ConvergenceWarning
class _LineSearchError(RuntimeError):
    """Raised when the line search fails to find a suitable step size."""

    pass
def _line_search_wolfe12(
    f, fprime, xk, pk, gfk, old_fval, old_old_fval, verbose=0, **kwargs
):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
    suitable step length is not found, and raise an exception if a
    suitable step length is not found.

    Parameters
    ----------
    f, fprime : callable
        Objective function and its gradient.
    xk, pk : ndarray
        Current iterate and search direction.
    gfk : ndarray
        Gradient at `xk`.
    old_fval, old_old_fval : float
        Objective values at the current and at the previous iterate.
    verbose : int, default=0
        Diagnostics are printed when >= 2.
    **kwargs : dict
        Forwarded to the scipy line search routines (may contain `args`).

    Returns
    -------
    ret : tuple
        Line search result: step size, number of function evaluations,
        number of gradient evaluations, new and old objective values and
        the new gradient.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found.
    """
    is_verbose = verbose >= 2
    eps = 16 * np.finfo(np.asarray(old_fval).dtype).eps
    if is_verbose:
        print(" Line Search")
        print(f" eps=16 * finfo.eps={eps}")
        print(" try line search wolfe1")

    ret = line_search_wolfe1(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)

    if is_verbose:
        _not_ = "not " if ret[0] is None else ""
        print(" wolfe1 line search was " + _not_ + "successful")

    if ret[0] is None:
        # Have a look at the line_search method of our NewtonSolver class. We borrow
        # the logic from there
        # Deal with relative loss differences around machine precision.
        args = kwargs.get("args", tuple())
        fval = f(xk + pk, *args)
        tiny_loss = np.abs(old_fval * eps)
        loss_improvement = fval - old_fval
        check = np.abs(loss_improvement) <= tiny_loss
        if is_verbose:
            print(
                " check loss |improvement| <= eps * |loss_old|:"
                f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
            )
        if check:
            # 2.1 Check sum of absolute gradients as alternative condition.
            sum_abs_grad_old = scipy.linalg.norm(gfk, ord=1)
            grad = fprime(xk + pk, *args)
            sum_abs_grad = scipy.linalg.norm(grad, ord=1)
            check = sum_abs_grad < sum_abs_grad_old
            if is_verbose:
                print(
                    " check sum(|gradient|) < sum(|gradient_old|): "
                    f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                )
            if check:
                # Accept a unit step: the loss stagnated at machine precision
                # while the gradient l1-norm decreased.
                ret = (
                    1.0,  # step size
                    ret[1] + 1,  # number of function evaluations
                    ret[2] + 1,  # number of gradient evaluations
                    fval,
                    old_fval,
                    grad,
                )

    if ret[0] is None:
        # line search failed: try different one.
        # TODO: It seems that the new check for the sum of absolute gradients above
        # catches all cases that, earlier, ended up here. In fact, our tests never
        # trigger this "if branch" here and we can consider to remove it.
        if is_verbose:
            print(" last resort: try line search wolfe2")
        ret = line_search_wolfe2(
            f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs
        )
        if is_verbose:
            _not_ = "not " if ret[0] is None else ""
            print(" wolfe2 line search was " + _not_ + "successful")

    if ret[0] is None:
        raise _LineSearchError()

    return ret
def _cg(fhess_p, fgrad, maxiter, tol, verbose=0):
    """
    Solve iteratively the linear system 'fhess_p . xsupi = fgrad'
    with a conjugate gradient descent.

    Parameters
    ----------
    fhess_p : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient.

    fgrad : ndarray of shape (n_features,) or (n_features + 1,)
        Gradient vector.

    maxiter : int
        Number of CG iterations.

    tol : float
        Stopping criterion.

    verbose : int, default=0
        Verbosity level. Per-iteration diagnostics are printed when >= 2.

    Returns
    -------
    xsupi : ndarray of shape (n_features,) or (n_features + 1,)
        Estimated solution.
    """
    eps = 16 * np.finfo(np.float64).eps
    xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
    ri = np.copy(fgrad)  # residual = fgrad - fhess_p @ xsupi
    psupi = -ri
    i = 0
    dri0 = np.dot(ri, ri)
    # We also keep track of |p_i|^2.
    psupi_norm2 = dri0
    is_verbose = verbose >= 2

    while i <= maxiter:
        if np.sum(np.abs(ri)) <= tol:
            if is_verbose:
                print(
                    f" Inner CG solver iteration {i} stopped with\n"
                    f" sum(|residuals|) <= tol: {np.sum(np.abs(ri))} <= {tol}"
                )
            break

        Ap = fhess_p(psupi)
        # check curvature
        curv = np.dot(psupi, Ap)
        if 0 <= curv <= eps * psupi_norm2:
            # See https://arxiv.org/abs/1803.02924, Algo 1 Capped Conjugate Gradient.
            if is_verbose:
                print(
                    f" Inner CG solver iteration {i} stopped with\n"
                    f" tiny_|p| = eps * ||p||^2, eps = {eps}, "
                    f"squared L2 norm ||p||^2 = {psupi_norm2}\n"
                    f" curvature <= tiny_|p|: {curv} <= {eps * psupi_norm2}"
                )
            break
        elif curv < 0:
            if i > 0:
                if is_verbose:
                    print(
                        f" Inner CG solver iteration {i} stopped with negative "
                        f"curvature, curvature = {curv}"
                    )
                break
            else:
                # fall back to steepest descent direction
                xsupi += dri0 / curv * psupi
                if is_verbose:
                    print(" Inner CG solver iteration 0 fell back to steepest descent")
                break
        alphai = dri0 / curv
        xsupi += alphai * psupi
        ri += alphai * Ap
        dri1 = np.dot(ri, ri)
        betai = dri1 / dri0
        psupi = -ri + betai * psupi
        # We use |p_i|^2 = |r_i|^2 + beta_i^2 |p_{i-1}|^2
        psupi_norm2 = dri1 + betai**2 * psupi_norm2
        i = i + 1
        dri0 = dri1  # update np.dot(ri,ri) for next time.

    if is_verbose and i > maxiter:
        print(
            f" Inner CG solver stopped reaching maxiter={i - 1} with "
            f"sum(|residuals|) = {np.sum(np.abs(ri))}"
        )
    return xsupi
def _newton_cg(
    grad_hess,
    func,
    grad,
    x0,
    args=(),
    tol=1e-4,
    maxiter=100,
    maxinner=200,
    line_search=True,
    warn=True,
    verbose=0,
):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm.

    Parameters
    ----------
    grad_hess : callable
        Should return the gradient and a callable returning the matvec product
        of the Hessian.

    func : callable
        Should return the value of the function.

    grad : callable
        Should return the function value and the gradient. This is used
        by the linesearch functions.

    x0 : array of float
        Initial guess.

    args : tuple, default=()
        Arguments passed to func_grad_hess, func and grad.

    tol : float, default=1e-4
        Stopping criterion. The iteration will stop when
        ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    maxiter : int, default=100
        Number of Newton iterations.

    maxinner : int, default=200
        Number of CG iterations.

    line_search : bool, default=True
        Whether to use a line search or not.

    warn : bool, default=True
        Whether to warn when didn't converge.

    verbose : int, default=0
        Verbosity level. Values > 0 print convergence information per
        iteration; the value is also forwarded to the inner CG solver.

    Returns
    -------
    xk : ndarray of float
        Estimated minimum.

    k : int
        Number of Newton iterations performed.
    """
    x0 = np.asarray(x0).flatten()
    xk = np.copy(x0)
    k = 0

    if line_search:
        old_fval = func(x0, *args)
        old_old_fval = None
    else:
        # The loss is never evaluated in this branch; keep a placeholder so
        # the convergence message below can still be formatted.
        old_fval = 0

    is_verbose = verbose > 0

    # Outer loop: our Newton iteration
    while k < maxiter:
        # Compute a search direction pk by applying the CG method to
        # del2 f(xk) p = - fgrad f(xk) starting from 0.
        fgrad, fhess_p = grad_hess(xk, *args)

        # Convergence test on the infinity norm of the gradient.
        absgrad = np.abs(fgrad)
        max_absgrad = np.max(absgrad)
        check = max_absgrad <= tol
        if is_verbose:
            print(f"Newton-CG iter = {k}")
            print(" Check Convergence")
            print(f" max |gradient| <= tol: {max_absgrad} <= {tol} {check}")
        if check:
            break

        # Forcing sequence for the inexact Newton step: the inner CG
        # tolerance shrinks with the gradient magnitude (eta capped at 0.5).
        maggrad = np.sum(absgrad)
        eta = min([0.5, np.sqrt(maggrad)])
        termcond = eta * maggrad

        # Inner loop: solve the Newton update by conjugate gradient, to
        # avoid inverting the Hessian
        xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond, verbose=verbose)

        alphak = 1.0

        if line_search:
            try:
                alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
                    func,
                    grad,
                    xk,
                    xsupi,
                    fgrad,
                    old_fval,
                    old_old_fval,
                    verbose=verbose,
                    args=args,
                )
            except _LineSearchError:
                warnings.warn("Line Search failed")
                break

        xk += alphak * xsupi  # upcast if necessary
        k += 1

    if warn and k >= maxiter:
        warnings.warn(
            (
                f"newton-cg failed to converge at loss = {old_fval}. Increase the"
                " number of iterations."
            ),
            ConvergenceWarning,
        )
    elif is_verbose:
        print(f" Solver did converge at loss = {old_fval}.")
    return xk, k
def _check_optimize_result(solver, result, max_iter=None, extra_warning_msg=None):
"""Check the OptimizeResult for successful convergence
Parameters
----------
solver : str
Solver name. Currently only `lbfgs` is supported.
result : OptimizeResult
Result of the scipy.optimize.minimize function.
max_iter : int, default=None
Expected maximum number of iterations.
extra_warning_msg : str, default=None
Extra warning message.
Returns
-------
n_iter : int
Number of iterations.
"""
# handle both scipy and scikit-learn solver names
if solver == "lbfgs":
if max_iter is not None:
# In scipy <= 1.0.0, nit may exceed maxiter for lbfgs.
# See https://github.com/scipy/scipy/issues/7854
n_iter_i = min(result.nit, max_iter)
else:
n_iter_i = result.nit
if result.status != 0:
warning_msg = (
f"{solver} failed to converge after {n_iter_i} iteration(s) "
f"(status={result.status}):\n"
f"{result.message}\n"
)
# Append a recommendation to increase iterations only when the
# number of iterations reaches the maximum allowed (max_iter),
# as this suggests the optimization may have been prematurely
# terminated due to the iteration limit.
if max_iter is not None and n_iter_i == max_iter:
warning_msg += (
f"\nIncrease the number of iterations to improve the "
f"convergence (max_iter={max_iter})."
)
warning_msg += (
"\nYou might also want to scale the data as shown in:\n"
" https://scikit-learn.org/stable/modules/"
"preprocessing.html"
)
if extra_warning_msg is not None:
warning_msg += "\n" + extra_warning_msg
warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)
else:
raise NotImplementedError
return n_iter_i
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/_mask.py | sklearn/utils/_mask.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from contextlib import suppress
import numpy as np
from scipy import sparse as sp
from sklearn.utils._missing import is_scalar_nan
from sklearn.utils._param_validation import validate_params
from sklearn.utils.fixes import _object_dtype_isnan
def _get_dense_mask(X, value_to_mask):
with suppress(ImportError, AttributeError):
# We also suppress `AttributeError` because older versions of pandas do
# not have `NA`.
import pandas
if value_to_mask is pandas.NA:
return pandas.isna(X)
if is_scalar_nan(value_to_mask):
if X.dtype.kind == "f":
Xt = np.isnan(X)
elif X.dtype.kind in ("i", "u"):
# can't have NaNs in integer array.
Xt = np.zeros(X.shape, dtype=bool)
else:
# np.isnan does not work on object dtypes.
Xt = _object_dtype_isnan(X)
else:
Xt = X == value_to_mask
return Xt
def _get_mask(X, value_to_mask):
    """Compute the boolean mask X == value_to_mask.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Input data, where ``n_samples`` is the number of samples and
        ``n_features`` is the number of features.

    value_to_mask : {int, float}
        The value which is to be masked in X.

    Returns
    -------
    X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Missing mask.
    """
    if not sp.issparse(X):
        # Dense input: the dense helper does all the work.
        return _get_dense_mask(X, value_to_mask)

    # Sparse input: mask only the stored values and rebuild a boolean sparse
    # matrix that shares the original sparsity structure.
    data_mask = _get_dense_mask(X.data, value_to_mask)
    make_sparse = sp.csr_matrix if X.format == "csr" else sp.csc_matrix
    return make_sparse(
        (data_mask, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool
    )
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "mask": ["array-like"],
    },
    prefer_skip_nested_validation=True,
)
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array-like
        Mask to be used on X.

    Returns
    -------
    mask : ndarray
        Array that is safe to use on X.

    Examples
    --------
    >>> from sklearn.utils import safe_mask
    >>> from scipy.sparse import csr_matrix
    >>> data = csr_matrix([[1], [2], [3], [4], [5]])
    >>> condition = [False, True, True, False, True]
    >>> mask = safe_mask(data, condition)
    >>> data[mask].toarray()
    array([[2],
    [3],
    [5]])
    """
    mask = np.asarray(mask)
    if np.issubdtype(mask.dtype, np.signedinteger):
        # Integer indices work on dense and sparse containers alike.
        return mask

    if hasattr(X, "toarray"):
        # Sparse matrices do not support boolean masks: convert the mask to
        # the corresponding integer indices.
        mask = np.arange(mask.shape[0])[mask]
    return mask
def axis0_safe_slice(X, mask, len_mask):
    """Return a mask which is safer to use on X than safe_mask.

    This mask is safer than safe_mask since it returns an
    empty array, when a sparse matrix is sliced with a boolean mask
    with all False, instead of raising an unhelpful error in older
    versions of SciPy.

    See: https://github.com/scipy/scipy/issues/5361

    Also note that we can avoid doing the dot product by checking if
    the len_mask is not zero in _huber_loss_and_gradient but this
    is not going to be the bottleneck, since the number of outliers
    and non_outliers are typically non-zero and it makes the code
    tougher to follow.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : ndarray
        Mask to be used on X.

    len_mask : int
        The length of the mask.

    Returns
    -------
    mask : ndarray
        Array that is safe to use on X.
    """
    if len_mask == 0:
        # An all-False selection: return an empty (0, n_features) array.
        return np.zeros(shape=(0, X.shape[1]))
    return X[safe_mask(X, mask), :]
def indices_to_mask(indices, mask_length):
    """Convert list of indices to boolean mask.

    Parameters
    ----------
    indices : list-like
        List of integers treated as indices. May be empty, in which case an
        all-False mask is returned.

    mask_length : int
        Length of boolean mask to be generated.
        This parameter must be greater than max(indices).

    Returns
    -------
    mask : 1d boolean nd-array
        Boolean array that is True where indices are present, else False.

    Raises
    ------
    ValueError
        If ``mask_length`` is not greater than the largest index.

    Examples
    --------
    >>> from sklearn.utils._mask import indices_to_mask
    >>> indices = [1, 2 , 3, 4]
    >>> indices_to_mask(indices, 5)
    array([False, True, True, True, True])
    """
    # Guard the reduction: np.max raises an obscure "zero-size array"
    # error on empty input, while an empty index list is a valid request
    # for an all-False mask.
    if np.size(indices) and mask_length <= np.max(indices):
        raise ValueError("mask_length must be greater than max(indices)")

    mask = np.zeros(mask_length, dtype=bool)
    mask[indices] = True

    return mask
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/multiclass.py | sklearn/utils/multiclass.py | """Utilities to handle multiclass/multioutput target in classifiers."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from collections.abc import Sequence
from itertools import chain
import numpy as np
from scipy.sparse import issparse
from sklearn.utils._array_api import get_namespace
from sklearn.utils._unique import attach_unique, cached_unique
from sklearn.utils.fixes import VisibleDeprecationWarning
from sklearn.utils.validation import _assert_all_finite, _num_samples, check_array
def _unique_multiclass(y, xp=None):
    """Return the unique labels of ``y``: a namespace-aware unique for
    array-likes, a plain ``set`` for other sequences."""
    xp, is_array_api_compliant = get_namespace(y, xp=xp)
    if not (hasattr(y, "__array__") or is_array_api_compliant):
        return set(y)
    return cached_unique(xp.asarray(y), xp=xp)
def _unique_indicator(y, xp=None):
    """Return ``arange(n_labels)`` for a label-indicator matrix ``y`` — the
    "labels" of an indicator matrix are simply its column indices."""
    xp, _ = get_namespace(y, xp=xp)
    n_labels = check_array(
        y, input_name="y", accept_sparse=["csr", "csc", "coo"]
    ).shape[1]
    return xp.arange(n_labels)
# Dispatch table mapping a target type string (as produced by
# `type_of_target`) to the helper that extracts its unique labels.
_FN_UNIQUE_LABELS = {
    "binary": _unique_multiclass,
    "multiclass": _unique_multiclass,
    "multilabel-indicator": _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes
        Label values.

    Returns
    -------
    out : ndarray of shape (n_unique_labels,)
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1, 2, 5, 10, 11])
    """
    ys = attach_unique(*ys, return_tuple=True)
    xp, is_array_api_compliant = get_namespace(*ys)
    if len(ys) == 0:
        raise ValueError("No argument has been passed.")
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # Binary targets are a special case of multiclass: coerce the mix.
    if ys_types == {"binary", "multiclass"}:
        ys_types = {"multiclass"}

    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)

    label_type = ys_types.pop()

    # Check consistency for the indicator format
    if (
        label_type == "multilabel-indicator"
        and len(
            set(
                check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
            )
        )
        > 1
    ):
        raise ValueError(
            "Multi-label binary indicator input with different numbers of labels"
        )

    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))

    if is_array_api_compliant:
        # array_api does not allow for mixed dtypes
        unique_ys = xp.concat([_unique_labels(y, xp=xp) for y in ys])
        return xp.unique_values(unique_ys)

    ys_labels = set(
        chain.from_iterable((i for i in _unique_labels(y, xp=xp)) for y in ys)
    )
    # Check that we don't mix string type with number type
    if len(set(isinstance(label, str) for label in ys_labels)) > 1:
        raise ValueError("Mix of label input types (string and number)")

    return xp.asarray(sorted(ys_labels))
def _is_integral_float(y):
    """Return True if ``y`` has a real floating dtype but holds only
    integral values (i.e. survives an int64 round-trip unchanged)."""
    xp, _ = get_namespace(y)
    if not xp.isdtype(y.dtype, "real floating"):
        return False
    roundtrip = xp.astype(xp.astype(y, xp.int64), y.dtype)
    return bool(xp.all(roundtrip == y))
def is_multilabel(y):
    """Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : ndarray of shape (n_samples,)
        Target values.

    Returns
    -------
    out : bool
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    xp, is_array_api_compliant = get_namespace(y)
    if hasattr(y, "__array__") or isinstance(y, Sequence) or is_array_api_compliant:
        # DeprecationWarning will be replaced by ValueError, see NEP 34
        # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
        check_y_kwargs = dict(
            accept_sparse=True,
            allow_nd=True,
            ensure_all_finite=False,
            ensure_2d=False,
            ensure_min_samples=0,
            ensure_min_features=0,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("error", VisibleDeprecationWarning)
            try:
                y = check_array(y, dtype=None, **check_y_kwargs)
            except (VisibleDeprecationWarning, ValueError) as e:
                if str(e).startswith("Complex data not supported"):
                    raise

                # dtype=object should be provided explicitly for ragged arrays,
                # see NEP 34
                y = check_array(y, dtype=object, **check_y_kwargs)

    # A multilabel indicator must be 2-D with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False

    if issparse(y):
        if y.format in ("dok", "lil"):
            y = y.tocsr()
        labels = xp.unique_values(y.data)
        # An all-zeros sparse matrix (empty .data) is a valid indicator;
        # otherwise the stored values must reduce to at most two integral
        # labels, one of which is 0 when there are two.
        return len(y.data) == 0 or (
            (labels.size == 1 or ((labels.size == 2) and (0 in labels)))
            and (y.dtype.kind in "biu" or _is_integral_float(labels))  # bool, int, uint
        )
    else:
        labels = cached_unique(y, xp=xp)
        # At most two distinct values, of boolean/integer or integral-float
        # kind.
        return labels.shape[0] < 3 and (
            xp.isdtype(y.dtype, ("bool", "signed integer", "unsigned integer"))
            or _is_integral_float(labels)
        )
def check_classification_targets(y):
    """Ensure that target y is of a non-regression type.

    Only the following target types (as defined in type_of_target) are allowed:
    'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'

    Parameters
    ----------
    y : array-like
        Target values.
    """
    allowed_types = {
        "binary",
        "multiclass",
        "multiclass-multioutput",
        "multilabel-indicator",
        "multilabel-sequences",
    }
    y_type = type_of_target(y, input_name="y")
    if y_type not in allowed_types:
        raise ValueError(
            f"Unknown label type: {y_type}. Maybe you are trying to fit a "
            "classifier, which expects discrete classes on a "
            "regression target with continuous values."
        )

    if "multiclass" in y_type:
        n_samples = _num_samples(y)
        # Heuristic: with enough samples (> 20), a number of distinct labels
        # exceeding half the sample count hints that a regression target was
        # passed by mistake.
        if n_samples > 20 and cached_unique(y).shape[0] > round(0.5 * n_samples):
            warnings.warn(
                "The number of unique classes is greater than 50% of the number "
                "of samples. `y` could represent a regression problem, not a "
                "classification problem.",
                UserWarning,
                stacklevel=2,
            )
def type_of_target(y, input_name="", raise_unknown=False):
    """Determine the type of data indicated by the target.

    Note that this type is the most specific type that can be inferred.
    For example:

        * ``binary`` is more specific but compatible with ``multiclass``.
        * ``multiclass`` of integers is more specific but compatible with
          ``continuous``.
        * ``multilabel-indicator`` is more specific but compatible with
          ``multiclass-multioutput``.

    Parameters
    ----------
    y : {array-like, sparse matrix}
        Target values. If a sparse matrix, `y` is expected to be a
        CSR/CSC matrix.

    input_name : str, default=""
        The data name used to construct the error message.

        .. versionadded:: 1.1.0

    raise_unknown : bool, default=False
        If `True`, raise an error when the type of target returned by
        :func:`~sklearn.utils.multiclass.type_of_target` is `"unknown"`.

        .. versionadded:: 1.6

    Returns
    -------
    target_type : str
        One of:

        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> from sklearn.utils.multiclass import type_of_target
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multilabel-indicator'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    xp, is_array_api_compliant = get_namespace(y)

    def _raise_or_return():
        """Depending on the value of raise_unknown, either raise an error or return
        'unknown'.
        """
        if raise_unknown:
            input = input_name if input_name else "data"
            raise ValueError(f"Unknown label type for {input}: {y!r}")
        else:
            return "unknown"

    # Accept non-string sequences, sparse matrices, anything exposing
    # __array__, and array-API compliant containers.
    valid = (
        (isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__"))
        and not isinstance(y, str)
    ) or is_array_api_compliant

    if not valid:
        raise ValueError(
            "Expected array-like (array or non-string sequence), got %r" % y
        )

    sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
    if sparse_pandas:
        raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")

    if is_multilabel(y):
        return "multilabel-indicator"

    # DeprecationWarning will be replaced by ValueError, see NEP 34
    # https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
    # We therefore catch both deprecation (NumPy < 1.24) warning and
    # value error (NumPy >= 1.24).
    check_y_kwargs = dict(
        accept_sparse=True,
        allow_nd=True,
        ensure_all_finite=False,
        ensure_2d=False,
        ensure_min_samples=0,
        ensure_min_features=0,
    )

    with warnings.catch_warnings():
        warnings.simplefilter("error", VisibleDeprecationWarning)
        if not issparse(y):
            try:
                y = check_array(y, dtype=None, **check_y_kwargs)
            except (VisibleDeprecationWarning, ValueError) as e:
                if str(e).startswith("Complex data not supported"):
                    raise

                # dtype=object should be provided explicitly for ragged arrays,
                # see NEP 34
                y = check_array(y, dtype=object, **check_y_kwargs)

    try:
        first_row_or_val = y[[0], :] if issparse(y) else y[0]

        # labels in bytes format
        if isinstance(first_row_or_val, bytes):
            raise TypeError(
                "Support for labels represented as bytes is not supported. Convert "
                "the labels to a string or integer format."
            )
        # The old sequence of sequences format
        if (
            not hasattr(first_row_or_val, "__array__")
            and isinstance(first_row_or_val, Sequence)
            and not isinstance(first_row_or_val, str)
        ):
            raise ValueError(
                "You appear to be using a legacy multi-label data"
                " representation. Sequence of sequences are no"
                " longer supported; use a binary array or sparse"
                " matrix instead - the MultiLabelBinarizer"
                " transformer can convert to this format."
            )
    except IndexError:
        # Empty target: fall through to the shape checks below.
        pass

    # Invalid inputs
    if y.ndim not in (1, 2):
        # Number of dimension greater than 2: [[[1, 2]]]
        return _raise_or_return()
    if not min(y.shape):
        # Empty ndarray: []/[[]]
        if y.ndim == 1:
            # 1-D empty array: []
            return "binary"  # []
        # 2-D empty array: [[]]
        return _raise_or_return()
    if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):
        # [obj_1] and not ["label_1"]
        return _raise_or_return()

    # Check if multioutput
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]

    # Check float and contains non-integer float values
    if xp.isdtype(y.dtype, "real floating"):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        data = y.data if issparse(y) else y
        integral_data = xp.astype(data, xp.int64)
        # conversion back to the original float dtype of y is required to
        # satisfy array-api-strict which does not allow a comparison between
        # arrays having different dtypes.
        if xp.any(data != xp.astype(integral_data, y.dtype)):
            _assert_all_finite(data, input_name=input_name)
            return "continuous" + suffix

    # Check multiclass
    if issparse(first_row_or_val):
        first_row_or_val = first_row_or_val.data
    if cached_unique(y).shape[0] > 2 or (y.ndim == 2 and len(first_row_or_val) > 1):
        # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
        return "multiclass" + suffix
    else:
        return "binary"  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic.
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, "classes_", None) is None and classes is None:
raise ValueError("classes must be passed on the first call to partial_fit.")
elif classes is not None:
if getattr(clf, "classes_", None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_)
)
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data.

    Parameters
    ----------
    y : {array-like, sparse matrix} of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of ndarray of size (n_classes,)
        List of classes for each column.

    n_classes : list of int of size n_outputs
        Number of classes in each column.

    class_prior : list of size n_outputs of ndarray of size (n_classes,)
        Class distribution of each column.
    """
    classes, n_classes, class_prior = [], [], []

    n_samples, n_outputs = y.shape
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)

    if issparse(y):
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)

        for k in range(n_outputs):
            col = slice(y.indptr[k], y.indptr[k + 1])
            col_nonzero = y.indices[col]
            # Split the total weight between the explicit (stored) entries
            # and the implicit zeros of this column.
            if sample_weight is None:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            else:
                nz_samp_weight = sample_weight[col_nonzero]
                zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight)

            classes_k, y_k = np.unique(y.data[col], return_inverse=True)
            class_prior_k = np.bincount(y_k, weights=nz_samp_weight)

            if 0 in classes_k:
                # Explicit zeros were stored: merge their weight with the
                # weight of the implicit zeros.
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            elif y_nnz[k] < y.shape[0]:
                # Only implicit zeros: add a dedicated entry for class 0.
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum)

            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = np.bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())

    return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = sum_of_confidences / (
3 * (np.abs(sum_of_confidences) + 1)
)
return votes + transformed_confidences
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/utils/estimator_checks.py | sklearn/utils/estimator_checks.py | """Various utilities to check the compatibility of estimators with scikit-learn API."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import pickle
import re
import textwrap
import warnings
from contextlib import nullcontext
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
from numbers import Integral, Real
from typing import Callable, Literal
import joblib
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
from sklearn import config_context
from sklearn.base import (
BaseEstimator,
BiclusterMixin,
ClassifierMixin,
ClassNamePrefixFeaturesOutMixin,
ClusterMixin,
DensityMixin,
MetaEstimatorMixin,
MultiOutputMixin,
OneToOneFeatureMixin,
OutlierMixin,
RegressorMixin,
TransformerMixin,
clone,
is_classifier,
is_outlier_detector,
is_regressor,
)
from sklearn.datasets import (
load_iris,
make_blobs,
make_classification,
make_multilabel_classification,
make_regression,
)
from sklearn.exceptions import (
DataConversionWarning,
EstimatorCheckFailedWarning,
NotFittedError,
SkipTestWarning,
)
from sklearn.linear_model._base import LinearClassifierMixin
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.metrics.pairwise import linear_kernel, pairwise_distances, rbf_kernel
from sklearn.model_selection import LeaveOneGroupOut, ShuffleSplit, train_test_split
from sklearn.model_selection._validation import _safe_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, scale
from sklearn.utils import _safe_indexing, shuffle
from sklearn.utils._array_api import (
_atol_for_type,
_convert_to_numpy,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import device as array_device
from sklearn.utils._missing import is_scalar_nan
from sklearn.utils._param_validation import (
Interval,
InvalidParameterError,
StrOptions,
generate_invalid_param_val,
make_constraint,
validate_params,
)
from sklearn.utils._tags import (
ClassifierTags,
InputTags,
RegressorTags,
TargetTags,
TransformerTags,
get_tags,
)
from sklearn.utils._test_common.instance_generator import (
CROSS_DECOMPOSITION,
_get_check_estimator_ids,
_yield_instances_for_check,
)
from sklearn.utils._testing import (
SkipTest,
_array_api_for_tests,
_get_args,
assert_allclose,
assert_allclose_dense_sparse,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
create_memmap_backed_data,
ignore_warnings,
raises,
set_random_state,
)
from sklearn.utils.validation import _num_samples, check_is_fitted, has_fit_parameter
# Module-level cache slot for a shared regression dataset; starts as None
# (presumably populated lazily by a helper outside this chunk — TODO confirm).
REGRESSION_DATASET = None
def _raise_for_missing_tags(estimator, tag_name, Mixin):
    """Raise a RuntimeError when `estimator` looks like an instance of the
    estimator kind implied by `Mixin` but does not define the matching tag."""
    if getattr(get_tags(estimator), tag_name) is not None:
        return
    estimator_type = Mixin.__name__.replace("Mixin", "")
    raise RuntimeError(
        f"Estimator {estimator.__class__.__name__} seems to be a {estimator_type},"
        f" but the `{tag_name}` tag is not set. Either set the tag manually"
        f" or inherit from the {Mixin.__name__}. Note that the order of inheritance"
        f" matters, the {Mixin.__name__} should come before BaseEstimator."
    )
def _yield_api_checks(estimator):
    """Yield, in order, the API-conformance checks that every estimator
    must pass."""
    if not isinstance(estimator, BaseEstimator):
        warnings.warn(
            f"Estimator {estimator.__class__.__name__} does not inherit from"
            " `sklearn.base.BaseEstimator`. This might lead to unexpected behavior, or"
            " even errors when collecting tests.",
            category=UserWarning,
        )

    tags = get_tags(estimator)
    yield from (
        check_estimator_cloneable,
        check_estimator_tags_renamed,
        check_valid_tag_types,
        check_estimator_repr,
        check_no_attributes_set_in_init,
        check_fit_score_takes_y,
        check_estimators_overwrite_params,
        check_dont_overwrite_parameters,
        check_estimators_fit_returns_self,
        check_readonly_memmap_input,
    )
    if tags.requires_fit:
        yield check_estimators_unfitted
    yield from (
        check_do_not_raise_errors_in_init_or_set_params,
        check_n_features_in_after_fitting,
        check_mixin_order,
        check_positive_only_tag_during_fit,
    )
def _yield_checks(estimator):
    """Yield the generic checks that apply to all estimator kinds."""
    name = estimator.__class__.__name__
    tags = get_tags(estimator)

    yield check_estimators_dtypes

    if has_fit_parameter(estimator, "sample_weight"):
        yield from (
            check_sample_weights_pandas_series,
            check_sample_weights_not_an_array,
            check_sample_weights_list,
        )
        if not tags.input_tags.pairwise:
            # We skip pairwise because the data is not pairwise
            yield check_sample_weights_shape
        yield check_sample_weights_not_overwritten
        yield check_sample_weight_equivalence_on_dense_data
        if tags.input_tags.sparse:
            yield check_sample_weight_equivalence_on_sparse_data

    if not tags.no_validation:
        # Check that all estimator yield informative messages when
        # trained on empty datasets
        yield check_complex_data
        yield check_dtype_object
        yield check_estimators_empty_data_messages

    if name not in CROSS_DECOMPOSITION:
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency

    if not tags.input_tags.allow_nan and not tags.no_validation:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf

    if tags.input_tags.pairwise:
        # Check that pairwise estimator throws error on non-square input
        yield check_nonsquare_error

    if hasattr(estimator, "sparsify"):
        yield check_sparsify_coefficients

    yield from (
        check_estimator_sparse_tag,
        check_estimator_sparse_array,
        check_estimator_sparse_matrix,
    )

    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
    yield partial(check_estimators_pickle, readonly_memmap=True)

    if tags.array_api_support:
        yield from _yield_array_api_checks(estimator)

    yield check_f_contiguous_array_estimator
def _yield_classifier_checks(classifier):
    """Yield all checks specific to classifiers.

    Parameters
    ----------
    classifier : estimator object
        Classifier instance for which to generate checks.
    """
    _raise_for_missing_tags(classifier, "classifier_tags", ClassifierMixin)
    tags = get_tags(classifier)
    # test classifiers can handle non-array data and pandas objects
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_one_label_sample_weights
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    if tags.target_tags.multi_output:
        yield check_classifier_multioutput
    # basic consistency testing
    yield check_classifiers_train
    yield partial(check_classifiers_train, readonly_memmap=True)
    yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32")
    yield check_classifiers_regression_target
    if tags.classifier_tags.multi_label:
        yield check_classifiers_multilabel_representation_invariance
        yield check_classifiers_multilabel_output_format_predict
        yield check_classifiers_multilabel_output_format_predict_proba
        yield check_classifiers_multilabel_output_format_decision_function
    if not tags.no_validation:
        yield check_supervised_y_no_nan
        if tags.target_tags.single_output:
            yield check_supervised_y_2d
    if "class_weight" in classifier.get_params().keys():
        yield check_class_weight_classifiers
    yield check_non_transformer_estimators_n_iter
    # test if predict_proba is a monotonic transformation of decision_function
    yield check_decision_proba_consistency
    # FIX: this check used to be yielded twice (once from a nested ``if`` and
    # once from an equivalent combined condition), causing it to run twice per
    # linear classifier supporting class_weight; yield it exactly once.
    if (
        isinstance(classifier, LinearClassifierMixin)
        and "class_weight" in classifier.get_params().keys()
    ):
        yield check_class_weight_balanced_linear_classifier
    if not tags.classifier_tags.multi_class:
        yield check_classifier_not_supporting_multiclass
def _yield_regressor_checks(regressor):
    """Yield all checks specific to regressors."""
    _raise_for_missing_tags(regressor, "regressor_tags", RegressorMixin)
    regressor_tags = get_tags(regressor)
    # TODO: test with intercept
    # TODO: test with multiple responses
    # Core training behavior, including read-only memmap and float32 inputs.
    yield check_regressors_train
    yield partial(check_regressors_train, readonly_memmap=True)
    yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32")
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    if regressor_tags.target_tags.multi_output:
        yield check_regressor_multioutput
    yield check_regressors_no_decision_function
    if not regressor_tags.no_validation and regressor_tags.target_tags.single_output:
        yield check_supervised_y_2d
        yield check_supervised_y_no_nan
    # CCA cannot handle integer targets, so it is excluded from the int check.
    if regressor.__class__.__name__ != "CCA":
        yield check_regressors_int
    yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(transformer):
    """Yield all checks specific to transformers.

    Parameters
    ----------
    transformer : estimator object
        Transformer instance for which to generate checks.
    """
    _raise_for_missing_tags(transformer, "transformer_tags", TransformerMixin)
    tags = get_tags(transformer)
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if not tags.no_validation:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    yield check_transformer_general
    if tags.transformer_tags.preserves_dtype:
        yield check_transformer_preserve_dtypes
    yield partial(check_transformer_general, readonly_memmap=True)
    # FIX: reuse the tags computed above instead of calling
    # ``get_tags(transformer)`` a second time.
    if tags.requires_fit:
        yield check_transformers_unfitted
    else:
        yield check_transformers_unfitted_stateless
    # Dependent on external solvers and hence accessing the iter
    # param is non-trivial.
    external_solver = [
        "Isomap",
        "KernelPCA",
        "LocallyLinearEmbedding",
        "LogisticRegressionCV",
        "BisectingKMeans",
    ]
    name = transformer.__class__.__name__
    if name not in external_solver:
        yield check_transformer_n_iter
def _yield_clustering_checks(clusterer):
    """Yield all checks specific to clustering estimators."""
    yield check_clusterer_compute_labels_predict
    clusterer_name = clusterer.__class__.__name__
    # WardAgglomeration and FeatureAgglomeration cluster the features rather
    # than the samples, so the generic clustering checks do not apply to them.
    if clusterer_name not in ("WardAgglomeration", "FeatureAgglomeration"):
        yield check_clustering
        yield partial(check_clustering, readonly_memmap=True)
        yield check_estimators_partial_fit_n_features
    if not hasattr(clusterer, "transform"):
        yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(estimator):
    """Yield all checks specific to outlier detectors."""
    # The contamination parameter, when present, has a dedicated check.
    if hasattr(estimator, "contamination"):
        yield check_outlier_contamination
    # Detectors exposing fit_predict get the matching consistency check.
    if hasattr(estimator, "fit_predict"):
        yield check_outliers_fit_predict
    # Detectors usable on a held-out test set (i.e. exposing predict) get the
    # training checks and must also handle non-array data.
    if hasattr(estimator, "predict"):
        yield check_outliers_train
        yield partial(check_outliers_train, readonly_memmap=True)
        yield check_classifier_data_not_an_array
    yield check_non_transformer_estimators_n_iter
def _yield_array_api_checks(estimator):
    """Yield one array API input check per namespace/device/dtype combination."""
    combinations = yield_namespace_device_dtype_combinations()
    for array_namespace, device, dtype_name in combinations:
        yield partial(
            check_array_api_input,
            array_namespace=array_namespace,
            device=device,
            dtype_name=dtype_name,
        )
def _yield_all_checks(estimator, legacy: bool):
    """Yield every check applicable to ``estimator``.

    API checks are always yielded first; the remaining (legacy) checks are
    only produced when ``legacy=True``. Estimators whose tags make them
    untestable trigger a ``SkipTestWarning`` and yield nothing.
    """
    name = estimator.__class__.__name__
    tags = get_tags(estimator)
    if not tags.input_tags.two_d_array:
        warnings.warn(
            f"Can't test estimator {name} which requires input of type "
            f"{tags.input_tags}",
            SkipTestWarning,
        )
        return
    if tags._skip_test:
        warnings.warn(
            f"Explicit SKIP via _skip_test tag for estimator {name}.",
            SkipTestWarning,
        )
        return
    yield from _yield_api_checks(estimator)
    if not legacy:
        return  # pragma: no cover
    yield from _yield_checks(estimator)
    if is_classifier(estimator):
        yield from _yield_classifier_checks(estimator)
    if is_regressor(estimator):
        yield from _yield_regressor_checks(estimator)
    if hasattr(estimator, "transform"):
        yield from _yield_transformer_checks(estimator)
    if isinstance(estimator, ClusterMixin):
        yield from _yield_clustering_checks(estimator)
    if is_outlier_detector(estimator):
        yield from _yield_outliers_checks(estimator)
    yield check_parameters_default_constructible
    # Order/subset invariance only makes sense for deterministic estimators.
    if not tags.non_deterministic:
        yield check_methods_sample_order_invariance
        yield check_methods_subset_invariance
    yield check_fit2d_1sample
    yield check_fit2d_1feature
    yield check_get_params_invariance
    yield check_set_params
    yield check_dict_unchanged
    yield check_fit_idempotent
    yield check_fit_check_is_fitted
    if not tags.no_validation:
        yield check_n_features_in
        yield check_fit1d
        yield check_fit2d_predict1d
        if tags.target_tags.required:
            yield check_requires_y_none
    if tags.input_tags.positive_only:
        yield check_fit_non_negative
def _check_name(check):
if hasattr(check, "__wrapped__"):
return _check_name(check.__wrapped__)
return check.func.__name__ if isinstance(check, partial) else check.__name__
def _maybe_mark(
    estimator,
    check,
    expected_failed_checks: dict[str, str] | None = None,
    mark: Literal["xfail", "skip", None] = None,
    pytest=None,
    xfail_strict: bool | None = None,
):
    """Mark or wrap ``check`` when it is expected to fail.

    Parameters
    ----------
    estimator : estimator object
        Estimator instance for which to generate checks.

    check : partial or callable
        Check to be marked.

    expected_failed_checks : dict[str, str], default=None
        Dictionary of the form {check_name: reason} for checks that are
        expected to fail.

    mark : "xfail" or "skip" or None
        Whether to mark the check as xfail or skip.

    pytest : pytest module, default=None
        Pytest module to use to mark the check. This is only needed if
        ``mark`` is `"xfail"`. Note that one can run `check_estimator` without
        having `pytest` installed. This is used in combination with
        `parametrize_with_checks` only.

    xfail_strict : bool, default=None
        Whether to run checks in xfail strict mode. This option is ignored
        unless `mark="xfail"`. If True, checks that are expected to fail but
        actually pass will lead to a test failure. If False, unexpectedly
        passing tests will be marked as xpass. If None, the default pytest
        behavior is used.

        .. versionadded:: 1.8
    """
    needs_marking, reason = _should_be_skipped_or_marked(
        estimator, check, expected_failed_checks
    )
    if mark is None or not needs_marking:
        # Nothing to mark: hand back the pair untouched.
        return estimator, check

    estimator_name = estimator.__class__.__name__
    if mark == "xfail":
        # With xfail_strict=None we want the value from the pytest config to
        # take precedence, and that means not passing strict to the xfail
        # mark at all.
        xfail_kwargs = {"reason": reason}
        if xfail_strict is not None:
            xfail_kwargs["strict"] = xfail_strict
        xfail_mark = pytest.mark.xfail(**xfail_kwargs)
        return pytest.param(estimator, check, marks=xfail_mark)

    # mark == "skip": replace the check by a wrapper raising SkipTest so that
    # the skip works even without pytest installed.
    @wraps(check)
    def wrapped(*args, **kwargs):
        raise SkipTest(
            f"Skipping {_check_name(check)} for {estimator_name}: {reason}"
        )

    return estimator, wrapped
def _should_be_skipped_or_marked(
    estimator, check, expected_failed_checks: dict[str, str] | None = None
) -> tuple[bool, str]:
    """Tell whether ``check`` is expected to fail for ``estimator``, and why.

    Parameters
    ----------
    estimator : estimator object
        Estimator instance for which to generate checks.

    check : partial or callable
        Check to be marked.

    expected_failed_checks : dict[str, str], default=None
        Dictionary of the form {check_name: reason} for checks that are
        expected to fail.

    Returns
    -------
    should_be_marked : bool
        Whether the check should be marked as xfail or skipped.

    reason : str
        Reason for skipping the check.
    """
    failures = expected_failed_checks or {}
    check_name = _check_name(check)
    if check_name in failures:
        return True, failures[check_name]
    return False, "Check is not expected to fail"
def estimator_checks_generator(
    estimator,
    *,
    legacy: bool = True,
    expected_failed_checks: dict[str, str] | None = None,
    mark: Literal["xfail", "skip", None] = None,
    xfail_strict: bool | None = None,
):
    """Iteratively yield all check callables for an estimator.

    This function is used by
    :func:`~sklearn.utils.estimator_checks.parametrize_with_checks` and
    :func:`~sklearn.utils.estimator_checks.check_estimator` to yield all check
    callables for an estimator. In most cases, these functions should be used
    instead. When implementing a custom equivalent, please refer to their
    source code to understand how `estimator_checks_generator` is intended to
    be used.

    .. versionadded:: 1.6

    Parameters
    ----------
    estimator : estimator object
        Estimator instance for which to generate checks.

    legacy : bool, default=True
        Whether to include legacy checks. Over time we remove checks from this
        category and move them into their specific category.

    expected_failed_checks : dict[str, str], default=None
        Dictionary of the form {check_name: reason} for checks that are
        expected to fail.

    mark : {"xfail", "skip"} or None, default=None
        Whether to mark the checks that are expected to fail as
        xfail(`pytest.mark.xfail`) or skip. Marking a test as "skip" is done
        via wrapping the check in a function that raises a
        :class:`~sklearn.exceptions.SkipTest` exception.

    xfail_strict : bool, default=None
        Whether to run checks in xfail strict mode. This option is ignored
        unless `mark="xfail"`. If True, checks that are expected to fail but
        actually pass will lead to a test failure. If False, unexpectedly
        passing tests will be marked as xpass. If None, the default pytest
        behavior is used.

        .. versionadded:: 1.8

    Returns
    -------
    estimator_checks_generator : generator
        Generator that yields (estimator, check) tuples.
    """
    # pytest is only needed (and imported) when marking checks as xfail.
    if mark == "xfail":
        import pytest
    else:
        pytest = None  # type: ignore[assignment]
    estimator_name = type(estimator).__name__
    # Cloneability is a prerequisite for every other check, so verify it first.
    yield estimator, partial(check_estimator_cloneable, estimator_name)
    for check in _yield_all_checks(estimator, legacy=legacy):
        named_check = partial(check, estimator_name)
        for check_instance in _yield_instances_for_check(check, estimator):
            yield _maybe_mark(
                check_instance,
                named_check,
                expected_failed_checks=expected_failed_checks,
                mark=mark,
                pytest=pytest,
                xfail_strict=xfail_strict,
            )
def parametrize_with_checks(
    estimators,
    *,
    legacy: bool = True,
    expected_failed_checks: Callable | None = None,
    xfail_strict: bool | None = None,
):
    """Pytest specific decorator for parametrizing estimator checks.

    Checks are categorised into the following groups:

    - API checks: a set of checks to ensure API compatibility with
      scikit-learn. Refer to https://scikit-learn.org/dev/developers/develop.html
      a requirement of scikit-learn estimators.
    - legacy: a set of checks which gradually will be grouped into other
      categories.

    The `id` of each check is set to be a pprint version of the estimator
    and the name of the check with its keyword arguments.
    This allows to use `pytest -k` to specify which tests to run::

        pytest test_check_estimators.py -k check_estimators_fit_returns_self

    Parameters
    ----------
    estimators : list of estimators instances
        Estimators to generated checks for.

        .. versionchanged:: 0.24
           Passing a class was deprecated in version 0.23, and support for
           classes was removed in 0.24. Pass an instance instead.

        .. versionadded:: 0.24

    legacy : bool, default=True
        Whether to include legacy checks. Over time we remove checks from this
        category and move them into their specific category.

        .. versionadded:: 1.6

    expected_failed_checks : callable, default=None
        A callable that takes an estimator as input and returns a dictionary
        of the form::

            {
                "check_name": "my reason",
            }

        Where `"check_name"` is the name of the check, and `"my reason"` is
        why the check fails. These tests will be marked as xfail if the check
        fails.

        .. versionadded:: 1.6

    xfail_strict : bool, default=None
        Whether to run checks in xfail strict mode. If True, checks that are
        expected to fail but actually pass will lead to a test failure. If
        False, unexpectedly passing tests will be marked as xpass. If None,
        the default pytest behavior is used.

        .. versionadded:: 1.8

    Returns
    -------
    decorator : `pytest.mark.parametrize`

    See Also
    --------
    check_estimator : Check if estimator adheres to scikit-learn conventions.

    Examples
    --------
    >>> from sklearn.utils.estimator_checks import parametrize_with_checks
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.tree import DecisionTreeRegressor

    >>> @parametrize_with_checks([LogisticRegression(),
    ...                           DecisionTreeRegressor()])
    ... def test_sklearn_compatible_estimator(estimator, check):
    ...     check(estimator)
    """
    import pytest

    # Classes (as opposed to instances) have been rejected since 0.24.
    if any(isinstance(est, type) for est in estimators):
        msg = (
            "Passing a class was deprecated in version 0.23 "
            "and isn't supported anymore from 0.24."
            "Please pass an instance instead."
        )
        raise TypeError(msg)

    def _generate_checks(estimators, legacy, expected_failed_checks):
        # Lazily produce (estimator, check) pairs for every estimator,
        # resolving the per-estimator expected failures when provided.
        for estimator in estimators:
            kwargs = {
                "estimator": estimator,
                "legacy": legacy,
                "mark": "xfail",
                "xfail_strict": xfail_strict,
            }
            if callable(expected_failed_checks):
                kwargs["expected_failed_checks"] = expected_failed_checks(estimator)
            yield from estimator_checks_generator(**kwargs)

    return pytest.mark.parametrize(
        "estimator, check",
        _generate_checks(estimators, legacy, expected_failed_checks),
        ids=_get_check_estimator_ids,
    )
@validate_params(
    {
        "legacy": ["boolean"],
        "expected_failed_checks": [dict, None],
        "on_skip": [StrOptions({"warn"}), None],
        "on_fail": [StrOptions({"raise", "warn"}), None],
        "callback": [callable, None],
    },
    prefer_skip_nested_validation=False,
)
def check_estimator(
    estimator=None,
    *,
    legacy: bool = True,
    expected_failed_checks: dict[str, str] | None = None,
    on_skip: Literal["warn"] | None = "warn",
    on_fail: Literal["raise", "warn"] | None = "raise",
    callback: Callable | None = None,
):
    """Check if estimator adheres to scikit-learn conventions.

    This function will run an extensive test-suite for input validation,
    shapes, etc, making sure that the estimator complies with `scikit-learn`
    conventions as detailed in :ref:`rolling_your_own_estimator`.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    scikit-learn also provides a pytest specific decorator,
    :func:`~sklearn.utils.estimator_checks.parametrize_with_checks`, making it
    easier to test multiple estimators.

    Checks are categorised into the following groups:

    - API checks: a set of checks to ensure API compatibility with
      scikit-learn. Refer to
      https://scikit-learn.org/dev/developers/develop.html a requirement of
      scikit-learn estimators.
    - legacy: a set of checks which gradually will be grouped into other
      categories.

    Parameters
    ----------
    estimator : estimator object
        Estimator instance to check.

    legacy : bool, default=True
        Whether to include legacy checks. Over time we remove checks from this
        category and move them into their specific category.

        .. versionadded:: 1.6

    expected_failed_checks : dict, default=None
        A dictionary of the form::

            {
                "check_name": "this check is expected to fail because ...",
            }

        Where `"check_name"` is the name of the check, and `"my reason"` is
        why the check fails.

        .. versionadded:: 1.6

    on_skip : "warn", None, default="warn"
        This parameter controls what happens when a check is skipped.

        - "warn": A :class:`~sklearn.exceptions.SkipTestWarning` is logged
          and running tests continue.
        - None: No warning is logged and running tests continue.

        .. versionadded:: 1.6

    on_fail : {"raise", "warn"}, None, default="raise"
        This parameter controls what happens when a check fails.

        - "raise": The exception raised by the first failing check is raised
          and running tests are aborted. This does not included tests that are
          expected to fail.
        - "warn": A :class:`~sklearn.exceptions.EstimatorCheckFailedWarning`
          is logged and running tests continue.
        - None: No exception is raised and no warning is logged.

        Note that if ``on_fail != "raise"``, no exception is raised, even if
        the checks fail. You'd need to inspect the return result of
        ``check_estimator`` to check if any checks failed.

        .. versionadded:: 1.6

    callback : callable, or None, default=None
        This callback will be called with the estimator and the check name,
        the exception (if any), the status of the check (xfail, failed,
        skipped, passed), and the reason for the expected failure if the check
        is expected to fail. The callable's signature needs to be::

            def callback(
                estimator,
                check_name: str,
                exception: Exception,
                status: Literal["xfail", "failed", "skipped", "passed"],
                expected_to_fail: bool,
                expected_to_fail_reason: str,
            )

        ``callback`` cannot be provided together with ``on_fail="raise"``.

        .. versionadded:: 1.6

    Returns
    -------
    test_results : list
        List of dictionaries with the results of the failing tests, of the
        form::

            {
                "estimator": estimator,
                "check_name": check_name,
                "exception": exception,
                "status": status (one of "xfail", "failed", "skipped", "passed"),
                "expected_to_fail": expected_to_fail,
                "expected_to_fail_reason": expected_to_fail_reason,
            }

    Raises
    ------
    Exception
        If ``on_fail="raise"``, the exception raised by the first failing
        check is raised and running tests are aborted.

        Note that if ``on_fail != "raise"``, no exception is raised, even if
        the checks fail. You'd need to inspect the return result of
        ``check_estimator`` to check if any checks failed.

    See Also
    --------
    parametrize_with_checks : Pytest specific decorator for parametrizing
        estimator checks.
    estimator_checks_generator : Generator that yields (estimator, check)
        tuples.

    Examples
    --------
    >>> from sklearn.utils.estimator_checks import check_estimator
    >>> from sklearn.linear_model import LogisticRegression
    >>> check_estimator(LogisticRegression())
    [...]
    """
    # Classes (as opposed to instances) have been rejected since 0.24.
    if isinstance(estimator, type):
        msg = (
            "Passing a class was deprecated in version 0.23 "
            "and isn't supported anymore from 0.24."
            "Please pass an instance instead."
        )
        raise TypeError(msg)
    # With on_fail="raise" the first failure aborts the run, so a per-check
    # callback would never see most results; the combination is rejected.
    if on_fail == "raise" and callback is not None:
        raise ValueError("callback cannot be provided together with on_fail='raise'")
    name = type(estimator).__name__
    test_results = []
    # Run every check ourselves and simulate xfail/skip bookkeeping instead of
    # delegating marking to pytest (this function must work without pytest).
    for estimator, check in estimator_checks_generator(
        estimator,
        legacy=legacy,
        expected_failed_checks=expected_failed_checks,
        # Not marking tests to be skipped here, we run and simulate an xfail behavior
        mark=None,
    ):
        test_can_fail, reason = _should_be_skipped_or_marked(
            estimator, check, expected_failed_checks
        )
        try:
            check(estimator)
        except SkipTest as e:
            # We get here if the test raises SkipTest, which is expected in cases where
            # the check cannot run for instance if a required dependency is not
            # installed.
            check_result = {
                "estimator": estimator,
                "check_name": _check_name(check),
                "exception": e,
                "status": "skipped",
                "expected_to_fail": test_can_fail,
                "expected_to_fail_reason": reason,
            }
            if on_skip == "warn":
                warnings.warn(
                    f"Skipping check {_check_name(check)} for {name} because it raised "
                    f"{type(e).__name__}: {e}",
                    SkipTestWarning,
                )
        except Exception as e:
            # An unexpected failure is re-raised immediately under
            # on_fail="raise"; expected failures fall through to be recorded
            # as "xfail" below.
            if on_fail == "raise" and not test_can_fail:
                raise
            check_result = {
                "estimator": estimator,
                "check_name": _check_name(check),
                "exception": e,
                "expected_to_fail": test_can_fail,
                "expected_to_fail_reason": reason,
            }
            if test_can_fail:
                # This check failed, but could be expected to fail, therefore we mark it
                # as xfail.
                check_result["status"] = "xfail"
            else:
                check_result["status"] = "failed"
            if on_fail == "warn":
                warning = EstimatorCheckFailedWarning(**check_result)
                warnings.warn(warning)
        else:
            # No exception: the check passed.
            check_result = {
                "estimator": estimator,
                "check_name": _check_name(check),
                "exception": None,
                "status": "passed",
                "expected_to_fail": test_can_fail,
                "expected_to_fail_reason": reason,
            }
        test_results.append(check_result)
        if callback:
            callback(**check_result)
    return test_results
def _regression_dataset():
    """Return the shared regression dataset, building and caching it lazily."""
    global REGRESSION_DATASET
    if REGRESSION_DATASET is None:
        # Build once and memoize at module level; features are standardized so
        # that scale-sensitive estimators behave comparably across checks.
        features, target = make_regression(
            n_samples=200,
            n_features=10,
            n_informative=1,
            bias=5.0,
            noise=20,
            random_state=42,
        )
        features = StandardScaler().fit_transform(features)
        REGRESSION_DATASET = features, target
    return REGRESSION_DATASET
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.